seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
37116819870 | import csv
f = open(r'G:\ResearchWork\G_extrac\jiyi-nowear.gcode','r')
lines = f.readlines()
#print(lines)
c = open('extract_infor_1.csv', 'w', newline="")
writer=csv.writer(c)
#删除单元间空格
def filterNan(item):
    """Predicate for filter(): keep only non-empty strings.

    NOTE(review): despite the name, this does not filter NaN values; it
    drops the empty strings that str.split(' ') produces for runs of
    consecutive spaces in a G-code line.
    """
    return item != ''
#循环处理每一行
#line为字符串
#lines为总字符串列表
#items为单行字符串列表
#item为单行字符串
#item2为重组的单行字符串列表
# Parse every G-code line: pull the X, Y, Z and F words out into eight
# fixed CSV columns (letter, value pairs) and write one row per line.
for line in lines:
    line = line[0:]  # no-op slice; keeps a reference to the line string
    items = line.split(' ')  # split on spaces -> list of tokens
    items = list(filter(filterNan, items))  # drop empty tokens; what remains: symbols, letter+number words, plain words
    items = items[0:]
    item2 = list(range(8))  # 8 output slots; sentinel value i marks slot i as "never assigned"
    # Extract words whose head is X, Y, Z or F, placed in fixed column order
    for item in items:
        if item[0] == 'X':
            item2[0]=item[0];
            item2[1]=item[1:];  # cannot be written as item = item.append()
        elif item[0] == 'Y':
            item2[2] = item[0];
            item2[3] = item[1:];
        elif item[0] == 'Z':
            item2[4] = item[0];
            item2[5] = item[1:];
        elif item[0] == 'F':
            item2[6] = item[0];
            item2[7] = item[1:];
        elif item[0] == 'G':
            continue
    # Blank out any slot that still holds its integer sentinel (assigned
    # slots hold strings, so '3' == 3 is False and they are kept).
    for i in range(8):
        if item2[i] == i:
            item2[i] = '';
    writer.writerow(item2)
f.close()
c.close() | Sautumn-Huang/using_Python_doing_G_code_extract | G_extrac/G_Info_Extra.py | G_Info_Extra.py | py | 1,477 | python | zh | code | 0 | github-code | 36 |
5460426589 | from functools import partial
from ._derived import Derived
from . import utilities
class Operation(Derived):
    """Derived term that applies an operator ``op`` to its child terms.

    ``op`` may be a single callable, or a tuple of callables which is
    treated as a pipeline: every element but the last is applied first by
    wrapping the terms in nested Operation instances, and the final
    element becomes this instance's operator.
    """
    __slots__ = ('opkwargs', 'opfn')
    def __init__(self,
            *terms,
            op = None,
            **kwargs,
            ):
        # Tuple op => chain of sub-operations applied in order; the last
        # element stays as this Operation's own operator.
        if type(op) is tuple:
            sops, op = op[:-1], op[-1]
            for sop in sops:
                terms = Operation(*terms, op = sop)
            if not type(terms) is tuple:
                terms = terms,
        # Bind the operator with its keyword arguments once, up front.
        self.opfn = partial(op, **kwargs)
        self.opfn.__name__ = op.__name__
        self.opkwargs = kwargs
        super().__init__(*terms, op = op, **kwargs)
    def evaluate(self):
        """Apply the bound operator to the resolved child terms."""
        return self.opfn(*self._resolve_terms())
    def _titlestr(self):
        # Display title is simply the operator's name.
        return self.opfn.__name__
    def _kwargstr(self):
        # String form of the kwargs, excluding the operator entry itself.
        # NOTE(review): self.kwargs is presumably populated by Derived —
        # confirm against the base class.
        kwargs = self.kwargs.copy()
        del kwargs['op']
        if kwargs:
            return utilities.kwargstr(**kwargs)
        else:
            return ''
| lmoresi/funcy | funcy/_operation.py | _operation.py | py | 972 | python | en | code | 0 | github-code | 36 |
42243061570 | import cant_utils as cu
import numpy as np
import matplotlib.pyplot as plt
import glob
import bead_util as bu
import tkinter
import tkinter.filedialog
import os, sys
from scipy.optimize import curve_fit
import bead_util as bu
from scipy.optimize import minimize_scalar as minimize
import pickle as pickle
import time
####################################################
####### Input parameters for data processing #######
TESTING = True
ddict = bu.load_dir_file( "/home/charles/opt_lev_analysis/scripts/dirfiles/dir_file_june2017.txt" )
#print ddict
pow_axis = 4
cant_axis = 1 # stage control axis
straighten_axis = 2 # axis with coherent drive to straighten
fit_pows = True
load_charge_cal = True
maxfiles = 1000
plot_forward_backward = False #True
#subtract_background = True
drivefreq = 18.0
cant_volts_to_um = 8.0 # 80 um / 10 V
#fig_title = ('Force vs. Cantilever Position: %s Hz, %s - %s, ' + bead) % (drivefreq, gas, num)
#dirs = [1,2,3,4,5,6,7]
dirs = [8,9,10,11,12,13,14,15,16]
tf_path = './trans_funcs/Hout_20160808.p'
step_cal_path = './calibrations/step_cal_20160808.p'
thermal_cal_file_path = '/data/20160808/bead1/1_5mbar_zcool_final.h5'
def poly2(x, a, b, c):
    """Shifted parabola a*(x - b)^2 + c, used as the curve_fit model."""
    shifted = x - b
    return a * shifted * shifted + c
def proc_dir(d):
    """Load one data directory (key ``d`` in ddict) and tag its drive amplitude.

    Returns the populated cu.Data_dir object with ``drive_amplitude`` set to
    a 3-vector that is zero everywhere except on ``straighten_axis``, where
    it holds the stage drive amplitude converted to um.
    """
    dv = ddict[d]
    dir_obj = cu.Data_dir(dv[0], [0, 0, dv[-1]], dv[1])
    dir_obj.load_dir(cu.simple_loader, maxfiles=maxfiles)

    # Collect the stage drive amplitude (in um) from every file in the dir.
    amps = []
    for fil_obj in dir_obj.fobjs:
        fil_obj.psd()
        stagestuff = fil_obj.get_stage_settings(axis=straighten_axis)
        amp = stagestuff[2] * cant_volts_to_um
        amps.append(amp)
    uamps = np.unique(amps)
    if len(uamps) > 1:
        # Fix: message typo "dirve" -> "drive".
        print('STUPIDITYERROR: Multiple drive amplitudes in directory')

    # Amplitude vector: non-zero only along the straightening axis.
    newlist = []
    for i in [0, 1, 2]:
        if i == straighten_axis:
            newlist.append(uamps[0])
        else:
            newlist.append(0.0)
    dir_obj.drive_amplitude = newlist
    return dir_obj
dir_objs = list(map(proc_dir, dirs))
colors_yeay = bu.get_color_map( len(dir_objs) )
psds = {}
pows = {}
bpows = {}
for ind, obj in enumerate(dir_objs):
psd = []
col = colors_yeay[ind]
amp = obj.drive_amplitude[straighten_axis]
filcount = 0
for fobj in obj.fobjs:
filcount += 1
fobj.psd()
if not len(psd):
freqs = fobj.other_psd_freqs
psd = fobj.other_psds[pow_axis-3]
else:
psd += fobj.other_psds[pow_axis-3]
psd = psd / float(filcount)
psds[amp] = psd
ind = np.argmin(np.abs(freqs - drivefreq))
totpow = np.sum(psd[ind-1:ind+2])
pows[amp] = totpow
badind = int(ind*1.5)
totbadpow = np.sum(psd[badind-1:badind+2])
bpows[amp] = totbadpow
amps = list(pows.keys())
amps.sort()
powsarr = []
bpowsarr = []
for amp in amps:
powsarr.append(pows[amp])
bpowsarr.append(bpows[amp])
if fit_pows:
p0 = [1, 0, 0]
popt, pcov = curve_fit(poly2, amps, powsarr, p0 = p0, maxfev = 10000)
fitpoints = np.linspace(amps[0], amps[-1], 100)
fit = poly2(fitpoints, *popt)
plt.plot(amps, powsarr, 'o')
plt.plot(fitpoints, fit, color='r', linewidth=1.5)
title = 'Best fit straightening amplitude: %0.2g um' % popt[1]
plt.title(title)
else:
plt.plot(amps, powsarr)
plt.plot(amps, bpowsarr)
plt.show()
| charlesblakemore/opt_lev_analysis | scripts/general_analysis/not_yet_updated/straighten_cantilever_withpower.py | straighten_cantilever_withpower.py | py | 3,333 | python | en | code | 1 | github-code | 36 |
8616084326 | """
HomeWork 14 - task 5
Dmytro Verovkin
robot_dreams
5. (необов'язкове виконання) Створити клас Bot та TelegramBot із першого завдання за допомогою функції type
"""
# Method implementations for the dynamically-built Bot class
# (wired in via the type() call below).
def bot_init_function(self, name):
    """__init__: store the bot's display name."""
    self.name = name
def bot_say_name_function(self):
    """say_name: print the bot's name."""
    print(self.name)
def bot_send_message_function(self, message):
    """send_message: print the given message verbatim."""
    print(message)
# Build the Bot class dynamically with type(name, bases, namespace),
# attaching the free functions above as its methods.
Bot = type(
    'Bot',
    (),
    {
        '__init__': bot_init_function,
        'say_name': bot_say_name_function,
        'send_message': bot_send_message_function,
    }
)
def tg_bot_init_function(self, name, url=None, chat_id=None):
    """__init__ for TelegramBot: delegate name to Bot, add url/chat_id."""
    # Fix: the original used super(type(self), self).__init__(name), which
    # recurses infinitely if TelegramBot is ever subclassed (type(self) is
    # then the subclass). Name the class explicitly instead; zero-argument
    # super() is unavailable here because the function is defined outside
    # a class body and has no __class__ cell.
    super(TelegramBot, self).__init__(name)
    self.url = url
    self.chat_id = chat_id

def tg_bot_send_message_function(self, message):
    """send_message for TelegramBot: report message, chat id and url."""
    print(f"{self.name} bot says {message} to chat {self.chat_id} using {self.url}")

def tg_bot_set_url_function(self, url):
    """Setter for the bot's API url."""
    self.url = url

def tg_bot_set_chat_id_function(self, chat_id):
    """Setter for the target chat id."""
    self.chat_id = chat_id
# TelegramBot subclasses Bot (note the (Bot,) bases tuple), overriding
# __init__ and send_message while adding the url/chat_id setters.
TelegramBot = type(
    'TelegramBot',
    (Bot,),
    {
        '__init__': tg_bot_init_function,
        'send_message': tg_bot_send_message_function,
        'set_url': tg_bot_set_url_function,
        'set_chat_id': tg_bot_set_chat_id_function,
    }
)
some_bot = Bot('Marvin')
some_bot.say_name()
# >> "Marvin"
some_bot.send_message("Hello")
# >> > "Hello"
telegram_bot = TelegramBot("TG")
telegram_bot.say_name()
# >> > "TG"
telegram_bot.send_message('Hello')
# >> > "TG bot says Hello to chat None using None"
telegram_bot.set_chat_id(1)
telegram_bot.send_message('Hello')
# >> > "TG bot says Hello to chat 1 using None"
| verovkin/robot_dreams | 18/task5.py | task5.py | py | 1,656 | python | en | code | 0 | github-code | 36 |
import sys
sys.path.append('/usr/local/lib/python3.7/site-packages')  # make system site-packages importable
import mido
import time
# Open a MIDI output port by name (presumably a virtual device created
# in the host OS — verify the port name on the target machine).
outport = mido.open_output('VirtualDevice Bus 1')
note_sequence = [57, 59, 60, 62, 57, 59, 55, 57]  # MIDI note numbers for the melody
# Play each note: 0.25 s gap, note-on, 0.25 s hold, note-off.
for note in note_sequence:
    time.sleep(0.25)
    outport.send(mido.Message('note_on', note=note, velocity = 100))
    time.sleep(0.25)
    outport.send(mido.Message('note_off', note=note, velocity = 100))
| krispenney/midi | test.py | test.py | py | 404 | python | en | code | 0 | github-code | 36 |
19643341301 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# PyArtForms - Python generative art forms paint algorithms (artificial artist)
# experimental 'smears' paint algorithms, v1.0 - core algorithm definitions
# (c)2017-2021 MoNsTeR/GDC, Noniewicz.com, Noniewicz.art.pl, Jakub Noniewicz
# #01 [...] 'cruel red smears', not only red
# #02 [tested, ok] circle worms
# #03 [tested, ok] crazy trangles --- finish: more par, opt less random?
# #04 [tested, ok] self-crossed filled polygons
# #05 [...] star flowers --- finish: a lot here
# #06 [...] circle ripples - co-centered circle groups --- finish: par + misc
# #07 [tested, ok] random rectangles - grayish and colorish rects mess --- finish: new colorer proper
# #08 [tested, ok] just rectangles, may flux --- finish: new params, more variants in defs
# #09 [tested, ok] 'Warp' effect - triangle rays from center, opt center point shifted rnd
# #10 [...] long beziers
# #11 [tested, ok] horizontal gradients with suprizes
# #12 [tested, ok/so-so] opart-like boxes/circles/triangles
# #13 [tested, ok] opart-like single big poly (like #04?)
# #14 [tested, ok/so-so] opart-like cicrles xor-cut by triangles
# #15 [tested, ok, predictable] opart-like or color circle-interference patterns
# #16 [tested, ok, predictable] opart-like circles
# #17 [tested, ok] scottish-like grid --- finish: postproc satur 75 up + light 10 up? | 0,0 missing in rnd issue
# #18 [tested, ok] slim colorful circles --- finish: more par or var th*, more with self-call
# #19 [tested, ok, predictable] opart-like grid
# #20 [tested, ok, predictable] opart-like / papercut-like / video feedback-like 'dragon' effect
# #21 [tested, ok, predictable] opart-like scaled and pasted frames
# #22 [...] pie slice effects --- finish: reduce total count, mimosrod opt?
# #23 [tested, ok, predictable] Sierpinski's triangle fractal
# #24 [tested, ok, predictable] rotated traingles --- finish: reduce total count, more par ver, mimosrod opt, a scale par
# #25 [tested, ok] waves#1 --- finish: more par
# #26 [tested, ok] waves#2 --- finish: more par, simplify code
# #27 [tested, ok] multishaped polygon mess --- finish: more par
# future fun:
# #28 [...]
# #29 [...]
# #30 [...]
# #31
# #32
# cre: 20180430
# upd: 20180501, 02, 03
# cre: 20180805, 07, 08
# upd: 20180928, 29
# upd: 20181019, 20
# upd: 20190105, 06, 12, 13, 18, 19, 21, 22
# upd: 20190306, 11, 29, 30
# upd: 20190414, 15, 17, 18, 22, 24, 26, 27
# upd: 20200507, 10
# upd: 20210106, 15, 16, 19, 20, 21, 22
# upd: 20210515, 16, 22, 23, 24, 25, 26, 27
# upd: 20210606, 07, 10, 11, 12, 13, 14, 17, 18, 19, 20
# see:
# https://pillow.readthedocs.io/en/stable/
# note: now required at least pillow version 5.3.0, tested on 7.2.0, my prev was 5.0.0
# TODO:
# - ?
from PIL import Image, ImageDraw, ImageChops, ImageOps #, ImageMorph, ImageMath # test
import random, math, string, os, sys, copy
from bezier import make_bezier
from drawtools import *
from color_defs import *
"""
import PIL
print('PIL',PIL.__version__)
"""
# ---
def mazy1(draw, params):
""" ? """
w, h, cnt = init_common(params)
mar = 0
if 'mar' in params:
mar = params['mar']
v = 0
if 'v' in params:
v = params['v']
ts = [t/100.0 for t in range(101)] # par?
sc = float(h) / 3507 # lame par!
wx = int(float(params['penw']) * sc)
if wx <= 0:
wx = 1
for n in range(cnt):
po = [(random.randint(0+mar, w-mar), random.randint(0+mar, h-mar)),
(random.randint(0+mar, w-mar), random.randint(0+mar, h-mar)),
(random.randint(0+mar, w-mar), random.randint(0+mar, h-mar)),
(random.randint(0+mar, w-mar), random.randint(0+mar, h-mar))]
if 'color' in params:
if params['color'] == 'rg':
color = gradient2((255,255,0), (255,0,0), random.randint(0, 255), 255)
else:
color = new_colorer(params['color'], n, cnt)
else:
color = (0,0,0)
if 'addalpha' in params:
color = add_alpha(color, params['addalpha'])
r = color[0]
g = color[1]
b = color[2]
if params['prefill'] == True:
bezier = make_bezier(po)
points = bezier(ts)
draw.polygon(points, fill=color, outline=None)
for m in range(params['m']):
if params['keep'] == True:
po0 = po[0]
po3 = po[3]
vsc = int(v*sc)
po[:] = [(xy[0]+random.randint(0, vsc)-random.randint(0, vsc), xy[1]+random.randint(0, vsc)-random.randint(0, vsc)) for xy in po]
if params['keep'] == True:
po[0] = po0
po[3] = po3
old = False
if params['mode'] == 'red':
color = (r ^ random.randint(0, 48), 0, 0)
old = True
if params['mode'] == 'black':
rr = random.randint(0, 48)
color = (rr, rr, rr)
old = True
if old == False:
color = new_colorer(params['mode'], n, cnt)
if 'addblack' in params: # todo: (re)use
if params['addblack'] == True and random.randint(0, 100) > 80:
color = (0,0,0)
if 'addalpha' in params:
color = add_alpha(color, params['addalpha'])
bezier = make_bezier(po)
points = bezier(ts)
draw.line(points, fill=color, width=wx)
def mazy2(draw, params):
""" circle worms """
w, h, cnt = init_common(params)
cntm = params['m']
if cntm <= 0:
cntm = 1
sc = 50 # dflt
if 'sc' in params:
sc = params['sc']
if sc > 0:
v = int(h/sc)
else:
v = 0
for n in range(cnt):
r1 = random.randint(int(h*0.15), int(h*0.45))
po = [(random.randint(-r1, w+r1), random.randint(-r1, h+r1)),
(random.randint(-r1, w+r1), random.randint(-r1, h+r1))]
r0 = random.randint(int(r1*0.7), int(r1*0.99))
if r0 < cntm:
r0 = cntm
de = 1/cntm
for m in range(cntm):
#v = int((cntm-m)/cntm * h/20) # test4
po[:] = [(xy[0]+random.randint(0, v)-random.randint(0, v), xy[1]+random.randint(0, v)-random.randint(0, v)) for xy in po]
color = new_colorer(params['color'], m, cntm)
if 'addalpha' in params:
if params['addalpha'] > 0:
color = add_alpha(color, params['addalpha'])
circle(draw, po[0][0], po[0][1], int(r0*(1-m*de)), fill=color, outline=None)
def mazy3(draw, params):
""" crazy trangles """
w, h, cnt = init_common(params)
def r(p, d):
return int(p/2+random.randint(int(-p/d), int(p/d)))
d = 0.5 # par, 1.3, 2.2 ? # todo: ext par
da = 0.06 # dflt, how quickly they get smaller in center mode, 0.5 ok too
if 'da' in params:
da = params['da']
for n in range(cnt):
if params['mode'] == 'center':
po = [(r(w, d), r(h, d)), (r(w, d), r(h, d)), (r(w, d), r(h, d))]
d = d + da
if params['mode'] == 'xcenter':
d = 2.2 # par
po = [(int(w/2), int(h/2)), (r(w, d), r(h, d)), (r(w, d), r(h, d))]
if params['mode'] == 'rnd':
d = 2.2 # par
po = [(r(w, d), r(h, d)), (r(w, d), r(h, d)), (r(w, d), r(h, d))]
color = new_colorer(params['color'], n, cnt)
if 'addalpha' in params:
if params['addalpha'] > 0:
color = add_alpha(color, params['addalpha'])
triangle(draw, po, fill=color, outline=None)
def mazy4(draw, params):
""" self-crossed filled polygons """
w, h, cnt = init_common(params)
sc = 2.1 # dflt
if 'sc' in params:
sc = params['sc']
if sc <= 0:
sc = 1
sx = int(w/sc)
sy = int(h/sc)
p_cnt = 20 # dflt
if 'pc' in params:
p_cnt = params['pc']
mode = 'center'
if 'mode' in params:
mode = params['mode']
for n in range(cnt):
if mode == 'center':
w0 = w/2
h0 = h/2
else:
w0 = random.randint(0, w)
h0 = random.randint(0, h)
po = []
for p in range(p_cnt):
po.extend((w0+random.randint(-sx, sx), h0+random.randint(-sy, sy)))
color = new_colorer(params['color'], n, cnt)
if 'addalpha' in params:
color = add_alpha(color, params['addalpha'])
draw.polygon(po, fill=color, outline=None)
def mazy5(draw, params):
""" star flowers """
w, h, cnt = init_common(params) # cnt unused
colors = params['colors']
c = math.pi/180
dg = h*0.037 # thickness, par
#dg = h*0.01 # TEST interesting...
r0 = h/2*0.93 # base radius, par
#r0 = h/2*1.5 # TEST
rOut = float(h)*0.77 # outer circle radius, par
#rOut = float(h)*0.3 # TEST
sc = float(h)/2480 # par
step = 10 # par (?)
n = 10 # count of all 'stars', const, par
for i in range(n):
a = random.randint(4, 28) # number of 'spikes', par
rv = random.randint(20, int(300/a*2)) # 'spike' amplitude, [todo: correlate with a - less if a big] par
if i == 0:
x0 = w/2
y0 = h/2
else:
axy = c*(i-1)*360/8 # par
x0 = w/2 + rOut * math.cos(axy)
y0 = h/2 + rOut * math.sin(axy)
bands = 16 # par r decrease steps, also related to num colors
#bands = len(colors)*3 # test
for m in range(bands):
points = []
for n in range(int(360*step)):
angle = c*float(n)/float(step)
r = r0 + sc * (rv * math.sin(angle*a)) - m*dg
x = x0 + r * math.cos(angle)
y = y0 + r * math.sin(angle)
points.extend((x, y))
color = colors[m%len(colors)] # TODO: fix: not new not old
if 'addalpha' in params:
color = add_alpha(color, params['addalpha'])
draw.polygon(points, fill=color, outline=params['outline'])
def mazy6(draw, params):
""" circle ripples - co-centered circle groups """
w, h, cnt = init_common(params)
useblack = False
if 'useblack' in params:
useblack = params['useblack']
n_r_max = 16 # par
r_min = int(h/25) # par
r_max = int(h/7) # par
#r_max = int(h/3) # par
#r_max = int(h/15) # par
# todo: start w big r_mx then go to lower? == start with big 1st
# todo: mix color modes maybe?
for m in range(cnt):
x = random.randint(int(w/2-w/3), int(w/2+w/3))
y = random.randint(int(h/2-h/3), int(h/2+h/3))
r = random.randint(r_min, r_max)
n_r = random.randint(3, n_r_max)
for n in range(n_r):
nn = n_r - n
ro = int(r*(1+nn*nn*0.015)) # par
if n & 1 and useblack == True:
color = (0, 0, 0)
else:
color = new_colorer(params['mode'], n, n_r)
try:
color
except NameError:
print('ERROR: undefined color mode, using black', params['mode'])
color = (0,0,0)
#color = add_alpha(color, 100) # todo
circle(draw, x, y, ro, fill=color, outline=None)
def mazy7(draw, params):
""" random rectangles - grayish and colorish rects mess """
w, h, cnt = init_common(params)
hdiv = int(h/30) # dflt
if 'div' in params:
d = int(params['div'])
if d <= 0:
d = 1
hdiv = int(h/d)
for m in range(cnt):
x1 = random.randint(int(w/2-w/3), int(w/2+w/3))
y1 = random.randint(int(h/2-h/3), int(h/2+h/3))
w1 = 0
h1 = 0
if params['mode'] == 'dec': # big2small any
sc = (m+1)/cnt
if sc == 0:
sc = 1
wm = int(w/8 * 1/sc)
hm = int(w/8 * 1/sc)
w1 = random.randint(int(w/35), wm)
h1 = random.randint(int(w/35), hm)
if params['mode'] == 'decp': # big2small rect prop
sc = (m+1)/cnt
if sc == 0:
sc = 1
wm = int(w/7 * 1/sc)
hm = int(h/7 * 1/sc)
w1 = random.randint(int(w/35), wm)
h1 = random.randint(int(h/35), hm)
if params['mode'] == 'const': # const small sqare
w1 = hdiv
h1 = hdiv
color = (0,0,0)
# todo: new colorer proper
if params['cmode'] == 'std':
color = gradient2((255,255,255), (0,0,0), m, cnt)
if params['cmode'] == 'inv': # or inverse
color = gradient2((0,0,0), (255,255,255), m, cnt)
if params['cmode'] == 'rnd': # or rnd
ci = random.randint(0, 255)
color = (ci,ci,ci)
if params['cmode'] == 'color': # color
color = colors_happy[random.randint(0, len(colors_happy)-1)]
if params['cmode'] == 'wryb':
color = colors_fwd[random.randint(0, len(colors_fwd)-1)]
if params['cmode'] == 'BeachTowels':
color = colors_BeachTowels[random.randint(0, len(colors_BeachTowels)-1)]
if params['cmode'] == 'MoonlightBytes6':
color = colors_MoonlightBytes6[random.randint(0, len(colors_MoonlightBytes6)-1)]
if params['cmode'] == 'RainbowDash':
color = colors_RainbowDash[random.randint(0, len(colors_RainbowDash)-1)]
if 'addalpha' in params:
color = add_alpha(color, params['addalpha'])
rect(draw, x1, y1, w1, h1, fill=color, outline=None)
def mazy8(draw, params):
""" Block grid with random colors """
w, h, cnt = init_common(params) # cnt unused
xcnt = params['xcnt']
ycnt = params['ycnt']
# todo: new par use ext
alpha_flux_p = 50
alpha_flux_p = None
alpha_flux_vmin = 20
alpha_flux_vmax = 90-40
# todo: opt dodatkowe 'cienkie'
flux_p = None
v = 0
if 'flux_p' in params:
flux_p = params['flux_p']
if 'v' in params:
v = params['v']
border = 0
if 'border' in params:
border = params['border']
ou = None
if 'ou' in params:
ou = params['ou']
w1 = int(w/xcnt)
h1 = int(h/ycnt)
max_c = len(get_colors(params['color']))
for y in range(ycnt-border*2):
for x in range(xcnt-border*2):
x1 = x*w1 + int(w1/2) + border*w1
y1 = y*h1 + int(h1/2) + border*h1
ci = random.randint(0, max_c-1)
color = new_colorer(params['color'], ci, -1)
if alpha_flux_p != None and alpha_flux_p > 0: # rnd flux
if random.randint(0, 100) > alpha_flux_p:
ar = random.randint(alpha_flux_vmin, alpha_flux_vmax)
color = add_alpha(color, ar)
vx = vy = vw = vh = 0
if flux_p != None and flux_p > 0: # rnd flux
if random.randint(0, 100) > flux_p:
vx = float(w1)*(random.randint(0, v)-random.randint(0, v))/100.0
vy = float(h1)*(random.randint(0, v)-random.randint(0, v))/100.0
vw = float(w1)*(random.randint(0, v)-random.randint(0, v))/100.0
vh = float(h1)*(random.randint(0, v)-random.randint(0, v))/100.0
rect(draw, x1+vx, y1+vy, w1+vw, h1+vh, fill=color, outline=ou)
def mazy9(draw, params):
""" 'Warp' effect - triangle rays from center, opt center point shifted rnd """
w, h, cnt = init_common(params)
w2 = int(w/2)
h2 = int(h/2)
c = math.pi/180
v = 0
if 'v' in params:
v = params['v']
rndc = False
if 'rndc' in params:
rndc = params['rndc']
po = [(w2, h2), (0, 0), (0, 0)]
da = c * float(360)/cnt
r = w
for n in range(cnt):
if v > 0:
po[0] = (w2+random.randint(int(-v), int(v)), h2+random.randint(int(-v), int(v)))
x = w2 + r * math.cos(da*n)
y = h2 + r * math.sin(da*n)
po[1] = (x, y)
x = w2 + r * math.cos(da*(n+1))
y = h2 + r * math.sin(da*(n+1))
po[2] = (x, y)
if params['color'] == 'red' or params['color'] == 'bw': # todo: more? + both modes as one?
ci = random.randint(0, 255)
color = new_colorer(params['color'], ci, 255)
else:
cx = get_colors(params['color'])
if cx == None:
raise Exception('Undefined color: '+params['color']) # todo: err only, no raise/crash
cx_len = len(cx)
if rndc == True:
color = cx[random.randint(0, cx_len-1)]
else:
color = cx[n%cx_len]
triangle(draw, po, fill=color, outline=None)
def mazy10(draw, params):
""" Random bezier threads or aeas """
w, h, cnt = init_common(params)
mode = params['mode']
# todo: fix make threads no-lame
# todo: for closed make internal pts bigger while 1st+last with margin?
# todo: 1-2 bezier stripes then rnd mutate?
#np = 1800 #par
np = 5000 #par
ts = [t/float(np) for t in range(np+1)]
sc = float(h) / 3507 # todo: not like that?
wx = int(float(params['penw']) * sc)
if wx <= 0:
wx = 1
def rwh():
ex = 1
if params['open'] == True:
return (random.randint(-w*ex, w*(ex+1)), random.randint(-h*ex, h*(ex+1)))
else:
return (random.randint(0, w), random.randint(0, h))
for n in range(cnt):
po = [rwh()]
for x in range(params['complexity']):
po.extend([rwh()])
if params['color'] == 'blue_const':
color = (16,48,255)
if params['color'] == 'happy':
color = colors_happy[n%len(colors_happy)]
if params['color'] == 'rg':
color = gradient2((255,255,0), (255,0,0), random.randint(0, 255), 255)
if params['color'] == 'red':
color = gradient2((0,0,0), (255,0,0), random.randint(0, 255), 255)
if params['color'] == 'wryb':
color = colors_fwd[n%len(colors_fwd)]
# todo: new colorer proper
if 'addalpha' in params:
color = add_alpha(color, params['addalpha'])
bezier = make_bezier(po)
points = bezier(ts)
if params['mode'] == 'line':
draw.line(points, fill=color, width=wx)
if params['mode'] == 'fill':
draw.polygon(points, fill=color, outline=None)
def mazy11(draw, params):
""" Horizontal gradients with suprizes """
w, h, cnt = init_common(params)
cx = get_colors(params['color'])
csize = len(cx)
dy = float(h)/cnt
if dy*cnt < h: # lame fix for small images
cnt += 3
steps = 256 # const, max rational limit for RGB24 gradient
if steps > w:
steps = w
dx = float(w)/steps
for n in range(cnt):
n1 = random.randint(0, csize-1)
n2 = n%csize
n3 = random.randint(0, csize-1)
color1 = cx[n1]
color2 = cx[n2]
color3 = cx[n3]
for step in range(steps):
color = gradient(color1, color2, color3, step, steps)
x = step*dx
y = n*dy
xy = [(x, y), (x+dx, y+dy)]
draw.rectangle(xy, fill=color, outline=None)
def mazy12(draw, params):
""" Opart-like boxes/circles/triangles """
w, h, cnt = init_common(params)
c = math.pi/180
o = params['o']
v = False
if 'v' in params:
v = params['v']
rc = 1.0
if 'rc' in params:
rc = params['rc']
w0 = w/2
h0 = h/2
r = int(h/2/2 * rc)
for i in range(cnt):
a = c*i/cnt*360
x = int(w0+r*math.cos(a))
y = int(h0+r*math.sin(a))
if v:
va = random.randint(int(-h0/8), int(h0/8)) # par
vx = random.randint(int(-w0/8), int(w0/8)) # par
vy = random.randint(int(-h0/8), int(h0/8)) # par
else:
va = 0
vx = 0
vy = 0
if i&1 == 0:
co = (0,0,0)
ou = (255,255,255)
else:
co = (255,255,255)
ou = (0,0,0)
if o == 'box':
rect(draw, x+vx, y+vy, r+va, r+va, fill=co, outline=ou)
if o == 'cir':
circle(draw, x+vx, y+vy, r+va, fill=co, outline=ou)
if o == 'tri':
vx1 = random.randint(int(-w0/2), int(w0/2)) # par
vx2 = random.randint(int(-w0/2), int(w0/2)) # par
vx3 = random.randint(int(-w0/2), int(w0/2)) # par
vy1 = random.randint(int(-h0/2), int(h0/2)) # par
vy2 = random.randint(int(-h0/2), int(h0/2)) # par
vy3 = random.randint(int(-h0/2), int(h0/2)) # par
points = [(x+vx1, y+vy1), (x+vx2, y+vy2), (x+vx3, y+vy3)]
triangle(draw, points, fill=co, outline=ou)
def mazy13(draw, params):
    """Op-art-like effect: one big random self-crossing polygon about the center."""
    w, h, cnt = init_common(params)
    center_x = w / 2
    center_y = h / 2
    scale = 1.0
    span_x = int(w / scale)
    span_y = int(h / scale)
    # One random vertex per count; x offset drawn before y offset each time.
    vertices = [
        (center_x + random.randint(-span_x, span_x),
         center_y + random.randint(-span_y, span_y))
        for _ in range(cnt)
    ]
    draw.polygon(vertices, fill=params['color'], outline=None)
def mazy14(draw, params):
""" Opart-like cicrles xor-cut by triangles """
w, h, cnt = init_common(params)
c = math.pi/180
if w > h:
sc = w/2*1.5/cnt
else:
sc = h/2*1.5/cnt
if 'm' in params:
cnt2 = params['m']
if cnt2 < 4:
cnt2 = 4
else:
cnt2 = 4 # some min
v = 0
if 'div' in params:
v = params['div']
if v > 0:
v = w/v
im1 = Image.new('RGB', (params['w'], params['h']), params['Background'])
im2 = Image.new('RGB', (params['w'], params['h']), params['color']) # note: 2nd image is reversed
draw1 = ImageDraw.Draw(im1)
draw2 = ImageDraw.Draw(im2)
for n in range(cnt): # centered circles 1st
r = int(sc*(cnt-n))
if n&1 == 0:
co = params['Background']
else:
co = params['color']
circle(draw1, int(w/2), int(h/2), r, fill=co, outline=None)
po = [(int(w/2), int(h/2)), (0, 0), (0, 0)]
da = float(360)/cnt2
r = w
for n in range(cnt2):
if v > 0:
v1 = random.randint(int(-v), int(v))
v2 = random.randint(int(-v), int(v))
po[0] = (int(w/2)+v1, int(h/2)+v2)
x = w/2 + r * math.cos(c*da*n)
y = h/2 + r * math.sin(c*da*n)
po[1] = (x, y)
x = w/2 + r * math.cos(c*da*(n+1))
y = h/2 + r * math.sin(c*da*(n+1))
po[2] = (x, y)
if n&1 == 0:
color = params['Background']
else:
color = params['color']
triangle(draw2, po, fill=color, outline=None)
imout = ImageChops.difference(im1, im2)
params['im'].paste(imout, (0, 0))
draw1 = None
draw2 = None
im1 = None
im2 = None
imout = None
def mazy15(draw, params):
""" opart-like or color circle-interference patterns, predictable (no rnd parts) """
w, h, cnt = init_common(params)
c = math.pi/180
scc = 1.5 # par
if w > h:
sc = w/2*scc/cnt # note: off-screen to fill all
else:
sc = h/2*scc/cnt
ys1 = 0
xs1 = 0
if 'xs1' in params:
xs1 = params['xs1']
if 'ys1' in params:
ys1 = params['ys1']
ys2 = 0
xs2 = 0
if 'xs2' in params:
xs2 = params['xs2']
if 'ys2' in params:
ys2 = params['ys2']
colorer = None
if 'colorer' in params:
colorer = params['colorer']
def draw_it(draw, xs, ys, r):
if n&1 == 0:
if colorer == None:
co = params['Background']
else:
co = new_colorer(colorer, n, cnt)
else:
if colorer == None:
co = params['color']
else:
co = new_colorer(colorer, n, cnt)
circle(draw, int(w/2+xs), int(h/2+ys), r, fill=co, outline=None)
im1 = Image.new('RGB', (params['w'], params['h']), params['Background'])
im2 = Image.new('RGB', (params['w'], params['h']), params['color']) # note: 2nd image is reversed in 'polarity' for better difference effect
draw1 = ImageDraw.Draw(im1)
draw2 = ImageDraw.Draw(im2)
for n in range(cnt): # circles #1
r = int(sc*(cnt-n))
if 'mode' in params:
if params['mode'] == 'linear':
if 'xs1v' in params:
xs1 = xs1 + params['xs1v']
if 'ys1v' in params:
ys1 = ys1 + params['ys1v']
if params['mode'] == 'circle':
a0 = c*n/cnt*360
if 'xs1v' in params:
xs1 = params['xs1v']*math.cos(a0)
if 'ys1v' in params:
ys1 = params['ys1v']*math.sin(a0)
draw_it(draw1, xs1, ys1, r)
for n in range(cnt): # circles #2
r = int(sc*(cnt-n))
if 'mode' in params:
if params['mode'] == 'linear':
if 'xs2v' in params:
xs2 = xs2 + params['xs2v']
if 'ys2v' in params:
ys2 = ys2 + params['ys2v']
if params['mode'] == 'circle':
a0 = c*n/cnt*360
if 'xs2v' in params:
xs2 = params['xs2v']*math.cos(a0)
if 'ys2v' in params:
ys2 = params['ys2v']*math.sin(a0)
draw_it(draw2, xs2, ys2, r)
if colorer == None:
imout = ImageChops.difference(im1, im2) # only difference is cool for bw
else:
imout = ImageChops.blend(im1, im2, 0.5) # only blend for color now
params['im'].paste(imout, (0, 0))
im1 = None
im2 = None
imout = None
def mazy16(draw, params):
    """Op-art circles drifting along a spiral path; fully deterministic (no randomness)."""
    w, h, cnt = init_common(params)
    deg = math.pi / 180
    half = (w if w > h else h) / 2
    rcoef = params['rcoef']
    acoef = params['acoef']
    rscale = params['rscale']
    bg = params['Background']
    fg = params['color']
    for n in range(cnt):
        radius = int(half * (cnt - n) / cnt * rcoef)
        # Alternate fill/outline colors between background and foreground.
        fill_c, line_c = (bg, fg) if n & 1 == 0 else (fg, bg)
        angle = deg * n / cnt * 360 * acoef
        off_x = rscale * half / 2 * math.cos(angle)
        off_y = rscale * half / 2 * math.sin(angle)
        circle(draw, int(w / 2 + off_x), int(h / 2 + off_y), radius,
               fill=fill_c, outline=line_c)
def mazy17(draw, params):
    """Tartan ('Scottish')-like grid of random full-width/full-height color bars."""
    w, h, cnt = init_common(params)
    max_band = int(w * params['v'])
    for _ in range(cnt):
        idx = random.randint(0, cnt)
        color = new_colorer(params['color'], idx, cnt)
        if 'addalpha' in params:
            if params['addalpha'] > 0:
                color = add_alpha(color, params['addalpha'])
        x = random.randint(0, w)
        y = random.randint(0, h)
        band = random.randint(1, max_band)
        # One horizontal and one vertical stripe per iteration, same color.
        draw.rectangle([(0, y), (w, y + band)], fill=color, outline=None)
        draw.rectangle([(x, 0), (x + band, h)], fill=color, outline=None)
def mazy18(draw, params):
""" Random circle bundles """
w, h, cnt = init_common(params)
if 'multi' in params:
multi = params['multi']
for p in multi:
p['w'] = params['w']
p['h'] = params['h']
p['call'] = params['call']
p['color'] = params['color']
p['name'] = params['name']
mazy18(draw, p)
return
v = params['v']
r0v = w
if 'r0v' in params:
r0v = params['r0v']
for n in range(cnt):
x = random.randint(0, w)
y = random.randint(0, h)
r0 = random.randint(0, r0v)
for m in range(params['m']):
r = r0 + random.randint(-v, v)
color = new_colorer(params['color'], n, cnt) # note: alpha for outline does not seem to work
thn = 3 # par
thd = 0.2 # par
thw = 2 # par
for th in range(thn):
circle_w(draw, x, y, r+th*thd, fill=None, outline=color, width=thw) # note width, v 5.3.0+
# was
# for th in range(5):
# circle(draw, x, y, r+th*0.15, fill=None, outline=color)
def mazy19(draw, params):
""" Chequered opart grids with x variations, predictable (no rnd parts) """
w, h, cnt = init_common(params)
c = math.pi/180
nx = cnt
ny = params['m']
dx = int(2*w/nx)
dy = int(2*h/ny)
c_white = (255,255,255)
c_black = (0,0,0)
if 'c_white' in params:
c_white = params['c_white']
if 'c_black' in params:
c_black = params['c_black']
if params['mode'] == 'exp':
fncx = []
coef = 17 # par / const 17 good for 40
for x in range(coef): # precalc
fx = 2.0*math.exp(-x/4)
fncx.append(fx)
if params['mode'] == 'sin':
fncx2 = []
coef2 = nx # ?
for x in range(coef2): # precalc
fx = abs(1.1*math.sin(x/coef2*360*2*c)) #par x2
fncx2.append(fx)
dxmap = []
f = 0
x = 0
while f < w+dx: # fill whole width
fx = 0
if x > 0:
if params['mode'] == 'grid':
fx = dx
if params['mode'] == 'lin':
fx = dx*f/(w+dx)*1.01
if params['mode'] == 'exp':
if x < coef:
fx = dx * fncx[x]
else:
if x < 2*coef:
ndx = coef-(x-coef)-1
fx = dx * fncx[ndx]
else:
fx = dx
if params['mode'] == 'sin':
if x < coef2:
fx = dx * fncx2[x]
else:
fx = dx
if fx < 1:
fx = 1
f = f + fx
dxmap.append(f)
x += 1
for y in range(ny):
for x in range(len(dxmap)-1):
b = ((x&1) == 1 and (y&1) == 1) or ((x&1) == 0 and (y&1) == 0)
if b == True:
cx = c_white
else:
cx = c_black
xp = dxmap[x]
xy = [(xp, y*dy), (xp+(dxmap[x+1]-dxmap[x]), y*dy+dy)]
draw.rectangle(xy, fill=cx, outline=None)
def mazy20(draw, params):
    """ opart-like / papercut-like / video feedback-like 'dragon' effect, predictable (no rnd parts) """
    w, h, cnt = init_common(params)
    da = params['da']  # rotation step applied on every feedback pass (degrees)
    dd = 10 # dflt
    if 'dd' in params:
        dd = params['dd']
    if dd < 1:
        dd = 1  # guard against division by zero / negative margins
    dx = int(w/dd)
    dy = int(h/dd)
    sc = 0.75 # dflt
    if 'sc' in params:
        sc = params['sc']
    nw = int(sc*w)
    nh = int(sc*h)
    # Seed pattern: a Foreground frame with a Background interior.
    xy = [(dx, dy), (w-dx, h-dy)]
    draw.rectangle(xy, fill=params['Foreground'], outline=None)
    xy = [(dx*2, dy*2), (w-dx*2, h-dy*2)]
    draw.rectangle(xy, fill=params['Background'], outline=None)
    # Feedback loop: scale + rotate the whole image, paste it back centered.
    # Mutates params['im'] in place on every iteration.
    for n in range(cnt):
        im1 = params['im']
        im1 = im1.resize((nw, nh), Image.BICUBIC)
        im1 = im1.rotate(da, Image.BICUBIC)
        params['im'].paste(im1, (int((w-nw)/2), int((h-nh)/2)))
        im1 = None  # drop the intermediate copy promptly
    if 'invert' in params:
        if params['invert'] == True:
            params['im'] = invert_image(params['im'])
def mazy21(draw, params):
    """ opart-like scaled and pasted frames, predictable (no rnd parts)

    Scales the canvas down and pastes it back at a position chosen by
    params['mode'] (0=center, 1=top-left, 2=bottom-right, 3/4=alternating,
    4/5=offset, 6=quadrant cycle), giving nested-frame fractal looks.
    """
    w, h, cnt = init_common(params)
    dx = int(w/10)
    dy = int(h/10)
    sc = 0.666
    nw = int(sc*w)
    nh = int(sc*h)
    mode = 0
    if 'mode' in params:
        mode = params['mode']
    # Seed image: frame + inner rectangle.
    xy = [(dx, dy), (w-dx, h-dy)]
    draw.rectangle(xy, fill=params['Foreground'], outline=None)
    xy = [(dx*2, dy*2), (w-dx*2, h-dy*2)]
    draw.rectangle(xy, fill=params['Background'], outline=None)
    for n in range(cnt):
        im1 = params['im'].resize((nw, nh), Image.BICUBIC)
        xx = int(nw/2)
        yy = int(nh/2)
        if mode == 0:
            params['im'].paste(im1, (0+int(nw/2/2), 0+int(nh/2/2))) #center
        if mode == 1:
            params['im'].paste(im1, (0, 0)) #l/u
        if mode == 2:
            params['im'].paste(im1, (xx, yy)) #r/d
        if mode == 3 or mode == 4: # l/u + r/d - extravagant
            if n&1 == 0:
                params['im'].paste(im1, (0, 0)) #l/u
            else:
                params['im'].paste(im1, (xx, yy)) #r/d
        if mode == 4 or mode == 5:
            params['im'].paste(im1, (0+int(nw/3), 0+int(nh/3))) # lame
        if mode == 6: # maxxx fract like
            # Cycle through the four quadrants based on the iteration index.
            nn = n&3
            if nn == 0:
                params['im'].paste(im1, (0, 0))
            if nn == 1:
                params['im'].paste(im1, (xx, 0))
            if nn == 2:
                params['im'].paste(im1, (0, yy))
            if nn == 3:
                params['im'].paste(im1, (xx, yy))
        im1 = None
    if 'invert' in params:
        if params['invert'] == True:
            params['im'] = invert_image(params['im'])
def mazy22(draw, params):
    """ pie slice effects

    Draws cnt concentric pie slices with shrinking radius ('drc' decay).
    With params['rnd'] the start/end angles and decay are randomized per
    slice; otherwise both angles advance by 'da' degrees each iteration.
    The last slice is always a full disc so the center is covered.
    """
    w, h, cnt = init_common(params)
    colorer = params['color']
    do_rnd = False
    if 'rnd' in params:
        do_rnd = params['rnd']
    drc = 0.97
    if 'drc' in params:
        drc = params['drc']
    a_s = 0
    a_e = 35
    if 'a_e' in params:
        a_e = params['a_e']
    da = 12
    if 'da' in params:
        da = params['da']
    #note: some nice: drc a_e da
    #0.97, 90, 12
    #0.97, 35, 12
    #0.97, 10, 12
    #0.97, 90, 45 # good for colorsets
    #0.97, 90 90 # special
    #0.97, 90 89 # special + good for colorsets
    #0.97, 90 85 # special + good for colorsets
    #0.97, 90 80 # special + good for colorsets
    #0.9, 90, 45
    radius = h/2 * 1.0 # par
    for i in range(cnt):
        if do_rnd:
            a_s = random.randint(0, 360) # par x2
            a_e = random.randint(0, 360) # par x2
            drc = random.randint(92, 98)/100 # par x2
        if i == cnt-1:
            # Final slice: force a complete disc.
            a_s = 0
            a_e = 360
        if params['Background'] == (255,255,255) and do_rnd and params['color'] == 'bw': # rev bw color in this special case
            color = new_colorer(colorer, cnt-1-i, cnt)
        else:
            color = new_colorer(colorer, i, cnt)
        draw.pieslice((w/2-radius, h/2-radius, w/2+radius, h/2+radius), a_s, a_e, fill=color)
        radius = radius * drc
        if not do_rnd:
            a_s = a_s + da
            a_e = a_e + da
def mazy23(draw, params):
    """ Sierpinski's triangle fractal, predictable (no rnd parts)

    Draws the classic Sierpinski triangle to a recursion depth of cnt.
    Optional params: 'margin' (fraction of h kept as border), 'color1'
    (main triangle), 'color2' (cut-out triangles), 'colorer' and
    'colorer_mode' (0 = color by depth, 1 = color by depth + child index).
    """
    # https://en.wikipedia.org/wiki/Sierpi%C5%84ski_triangle
    w, h, cnt = init_common(params)
    limit0 = cnt
    margin = 5/100
    if 'margin' in params:
        margin = params['margin']
    dd = h*margin
    color = (255, 255, 255)
    if 'color1' in params:
        color = params['color1']
    color2 = (0, 0, 0)
    if 'color2' in params:
        color2 = params['color2']
    colorer = None
    if 'colorer' in params:
        colorer = params['colorer']
    colorer_mode = None
    if 'colorer_mode' in params:
        colorer_mode = params['colorer_mode']
    def m23(draw, limit, a, htr, ofsx, ofsy):
        # Recursive step: cut an inverted triangle out of each of the three
        # corner sub-triangles, offset by (ofsx, ofsy), until limit hits 0.
        if limit <= 0:
            return
        a /= 2
        htr = 0.5 * math.sqrt(3) * a
        c2 = color2
        if colorer != None and colorer_mode == 0:
            c2 = new_colorer(colorer, limit, limit0) # mode=0
        xx1 = wo+a/4 +(0.5) # note: 0.5 'visual' fix
        xx2 = wo+a/2
        xx3 = wo+a-a/4 -(0.5)
        yy1 = h-dd-htr/2 +(0.5)
        yy2 = h-dd -(0.5)
        fix_x = ofsx
        fix_y = ofsy
        po = [(int(xx1+fix_x), int(yy1+fix_y)), (int(xx2+fix_x), int(yy2+fix_y)), (int(xx3+fix_x), int(yy1+fix_y))]
        if colorer != None and colorer_mode == 1:
            c2 = new_colorer(colorer, limit+0, limit0) # mode=1
        triangle(draw, po, fill=c2, outline=None)
        m23(draw, limit-1, a, htr, fix_x, fix_y)
        fix_x = a + ofsx
        fix_y = ofsy
        po = [(int(xx1+fix_x), int(yy1+fix_y)), (int(xx2+fix_x), int(yy2+fix_y)), (int(xx3+fix_x), int(yy1+fix_y))]
        if colorer != None and colorer_mode == 1:
            c2 = new_colorer(colorer, limit+1, limit0) # mode=1
        triangle(draw, po, fill=c2, outline=None)
        m23(draw, limit-1, a, htr, fix_x, fix_y)
        fix_x = a/2 + ofsx
        fix_y = -htr + ofsy
        po = [(int(xx1+fix_x), int(yy1+fix_y)), (int(xx2+fix_x), int(yy2+fix_y)), (int(xx3+fix_x), int(yy1+fix_y))]
        if colorer != None and colorer_mode == 1:
            c2 = new_colorer(colorer, limit+2, limit0) # mode=1
        triangle(draw, po, fill=c2, outline=None)
        m23(draw, limit-1, a, htr, fix_x, fix_y)
    a = h-dd-dd # start side len, todo: try par >> w?
    wo = (w-a)/2
    htr = 0.5 * math.sqrt(3) * a # start triangle h
    po = [(wo, h-dd), (wo+a/2, h-dd-htr), (wo+a, h-dd)]
    triangle(draw, po, fill=color, outline=None) # main
    po = [(wo+a/4, h-dd-htr/2), (wo+a/2, h-dd), (wo+a-a/4, h-dd-htr/2)]
    triangle(draw, po, fill=color2, outline=None) # 1st cut
    m23(draw, limit0-1, a, htr, 0, 0) # recurent inside
def mazy24(draw, params):
    """ rotated triangles, predictable (no rnd parts)

    Draws cnt concentric triangles, each slightly rotated around its own
    centroid and scaled down, producing a spiral-of-triangles look.
    Optional params: 'ou' (outline color), 'a_base' (start side length as a
    fraction of h), 'an_sc' (total rotation scale), 'addalpha'.
    """
    w, h, cnt = init_common(params)
    cx = w/2
    cy = h/2 + h/12 # 'y center' slightly moved down, nicer this way
    c = math.pi/180
    colorer = params['colorer']
    ou = None
    if 'ou' in params:
        ou = params['ou']
    a_sc = 0.93 # par
    a_base = 1.0
    if 'a_base' in params:
        a_base = params['a_base']
    an_sc = 1.0
    if 'an_sc' in params:
        an_sc = params['an_sc']
    a = h*a_base
    for i in range(cnt):
        htr = 0.5 * math.sqrt(3) * a
        po = [(cx-a/2, cy-htr/2), (cx, cy+htr/2), (cx+a/2, cy-htr/2)]
        ce = (1/3*(po[0][0]+po[1][0]+po[2][0]), 1/3*(po[0][1]+po[1][1]+po[2][1])) # actual triangle center is here (triangle centroid)
        an = i/cnt * 360 * c * an_sc
        po_ = [rotate_point(po[0], ce[0], ce[1], an), rotate_point(po[1], ce[0], ce[1], an), rotate_point(po[2], ce[0], ce[1], an)]
        color = new_colorer(colorer, i, cnt)
        if 'addalpha' in params:
            color = add_alpha(color, params['addalpha'])
        triangle(draw, po_, fill=color, outline=ou)
        a = a * a_sc
def mazy25(draw, params):
    """ waves#1

    Draws cnt wavy bands (horizontal or vertical, per params['horizontal'])
    whose inner and outer edges are sums of two randomly-phased sines.
    params['f0'] scales the random frequency offsets; 'color' selects the
    colorer, 'addalpha' adds transparency.
    """
    # todo: par + more like it?
    w, h, cnt = init_common(params)
    c = math.pi/180
    fd = 100.0*params['f0']
    div = float(cnt*2+4+(-4)) # par
    if div == 0:
        div = 1
    if params['horizontal'] == True:
        rn = w
        dx = h/div
    else:
        rn = h
        dx = w/div
    mofs0 = 0 # par, was 2
    rnd_color = True # par
    rnd_color = False
    for z in range(cnt):
        if rnd_color:
            ndx = random.randint(0, cnt)
        else:
            ndx = z
        color = new_colorer(params['color'], ndx, cnt)
        if 'addalpha' in params:
            color = add_alpha(color, params['addalpha'])
        # Random phases (aofs*) and frequencies (fofs*) for the two sine
        # components of the band's inner and outer edge.
        aofs1 = random.randint(0, 360)
        aofs2 = random.randint(0, 360)
        aofs3 = random.randint(0, 360)
        aofs4 = random.randint(0, 360)
        fofs1 = random.randint(0, 100)/fd*1
        fofs2 = random.randint(0, 100)/fd*1
        fofs3 = random.randint(0, 100)/fd*2
        fofs4 = random.randint(0, 100)/fd*2
        mofs1 = (z+mofs0)*dx
        y = 0
        for n in range(rn):
            nsc = float(n)/float(rn)*360*10 # par 10
            x_in = mofs1 + dx * (1 + (math.sin(c*(nsc*fofs1+aofs1))+2*math.sin(c*(nsc*fofs3+aofs3)))/3)
            x_out = mofs1 + dx * (1 + (math.sin(c*(nsc*fofs2+aofs2))+2*math.sin(c*(nsc*fofs4+aofs4)))/3)
            if params['horizontal'] == True:
                xy = [(y, x_in), (y, h - x_out)]
            else:
                xy = [(x_in, y), (w - x_out, y)]
            draw.rectangle(xy, fill=color, outline=None) # 1px rects?
            y += 1
def mazy26(draw, params):
    """ waves#2

    Draws cnt pairs of closed wavy ribbons of random thickness.  When both
    'par1' and 'par2' are present the function recurses once for each
    sub-parameter set and returns.
    """
    if 'par1' in params and 'par2' in params:
        mazy26(draw, params['par1'])
        mazy26(draw, params['par2'])
        return
    w, h, cnt = init_common(params)
    # todo: simplify the code (why 2x?) | exp par
    w = params['w']
    h = params['h']
    cnt = params['n']
    random.seed()
    c = math.pi/180
    sc = 3.0 #par was 4
    if params['horizontal'] == True:
        rn = w
        dx = h/float(cnt)*sc
    else:
        rn = h
        dx = w/float(cnt)*sc
    for z in range(cnt):
        ndx = random.randint(0, cnt)
        color = new_colorer(params['color'], ndx, cnt)
        if 'addalpha' in params:
            color = add_alpha(color, params['addalpha'])
        # Random phases and frequencies for the two sine components.
        aofs1 = random.randint(0, 360)
        aofs2 = random.randint(0, 360)
        aofs3 = random.randint(0, 360)
        aofs4 = random.randint(0, 360)
        fofs1 = random.randint(0, 100)/100.0*1 # par
        fofs2 = random.randint(0, 100)/100.0*1 # par
        fofs3 = random.randint(0, 100)/100.0*2 # par
        fofs4 = random.randint(0, 100)/100.0*2 # par
        mofs1 = float(z*dx)
        am1 = 1.0 # par
        am2 = 1.0 # par
        am3 = 3.0 # par was 2
        am4 = 3.0 # par was 2
        y = 0
        points1 = []
        points2 = []
        points1a = []
        points2a = []
        # Trace the top edge of both ribbons.
        for n in range(rn):
            nsc = float(n)/float(rn)*360*10 # par 10
            x_in = int(mofs1 + dx * (1 + (am1*math.sin(c*(nsc*fofs1+aofs1))+am3*math.sin(c*(nsc*fofs3+aofs3)))))
            x_out = int(mofs1 + dx * (1 + (am2*math.sin(c*(nsc*fofs2+aofs2))+am4*math.sin(c*(nsc*fofs4+aofs4)))))
            if params['horizontal'] == True:
                points1.extend((y, x_in))
                points2.extend((y, x_out))
            else:
                points1.extend((x_in, y))
                points2.extend((x_out, y))
            y += 1
        # Close each ribbon by appending the same edge shifted by the random
        # line width lw, walked in reverse order.
        lw = random.randint(1, int(w/30)) #par, opt big->small?
        points1a[:] = [xy for xy in points1]
        points2a[:] = [xy for xy in points2]
        for a in range(int(len(points1a)/2)):
            ndx = int(len(points1a)/2)-1-a
            if params['horizontal'] == True:
                points1.extend((points1a[ndx*2], lw+points1a[ndx*2+1]))
            else:
                points1.extend((lw+points1a[ndx*2], points1a[ndx*2+1]))
        for a in range(int(len(points2a)/2)):
            ndx = int(len(points2a)/2)-1-a
            if params['horizontal'] == True:
                points2.extend((points2a[ndx*2], lw+points2a[ndx*2+1]))
            else:
                points2.extend((lw+points2a[ndx*2], points2a[ndx*2+1]))
        draw.polygon(points1, fill=color, outline=color)
        draw.polygon(points2, fill=color, outline=color)
def mazy27(draw, params):
    """ multishaped polygon mess

    Scatters cnt random regular n-gons (minsides..maxsides sides, random
    center/rotation/radius) over the canvas; the maximum radius decays by
    'rsc' each step.  With 'addalpha' set, the final image is also
    saturation-enhanced.
    """
    w, h, cnt = init_common(params)
    colorer = params['colorer']
    addalpha = 0
    if 'addalpha' in params:
        addalpha = params['addalpha']
    saturation_factor = 1.5 # dflt
    if 'saturation' in params:
        saturation_factor = params['saturation']
    minsides = 3
    maxsides = 8
    if 'minsides' in params:
        minsides = params['minsides']
    if 'maxsides' in params:
        maxsides = params['maxsides']
    # Clamp: regular polygons need at least 3 sides.
    if minsides < 3:
        minsides = 3
    if maxsides < 3:
        maxsides = 3
    maxangle = 90
    if 'maxangle' in params:
        maxangle = params['maxangle']
    rmin = int(h/30)
    if 'rmin' in params:
        rmin = params['rmin']
    rmax = h/4 # h h/2 # h/4
    if 'rmax' in params:
        rmax = params['rmax']
    rsc = 0.997 # 0
    if 'rsc' in params:
        rsc = params['rsc']
    ofs = int(h/4) # centers this much off-screen
    for n in range(cnt):
        color = new_colorer(colorer, n, cnt)
        if addalpha > 0:
            color = add_alpha(color, addalpha)
        r = random.randint(rmin, int(rmax))
        cx = random.randint(-ofs, w+ofs)
        cy = random.randint(-ofs, h+ofs)
        if maxangle > 0:
            a = random.randint(0, maxangle)
        else:
            a = 0
        sides = random.randint(minsides, maxsides)
        nsided(draw, sides, cx, cy, r, a, color, None)
        if rsc > 0:
            rmax = rmax * rsc
            if rmax < rmin:
                rmax = rmin
    if addalpha > 0 and saturation_factor > 0:
        params['im'] = enhace(params['im'], saturation_factor)
# future fun
def mazy28(draw, params):
    """ Experimental (unfinished): single filled silhouette polygon.

    Builds a mountain-range-like outline by summing three sines with
    partially randomized phase/frequency/amplitude, then fills it in a
    fixed brownish color.  NOTE(review): cnt from init_common is discarded
    and hard-coded to 400 -- presumably still a work in progress.
    """
    # fin or remove
    w, h, cnt = init_common(params)
    c = math.pi/180
    cnt = 400
    sx = int(w/cnt)
    color = (0xd4,0x8a,0x3e)
    aofs1 = 0
    fofs1 = 3
    fofs3 = 1
    am1 = 3
    am3 = 1
    dx = sx/3
    po = [(w,0), (0, 0)]
    x = 0
    y = h
    for n in range(cnt):
        # Per-step random phase/frequency/amplitude for the middle sine.
        aofs2 = random.randint(0, 90)
        aofs3 = random.randint(0, 180)
        fofs2 = random.randint(1, 5)
        am2 = random.randint(1, 15)
        nsc = float(n)/float(cnt)*360*3 # par
        f = int(dx * (2 + (am1*math.sin(c*(nsc*fofs1+aofs1))+am2*math.sin(c*(nsc*fofs2+aofs2))+am3*math.sin(c*(nsc*fofs3+aofs3)))))
        po.extend((x, y))
        y -= f
        x += sx
    draw.polygon(po, fill=color, outline=None)
def mazy29(draw, params):
    """ Experimental (unfinished): stacked wavy horizontal bands.

    Builds one 'model' band polygon, replicates it bcnt times shifted
    vertically, perturbs the band edges with a sum of ten sines, then draws
    the bands back-to-front.  A commented-out 'smear' distortion prototype
    is kept below for future work.
    """
    # note: hard one, probably will fail
    w, h, cnt = init_common(params)
    c = math.pi/180
    color = (255,255,255)
    hb = int(h/10)
    y0 = (hb/2)
    cnt = 300
    dx = int(w/cnt)
    bcnt = 5
    def f1(p):
        # Shift a point down by two band heights (used to stack the copies).
        x = p[0]
        y = p[1] + hb*2
        return (x, y)
    po = [] # build one model block
    po.append((0,y0))
    for n in range(cnt):
        pn = (dx*n, y0)
        po.append(pn)
    po.append((w, y0))
    po.append((w, y0+hb))
    for n in range(cnt):
        po.append((w-dx*n, y0+hb))
    po.append((0, y0+hb))
    poall = [None] * bcnt # copy block bcnt times
    for n in range(bcnt):
        if n == 0:
            poall[n] = copy.deepcopy(po)
        else:
            po[:] = (f1(p) for p in po)
            poall[n] = copy.deepcopy(po)
    # s1/s2: index offsets of the top and bottom edge vertices in each band.
    s1 = 1
    s2 = 1 + cnt + 2
    # test sin wave - remove later?
    for n in range(bcnt):
        for x in range(cnt):
            #fn = 30*math.sin(c*x/cnt*360*4
            fn = 0
            for f in range(10):
                fn += (20+f*2)*math.sin(c*x/cnt*360*(1+f*2))
            tup = poall[n][s1+x]
            tup = (tup[0], tup[1]+fn) # change y-s
            poall[n][s1+x] = tup
            tup = poall[n][s2+cnt-x]
            tup = (tup[0], tup[1]+fn) # change y-s
            poall[n][s2+cnt-x] = tup
    """
    def dist(p, sm):
        start = sm['start']
        ds = math.sqrt((start[0]-p[0])*(start[0]-p[0])+(start[1]-p[1])*(start[1]-p[1]))
        is_affecting = ds < sm['radius']
        inte = sm['intensity']*math.exp(-ds*0.05)*1000
        intex = inte*math.cos(c*sm['angle'])
        intey = inte*math.sin(c*sm['angle'])
        return ds, inte, intex, intey, is_affecting
    def apply_smear(sm):
        for m in range(cnt):
            for n in range(bcnt):
                tup = poall[n][s1+m]
                ds, inte, intex, intey, is_affecting = dist(tup, sm)
                if is_affecting:
                    tup = (tup[0]+intex, tup[1]+intey) # change
                    poall[n][s1+m] = tup
                tup = poall[n][s2+cnt-m]
                ds, inte, intex, intey, is_affecting = dist(tup, sm)
                if is_affecting:
                    tup = (tup[0]+intex, tup[1]+intey) # change
                    poall[n][s2+cnt-m] = tup
    # todo: smear with intensity ~ 1/range and params: angle + radius + strength + len
    sme1 = {'start': (w/2, h/2), 'angle': 45, 'radius': 400, 'intensity': 100, 'length': 300}
    apply_smear(sme1)
    sme2 = {'start': (w/4, h/4), 'angle': 15, 'radius': 400, 'intensity': 100, 'length': 300}
    apply_smear(sme2)
    """
    for n in range(bcnt):
        #po = poall[n]
        po = poall[bcnt-1-n] # rev order for test
        color = (255,255, int(255*n/bcnt))
        draw.polygon(po, fill=color, outline=None)
    #circle(draw, sme1['start'][0], sme1['start'][1], sme1['radius'], fill=None, outline=(0,0,255))
    #circle(draw, sme2['start'][0], sme2['start'][1], sme2['radius'], fill=None, outline=(0,0,255))
def mazy30(draw, params):
    """ Experimental (unfinished): jagged radial star polygon.

    Samples 200 points on a circle whose radius is modulated by a randomly
    re-amplified sine, fills the resulting polygon in yellow and outlines
    it in red.  The fr0/bezier branches are disabled prototypes.
    """
    w, h, cnt = init_common(params)
    w2 = int(w/2)
    h2 = int(h/2)
    cnt = 100*2 # par
    r0 = h*0.3 # par
    v = 200+100 # par
    sc = 1.0 # par
    rv = 200 # par
    asc = 10 # par 6 50
    po = []
    for n in range(cnt):
        a = math.pi/180 * 360 * float(n/cnt) # par
        rv = random.randint(100,500) # test
        r = r0 + sc * (rv * math.sin(asc*a))
        #r = r0
        po.append((int(w2+r*math.cos(a)), int(h2+r*math.sin(a))))
    def fr0(p):
        # Disabled prototype: randomly jitter ~20% of the points by up to v px.
        # par x2
        if random.randint(0, 100) > 80:
            return (p[0]+random.randint(-v,v), p[1]+random.randint(-v,v))
            #return (p[0]+random.randint(-v,v), p[1])
        else:
            return p
    def fr(p):
        # Identity mapping (jitter currently switched off).
        return p
        #return (p[0]+random.randint(-v,v), p[1]+random.randint(-v,v))
    po[:] = (fr(xy) for xy in po)
    draw.polygon(po, fill=(255, 255, 0), outline=None) # par
    po.append((po[0][0], po[0][1]))
    draw.line(po, fill=(255,0,0), width=3) # par
    # opt
    if False:
        ts = [t/2000.0 for t in range(2001)]
        #ts = [t/20.0 for t in range(21)]
        bezier = make_bezier(po)
        points = bezier(ts)
        draw.polygon(points, fill=(255, 0, 0), outline=(255,255,255))
def mazy31(draw, params):
    """ Placeholder effect: not implemented yet; draws nothing and returns 0. """
    w, h, cnt = init_common(params)
    # ...
    return 0
def mazy32(draw, params):
    """ Placeholder effect: not implemented yet; draws nothing and returns 0. """
    w, h, cnt = init_common(params)
    # ...
    return 0
| monstergdc/pyartforms | playgroud/smears.py | smears.py | py | 49,566 | python | en | code | 4 | github-code | 36 |
23966001733 | # coding=utf-8
import pytest
from mockito import expect, mock, verify, verifyNoUnwantedInteractions, verifyStubbedInvocationsAreUsed, when
# noinspection PyProtectedMember
from elib_run._run import _run
@pytest.mark.parametrize(
    'mute',
    [True, False]
)
def test_exit(mute, caplog):
    """_exit() raises SystemExit; when muted it flushes the captured output to the log."""
    caplog.set_level(10, 'elib_run.process')
    # Minimal stand-in for a RunContext with only the fields _exit() touches.
    context = mock(
        {
            'mute': mute,
            'process_output_as_str': 'dummy_output',
            'process_logger': mock(),
        }
    )
    when(context.process_logger).debug(...)
    with pytest.raises(SystemExit):
        _run._exit(context)
    if mute:
        # Muted runs dump the buffered process output on exit.
        assert 'dummy_output' in caplog.text
    else:
        assert '' == caplog.text
@pytest.mark.parametrize('return_code', (0, 1))
@pytest.mark.parametrize('mute', (True, False))
@pytest.mark.parametrize('failure_ok', (True, False))
def test_check_error(return_code, mute, failure_ok, caplog):
    """check_error() logs success/failure and calls _exit() only when failure is not OK."""
    caplog.set_level(10)
    # Minimal stand-in for a RunContext with the fields check_error() reads.
    context = mock(
        {
            'return_code': return_code,
            'mute': mute,
            'result_buffer': '',
            'failure_ok': failure_ok,
            'cmd_as_string': 'dummy_cmd',
            'process_logger': mock(),
        }
    )
    when(_run)._exit(context)
    result = _run.check_error(context)
    # NOTE: originally `return_code is 0` / `result is 0`; identity comparison
    # with int literals relies on CPython small-int caching and emits a
    # SyntaxWarning on Python 3.8+ -- use equality instead.
    if return_code == 0:
        if mute:
            expected_buffer = f': success: {return_code}'
        else:
            expected_buffer = f'{context.cmd_as_string}: success: {context.return_code}'
        assert expected_buffer in caplog.text
        assert result == 0
    else:
        if mute:
            expected_buffer = f': command failed: {context.return_code}'
        else:
            expected_buffer = f'{context.cmd_as_string}: command failed: {context.return_code}'
        assert expected_buffer in caplog.text
        assert repr(context) in caplog.text
        if not failure_ok:
            # Fatal failures must route through _exit().
            verify(_run)._exit(context)
        else:
            verify(_run, times=0)._exit(...)
@pytest.mark.parametrize(
    'filters',
    (None, ['some'], ['some', 'string'], 'some string')
)
def test_sanitize_filters(filters):
    """_sanitize_filters passes None through, wraps a str in a list, and keeps lists as-is."""
    result = _run._sanitize_filters(filters)
    if filters is None:
        assert result is None
    elif isinstance(filters, str):
        # A bare string is wrapped into a single-element list.
        assert [filters] == result
    else:
        # Lists are returned unchanged (same object).
        assert result is filters
@pytest.mark.parametrize(
    'filters',
    ([False], [None], [True], [1], [1.1], [['list']], [{'k': 'v'}], True, False, 1.1, 1, ('tuple',), {'k': 'v'})
)
def test_sanitize_filters_wrong_value(filters):
    """Any filters value that is not None, str, or list[str] must raise TypeError."""
    with pytest.raises(TypeError):
        _run._sanitize_filters(filters)
def test_parse_exe_no_args():
    """_parse_cmd resolves the executable and returns no arguments for a bare command."""
    when(_run).find_executable(...).thenReturn('dummy')
    result = _run._parse_cmd('cmd')
    # NOTE: the original `assert 'dummy', '' == result` asserted a non-empty
    # tuple ('dummy', bool) -- always true.  Assert the actual return value.
    # Assumes _parse_cmd returns (executable, args) -- TODO confirm args type.
    assert result == ('dummy', '')
    verifyStubbedInvocationsAreUsed()
def test_parse_exe_with_args():
    """_parse_cmd splits a command line into the resolved executable and its arguments."""
    when(_run).find_executable(...).thenReturn('dummy')
    # NOTE: the original passed 'cmd' (no arguments) and then asserted the
    # always-true tuple `'dummy', [...] == result`.  Actually pass arguments
    # and assert the real return value.
    result = _run._parse_cmd('cmd some args')
    # Assumes _parse_cmd returns (executable, list-of-args) -- TODO confirm.
    assert result == ('dummy', ['some', 'args'])
    verifyStubbedInvocationsAreUsed()
def test_parse_cmd_exe_not_found():
    """_parse_cmd raises ExecutableNotFoundError when the executable cannot be resolved."""
    when(_run).find_executable(...).thenReturn(None)
    with pytest.raises(_run.ExecutableNotFoundError):
        _run._parse_cmd('dummy')
    verifyStubbedInvocationsAreUsed()
@pytest.mark.parametrize(
    'mute', (True, False)
)
@pytest.mark.windows
def test_run(mute):
    """run() starts the process, monitors it, and checks the exit status -- nothing else."""
    expect(_run.RunContext).start_process()
    expect(_run).monitor_running_process(...)
    expect(_run).check_error(...)
    _run.run('cmd', mute=mute)
    verifyNoUnwantedInteractions()
| theendsofinvention/elib_run | test/test_run.py | test_run.py | py | 3,523 | python | en | code | 0 | github-code | 36 |
70533372263 |
from pickle import TRUE
from re import A
from turtle import Turtle, penup, reset, speed
# Outline of the car sprite as (x, y) vertex pairs.
# NOTE: this tuple is currently unused -- screen.register_shape() below
# re-declares the same outline inline.  Two vertices were typed with a '.'
# instead of ',' ((0.10) and (7.0)), turning them into plain floats; fixed
# so every element is a 2-tuple.
carpoints = (
    (4,0),
    (2,2),
    (1,4),
    (1,8),
    (0,8),
    (0,10),
    (1,10),
    (1,18),
    (0,18),
    (0,20),
    (1,20),
    (1,24),
    (2,26),
    (4,28),
    (7,28),
    (9,26),
    (10,24),
    (10,20),
    (11,20),
    (11,18),
    (10,18),
    (10,10),
    (11,10),
    (11,8),
    (10,8),
    (10,4),
    (9,2),
    (7,0)
)
# Play-field dimensions and per-keypress step size.
height = 500
width = 700
distance = 10
# Car turtle and its screen.
ben = Turtle()
screen = ben.getscreen()
# Register the car outline as a custom turtle shape.
screen.register_shape("car", (
    (4,0),
    (2,2),
    (1,4),
    (1,8),
    (0,8),
    (0,10),
    (1,10),
    (1,18),
    (0,18),
    (0,20),
    (1,20),
    (1,24),
    (2,26),
    (4,28),
    (7,28),
    (9,26),
    (10,24),
    (10,20),
    (11,20),
    (11,18),
    (10,18),
    (10,10),
    (11,10),
    (11,8),
    (10,8),
    (10,4),
    (9,2),
    (7,0)
    ))
ben.shape("car")
ben.color("black", "blue")
# Hidden turtle used only to render the lap timer text.
timer = Turtle()
timer.hideturtle()
timer.penup()
timer.left(90)
timer.forward(300)
# Movement state: current speed, steering angle per keypress, acceleration step.
speed = 0
angle = 10
update = 0.2
# Track background image (must exist in the working directory).
screen.bgpic("nn2.gif")
def accelerate():
    """Increase the car's speed by one acceleration step."""
    global speed
    speed += update
def decelerate():
    """Decrease the car's speed by one acceleration step."""
    global speed
    speed -= update
def moveleft():
    """Steer the car left by the fixed steering angle."""
    ben.left(angle)
def moveright():
    """Steer the car right by the fixed steering angle."""
    ben.right(angle)
def stop():
    """Bring the car to an immediate halt."""
    global speed
    speed = 0
def reset():
    """Return the car to the start line, facing up, with timer and speed zeroed.

    NOTE(review): this shadows the `reset` imported from turtle at the top
    of the file; renaming would avoid the collision.
    """
    global time, speed
    time = 0
    ben.pu()
    ben.goto(-265, 0)
    ben.setheading(90)
    ben.pd()
    speed = 0
# Keyboard controls: WASD and the arrow keys drive, space stops, 'q' resets.
screen.listen()
screen.onkeypress(accelerate, "w")
screen.onkeypress(decelerate, "s")
screen.onkeypress(moveright,"d")
screen.onkeypress(moveleft,"a")
screen.onkeypress(stop, "space")
screen.onkeypress(accelerate,"Up")
screen.onkeypress(decelerate, "Down")
screen.onkeypress(moveright,"Right")
screen.onkeypress(moveleft,"Left")
screen.onkeypress(reset,"q")
# Place the car at the start line, facing up.
ben.pu()
ben.goto(-265, 0)
ben.left(90)
ben.pd()
# Lap-timer state.
time = 0
scoring = False
# Main game loop: advance the car, update the lap timer, and toggle scoring
# when crossing the start/finish line.  NOTE(review): 'time' advances by a
# fixed 0.1 per frame, not by wall-clock time -- confirm intended behaviour.
while True:
    allowedforward = True
    # Block the extra keypress step once the car is below the field while
    # pointing straight down.  BUGFIX: the original compared the bound method
    # `ben.heading` to 180 (always False); heading() must be called.
    if ben.ycor() < -height and ben.heading() == 180:
        allowedforward = False
    ben.forward(speed)
    if scoring:
        time += 0.1
        timer.clear()
        timer.write(f"{time:02.2f}", False, "center", ("Arial", 30, "bold"))
    # Start/finish line at x ~ -265 in the upper half of the track.
    if abs(ben.xcor() - (-265)) < 10 and ben.ycor() > 0:
        if time < 10:
            scoring = True
        else:
            scoring = False
    # BUGFIX: the original tested `allowedforward == TRUE`, where TRUE was
    # auto-imported from pickle (a bytes protocol constant), so the
    # comparison was always False and this step never ran.
    if allowedforward:
        ben.forward(distance)
screen.update() | ArseniyMegrabyan/FBlockComputerProgramming | sus.py | sus.py | py | 2,423 | python | en | code | 0 | github-code | 36 |
36587263845 | # 완전제곱수
import sys
input = sys.stdin.readline
# Inclusive range [M, N] read from stdin, one bound per line.
M = int(input())
N = int(input())
# Perfect squares 1..100**2 (= 10000, the problem's upper bound), filtered to
# the requested range; comprehension keeps them in ascending order.
ans = [k * k for k in range(1, 101) if M <= k * k <= N]
if not ans:
    print(-1)
else:
    # Sum of all matching squares, then the smallest one.
    print(sum(ans))
    print(ans[0])
print(ans[0])
| meatsby/algorithm | boj/1977.py | 1977.py | py | 272 | python | en | code | 0 | github-code | 36 |
70471621543 | # TODO : TRANSFORM INTO A CLASS AND CREATE A REPORT OF REGION TRIMMED
#~~~~~~~GLOBAL IMPORTS~~~~~~~#
# Standard library packages import
from os import remove, path
import gzip
from time import time
from sys import stdout
# Third party package import
from Bio import SeqIO
# Local library packages import
from pyDNA.Utilities import import_seq, file_basename, mkdir
from Blast import Blastn
#~~~~~~~MAIN METHODS~~~~~~~#
def mask ( subject_fasta,
           hit_list,
           ref_outdir="./references/",
           ref_outname="masked_ref.fa",
           compress_ouput=True ):
    """
    Import a reference fasta sequence, Mask positions indicated by hits from a hit_list and write
    the modified fasta sequence in a new file.
    @param subject_fasta Fasta sequence of the subject to edit (can be gzipped)
    @param hit_list List of hit objects. Hits need at least 3 fields named s_id, s_start and s_end
    coresponding to the name of the sequence matched, and the hit start/end (0 based).
    @param ref_outdir Directory where the masked reference will be created
    @param ref_outname Name of the masked reference
    @param compress_ouput If true the output will be gzipped
    @return A path to the modified sequence if the hit list was valid.
    """
    # Verify the first hit carries the required s_id/s_start/s_end fields;
    # bail out (returning the untouched fasta path) otherwise.
    try:
        hit_list[0].s_id
        hit_list[0].s_start
        hit_list[0].s_end
    except IndexError:
        print ("No hit found, The subject fasta file will not be edited")
        return subject_fasta
    except AttributeError:
        print ("The list provided does not contain suitable hit object, The subject fasta file will not be edited")
        return subject_fasta
    # Initialize output folder
    mkdir(ref_outdir)
    # Initialize input fasta file.
    # BUGFIX: gzip handles must be opened in TEXT mode ('rt'/'wt') -- on
    # Python 3, gzip.open(..., 'r') yields bytes, which breaks SeqIO.parse
    # and out_handle.write(str).
    if subject_fasta[-2:].lower() == "gz":
        in_handle = gzip.open(subject_fasta, "rt")
    else:
        in_handle = open(subject_fasta, "r")
    # Initialize output fasta file
    if compress_ouput:
        ref_path = path.join (ref_outdir, ref_outname+".gz")
        out_handle = gzip.open(ref_path, 'wt')
    else:
        ref_path = path.join (ref_outdir, ref_outname)
        out_handle = open(ref_path, 'w')
    # Set of reference ids that need to be modified (O(1) membership tests).
    id_list = {hit.s_id for hit in hit_list}
    # Iterate over record in the subject fasta file
    print ("Masking hit positions and writting a new reference for {} ".format(ref_outname))
    i=j=0
    start_time = time()
    for record in SeqIO.parse(in_handle, "fasta"):
        # Progress Marker
        stdout.write("*")
        stdout.flush()
        # Check if the record is in the list of record to modify
        if record.id in id_list:
            i+=1
            # Casting Seq type to MutableSeq Type to allow string editing
            record.seq = record.seq.tomutable()
            # For each hit in the list of hit found
            for hit in hit_list:
                if record.id == hit.s_id:
                    # For all position between start and end coordinates modify the base by N
                    for position in range (hit.s_start, hit.s_end):
                        record.seq[position]= 'n'
        else:
            j+=1
        # Finally write the sequence modified or not
        out_handle.write(record.format("fasta"))
    print("")
    # Report informations.  BUGFIX: was round(x),2 -- the precision argument
    # sat outside the round() call as a stray format() argument.
    print("{} sequence(s) from {} modified in {}s".format(i, ref_outname, round(time()-start_time, 2)))
    # Close files and return the masked ref path
    in_handle.close()
    out_handle.close()
    return ref_path
| a-slide/pyDNA | RefMasker.py | RefMasker.py | py | 3,819 | python | en | code | 1 | github-code | 36 |
39665406570 | import gzip
import sys
from SPARQLWrapper import SPARQLWrapper, JSON
import gzip
from bs4 import BeautifulSoup
import re
import spacy
from spacy import displacy
from collections import Counter
import en_core_web_md
import difflib
import requests
import json
from elasticsearch import Elasticsearch
nlp = en_core_web_md.load()
KEYNAME = "WARC-TREC-ID"
KEYHTML= "<!DOCTYPE html"
NER_type = ["DATE","TIME","CARDINAL","ORDINAL","QUANTITY","PERCENT","MONEY"] # ruled type list avoid
## format function for output
def label_process(label):
    """Format an entity label for output: title-case multi-word labels,
    leave single-word labels untouched."""
    is_multiword = len(label.split(" ")) > 1
    return label.title() if is_multiword else label
## rule format function in NER
def entity_process(entity):
    """Normalise NER output: lowercase any mention containing the substring
    'cancer'; every other (text, label) pair passes through unchanged."""
    return [
        (text.lower() if "cancer" in text else text, label)
        for text, label in entity
    ]
## retrieve the text from HTML pages
## including text cleaning
def html_to_text(record):
    """Extract cleaned plain text from a WARC record's HTML payload.

    Skips the WARC headers (everything before the '<!DOCTYPE html' line),
    strips script/style/aside elements with BeautifulSoup, then collapses
    whitespace and removes characters outside the allowed set (CJK range,
    ASCII letters/digits, and common punctuation).
    """
    html = ''
    flag = 0
    # Accumulate lines starting at the HTML doctype marker.
    for line in record.splitlines():
        if line.startswith(KEYHTML):
            flag = 1
        if flag == 1 :
            html += line
    realHTML = html.replace('\n', '<br>')
    soup = BeautifulSoup(realHTML,features="html.parser")
    # Drop non-content elements before extracting the text.
    for script in soup(["script", "style","aside"]):
        script.extract()
    ## text cleaning
    text = " ".join(re.split(r'[\n\t]+', soup.get_text()))
    text = re.sub(r"\s+", " ", text)
    text = re.sub("[^\u4e00-\u9fa5^\s\.\!\:\-\@\#\$\(\)\_\,\;\?^a-z^A-Z^0-9]","",text)
    return text
## NER function using spaCy
def ner(text):
    """Run spaCy NER on *text* and return deduplicated (mention, label) pairs.

    Numeric/temporal labels listed in NER_type are filtered out, and the
    result is post-processed by entity_process().  Note: the set() pass
    makes the ordering of the returned list non-deterministic.
    """
    doc = nlp(text)
    entity = [(X.text, X.label_) for X in doc.ents if X.label_ not in NER_type]
    entity = list(set(entity))
    entity = entity_process(entity)
    return entity
## funtion of entity linking
## link the query result (wikidata url) with each entity
def entity_linking(entity):
    """Link entity mentions to Wikidata ids via Elasticsearch.

    Args:
        entity: iterable of (mention, ner_label) pairs.

    Returns:
        list of (mention, wikidata_id) pairs for mentions that resolved.
    """
    entity_list = []
    for mention, _ in entity:
        # Query Elasticsearch only once per mention: the original called
        # es_search(e) twice (once for the truthiness test, once for the
        # value), doubling the network traffic and risking inconsistent
        # results between the two calls.
        wikidata_id = es_search(mention)
        if wikidata_id:
            entity_list.append((mention, wikidata_id))
    return entity_list
## function that finds a most similar entity
def get_closest_word(es_query, es_dictionary):
    """Return the key of *es_dictionary* most similar to *es_query*.

    Falls back to the first key when difflib finds no close match (or on
    any other failure -- the broad catch is the original fallback contract).
    """
    try:
        # get_close_matches returns best-first; [0] raises IndexError when empty.
        return difflib.get_close_matches(es_query, list(es_dictionary.keys()))[0]
    except:  # noqa: E722 -- keep the original catch-all fallback behaviour
        return list(es_dictionary.keys())[0]
### function that requests elasticsearch to get the candidate
def es_search(es_query):
    """Resolve *es_query* to a Wikidata entity id via the wikidata_en ES index.

    Queries the Elasticsearch cluster for candidate entities, builds a
    label -> id map, then picks the candidate whose label is closest to the
    query.  Returns the Wikidata id, or an empty dict on any failure
    (NOTE(review): the falsy empty-dict return is what callers test for).
    """
    def search(query):
        # One query_string search, top 20 hits; each hit's schema_name is the
        # candidate label and _id is the Wikidata entity id.
        e = Elasticsearch(["http://fs0.das5.cs.vu.nl:10010/"])
        p = { "from" : 0, "size" : 20, "query" : { "query_string" : { "query" : query }}}
        response = e.search(index="wikidata_en", body=json.dumps(p))
        id_labels = {}
        if response:
            for hit in response['hits']['hits']:
                label = hit['_source']['schema_name']
                id = hit['_id']
                id_labels.setdefault(id, set()).add(label)
        return id_labels
    d = {}
    try:
        # Invert to label -> id, then disambiguate by string similarity.
        for entity, labels in search(es_query.lower()).items():
            d[list(labels)[0]] = entity
        res = get_closest_word(es_query,d)
        return d[res]
    except Exception as e:
        print(e)
        return d
# The goal of this function process the webpage and returns a list of labels -> entity ID
def find_labels(payload):
    """Yield (trec_id, label, wikidata_id) triples for one WARC record.

    Extracts the record's WARC-TREC-ID, converts the HTML payload to text,
    runs NER, links each entity to Wikidata, and yields only triples where
    all three fields are truthy.  Any processing error silently ends the
    record (best-effort over a large noisy corpus).
    """
    if payload == '':
        return
    # The payload contains the webpage source plus WARC meta-data; the record
    # ID is on the line starting with KEYNAME.
    key = None
    for line in payload.splitlines():
        if line.startswith(KEYNAME):
            key = line.split(': ')[1]
            break
    try:
        # Step 1: strip the HTML and retrieve the text.
        text = html_to_text(payload)
        # Step 2: recognize the entities in the text.
        entity = ner(text)
        # Step 3: disambiguate each entity against Wikidata.
        result = entity_linking(entity)
        for label, wikidata_id in result:
            if key and label and wikidata_id:
                yield key, label, wikidata_id
    except:
        pass
def split_records(stream):
    """Yield one payload chunk per 'WARC/1.0' delimiter line in *stream*.

    The chunk before the first delimiter (possibly empty) and the trailing
    chunk after the last delimiter are both yielded.
    """
    buffered = []
    for line in stream:
        if line.strip() == "WARC/1.0":
            yield ''.join(buffered)
            buffered = []
        else:
            buffered.append(line)
    yield ''.join(buffered)
# Script entry point: stream a gzipped WARC dump and print one
# "<trec-id>\t<Label>\t<wikidata-id>" line per linked entity.
if __name__ == '__main__':
    import sys
    try:
        _, INPUT = sys.argv
    except Exception as e:
        print('Usage: python starter-code.py INPUT')
        sys.exit(0)
    with gzip.open(INPUT, 'rt', errors='ignore') as fo:
        for record in split_records(fo):
            for key, label, wikidata_id in find_labels(record):
                # print(key + '\t' + label + '\t' + wikidata_id)
                print(key + '\t' + label_process(label) + '\t' + f"{wikidata_id}")
| SummerXIATIAN/wdps_asg1_group27 | code_es.py | code_es.py | py | 5,220 | python | en | code | 0 | github-code | 36 |
40943557380 | from ruamel.yaml import YAML
from datetime import datetime
from common import *
import sys
def main():
    """Print the race schedule as Discord timestamp markup (<t:..:d>/<t:..:t>/<t:..:R>).

    Reads the YAML schedule (data/races.yaml, or the path given as the first
    CLI argument), groups races by day, and prints one line per race.
    """
    fn = 'data/races.yaml'
    if len(sys.argv) > 1:
        fn = sys.argv[1]
    yaml = YAML(typ='safe')
    with open(fn, 'r') as fi:
        ydat = yaml.load(fi)
    prev_date = None
    for race in ydat['races']:
        # Interpret the naive ISO datetime in the race timezone (RACETZ).
        dt = datetime.fromisoformat(race['datetime']).replace(tzinfo=RACETZ)
        ts = int(dt.timestamp())
        desc = race['desc']
        # Emit a day header whenever the date changes.
        if prev_date != dt.date():
            day = dt.strftime('%A')
            print('')
            print(f'{day} <t:{ts}:d>')
            prev_date = dt.date()
        print(f'<t:{ts}:t> (<t:{ts}:R>) - {desc}')
# Script entry point.
if __name__ == '__main__':
    main()
| pkdawson/workrobot | print_schedule.py | print_schedule.py | py | 717 | python | en | code | 0 | github-code | 36 |
9454038158 | # coding: utf-8
# Credits : https://gist.github.com/jason-w/4969476
from typing import List, Dict, Any
from mongoengine import (
Document,
ListField,
EmbeddedDocumentField,
DictField,
EmbeddedDocument,
FloatField,
DateTimeField,
ComplexDateTimeField,
IntField,
BooleanField,
ObjectIdField,
DecimalField,
StringField,
QuerySet
)
def query_to_dict(query_set: QuerySet) -> List[Dict[str, str]]:
    """Serialise every document of a query result to a plain dict.

    Args:
        query_set (QuerySet): the query result to convert.

    Returns:
        List[Dict[str, str]]: one dict per document, in iteration order.
    """
    return list(map(mongo_to_dict, query_set))
def mongo_to_dict(obj, exclude_fields: List[str] = []) -> Dict[str, str]:
    """Returns the Dict format of the Document instance given in parameter.
    Args:
        obj (Deferred): the document queried from database to convert into dict.
        exclude_fields (List[str], optional): list of fields to exclude in the
    output dict. Defaults to [].
    Returns:
        Dict[str, str]: output dict.
    """
    # NOTE(review): the mutable default [] is benign here (the list is only
    # read, never mutated), but `= None` + normalisation would be safer.
    return_data = list()
    if obj is None:
        return None
    if isinstance(obj, Document):
        # Top-level documents expose their ObjectId as the 'id' entry.
        return_data.append(("id",str(obj.id)))
    for field_name in obj._fields:
        if field_name in exclude_fields:
            continue
        if field_name in ("id",):
            continue
        data = obj._data[field_name]
        # Dispatch on the declared field type: lists and embedded documents
        # recurse; dicts pass through; everything else is converted by
        # mongo_to_python_type().
        if isinstance(obj._fields[field_name], ListField):
            return_data.append((field_name, list_field_to_dict(data)))
        elif isinstance(obj._fields[field_name], EmbeddedDocumentField):
            return_data.append((field_name, mongo_to_dict(data,[])))
        elif isinstance(obj._fields[field_name], DictField):
            return_data.append((field_name, data))
        else:
            return_data.append(
                (field_name, mongo_to_python_type(obj._fields[field_name],data))
            )
    return dict(return_data)
def list_field_to_dict(list_field: List) -> List[str]:
    """Converts mongo db output list fields as a list of str.
    Args:
        list_field (List): list to convert.
    Returns:
        List[str]: output list.
    """
    return_data = []
    for item in list_field:
        # Embedded documents recurse into mongo_to_dict; scalars are
        # converted item-by-item (the item doubles as its own "field").
        if isinstance(item, EmbeddedDocument):
            return_data.append(mongo_to_dict(item,[]))
        else:
            return_data.append(mongo_to_python_type(item,item))
    return return_data
def mongo_to_python_type(field: str, data: Any):
    """Convert the field into str depending on the field type.
    Args:
        field (str): field type.
        data (Any): Associated data to convert.
    Returns:
        str: data converted.
    """
    # NOTE(review): despite the docstring, Float/Int/Boolean/Decimal fields
    # return native Python values, not str -- presumably intentional for
    # JSON serialisation; confirm against callers.
    if isinstance(field, DateTimeField):
        return str(data.isoformat())
    elif isinstance(field, ComplexDateTimeField):
        return field.to_python(data).isoformat()
    elif isinstance(field, StringField):
        return str(data)
    elif isinstance(field, FloatField):
        return float(data)
    elif isinstance(field, IntField):
        return int(data)
    elif isinstance(field, BooleanField):
        return bool(data)
    elif isinstance(field, ObjectIdField):
        return str(data)
    elif isinstance(field, DecimalField):
        return data
    else:
        return str(data) | nicolasjlln/lbc-challenge | app/database/utils.py | utils.py | py | 3,396 | python | en | code | 0 | github-code | 36 |
43639712257 | import os
from hashlib import md5
from bson.objectid import ObjectId
import datetime as dt
import re
def all_files(path):
    """Recursively collect all .xlsx files under `path`.

    Each result is a dict with a deterministic ObjectId derived from the
    md5 of the absolute path (first 24 hex chars) and the absolute path.
    """
    collected = []
    with os.scandir(path) as entries:
        for entry in entries:
            full_path = os.path.abspath(entry)
            if entry.is_dir():
                collected.extend(all_files(full_path))
            elif entry.is_file() and os.path.splitext(full_path)[1] == '.xlsx':
                digest = md5(full_path.encode()).hexdigest()[:24]
                collected.append({'_id': ObjectId(digest), 'path': full_path})
    return collected
def week_dates(string):
    """Extract the (start, end) datetimes from the first two MM-DD-YYYY
    tokens found in `string`."""
    tokens = re.findall(r'\d+-\d+-\d+', string)

    def to_datetime(token):
        # tokens are month-day-year, e.g. '07-31-2016'
        month, day, year = (int(part) for part in token.split('-'))
        return dt.datetime(year=year, month=month, day=day)

    return to_datetime(tokens[0]), to_datetime(tokens[1])
def test_week_dates():
    """Sanity-check week_dates against a known report header line."""
    start, end = week_dates('Week 31 (Q3) From: 07-31-2016 To: 08-06-2016')
    assert start == dt.datetime(year=2016, month=7, day=31)
    assert end == dt.datetime(year=2016, month=8, day=6)
# Run the self-test only when executed directly, not on import.
if __name__ == '__main__':
    test_week_dates()
| blry/docker-flask-mongodb-uwsgi-nginx | parser/project/utils.py | utils.py | py | 1,316 | python | en | code | 3 | github-code | 36 |
12834483142 | import sys
import math
# In Python a hash map is called a dictionary.
# Creating one: name = {key1: value1, key2: value2, key3: value3}
# Read n candidate words, then the available letter pool.
n = int(input())
dictionary = []
for i in range(n):
    word = input()
    dictionary.append(word)
letters = input()
max_score = 0  # best word score found so far
max_score_word = ""  # word that achieved max_score
def is_word_fessible(word, letters):
    """Return True if `word` can be assembled from the letter pool `letters`.

    Counts each multiset once, O(len(word) + len(letters)), instead of the
    original per-character str.count calls which were O(n^2).
    """
    from collections import Counter  # local import keeps the module header untouched
    needed = Counter(word)
    available = Counter(letters)
    # every needed letter must be available in sufficient quantity
    return all(available[char] >= count for char, count in needed.items())
def get_char_score(char):
    """Return the Scrabble point value of a single lowercase letter.

    Raises KeyError for characters outside a-z. (The unused `score = 0`
    local from the original has been removed.)
    """
    letters_scores = {'a': 1, 'b' : 3, 'c' : 3, 'd' : 2, 'e' : 1,
                    'f' : 4, 'g' : 2, 'h' : 4, 'i' : 1, 'j' : 8,
                    'k' : 5, 'l' : 1, 'm' : 3, 'n' : 1, 'o' : 1,
                    'p' : 3, 'q' : 10, 'r' : 1, 's' : 1, 't' : 1,
                    'u' : 1, 'v' : 4, 'w' : 4, 'x' : 8, 'y' : 4, 'z' : 10}
    return letters_scores[char]
def get_word_score(word):
    """Sum the per-letter Scrabble scores of `word`."""
    return sum(get_char_score(char) for char in word)
# Score every buildable word and remember the best-scoring one.
for word in dictionary:
    if is_word_fessible(word, letters):
        score = get_word_score(word)
        if score > max_score:
            max_score = score
            max_score_word = word
print(max_score_word)
| ohjooyeong/codingame | scrabble.py | scrabble.py | py | 1,389 | python | en | code | 0 | github-code | 36 |
1239251599 | from django.shortcuts import render
from django.views.generic import View
from django.http import JsonResponse
from application.chart.models.chart import TopPosts_MH, TopPosts_WH, TopPosts_PVN, TopPosts_RW, TopPosts_BI, TopPosts_ROL, TopPosts_WE
class GetTopPosts(View):
    """Return the top three posts across all brands, ranked by viral ratio."""

    def get(self, request):
        models_map = {
            "WH": TopPosts_WH,
            "MH": TopPosts_MH,
            "PVN": TopPosts_PVN,
            "RW": TopPosts_RW,
            "BI": TopPosts_BI,
            "ROL": TopPosts_ROL,
            "WE": TopPosts_WE
        }
        # Flatten every brand's rows into plain dicts.
        posts = [
            dict(viral_unique=row.viral_unique,
                 unique=row.unique,
                 link=row.link)
            for model_cls in models_map.values()
            for row in model_cls.objects.all()
        ]
        # Ascending sort by viral ratio, then reverse -> descending order
        # (kept as two steps to preserve the original tie ordering).
        posts.sort(key=lambda post: post['viral_unique'] / post['unique'])
        posts.reverse()
        return JsonResponse(posts[:3], safe=False)
14838347473 | import re
import nltk
import spacy
from nltk import Tree
from nltk.corpus import brown
sentence = "A solution of piperidin-4-ol (100 mg, 0.989 mmol) and 3-((phenylsulfonyl)methylene)oxetane (prepared according to a published literature procedure: Wuitschik et al. J. Med. Chem. 53(8) 3227-3246, 2010, 416 mg, 1.977 mmol) in methanol (5 mL) was heated at 50° C. for 20 h. Solvent was evaporated in vacuo and the crude product was purified by flash chromatography on silica gel using an automated ISCO system (40 g column, eluting with 0-8% 2 N ammonia in methanol/dichloromethane). 1-(3-((phenylsulfonyl)methyl)oxetan-3-yl)piperidin-4-ol (300 mg) was obtained as a colorless oil. If the temperature exceed 64 degrees when heating methanol it will result in 3% decrease in the final products."
def clean_sentence(example):
    """Strip millimole quantities such as '0.989 mmol' from the text."""
    # One regex substitution replaces the original findall + str.replace loop.
    return re.sub(r"\d+\.\d* mmol", "", example)
sentence = clean_sentence(sentence)
def tok_format(tok, is_quantity=False, is_unit=False):
    """Render a token as 'text_TAG', using QNTTY/UNIT markers when flagged.

    `is_quantity` wins over `is_unit` when both are set, matching the
    original check order.
    """
    if is_quantity:
        suffix = "QNTTY"
    elif is_unit:
        suffix = "UNIT"
    else:
        suffix = tok.tag_
    return "_".join([tok.orth_, suffix])
def to_nltk_tree(node, is_quantity=False, is_unit=False):
    """Recursively convert a spaCy dependency node into an nltk.Tree.

    Children of a token whose text appears in the module-level `units_list`
    are rendered with the QNTTY marker; the unit token itself gets UNIT.
    NOTE(review): the `is_unit` parameter is accepted but never read in the
    body (the unit check uses `node.text in units_list`) — confirm intent.
    """
    if node.n_lefts + node.n_rights > 0:
        # internal node: keep propagating the quantity flag to this node's label
        if is_quantity:
            return Tree(tok_format(node, is_quantity=True),
                        [to_nltk_tree(child) for child in node.children])
        if node.text in units_list:
            # unit token: mark it UNIT and mark its children as quantities
            return Tree(tok_format(node, is_unit=True), [to_nltk_tree(child, is_quantity=True) for child in node.children])
        return Tree(tok_format(node), [to_nltk_tree(child) for child in node.children])
    else:
        # leaf: only purely numeric leaves keep the quantity marker
        if is_quantity and node.text.isnumeric():
            return Tree(tok_format(node, is_quantity=True),
                        [to_nltk_tree(child) for child in node.children])
        return tok_format(node)
# Build the spaCy pipeline and parse the whitespace-normalized sentence.
parser = spacy.load("en_core_web_sm")
doc = parser(' '.join(sentence.split()))
units_list = ['mg', 'g', 'gr', 'gram', 'grams', 'kg', 'milligrams', 'milligram', 'mmol', 'ml', 'mL', 'L', 'millilitre']
# Re-tag tokens: unit words -> 'UNT', numbers immediately before a unit ->
# 'QNTY', everything else keeps its spaCy POS tag.
uni_tags = []
for sent in doc.sents:
    for idx, token in enumerate(sent):
        if token.text in units_list:
            uni_tags.append((token.text, 'UNT'))
        elif token.text.isnumeric() and idx < len(sent) - 1 and sent[idx + 1].text in units_list:
            uni_tags.append((token.text, 'QNTY'))
        else:
            uni_tags.append((token.text, token.tag_))
# t0 = nltk.DefaultTagger('NN')
# t1 = nltk.UnigramTagger(uni_tags, backoff=t0)
# Pretty-print the dependency tree of each sentence (side-effect only;
# the list built by the comprehension is discarded).
[to_nltk_tree(sent.root).pretty_print() for sent in doc.sents]
# def tag_sentence(sentence):
#     for word in se
| arrafmousa/generate_code | custom_tags.py | custom_tags.py | py | 2,826 | python | en | code | 0 | github-code | 36 |
1836493461 |
class Solution:
    """Collection of quicksort variants over a list of numbers."""

    def __init__(self,nums):
        # Keep a reference to the list; lomuto_partition/quickSort3 sort it in place.
        self.nums =nums
    def lomuto_partition(self,low,high):
        """Lomuto partition around self.nums[high]; returns the pivot's final index."""
        pivot = self.nums[high]
        i = (low - 1)
        for j in range(low, high):
            if (self.nums[j] <= pivot):
                i += 1
                self.nums[i],self.nums[j] = self.nums[j],self.nums[i]
        self.nums[i+1],self.nums[high] = self.nums[high],self.nums[i+1]
        return (i+1)
    def quickSort3(self,low,high):
        """In-place quicksort of self.nums using the Lomuto partition.

        NOTE(review): the print below sits outside the `if`, so the list is
        printed on every recursive call — confirm this debug output is wanted.
        """
        if( low < high):
            pi = self.lomuto_partition(low,high)
            self.quickSort3(low, pi-1)
            self.quickSort3(pi+1 ,high)
        print(self.nums)
    def quickSort2(self,nums):
        """Functional quicksort: returns a new sorted list, input untouched."""
        if len(nums)<=1: return nums
        smaller,equal,lager = [],[],[]
        pivot = nums[0]
        for x in nums:
            if x < pivot: smaller.append(x)
            elif x == pivot: equal.append(x)
            else: lager.append(x)
        return self.quickSort2(smaller) + equal + self.quickSort2(lager)
    def quickSort(self,nums,left,right):
        """In-place quicksort of `nums` using a two-pointer partition scheme."""
        if (left >= right):
            return
        p = nums[left]
        i = left
        j = right
        while (i != j):
            # shrink from the right until a value <= pivot is found
            while (j > i) and nums[j] > p:
                j -= 1
            nums[i],nums[j] = nums[j],nums[i]
            # grow from the left until a value > pivot is found
            while (i < j) and nums[i] <= p:
                i += 1
            nums[i],nums[j] = nums[j],nums[i]
        self.quickSort(nums,left,i-1)
        self.quickSort(nums,i+1,right)
        # print(nums)
    def quickArraySort(self,nums):
        """Convenience wrapper: quickSort over the whole list."""
        left = 0
        right = len(nums) - 1
        self.quickSort(nums,left,right)
if __name__ == "__main__":
arr = [21,38,29,17,4,25,11,32,9]
solution = Solution(arr)
solution.quickSort3(0,len(arr)-1)
| zideajang/python_tuts | data_struture/quick_sort.py | quick_sort.py | py | 1,775 | python | en | code | 0 | github-code | 36 |
6708836597 | import unittest
# @param {Integer[]} nums
# @param {Integer} target
# @return {Integer[]}
class Solution:
    """Two-sum solvers: return the indices of two distinct elements whose
    values add up to `target`, or [-1, -1] when no such pair exists."""

    def twoSum1(self, nums, target):
        """Single-pass hash map of value -> index.

        Time: O(n)
        Space: O(n)
        """
        seen = {}
        for i in range(len(nums)):
            complement = target - nums[i]
            if complement in seen:
                return [seen[complement], i]
            else:
                seen[nums[i]] = i
        return [-1, -1]

    def twoSum2(self, nums, target):
        """Brute force over ordered index pairs (i < j).

        Bug fix: the inner loop previously started at 1 instead of i + 1,
        which could pair an element with itself (e.g. [1, 3] with target 6
        returned [1, 1]) and re-checked pairs with j <= i.

        Time: O(n^2)
        Space: O(1)
        """
        for i in range(len(nums)):
            for j in range(i + 1, len(nums)):
                if nums[i] + nums[j] == target:
                    return [i, j]
        return [-1, -1]
class Test(unittest.TestCase):
    """Unit tests covering both two-sum implementations."""

    def setUp(self):
        # fresh solver instances per test
        self.s1 = Solution()
        self.s2 = Solution()
    def tearDown(self):
        pass
    def test_twoSum(self):
        self.assertEqual(self.s1.twoSum1([2, 7, 11, 15, 9], 9), [0, 1])
        self.assertEqual(self.s1.twoSum1([3, 2, 4], 6), [1, 2])
        self.assertEqual(self.s2.twoSum2([3, 3], 6), [0, 1])
        self.assertEqual(self.s2.twoSum2([3, 3, 4, 1, 0, 6], 100), [-1, -1])
if __name__ == '__main__':
    unittest.main()
| tanveer/leetcode-python | 0001_two_sum.py | 0001_two_sum.py | py | 1,261 | python | en | code | 1 | github-code | 36 |
20139269542 | '''
o/ Iae pessoal, Tudo bem?
Espero que sim :)
Bom este é o exercício 1018 do URI, modulo iniciante
Leia um valor inteiro. A seguir, calcule o menor número de notas possíveis (cédulas) no qual o valor pode ser decomposto.
As notas consideradas são de 100, 50, 20, 10, 5, 2 e 1. A seguir mostre o valor lido e a relação de notas necessárias.
<-- Entrada
O arquivo de entrada contém um valor inteiro N (0 < N < 1000000).
--> Saída
Imprima o valor lido e, em seguida, a quantidade mínima de notas de cada tipo necessárias, conforme o exemplo fornecido.
Não esqueça de imprimir o fim de linha após cada linha, caso contrário seu programa apresentará a mensagem: “Presentation Error”.
'''
valor = int(input())
cedula = [100, 50, 20, 10, 5, 2, 1]  # bill denominations, largest first
ncedula = [0, 0, 0, 0, 0, 0, 0]  # count needed of each denomination
valor_pre_calculado = 0  # amount already covered by larger denominations
print(valor)
for index in range(7):  # one pass per denomination, from 100 down to 1
    # `index` walks `cedula` and `ncedula` in lockstep; int() keeps only the
    # whole number of bills of the current denomination that fit in the
    # still-uncovered remainder.
    ncedula[index] = int((valor - valor_pre_calculado) / cedula[index])  # bills of this value
    valor_pre_calculado = valor_pre_calculado + ncedula[index] * cedula[index]  # update covered total
    print("{} nota(s) de R$ {},00".format(ncedula[index], cedula[index]))
'''
Caso ainda esteja dificil de entender o laço, tenho outro código fonte que não uso for
mas é mais complicado de entender hehe XD
tae para quem quiser ver
valor = int(input())
n100 = int(valor/100)
n50 = int((valor - n100*100)/50)
n20 = int((valor - (n100*100 + n50*50))/20)
n10 = int((valor - (n100*100 + n50*50 + n20*20))/10)
n5 = int((valor - (n100*100 + n50*50 + n20*20 + n10*10))/5)
n2 = int((valor - (n100*100 + n50*50 + n20*20 + n10*10 + n5*5))/2)
n1 = int((valor - (n100*100 + n50*50 + n10*20 + n10*10 + n5*5 + n2*2))/1)
print(valor)
print("%i nota(s) de R$ 100,00" %n100)
print("%i nota(s) de R$ 50,00" %n50)
print("%i nota(s) de R$ 20,00" %n20)
print("%i nota(s) de R$ 10,00" %n10)
print("%i nota(s) de R$ 5,00" %n5)
print("%i nota(s) de R$ 2,00" %n2)
print("%i nota(s) de R$ 1,00" %n1)
no fim, da no msm, mais consome mais memória '-'
Bons estudos e até o/
'''
| ronaldocoding/ipc-python | desafios/iniciante/cedulas.py | cedulas.py | py | 2,903 | python | pt | code | 7 | github-code | 36 |
72774806185 | from typing import Any, Dict, List, TypedDict
import torch as th
from tango.integrations.torch import DataCollator
from tango.integrations.transformers import Tokenizer
from dreambooth.steps.transform_data import PreprocessedExample
class BatchExample(TypedDict):
    """One collated batch: padded token ids plus stacked image tensors."""
    input_ids: th.Tensor
    pixel_values: th.Tensor
@DataCollator.register("custom_collator")
class CustomCollator(DataCollator[PreprocessedExample]):
def __init__(self, tokenizer: Tokenizer, is_prior_preservation: bool) -> None:
super().__init__()
self.tokenizer = tokenizer
self.is_prior_preservation = is_prior_preservation
def __call__(self, items: List[PreprocessedExample]) -> BatchExample:
input_ids = [item["instance_prompt_ids"] for item in items]
pixel_values_list = [item["instance_images"] for item in items]
if self.is_prior_preservation:
input_ids += [item["class_prompt_ids"] for item in items] # type: ignore
pixel_values_list += [item["class_images"] for item in items] # type: ignore
pixel_values = th.stack(pixel_values_list)
pixel_values = pixel_values.to(memory_format=th.contiguous_format).float()
input_ids = self.tokenizer.pad(
{"input_ids": input_ids},
padding="max_length",
return_tensors="pt",
max_length=self.tokenizer.model_max_length,
).input_ids
batch: BatchExample = {
"input_ids": input_ids,
"pixel_values": pixel_values,
}
return batch
| shunk031/tango-dreambooth | dreambooth/integrations/torch/data_collator.py | data_collator.py | py | 1,556 | python | en | code | 0 | github-code | 36 |
718069407 |
from django.db import models
from django.contrib.auth.models import User
from django.db.models.query import ModelIterable
from django.db.models.signals import post_save, post_init
import requests
import random
def sendNotification(usertoken, title, body):
    """Send a push notification to one device via the FCM legacy HTTP API.

    Bug fix: the payload previously put `title` into the "body" field and
    `body` into the "title" field; they are now mapped correctly.
    """
    userdata = {
        "to": str(usertoken),
        "notification": {
            "title": str(title),
            "body": str(body),
            "content_available": True,
            "priority": "high"
        }
    }
    # SECURITY NOTE(review): the FCM server key is hard-coded; it should be
    # moved to settings/environment configuration and rotated.
    headers = {
        "Authorization": "key=AAAAwVFO9Fw:APA91bHymQMWRKlGHZOVMxp4_-0HA5vOlybPEpCU7NHOs1v9lkkd5JrtYzsU_3UYH5-nxcSZYA9xUOVYfpyKPE_YFdL2BgCKUvbIBBNuqfvIAOcbjLZ6eQ7o4SCAFG1UGBp8X7JnB2HI",
        "Content-Type": "application/json"
    }
    # (the response object was previously bound to an unused variable)
    requests.post(
        'https://fcm.googleapis.com/fcm/send', json=userdata, headers=headers)
class CustomerProfile(models.Model):
    """Extra profile data attached to a customer's user account."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, blank=True)
    # aadharNo = models.IntegerField(default=0)
    phoneNo = models.CharField(max_length=10, blank=True)
    def __str__(self):
        return "%s's profile" % self.user
class DeliveryProfile(models.Model):
    """Extra profile data attached to a delivery rider's user account."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, blank=True)
    # aadharNo = models.IntegerField(default=0)
    phoneNo = models.CharField(max_length=10, blank=True)
    def __str__(self):
        return "%s's profile" % self.user
class ShopLocality(models.Model):
    """Named locality/area that shops and orders can belong to."""
    name = models.CharField(max_length=500)
    def __str__(self):
        return self.name
class Shop(models.Model):
    """A vendor's shop: location, contact details and the current offer."""
    vendor = models.ForeignKey(
        User, on_delete=models.CASCADE, blank=True, null=True)
    name = models.CharField(max_length=500)
    currentOffer = models.FloatField()
    ShopImg = models.CharField(max_length=500, blank=True,
                               default="https://images.unsplash.com/photo-1498837167922-ddd27525d352?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=1050&q=80.jpg")
    locality = models.ForeignKey(
        ShopLocality, on_delete=models.CASCADE, null=True)
    latitude = models.FloatField(null=True)
    longitude = models.FloatField(null=True)
    addressinwords = models.CharField(
        max_length=1000, default="")
    phoneNo = models.CharField(max_length=10, blank=True)
    # NOTE(review): max_length=10 looks too short for an email address — confirm.
    email = models.CharField(max_length=10, blank=True)
    date = models.DateField(auto_now_add=True, null=True)
    time = models.TimeField(auto_now_add=True, null=True)
    def __str__(self):
        return self.name
class ProductCategory(models.Model):
    """Category a product can be filed under."""
    name = models.CharField(max_length=500)
    def __str__(self):
        return self.name
class Product(models.Model):
    """A purchasable item offered by a shop."""
    name = models.CharField(max_length=500)
    price = models.FloatField()
    shop = models.ForeignKey(Shop, on_delete=models.CASCADE, null=True)
    category = models.ForeignKey(
        ProductCategory, on_delete=models.CASCADE, null=True)
    productImage = models.CharField(
        max_length=500, default="https://images.unsplash.com/photo-1458642849426-cfb724f15ef7?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=1050&q=80")
    def __str__(self):
        return self.name
class PaymentCategory(models.Model):
    """Payment method label (e.g. cash, card)."""
    name = models.CharField(max_length=500)
    def __str__(self):
        return self.name
class CustomerOrder(models.Model):
    """A customer's order; push notifications are sent on status transitions."""
    orderFor = models.ForeignKey(
        User, on_delete=models.CASCADE, blank=True)
    product = models.ManyToManyField(
        Product, blank=True)
    shop = models.ForeignKey(Shop, on_delete=models.CASCADE, null=True)
    latitude = models.FloatField(null=True)
    longitude = models.FloatField(null=True)
    date = models.DateField(auto_now_add=True, null=True)
    time = models.TimeField(auto_now_add=True, null=True)
    orderImg = models.CharField(
        max_length=500, null=True, default="https://images.unsplash.com/photo-1498837167922-ddd27525d352?ixid=MXwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHw%3D&ixlib=rb-1.2.1&auto=format&fit=crop&w=1050&q=80.jpg")
    status = models.CharField(max_length=1000, null=True)
    orderPrice = models.FloatField(default=100)
    deliveryboy = models.ForeignKey(
        DeliveryProfile, on_delete=models.CASCADE, null=True, blank=True)
    locality = models.ForeignKey(
        ShopLocality, on_delete=models.CASCADE, null=True, blank=True)
    addressinwords = models.CharField(
        max_length=1000, default="")
    typeOfPayment = models.ForeignKey(
        PaymentCategory, on_delete=models.CASCADE, null=True)
    OTP = models.IntegerField(null=True, default=0)  # 0 means "not yet issued"
    payment_status = models.CharField(
        max_length=100, null=True, blank=True)
    @staticmethod
    def post_save(sender, **kwargs):
        """post_save signal handler: notify customer/vendor on status change.

        NOTE(review): the bare `except: pass` blocks silently swallow failures,
        and `usertoken`/`vendortoken` may be unbound when the token lookup
        fails — only the outer try prevents a visible NameError. Worth
        tightening to specific exceptions.
        """
        instance = kwargs.get('instance')
        if instance.previous_status != instance.status or instance.OTP == 0:
            print("status changed")
            try:
                try:
                    user = FireabaseToken.objects.filter(
                        user=instance.orderFor).first()
                    usertoken = user.token
                    vendor = FireabaseToken.objects.filter(
                        user=instance.shop.vendor).first()
                    vendortoken = vendor.token
                except:
                    pass
                status = instance.status
                if instance.OTP == 0:
                    # first save: mint a 4-digit OTP and notify both parties
                    instance.OTP = random.randint(1000, 9999)
                    instance.save()
                    sendNotification(vendortoken, 'New Order',
                                     "A new order has been placed")
                    sendNotification(usertoken, 'Order Placed',
                                     "Order has been placed awaiting for the restaurant response")
                elif status == "shopreject":
                    sendNotification(
                        usertoken, 'Order Staus', "Your order has been denied")
                elif status == "pending":
                    sendNotification(usertoken, 'Order Status',
                                     "Your order is beign prepared")
                elif status == "inorder":
                    sendNotification(usertoken, 'Order Staus',
                                     "Your order is on the way")
                elif status == "delivered":
                    sendNotification(usertoken, 'Order Status',
                                     "You have recived your order")
            except:
                pass
    @staticmethod
    def remember_status(sender, **kwargs):
        """post_init signal handler: stash the loaded status for change detection."""
        instance = kwargs.get('instance')
        instance.previous_status = instance.status
# Wire both handlers to this model's lifecycle signals.
post_save.connect(CustomerOrder.post_save, sender=CustomerOrder)
post_init.connect(CustomerOrder.remember_status, sender=CustomerOrder)
class ProductQuanities(models.Model):
    """Per-order quantity of one product (class name kept as-is; sic 'Quanities')."""
    product = models.ForeignKey(
        Product, on_delete=models.CASCADE, blank=True)
    quantity = models.IntegerField()
    orderID = models.ForeignKey(
        CustomerOrder, on_delete=models.CASCADE, blank=True, null=True)
class FireabaseToken(models.Model):
    """FCM device token per user (class name kept as-is; sic 'Fireabase')."""
    token = models.CharField(max_length=500)
    user = models.OneToOneField(
        User, on_delete=models.CASCADE, blank=True, null=True)
    def __str__(self):
        return self.token
class StoreImage(models.Model):
    """Uploaded store image."""
    image = models.ImageField()
    def __str__(self):
        return self.image.url
| haydencordeiro/FoodDeliveryDjango | food/models.py | models.py | py | 7,454 | python | en | code | 1 | github-code | 36 |
7350043900 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import decorators
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
@decorators.services_required("neutron")
class TestNetworks(helpers.TestCase):
NETWORK_NAME = helpers.gen_random_resource_name("network")
SUBNET_NAME = helpers.gen_random_resource_name("subnet")
SUBNET_NAME_2 = helpers.gen_random_resource_name("subnet")
@decorators.skip_new_design
def test_private_network_create(self):
"""tests the network creation and deletion functionalities:
* creates a new private network and a new subnet associated with it
* verifies the network appears in the networks table as active
* deletes the newly created network
* verifies the network does not appear in the table after deletion
"""
networks_page = self.home_pg.go_to_network_networkspage()
networks_page.create_network(self.NETWORK_NAME, self.SUBNET_NAME)
self.assertTrue(
networks_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
networks_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(networks_page.is_network_present(self.NETWORK_NAME))
self.assertTrue(networks_page.is_network_active(self.NETWORK_NAME))
networks_page.delete_network(self.NETWORK_NAME)
self.assertTrue(
networks_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
networks_page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(networks_page.is_network_present(self.NETWORK_NAME))
def test_subnet_add(self):
"""This test checks add subnet functionality
Steps:
1) Login to Horizon dashboard as demo user
2) Go to Project -> Network -> Networks
3) Create network with subnet
4) Add subnet for created network
5) Check that subnet is created (on NetworkOverview page)
6) Delete created subnet
7) Check that subnet was deleted
8) Check that subnet that was added during creation
of network is still presented
9) Delete created network
"""
networks_page = self.home_pg.go_to_network_networkspage()
networks_page.create_network(self.NETWORK_NAME, self.SUBNET_NAME)
self.assertTrue(
networks_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
networks_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(networks_page.is_network_present(self.NETWORK_NAME))
self.assertTrue(networks_page.is_network_active(self.NETWORK_NAME))
overview = networks_page.add_subnet(self.NETWORK_NAME,
self.SUBNET_NAME_2,
network_address='10.50.0.0/16')
self.assertTrue(overview.is_subnet_present(self.SUBNET_NAME))
self.assertTrue(overview.is_subnet_present(self.SUBNET_NAME_2))
overview.delete_subnet(self.SUBNET_NAME_2)
self.assertTrue(
networks_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
networks_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(overview.is_subnet_present(self.SUBNET_NAME))
self.assertFalse(overview.is_subnet_present(self.SUBNET_NAME_2))
networks_page = overview.delete_network()
self.assertFalse(networks_page.is_network_present(self.NETWORK_NAME))
def test_create_distributed_router(self):
router_name = helpers.gen_random_resource_name("router")
routers_page = self.home_pg.go_to_network_routerspage()
routers_page.create_router(router_name,
admin_state_up=None,
external_network=None)
self.assertTrue(
routers_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(routers_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(routers_page.is_router_present(router_name))
self.assertTrue(routers_page.is_router_active(router_name))
self.home_pg.log_out()
self.home_pg = self.login_pg.login(self.ADMIN_NAME,
self.ADMIN_PASSWORD)
self.home_pg.change_project(self.ADMIN_PROJECT)
routers_page = self.home_pg.go_to_system_routerspage()
def delete_router():
routers_page.delete_router(router_name)
self.assertTrue(
routers_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(
routers_page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(routers_page.is_router_present(router_name))
try:
router_info = routers_page.get_router_info(router_name)
except KeyError as e:
if e.args[0] == 'mode':
routers_page.refresh_page()
delete_router()
self.skipTest("Distributed mode is not supported")
else:
raise
self.assertEqual(router_info['mode'], 'distributed')
delete_router()
class TestAdminNetworks(helpers.AdminTestCase):
    """Integration tests for the admin-panel network workflow."""

    NETWORK_NAME = helpers.gen_random_resource_name("network")
    def test_network_create_delete_from_admin(self):
        """tests the network creation and deletion functionality:

        * creates a new network through Admin panel
        * verifies the network appears in the networks table
        * deletes the newly created network
        * verifies the network does not appear in the table after deletion
        """
        networks_page = self.home_pg.go_to_system_networkspage()
        networks_page.create_network(name=self.NETWORK_NAME,
                                     project=self.HOME_PROJECT)
        self.assertTrue(
            networks_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            networks_page.find_message_and_dismiss(messages.ERROR))
        self.assertTrue(networks_page.is_network_present(self.NETWORK_NAME))
        self.assertTrue(networks_page.is_network_active(self.NETWORK_NAME))
        networks_page.delete_network(self.NETWORK_NAME)
        self.assertTrue(
            networks_page.find_message_and_dismiss(messages.SUCCESS))
        self.assertFalse(
            networks_page.find_message_and_dismiss(messages.ERROR))
        self.assertFalse(networks_page.is_network_present(self.NETWORK_NAME))
| Mirantis/mos-horizon | openstack_dashboard/test/integration_tests/tests/test_networks.py | test_networks.py | py | 7,253 | python | en | code | 7 | github-code | 36 |
21631078978 | import os
import csv
# get the current directory
dir_path = os.getcwd()
# Check if the script has been run before (flag file acts as a run-once guard)
if os.path.exists(os.path.join(dir_path, 'script_has_run.txt')):
    print("The script has already been run.")
    exit(0)
# list all files in the directory
files = [f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f))]
# filter out only the png files and sort them to ensure a stable row order
png_files = sorted([file for file in files if file.endswith('.png')])
# specify the txt files for each column
title_file = 'title.txt'
description_file = 'description.txt'
tags_file = 'tags.txt'
price_file = 'price.txt'
# read data from the txt files (one entry per line, index-aligned with png_files)
with open(title_file, 'r') as file:
    titles = [line.strip() for line in file]
with open(description_file, 'r') as file:
    descriptions = [line.strip() for line in file]
with open(tags_file, 'r') as file:
    tags = [line.strip() for line in file]
with open(price_file, 'r') as file:
    prices = [line.strip() for line in file]
# check that all lists have the same length (exactly one row per image)
if not len(png_files) == len(titles) == len(descriptions) == len(tags) == len(prices):
    print("Error: Not all files have the same number of lines.")
    exit(1)
# specify the csv file you want to write to
csv_file_path = os.path.join(dir_path, 'file_list.csv')
# write the png file names and other information into the csv file
with open(csv_file_path, 'w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(["Title", "Description", "Tags", "Price", "Image Path"])
    for i, png_file in enumerate(png_files):
        writer.writerow([titles[i], descriptions[i], tags[i], prices[i], png_file])
# Write the flag file so subsequent runs exit early
with open(os.path.join(dir_path, 'script_has_run.txt'), 'w') as file:
    file.write('This script has been run.')
print("Script has run successfully and the file 'script_has_run.txt' has been created to prevent it from running again.")
| MaorAviad1/auto-create-csv-from-files-dir | main.py | main.py | py | 1,935 | python | en | code | 0 | github-code | 36 |
44395709703 | import torch
import numpy as np
from book.pytorch.utils.helper import get_mnist_loader
import torch.nn.functional as F
from torch import nn
import matplotlib.pyplot as plt
class ConvDenoiser(nn.Module):
    """Convolutional denoising autoencoder for 1x28x28 images.

    NOTE(review): `encoding_dim` is accepted but never used — presumably a
    leftover from a fully-connected variant; kept for interface compatibility.
    """

    def __init__(self, encoding_dim):
        super(ConvDenoiser, self).__init__()
        # --- encoder: three 3x3 conv layers, 2x2 max-pool after each ---
        self.conv1 = nn.Conv2d(1, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 16, 3, padding=1)
        self.conv3 = nn.Conv2d(16, 8, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        # --- decoder: transpose convs upsample 3 -> 7 -> 14 -> 28 ---
        self.t_conv1 = nn.ConvTranspose2d(8, 8, 3, stride=2)
        self.t_conv2 = nn.ConvTranspose2d(8, 16, 2, stride=2)
        self.t_conv3 = nn.ConvTranspose2d(16, 32, 2, stride=2)
        # final conv collapses depth back to a single channel
        self.conv_out = nn.Conv2d(32, 1, 3, padding=1)

    def forward(self, x):
        # encoder: conv -> relu -> pool, three times
        for encoder_conv in (self.conv1, self.conv2, self.conv3):
            x = self.pool(F.relu(encoder_conv(x)))
        # decoder: transpose convs with relu
        for decoder_conv in (self.t_conv1, self.t_conv2, self.t_conv3):
            x = F.relu(decoder_conv(x))
        # sigmoid keeps the reconstruction in [0, 1]
        return torch.sigmoid(self.conv_out(x))
if __name__ == '__main__':
    """
    used to denoise images quite successfully just by training the network on noisy images
    """
    batch_size = 20
    train_loader, test_loader, valid_loader = get_mnist_loader(batch_size=batch_size, is_norm=False)
    model = ConvDenoiser(encoding_dim=32)
    print(model)
    """comparing pixel values in input and output images, it's best to use a loss that meant for a regression task"""
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    noise_factor = 0.5  # for adding noise to images
    n_epochs = 20
    for epoch in range(1, n_epochs + 1):
        train_loss = 0.0
        for data in train_loader:
            images, _ = data
            # add random noise to the input images
            noisy_imgs = images + noise_factor * torch.randn(*images.shape)
            # clip the images to be between 0 and 1
            noisy_imgs = np.clip(noisy_imgs, 0., 1.)
            optimizer.zero_grad()
            outputs = model(noisy_imgs)
            # the "target" is still the original, not-noisy images
            loss = criterion(outputs, images)
            loss.backward()
            optimizer.step()
            train_loss += loss.item() * images.size(0)
        # print avg training statistics
        train_loss = train_loss / len(train_loader)
        print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch, train_loss))
    # check test
    dataiter = iter(test_loader)
    # NOTE(review): `.next()` is the legacy iterator API; current PyTorch
    # DataLoaders require next(dataiter).
    images, labels = dataiter.next()
    # add noise to the test images
    noisy_imgs = images + noise_factor * torch.randn(*images.shape)
    noisy_imgs = np.clip(noisy_imgs, 0., 1.)
    output = model(noisy_imgs)
    noisy_imgs = noisy_imgs.numpy()  # prep images for display
    # output is resized into a batch of images
    output = output.view(batch_size, 1, 28, 28)
    # use detach when it's an output that requires_grad
    output = output.detach().numpy()
    # plot the first ten input images and then reconstructed images
    fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(25, 4))
    # input images on top row, reconstructions on bottom
    for noisy_imgs, row in zip([noisy_imgs, output], axes):
        for img, ax in zip(noisy_imgs, row):
            ax.imshow(np.squeeze(img), cmap='gray')
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
    plt.show()
| jk983294/morph | book/pytorch/autoencoder/cnn_denoise.py | cnn_denoise.py | py | 4,398 | python | en | code | 0 | github-code | 36 |
74470182182 | import unittest
import mock
import openstack.common.context
from openstack.common.middleware import context
class ContextMiddlewareTest(unittest.TestCase):
    """Tests for openstack.common.middleware.context.ContextMiddleware."""

    def test_process_request(self):
        # process_request must attach the value returned by make_context
        req = mock.Mock()
        app = mock.Mock()
        options = mock.MagicMock()
        ctx = mock.sentinel.context
        with mock.patch.object(context.ContextMiddleware,
                               'make_context',
                               mock.Mock(return_value=ctx)):
            ctx_middleware = context.ContextMiddleware(app, options)
            ctx_middleware.process_request(req)
            self.assertEqual(req.context, ctx)
    def test_make_context(self):
        # default context class is openstack.common.context.RequestContext
        app = mock.Mock()
        options = mock.MagicMock()
        with mock.patch.object(openstack.common.context.RequestContext,
                               '__init__',
                               mock.Mock(return_value=None)) as init:
            ctx_middleware = context.ContextMiddleware(app, options)
            ctx_middleware.make_context(mock.sentinel.arg)
            init.assert_called_with(mock.sentinel.arg)
    def test_make_explicit_context(self):
        # an explicit 'context_class' option overrides the default class
        app = mock.Mock()
        import_class = mock.Mock()
        options = {'context_class': mock.sentinel.context_class}
        with mock.patch('openstack.common.utils.import_class',
                        mock.Mock(return_value=import_class)):
            ctx_middleware = context.ContextMiddleware(app, options)
            ctx_middleware.make_context(mock.sentinel.arg)
            import_class.assert_called_with(mock.sentinel.arg)
class FilterFactoryTest(unittest.TestCase):
def test_filter_factory(self):
global_conf = dict(sentinel=mock.sentinel.global_conf)
app = mock.sentinel.app
target = 'openstack.common.middleware.context.ContextMiddleware'
def check_ctx_middleware(arg_app, arg_conf):
self.assertEqual(app, arg_app)
self.assertEqual(global_conf['sentinel'], arg_conf['sentinel'])
return mock.DEFAULT
with mock.patch(target,
mock.Mock(return_value=mock.sentinel.ctx)) as mid:
mid.side_effect = check_ctx_middleware
filter = context.filter_factory(global_conf)
self.assertEqual(filter(app), mock.sentinel.ctx)
| emonty/openstack-common | tests/unit/middleware/test_context.py | test_context.py | py | 2,329 | python | en | code | 1 | github-code | 36 |
40851263636 | import json
def help():
helpprint = print("""
addcoins: plus Your coins.
minuscoins: minus Your coins.
help: shows this list.
coins: Shows how many coins do you have
""")
main()
def checkbalance():
with open('coins.json','r') as f:
get_balance = json.loads(f.read())
print(f"You Have {get_balance['coins']} coins")
main()
def addcoins():
addcoins = get_balance()
coinamt = int(input("Enter a amount to add coins: "))
addcoins['coins'] += coinamt
with open('coins.json', 'w') as f:
json.dump(addcoins, f)
print("Add sucessfull")
main()
def minuscoins():
minuscoins = get_balance()
coinamt = int(input("Enter a amount to minus coins: "))
minuscoins['coins'] -= coinamt
with open('coins.json', 'w') as f:
json.dump(minuscoins, f)
print("Minus sucessfull")
main()
def get_balance():
with open('coins.json','r') as f:
users = json.load(f)
return users
def main():
mainFunction = input("Input a Command(Type help for help): ")
if mainFunction == "help":
help()
elif mainFunction == "coins":
checkbalance()
elif mainFunction == "addcoins":
addcoins()
elif mainFunction == "minuscoins":
minuscoins()
else:
print("Command Not Found")
main()
main()
| hahayeslol12/CoinScript | main.py | main.py | py | 1,455 | python | en | code | 0 | github-code | 36 |
30494220256 | import pandas as pd
import numpy as np
import tensorflow as tf
import time
import os
import csv
from sklearn.preprocessing import MinMaxScaler
from keras.layers import Input
from keras.layers import Dense, LSTM, Dropout, Embedding, Input, Activation, Bidirectional, TimeDistributed, RepeatVector, Flatten
from keras.optimizers import Adam
from sklearn.metrics import mean_squared_error
from math import sqrt
from keras.models import Sequential, Model
learning_rate=0.001
look_back=20
batch_size=5
hidden_nodes = 256
epochs = 100
adam = Adam(lr=learning_rate)
def create_dataset_input(dataset, look_back):
dataX = []
for i in range(len(dataset)-look_back):
dataX.append(dataset[i:(i+look_back)])
return np.array(dataX)
def mode_decide(input_mode):
train_mode=input_mode.split('-',1)[0]
val_mode=input_mode.split('-',1)[1]
error1=(train_mode!='a')and(train_mode!='b')and(train_mode!='ab')
error2=(train_mode!='a')and(train_mode!='b')and(train_mode!='ab')
if error1 or error2:
raise ValueError # Wrong input mode type
mode={'train_set':train_mode,'val_set':val_mode}
return mode
def load_data(mode):
filename=[mode['train_set']+'_train_set'+'.csv']
filename="".join(filename)
train_data=pd.read_csv(filename)
filename=[mode['val_set']+'_val_set'+'.csv']
filename="".join(filename)
val_data=pd.read_csv(filename)
return train_data,val_data
def data_prepocess(train_data,val_data,batch_size=batch_size,look_back=look_back):
#train_set 设置
train_raw_x=train_data['Loc_x']
train_raw_x=np.array(train_raw_x).astype(float).reshape(-1,1)
scaler_loc_x=MinMaxScaler()
train_loc_x=scaler_loc_x.fit_transform(train_raw_x)
train_raw_y=train_data['Loc_y']
train_raw_y=np.array(train_raw_y).astype(float).reshape(-1,1)
scaler_loc_y=MinMaxScaler()
train_loc_y=scaler_loc_y.fit_transform(train_raw_y)
train_Mag_x=train_data['GeoX']
train_Mag_x=np.array(train_Mag_x).astype(float).reshape(-1,1)
scaler_mag_x=MinMaxScaler()
Mag_x=scaler_mag_x.fit_transform(train_Mag_x)
train_Mag_y=train_data['GeoY']
train_Mag_y=np.array(train_Mag_y).astype(float).reshape(-1,1)
scaler_mag_y=MinMaxScaler()
Mag_y=scaler_mag_y.fit_transform(train_Mag_y)
train_Mag_z=train_data['GeoZ']
train_Mag_z=np.array(train_Mag_z).astype(float).reshape(-1,1)
scaler_mag_z=MinMaxScaler()
Mag_z=scaler_mag_z.fit_transform(train_Mag_z)
train_size=int(len(train_loc_x))
#val_set 设置
val_raw_x=val_data['Loc_x']
val_raw_x=np.array(val_raw_x).astype(float).reshape(-1,1)
v_scaler_loc_x=MinMaxScaler()
val_loc_x=v_scaler_loc_x.fit_transform(val_raw_x)
val_raw_y=val_data['Loc_y']
val_raw_y=np.array(val_raw_y).astype(float).reshape(-1,1)
v_scaler_loc_y=MinMaxScaler()
val_loc_y=v_scaler_loc_y.fit_transform(val_raw_y)
val_Mag_x=val_data['GeoX']
val_Mag_x=np.array(val_Mag_x).astype(float).reshape(-1,1)
v_scaler_mag_x=MinMaxScaler()
val_Mag_x=v_scaler_mag_x.fit_transform(val_Mag_x)
val_Mag_y=val_data['GeoY']
val_Mag_y=np.array(val_Mag_y).astype(float).reshape(-1,1)
v_scaler_mag_y=MinMaxScaler()
val_Mag_y=v_scaler_mag_y.fit_transform(val_Mag_y)
val_Mag_z=val_data['GeoZ']
val_Mag_z=np.array(val_Mag_z).astype(float).reshape(-1,1)
v_scaler_mag_z=MinMaxScaler()
val_Mag_z=v_scaler_mag_z.fit_transform(val_Mag_z)
val_size=int(len(val_loc_x))
train_mag_x = create_dataset_input(train_Mag_x, look_back = look_back)
train_mag_y = create_dataset_input(train_Mag_y, look_back = look_back)
train_mag_z = create_dataset_input(train_Mag_z, look_back = look_back)
test_mag_x = create_dataset_input(val_Mag_x, look_back = look_back)
test_mag_y = create_dataset_input(val_Mag_y, look_back = look_back)
test_mag_z = create_dataset_input(val_Mag_z, look_back = look_back)
#print('trian_mag_x:',train_mag_x)
train_loc_x = create_dataset_input(train_loc_x, look_back = look_back)
train_loc_y = create_dataset_input(train_loc_y, look_back = look_back)
test_loc_x = create_dataset_input(val_loc_x, look_back = look_back)
test_loc_y = create_dataset_input(val_loc_y, look_back = look_back)
trainX = np.concatenate((train_mag_x,train_mag_y,train_mag_z),axis = 2)
testX = np.concatenate((test_mag_x,test_mag_y,test_mag_z),axis = 2)
#print('train_loc_x.shape:',train_loc_x.shape)
trainY = np.concatenate((train_loc_x,train_loc_y),axis = 2)
testY = np.concatenate((test_loc_x,test_loc_y),axis = 2)
trainY = np.reshape(trainY, (len(trainY),look_back,2))
#print('trianY:',trainY.shape)
lengthTrain = len(trainX)
lengthTest = len(testX)
while(lengthTrain % batch_size != 0):
lengthTrain -= 1
while(lengthTest % batch_size != 0):
lengthTest -= 1
return trainX[0:lengthTrain],trainY[0:lengthTrain],testX[0:lengthTest],testY[0:lengthTest]
def model_train(train_x, train_y, test_x, test_y,file_structure,file_acc2loss):
model=model_build()
for i in range(epochs):
history = model.fit(train_x, train_y, batch_size=batch_size, epochs = 1, verbose=1,shuffle = False) #validation_split=0.1, validation_data=(test_x, test_y)
# # need to reset state for every epoch
model.reset_states()
# #print('hidden_state:',hidden_state)
# # list all data in history
# '''
# print('history.keys()',hist.history.keys())
# # summarize history for accuracy
# plt.plot(hist.history['acc'])
# plt.plot(hist.history['val_acc'])
# plt.title('model accuracy')
# plt.ylabel('accuracy')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
# '''
print('Real Epoches:',i+1)
with open(file_acc2loss,'a', newline='') as csvfile:
if not os.path.getsize(file_acc2loss): #file is empty
spamwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
spamwriter.writerow(['epochs','loss','acc'])#, 'val_loss','val_acc'
data = ([
i,history.history['loss'][0],history.history['acc'][0]#, history.history['val_loss'][0], history.history['val_acc'][0]
])
spamwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
spamwriter.writerow(data)
return model
def model_build(hidden_nodes=hidden_nodes,batch_size=batch_size , time_steps = look_back, feature_size = 3):
inputs1 = Input(batch_shape = (batch_size,look_back,feature_size))
lstm1 = LSTM(hidden_nodes, stateful = True, return_sequences=True, return_state=True,dropout=0.2)(inputs1)
lstm1 = LSTM(hidden_nodes,return_sequences=True,dropout=0.2)(lstm1)
lstm1 = TimeDistributed(Dense((2)))(lstm1)
model = Model(input = inputs1, outputs = lstm1)
print(model.layers)
model.compile(loss='mean_squared_error', optimizer=adam,metrics=['acc'])
model.summary()
return model
if __name__=='__main__':
input_mode=[]
change=False
if change:
input_mode=input('Please inport train and val mode in _-_(e.g:a-b)\n')
if input_mode=='all' :
input_mode=['a-a','a-b','a-ab','b-a','b-b','b-ab','ab-a','ab-b','ab-ab']
else:
input_mode=['a-b']
for t_v in input_mode:
mode=mode_decide(t_v)
file_structure = [mode['train_set']+'-'+mode['val_set']+'_'+'model_ts=30_256_5_100.png']
file_acc2loss = [mode['train_set']+'-'+mode['val_set']+'_'+'log_ts=30_256_5_100.csv']
file_structure="".join(file_structure)
file_acc2loss = "".join(file_acc2loss)
train_data,val_data=load_data(mode)
train_x, train_y, test_x, test_y=data_prepocess(train_data,val_data)
model=model_train(train_x, train_y, test_x, test_y,file_structure,file_acc2loss)
del model
| MeichenBu/2018-2019-SURF | CNN+LSTM/LSTM_old.py | LSTM_old.py | py | 7,596 | python | en | code | 0 | github-code | 36 |
30613126170 | # 이것이 코딩테스트다
# p.180
n = int(input())
nameScoreList = []
for i in range(0, n):
nameScore = input().split()
nameScoreList.append((nameScore[0], int(nameScore[1])))
nameScoreList = sorted(nameScoreList, key = lambda x: x[1])
for name in nameScoreList:
print(name[0], end = ' ')
| KodaHye/Algorithm | This is CodingTest/practice/ascScore.py | ascScore.py | py | 310 | python | en | code | 0 | github-code | 36 |
27467133509 | #verificar se tem caracteres duplicados em string
def tem_duplicado(palavra):
vistos=[]
for c in palavra:
if c in vistos:
return True
vistos.append(c)
return False
if tem_duplicado('abacaxi'):
print('tem duplicados')
else:
print("nao tem duplicados") | GiulianeEC/praticas_python | modulo02/duplicidade.py | duplicidade.py | py | 285 | python | pt | code | 0 | github-code | 36 |
16098074633 | from selenium import webdriver
from webdriver_manager.firefox import GeckoDriverManager # The Webdriver
import pyautogui # To Click
import time # To wait and all
custom_site = input("Enter The Website to Download Video...") # Take the youtube video link as the input
driver = webdriver.Firefox(executable_path=GeckoDriverManager().install()) # installs the webdriver.
cus = custom_site.find('y') # Finds 'Y' in the Link.
suc = custom_site[0:cus] # Slices upto but not includes y
final_website = suc + 'ss' + custom_site[cus:] # WIth the help of string concatenation it adds ss to the
# link before the letter 'y'
web = driver.get(final_website) # Uses the Selenium Web driver to go to the website.
driver.implicitly_wait(5) # Waits For 5 Secs
driver.find_element_by_xpath(r'/html/body/div[1]/div[1]/div[2]/div[4]/div/div[1]/div[2]/div[2]/div[1]/a').click()
# Finds the download button by x path
# Often after clicking the download button the new tab gets open automatically.
# If a new tab gets open it closes that tab.
# You Can Run the below program in your terminal to get the live position of your mouse
# import pyautogui
# pyautogui.displayMousePosition()
def new_tab_cut():
x = 504 # You can change the x cordinate to your mouse position.
y = 48 # # You can change the y cordinate to your mouse position.
pyautogui.moveTo(x, y, duration=1.2)
pyautogui.click(x, y)
# The Following block of code is used to click on the download button in firefix browser.
def save_file():
x1 = 504
y1 = 471
pyautogui.moveTo(x1, y1, duration=1)
pyautogui.click(x1, y1)
time.sleep(1)
x2 = 916
y2 = 575
pyautogui.click(x2, y2)
time.sleep(1)
new_tab_cut()
save_file()
time.sleep(100)
driver.quit()
print("Downloaded")
# executable_path=GeckoDriverManager().install() | JhaRishikesh/Projects | YouTube Downloader.py | YouTube Downloader.py | py | 1,902 | python | en | code | 0 | github-code | 36 |
10315136743 | from __future__ import print_function
import sys
import mdtraj as md
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit
import argparse
class Tee(object):
def __init__(self, name, mode):
self.file = open(name, mode)
self.stdout = sys.stdout
def write(self, data):
self.file.write(data)
self.stdout.write(data)
if __name__ == '__main__':
if len(sys.argv) != 3:
print('usage %s <trajectory index (for output file)> <model index of starting conformation>')
exit(1)
pdb = md.load('100-fs-peptide-400K.pdb')
forcefield = app.ForceField('amber99sbildn.xml', 'amber99_obc.xml')
system = forcefield.createSystem(pdb.topology.to_openmm(), nonbondedMethod=app.CutoffNonPeriodic,
nonbondedCutoff=1.0*unit.nanometers, constraints=app.HBonds)
integrator = mm.LangevinIntegrator(300*unit.kelvin, 91.0/unit.picoseconds,
2.0*unit.femtoseconds)
integrator.setConstraintTolerance(0.00001)
platform = mm.Platform.getPlatformByName('CPU')
#properties = {'CudaPrecision': 'mixed', 'CudaDeviceIndex': sys.argv[1]}
simulation = app.Simulation(pdb.topology.to_openmm(), system, integrator, platform)
simulation.context.setPositions(pdb.xyz[int(sys.argv[2])])
simulation.context.setVelocitiesToTemperature(300*unit.kelvin)
nsteps = int((1*unit.nanoseconds) / (2*unit.femtoseconds))
interval = int((10*unit.picoseconds) / (2*unit.femtoseconds))
simulation.reporters.append(app.StateDataReporter(open('trajectory-%s.log' % sys.argv[1], 'w', 0),
interval, step=True, time=True, progress=True,
potentialEnergy=True, temperature=True, remainingTime=True,
speed=True, totalSteps=nsteps, separator='\t'))
# equilibrate
simulation.step(int(100*unit.picoseconds / (2*unit.femtoseconds)))
# now add the trajectory reporter.
simulation.reporters.append(app.DCDReporter('trajectory-%s.dcd' % sys.argv[1], interval))
simulation.step(nsteps)
| vivek-bala/adaptive-msm-openmm | entk2/fs-peptide/simulate-fs.py | simulate-fs.py | py | 2,035 | python | en | code | 0 | github-code | 36 |
10525922454 | import copy
import torch
import torch.nn as nn
from .backbone import *
import numpy as np
import torch.nn.functional as F
import thop
def ConvBNReLU(in_chann, out_chann, ks, st, p=1):
return nn.Sequential(
nn.Conv2d(in_chann, out_chann, kernel_size=ks, stride=st, padding=p, bias=False),
nn.BatchNorm2d(out_chann),
nn.ReLU(inplace=True)
)
class Aggregation(nn.Module):
def __init__(self, in_chann, out_chann, asy_ks=5):
super(Aggregation, self).__init__()
self.conv = ConvBNReLU(in_chann, out_chann, 3, 1, 1)
self.left_asymmetric = nn.Sequential(
nn.Conv2d(out_chann, out_chann, kernel_size=(1, asy_ks), stride=1, \
padding=(0, asy_ks//2), groups=out_chann, bias=True),
nn.Conv2d(out_chann, out_chann, kernel_size=(asy_ks, 1), stride=1, \
padding=(asy_ks//2, 0), groups=out_chann, bias=True),
)
self.right_asymmetric = nn.Sequential(
nn.Conv2d(out_chann, out_chann, kernel_size=(asy_ks, 1), stride=1, \
padding=(asy_ks//2, 0), groups=out_chann, bias=True),
nn.Conv2d(out_chann, out_chann, kernel_size=(1, asy_ks), stride=1, \
padding=(0, asy_ks//2), groups=out_chann, bias=True),
)
self.bn_relu = nn.Sequential(
nn.BatchNorm2d(out_chann),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
left = self.left_asymmetric(x)
right = self.right_asymmetric(x)
out = left + right
out = self.bn_relu(out)
return out
class DeepLabHead(nn.Module):
def __init__(self, num_classes, last_channels, mid_channels, low_channels):
super(DeepLabHead, self).__init__()
self.low_process = ConvBNReLU(low_channels, 48, 1, 1, 0)
self.mid_process = ConvBNReLU(mid_channels, 48, 1, 1, 0)
self.mid_project = ConvBNReLU(304, 256, 3, 1, 1)
self.classifier = nn.Sequential(
ConvBNReLU(304, 256, 3, 1, 1),
nn.Dropout(0.1),
nn.Conv2d(256, num_classes,
kernel_size=1,
stride=1,
padding=0,
bias=True)
)
def forward(self, last_feat, mid_feat, low_feat):
low_feat = self.low_process(low_feat)
mid_feat = self.mid_process(mid_feat)
last_feat = F.interpolate(last_feat, size=mid_feat.size()[2:], mode="bilinear", align_corners=True)
mid_feat = torch.cat([last_feat, mid_feat], dim=1)
mid_feat = self.mid_project(mid_feat)
mid_feat = F.interpolate(mid_feat, size=low_feat.size()[2:], mode="bilinear", align_corners=True)
out_feat = torch.cat([mid_feat, low_feat], dim=1)
out = self.classifier(out_feat)
return out
class CPNet(nn.Module):
def __init__(self, num_classes, input_channels=512,
prior_channels=512, prior_size=(40, 40), backend="resnet34", pretrained=True):
super(CPNet, self).__init__()
self.prior_size = np.prod(prior_size)
self.num_classes = num_classes
self.prior_channels = prior_channels
self.backbone = eval(backend)(pretrained=pretrained) # backbone
self.aggregation = Aggregation(input_channels, prior_channels, 11) # 特征聚合,丰富特征的上下文信息
self.prior_conv = nn.Sequential(
nn.Conv2d(prior_channels, self.prior_size, kernel_size=1, stride=1, bias=True),
# nn.BatchNorm2d(self.prior_size)
)
self.intra_conv = ConvBNReLU(prior_channels, prior_channels, 1, 1, 0)
self.inter_conv = ConvBNReLU(prior_channels, prior_channels, 1, 1, 0)
self.post_process = nn.Sequential(
ConvBNReLU(input_channels + prior_channels*2, 256, 1, 1, 0),
ConvBNReLU(256, 256, 3, 1, 1) # prior_channels
)
# without deeplab
self.head = nn.Sequential(
ConvBNReLU(256, 256, 3, 1, 1), # prior_channels
nn.Dropout(0.1),
nn.Conv2d(256, num_classes, 1, 1, bias=True)
)
# with deeplab
'''self.deeplab_head = DeepLabHead(num_classes, 256, 128, 64)'''
def _reinit(self, input_size):
input_size = input_size/16
self.prior_size = int(np.prod(input_size))
self.prior_conv = nn.Sequential(
nn.Conv2d(self.prior_channels, self.prior_size, kernel_size=1, stride=1, bias=True),
)
def forward(self, x):
feat, feat_2, feat_1 = self.backbone(x)
h, w = feat.size()[2:]
value = self.aggregation(feat)
context_proir_map = self.prior_conv(value)
context_proir_map = context_proir_map.view(context_proir_map.size()[0], \
-1, self.prior_size).permute(0, 2, 1)
intra_context_proir_map = torch.sigmoid(context_proir_map) # [bs, 40*40, 40*40], 类内
inter_context_prior_map = 1 - context_proir_map # 类间
value = value.view(value.size()[0], value.size()[1], -1).permute(0, 2, 1).contiguous() # [bs, 512, 40*40]==>[bs, 40*40, 512]
intra_context_proir_map = F.softmax(intra_context_proir_map, dim=-1)
intra_context = torch.matmul(intra_context_proir_map, value) # [bs, 40*40, 512] # 利用类内全局特征更新每一个特征
# intra_context = intra_context.div(self.prior_size)
intra_context = intra_context.permute(0, 2, 1).contiguous()
intra_context = intra_context.view(intra_context.size(0), self.prior_channels, h, w)
intra_context = self.intra_conv(intra_context)
inter_context_prior_map = F.softmax(inter_context_prior_map, dim=-1)
inter_context = torch.matmul(inter_context_prior_map, value)
# inter_context = inter_context.div(self.prior_size)
inter_context = inter_context.permute(0, 2, 1).contiguous()
inter_context = inter_context.view(inter_context.size(0), self.prior_channels, h, w)
inter_context = self.inter_conv(inter_context)
out = torch.cat([feat, intra_context, inter_context], dim=1)
out = self.post_process(out)
# without deeplab
seg_out = self.head(out)
seg_out = F.interpolate(seg_out, size=(x.size()[2], x.size()[3]), mode="bilinear", align_corners=True)
# with deeplab
'''seg_out = self.deeplab_head(out, feat_2, feat_1)
seg_out = F.interpolate(seg_out, size=x.size()[2:], mode="bilinear", align_corners=True)'''
if self.training:
return seg_out, intra_context_proir_map
return seg_out
from utils.utils import get_model_infos
@get_model_infos
def cpnet(num_classes, backend="resnet34", pretrained=False):
model = CPNet(num_classes, backend=backend, pretrained=pretrained)
return model
if __name__ == "__main__":
model = CPNet(20)
inputs = torch.randn(1, 3, 640, 640)
seg_out, context_map = model(inputs)
print("segout: ", seg_out.size(), ' context_map siz: ', context_map.size())
# labels = torch.randint(0, 20, (1, 640, 640)).long()
# model._get_loss(context_map, labels, [80, 80])
'''model = cpnet_resnet34(4, pretrained=False)
feat = torch.randn(1, 3, 640, 640)
out, context_proir_map = model(feat)
print(out.size(), " context_proir_map size: ", context_proir_map.size())''' | yadongJiang/semantic-segmentation-projects | libs/cpnet/model.py | model.py | py | 7,478 | python | en | code | 5 | github-code | 36 |
29654851332 | """ A python program that scrapes news articles, classifies their sentiment, and creates a time series of sentiment over time """
import os
import openai
# Set OpenAI API key from environment
openai.api_key = os.environ.get('OPENAI_API_KEY', '')
def classify(query, search_model="ada", model="davinci"):
openai.Classification.create(
search_model=search_model,
model=model,
examples=[
[""],
[""],
[""],
[""],
],
query=query,
labels=[
"Very Positive",
"Mostly Positive",
"Neutral",
"Mostly Negative"
"Very Negative",
]
)
if __name__ == "__main__":
print(openai.Model.list()) | candiceevemiller/company-sentiment-analysis | main.py | main.py | py | 752 | python | en | code | 0 | github-code | 36 |
40272854300 |
class Board:
def __init__(self, init_data):
self.data = [[-1,-1,-1,-1,-1],[-1,-1,-1,-1,-1],[-1,-1,-1,-1,-1],[-1,-1,-1,-1,-1],[-1,-1,-1,-1,-1]]
for i in range(0,5):
for j in range(0,5):
self.data[i][j] = int(init_data[i][(j*3):(j*3)+2].lstrip())
def __str__(self):
s = ""
for i in range(0,len(self.data)):
for j in range(0,len(self.data[i])):
s += str(self.data[i][j]).rjust(3)
s += "\n"
return s
def play(self, num):
# If we have the selected number, mark it as played
check = False
won = False
for i in range(0,len(self.data)):
for j in range(0,len(self.data[i])):
if self.data[i][j] == num:
self.data[i][j] = -1
check = True # Don't exit loop incase the number appears more than once
if check: # Have we won?
# Check rows
for i in range(0,len(self.data)):
row = 0
for j in range(0,len(self.data[i])):
row += self.data[i][j]
if row == -5:
return True
# Check columns
for i in range(0,5):
col = 0
for j in range(0,5):
col += self.data[j][i]
if col == -5:
return True
return won
def score(self):
s = 0
for i in range(0,len(self.data)):
for j in range(0,len(self.data[i])):
if self.data[i][j] >= 0:
s += self.data[i][j]
return s
def dump(self):
print(self.data)
if __name__=="__main__":
with open("04.txt", "r") as f:
data = f.read().splitlines()
drawn = data[0]
drawn = [int(n) for n in drawn.split(",")]
boards = []
for i in range(2, len(data),6):
boards.append(Board(data[i:i+5]))
print("Boards loaded...")
for i in range(len(boards)):
boards[i].dump()
# print(boards[i])
n = 0
winner = -1
while winner == -1:
num = drawn[n]
print("Playing",num)
for i in range(0, len(boards)):
won = boards[i].play(num)
if won:
winner = i
n += 1
print("Winning board")
boards[winner].dump()
s = boards[winner].score()
print(s, num) # 929 80
print(s*num) # 74320
print("Part 2...")
n = 0
lastwon = False
while not lastwon:
num = drawn[n]
print("Playing",num)
i = 0
while i < len(boards):
won = boards[i].play(num)
if won:
if len(boards) == 1:
lastwon = True
else:
boards.pop(i)
else:
i += 1
n += 1
print("Losing board")
boards[0].dump()
s = boards[0].score()
print(s, num)
print(s*num)
| paulbaumgarten/advent-of-code | 2021/day04a.py | day04a.py | py | 3,017 | python | en | code | 4 | github-code | 36 |
323540718 | # -*- coding: utf-8 -*-
"""Console script for mcc."""
import os
import click
from pydub import AudioSegment
from pydub.silence import split_on_silence
@click.command()
@click.argument('sound_path')
@click.option('--mls', default=500, help='沉默的时长,毫秒')
@click.option('--st', default=-30, help='无声的界限,如果比这个数值更小则认为是无声')
@click.option('--name', default=0, help='分割出来文件的名字,默认从0开始')
def main(sound_path, mls, st, name):
"""切割一段带有停顿的空白语音"""
sound = AudioSegment.from_wav(sound_path)
chunks = split_on_silence(sound,
# 沉默的时长, 毫秒
min_silence_len=mls,
# 如果比silence_thresh这个数值更安静则认为是无声
silence_thresh=st
)
print(f'碎片数量: {len(chunks)}')
# 创建文件夹
dirname = f'{name}-{name + len(chunks) - 1}'
if not os.path.exists(dirname):
os.makedirs(dirname)
for i, chunk in enumerate(chunks):
# 导出文件
chunk.export(f'{dirname}/{name + i}.wav', format='wav')
| nanke-ym/mcc | mcc/split.py | split.py | py | 1,227 | python | en | code | 0 | github-code | 36 |
7762827089 | # =============================================================================
# Smallest multiple
# Problem 5
# 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
# What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
#
# =============================================================================
from math import factorial
def checkDiv(number):
for i in range(20, 10, -1):
if (number % i != 0):
return False
return True
maximum = factorial(20)
counter = 232792540
while counter <= maximum:
if checkDiv(counter):
print ("Your number is: ", counter)
break
else:
counter += 1 | piotrpatrzylas/projecteuler.net | Problem 5 python.py | Problem 5 python.py | py | 743 | python | en | code | 0 | github-code | 36 |
41961905364 | # importing module
import re
# taking input from user
n = int(input())
# iterating through the credit cards
for t in range(n):
#taking the credit card number from user
credit = input().strip()
credit_removed_hiphen = credit.replace('-','')
# valid is true in the beggining
valid = True
# using regual expressions
length_16 = bool(re.match(r'^[4-6]\d{15}$',credit))
length_19 = bool(re.match(r'^[4-6]\d{3}-\d{4}-\d{4}-\d{4}$',credit))
consecutive = bool(re.findall(r'(?=(\d)\1\1\1)',credit_removed_hiphen))
# checking if the above regural expressions are true
if length_16 == True or length_19 == True:
if consecutive == True:
valid=False
else:
valid = False
if valid == True:
print('Valid')
else:
print('Invalid') | achyuth9490/Python | credit_card.py | credit_card.py | py | 876 | python | en | code | 0 | github-code | 36 |
29026071968 | #!/usr/bin/env python3
animals = ['cat', 'dog']
while (len(animals)) != 0:
print(animals[(len(animals)) - 1])
animals.pop()
else:
print('End of the stock')
for animal in animals:
print(animal)
else:
print('Results end here') | Himesh-Codes/Python | Base/loop.py | loop.py | py | 249 | python | en | code | 0 | github-code | 36 |
69982633063 | import torch
import torch.nn as nn
from torch.nn import functional as F
import math
from typing import Tuple
device = "cuda" if torch.cuda.is_available() else "cpu"
class Embedding(nn.Module):
def __init__(self,
config,
vocab_size):
"""
Embedding generates learnable representation of an input sequence which encodes
contextual, semantic meaning for each word.
Params:
d_model(int): specifies the embedding dimension for each token/word
vocab_size(int): number of embeddings that would be needed. # of unique words
max_seq_len(int): the maximum sequence length of an input sequence. Used for generation positional encoding
dropout(float): probability of dropout applied on the final embedding output
"""
super().__init__()
self.vocab_size = vocab_size
self.token_embedding_table = nn.Embedding(num_embeddings=vocab_size,
embedding_dim=config["d_model"])
self.position_embedding_table = nn.Embedding(num_embeddings=config["context_length"],
embedding_dim=config["d_model"])
self.dropout = nn.Dropout(p=config["dropout"])
def forward(self, x: torch.Tensor) -> torch.Tensor:
# x => [B, S]
B, S = x.shape
token_emb = self.token_embedding_table(x) # [B, S, D]
pos_emb = self.position_embedding_table(torch.arange(S, device=device)).unsqueeze(0) # [1, S, D]
out = self.dropout(token_emb+pos_emb)
return self.dropout(out)
class AttentionHead(nn.Module):
def __init__(self,
config) -> None:
super().__init__()
self.d_model = config["d_model"]
self.head_dim = config["head_dim"]
self.query = nn.Linear(self.d_model, self.head_dim)
self.key = nn.Linear(self.d_model, self.head_dim)
self.value = nn.Linear(self.d_model, self.head_dim)
self.dropout = nn.Dropout(p=config["dropout"])
def forward(self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask=None) -> torch.Tensor:
# query => [B, Q, D]
# key => [B, K, D]
# value => [B, K, D]
q = self.query(query) # B, Q, HEAD_DIM
k = self.key(key) # B, K, HEAD_DIM
v = self.value(value) # B, K, HEAD_DIM
weights = q @ k.transpose(1, 2) # B, Q, K
if mask is not None:
weights = weights.masked_fill(mask==0, value=float("-inf"))
weights = F.softmax(weights/math.sqrt(self.head_dim), dim=-1)
out = weights @ v # [B, Q, K] x [B, K, HEAD_DIM] => [B, Q, HEAD_DIM]
return self.dropout(out)
class MultiHeadAttention(nn.Module):
def __init__(self,
config) -> None:
super().__init__()
self.sa_heads = nn.ModuleList([AttentionHead(config) for _ in range(config["n_heads"])])
self.proj = nn.Linear(config["d_model"], config["d_model"])
self.dropout = nn.Dropout(p=config["dropout"])
def forward(self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask=None) -> torch.Tensor:
out = torch.cat([h(query, key, value, mask) for h in self.sa_heads], dim=-1)
out = self.proj(out)
return self.dropout(out)
class FeedForward(nn.Module):
def __init__(self,
config):
super().__init__()
d_model = config["d_model"]
self.net = nn.Sequential(
nn.Linear(d_model, d_model*4),
nn.ReLU(),
nn.Linear(d_model*4, d_model),
nn.Dropout(p=config["dropout"])
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.net(x)
return x
class GPTDecoderBlock(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.mha = MultiHeadAttention(config)
self.ff = FeedForward(config)
self.ln_1 = nn.LayerNorm(normalized_shape=config["d_model"])
self.ln_2 = nn.LayerNorm(normalized_shape=config["d_model"])
def forward(self, x: torch.Tensor, mask=None) -> torch.Tensor:
x = x + self.mha(self.ln_1(x), self.ln_1(x), self.ln_1(x), mask)
x = x + self.ff(self.ln_2(x))
return x
class GPTDecoder(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.blocks = nn.ModuleList([GPTDecoderBlock(config) for _ in range(config["n_decoders"])])
def forward(self, x: torch.Tensor, mask=None) -> torch.Tensor:
for block in self.blocks:
x = block(x, mask)
return x
class PoemGPT(nn.Module):
def __init__(self, config, vocab_size) -> None:
super().__init__()
self.context_length = config["context_length"]
self.embedding = Embedding(config, vocab_size)
self.gpt = GPTDecoder(config)
self.lm_head = nn.Linear(config["d_model"], vocab_size)
def forward(self,
x: torch.Tensor,
targets: torch.Tensor = None) -> Tuple[torch.Tensor, torch.Tensor]:
B, S = x.shape
# x -> [B, S], targets -> [B, S]
x = self.embedding(x) # B, S, D_MODEL
mask = create_causal_mask(S)
x = self.gpt(x, mask) # B, S, D_MODEL
logits = self.lm_head(x) # B, S, VOCAB_SIZE
if targets is None:
loss = None
else:
logits = logits.view(B*S, -1)
targets = targets.view(-1)
loss = F.cross_entropy(logits, targets)
return logits, loss
def generate(self, x:torch.Tensor=None, max_new_tokens: int=500) -> torch.Tensor:
if x is None:
x = torch.zeros((1, 1), dtype=torch.long, device=device) # B, S
for _ in range(max_new_tokens):
preds, _ = self(x[:, -self.context_length:])# B, S, VOCAB_SIZE
preds = preds[:, -1, :] # B, VOCAB_SIZE
probs = F.softmax(preds, dim=-1)
x_next = torch.multinomial(input=probs, num_samples=1) # B, 1
x = torch.cat((x, x_next), dim=1) # B, S+1
return x
def create_causal_mask(sz):
mask = torch.ones((sz, sz), device=device)
mask = torch.tril(mask)
return mask | SkAndMl/MusGPT | model.py | model.py | py | 6,552 | python | en | code | 3 | github-code | 36 |
43304002854 | import sys, os
import os.path
import shutil
from rpython.translator.translator import TranslationContext
from rpython.translator.tool.taskengine import SimpleTaskEngine
from rpython.translator.goal import query
from rpython.translator.goal.timing import Timer
from rpython.annotator.listdef import s_list_of_strings
from rpython.annotator import policy as annpolicy
from rpython.tool.udir import udir
from rpython.rlib.debug import debug_start, debug_print, debug_stop
from rpython.rlib.entrypoint import secondary_entrypoints,\
annotated_jit_entrypoints
import py
from rpython.tool.ansi_print import AnsiLogger
log = AnsiLogger("translation")
def taskdef(deps, title, new_state=None, expected_states=[],
            idemp=False, earlycheck=None):
    """Decorator factory that attaches task-engine metadata to a task method.

    NOTE(review): `new_state` is accepted but `task_newstate` is always set
    to None, preserving the original behaviour -- confirm this is intended.
    """
    attrs = {
        'task_deps': deps,
        'task_title': title,
        'task_newstate': None,
        'task_expected_states': expected_states,
        'task_idempotent': idemp,
        'task_earlycheck': earlycheck,
    }
    def decorator(taskfunc):
        for key, value in attrs.items():
            setattr(taskfunc, key, value)
        return taskfunc
    return decorator
# TODO:
# sanity-checks using states

# Names of translation goals whose task functions should be run under
# cProfile (see TranslationDriver._do); empty by default.
PROFILE = set()
class Instrument(Exception):
    # Raised by a forked child to signal that translation should restart in
    # instrumentation mode (see TranslationDriver.instrument_result / _do).
    pass
class ProfInstrument(object):
    """Instrumentation hook: build the executable, run it with a counter file
    exposed through the environment, then hard-exit the child process."""
    name = "profinstrument"
    def __init__(self, datafile, compiler):
        self.datafile = datafile
        self.compiler = compiler
    def first(self):
        # Build the instrumented executable before any probing.
        return self.compiler._build()
    def probe(self, exe, args):
        # Point the instrumented binary at the counter file via the env.
        env = dict(os.environ)
        env['PYPY_INSTRUMENT_COUNTERS'] = str(self.datafile)
        self.compiler.platform.execute(exe, args, env=env)
    def after(self):
        # xxx -- terminate the forked child immediately, skipping cleanup
        os._exit(0)
class TranslationDriver(SimpleTaskEngine):
    """Task engine driving a full RPython translation.

    Each task_* method is one translation step (annotate, rtype, backendopt,
    C source generation, compilation, ...); @taskdef attaches dependency and
    title metadata that SimpleTaskEngine uses to order the steps.
    """
    _backend_extra_options = {}

    def __init__(self, setopts=None, default_goal=None,
                 disable=[],
                 exe_name=None, extmod_name=None,
                 config=None, overrides=None):
        # Build (or adopt) the translation config and expose the reachable
        # tasks for the selected backend/type-system as bound methods.
        from rpython.config import translationoption
        self.timer = Timer()
        SimpleTaskEngine.__init__(self)
        self.log = log
        if config is None:
            config = translationoption.get_combined_translation_config(translating=True)
        # XXX patch global variable with translation config
        translationoption._GLOBAL_TRANSLATIONCONFIG = config
        self.config = config
        if overrides is not None:
            self.config.override(overrides)
        if setopts is not None:
            self.config.set(**setopts)
        self.exe_name = exe_name
        self.extmod_name = extmod_name
        self.done = {}
        self.disable(disable)
        if default_goal:
            default_goal, = self.backend_select_goals([default_goal])
            if default_goal in self._maybe_skip():
                default_goal = None
        self.default_goal = default_goal
        self.extra_goals = []
        self.exposed = []

        # expose tasks
        def expose_task(task, backend_goal=None):
            if backend_goal is None:
                backend_goal = task
            def proc():
                return self.proceed(backend_goal)
            self.exposed.append(task)
            setattr(self, task, proc)

        backend, ts = self.get_backend_and_type_system()
        for task in self.tasks:
            explicit_task = task
            if task == 'annotate':
                expose_task(task)
            else:
                # task names are '<step>_<backend-or-ts>'; only expose the
                # variants matching the configured backend / type system
                task, postfix = task.split('_')
                if task in ('rtype', 'backendopt', 'llinterpret',
                            'pyjitpl'):
                    if ts:
                        if ts == postfix:
                            expose_task(task, explicit_task)
                    else:
                        expose_task(explicit_task)
                elif task in ('source', 'compile', 'run'):
                    if backend:
                        if backend == postfix:
                            expose_task(task, explicit_task)
                    elif ts:
                        if ts == 'lltype':
                            expose_task(explicit_task)
                    else:
                        expose_task(explicit_task)

    def set_extra_goals(self, goals):
        self.extra_goals = goals

    def set_backend_extra_options(self, extra_options):
        self._backend_extra_options = extra_options

    def get_info(self): # XXX more?
        d = {'backend': self.config.translation.backend}
        return d

    def get_backend_and_type_system(self):
        type_system = self.config.translation.type_system
        backend = self.config.translation.backend
        return backend, type_system

    def backend_select_goals(self, goals):
        # Resolve short goal names ('source') to the full task name for the
        # current backend/type system ('source_c'); error out if ambiguous.
        backend, ts = self.get_backend_and_type_system()
        postfixes = [''] + ['_'+p for p in (backend, ts) if p]
        l = []
        for goal in goals:
            for postfix in postfixes:
                cand = "%s%s" % (goal, postfix)
                if cand in self.tasks:
                    new_goal = cand
                    break
            else:
                raise Exception("cannot infer complete goal from: %r" % goal)
            l.append(new_goal)
        return l

    def disable(self, to_disable):
        self._disabled = to_disable

    def _maybe_skip(self):
        # Everything that (transitively) depends on a disabled goal is skipped.
        maybe_skip = []
        if self._disabled:
            for goal in self.backend_select_goals(self._disabled):
                maybe_skip.extend(self._depending_on_closure(goal))
        return dict.fromkeys(maybe_skip).keys()

    def setup(self, entry_point, inputtypes, policy=None, extra={}, empty_translator=None):
        # Prepare translator, annotation policy and secondary entry points.
        # inputtypes=None means a standalone program taking argv.
        standalone = inputtypes is None
        self.standalone = standalone

        if standalone:
            # the 'argv' parameter
            inputtypes = [s_list_of_strings]
        self.inputtypes = inputtypes

        if policy is None:
            policy = annpolicy.AnnotatorPolicy()
        self.policy = policy
        self.extra = extra

        if empty_translator:
            translator = empty_translator
        else:
            translator = TranslationContext(config=self.config)

        self.entry_point = entry_point
        self.translator = translator
        self.libdef = None
        self.secondary_entrypoints = []

        if self.config.translation.secondaryentrypoints:
            for key in self.config.translation.secondaryentrypoints.split(","):
                try:
                    points = secondary_entrypoints[key]
                except KeyError:
                    raise KeyError("Entrypoint %r not found (not in %r)" %
                                   (key, secondary_entrypoints.keys()))
                self.secondary_entrypoints.extend(points)

        self.translator.driver_instrument_result = self.instrument_result

    def setup_library(self, libdef, policy=None, extra={}, empty_translator=None):
        """ Used by carbon python only. """
        self.setup(None, None, policy, extra, empty_translator)
        self.libdef = libdef
        self.secondary_entrypoints = libdef.functions

    def instrument_result(self, args):
        # Fork: the child re-runs compilation with instrumentation enabled
        # (via the Instrument exception), the parent waits and then reads the
        # counter file the child produced.
        backend, ts = self.get_backend_and_type_system()
        if backend != 'c' or sys.platform == 'win32':
            raise Exception("instrumentation requires the c backend"
                            " and unix for now")

        datafile = udir.join('_instrument_counters')
        makeProfInstrument = lambda compiler: ProfInstrument(datafile, compiler)

        pid = os.fork()
        if pid == 0:
            # child compiling and running with instrumentation
            self.config.translation.instrument = True
            self.config.translation.instrumentctl = (makeProfInstrument,
                                                     args)
            raise Instrument
        else:
            pid, status = os.waitpid(pid, 0)
            if os.WIFEXITED(status):
                status = os.WEXITSTATUS(status)
                if status != 0:
                    raise Exception("instrumentation child failed: %d" % status)
            else:
                raise Exception("instrumentation child aborted")
            import array, struct
            n = datafile.size()//struct.calcsize('L')
            datafile = datafile.open('rb')
            counters = array.array('L')
            counters.fromfile(datafile, n)
            datafile.close()
            return counters

    def info(self, msg):
        log.info(msg)

    def _profile(self, goal, func):
        # Run one task under cProfile, dumping KCachegrind output.
        from cProfile import Profile
        from rpython.tool.lsprofcalltree import KCacheGrind
        d = {'func':func}
        prof = Profile()
        prof.runctx("res = func()", globals(), d)
        KCacheGrind(prof).output(open(goal + ".out", "w"))
        return d['res']

    def _do(self, goal, func, *args, **kwds):
        # Execute one task with logging/timing; skip if already done and the
        # task is not idempotent.  An Instrument exception triggers a restart
        # of 'compile' in instrumentation mode.
        title = func.task_title
        if goal in self.done:
            self.log.info("already done: %s" % title)
            return
        else:
            self.log.info("%s..." % title)
        debug_start('translation-task')
        debug_print('starting', goal)
        self.timer.start_event(goal)
        try:
            instrument = False
            try:
                if goal in PROFILE:
                    res = self._profile(goal, func)
                else:
                    res = func()
            except Instrument:
                instrument = True
            if not func.task_idempotent:
                self.done[goal] = True
            if instrument:
                self.proceed('compile')
                assert False, 'we should not get here'
        finally:
            try:
                debug_stop('translation-task')
                self.timer.end_event(goal)
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                pass
        #import gc; gc.dump_rpy_heap('rpyheap-after-%s.dump' % goal)
        return res

    @taskdef([], "Annotating&simplifying")
    def task_annotate(self):
        """ Annotate
        """
        # includes annotation and annotatation simplifications
        translator = self.translator
        policy = self.policy
        self.log.info('with policy: %s.%s' % (policy.__class__.__module__, policy.__class__.__name__))

        annotator = translator.buildannotator(policy=policy)

        if self.secondary_entrypoints is not None:
            for func, inputtypes in self.secondary_entrypoints:
                if inputtypes == Ellipsis:
                    continue
                annotator.build_types(func, inputtypes, False)

        if self.entry_point:
            s = annotator.build_types(self.entry_point, self.inputtypes)
            translator.entry_point_graph = annotator.bookkeeper.getdesc(self.entry_point).getuniquegraph()
        else:
            s = None

        self.sanity_check_annotation()
        if self.entry_point and self.standalone and s.knowntype != int:
            raise Exception("stand-alone program entry point must return an "
                            "int (and not, e.g., None or always raise an "
                            "exception).")
        annotator.complete()
        annotator.simplify()
        return s

    def sanity_check_annotation(self):
        translator = self.translator
        irreg = query.qoutput(query.check_exceptblocks_qgen(translator))
        if irreg:
            self.log.info("Some exceptblocks seem insane")

        lost = query.qoutput(query.check_methods_qgen(translator))
        assert not lost, "lost methods, something gone wrong with the annotation of method defs"

    RTYPE = 'rtype_lltype'
    @taskdef(['annotate'], "RTyping")
    def task_rtype_lltype(self):
        """ RTyping - lltype version
        """
        rtyper = self.translator.buildrtyper()
        rtyper.specialize(dont_simplify_again=True)

    @taskdef([RTYPE], "JIT compiler generation")
    def task_pyjitpl_lltype(self):
        """ Generate bytecodes for JIT and flow the JIT helper functions
        lltype version
        """
        from rpython.jit.codewriter.policy import JitPolicy
        get_policy = self.extra.get('jitpolicy', None)
        if get_policy is None:
            self.jitpolicy = JitPolicy()
        else:
            self.jitpolicy = get_policy(self)
        #
        from rpython.jit.metainterp.warmspot import apply_jit
        apply_jit(self.translator, policy=self.jitpolicy,
                  backend_name=self.config.translation.jit_backend, inline=True)
        #
        self.log.info("the JIT compiler was generated")

    @taskdef([RTYPE], "test of the JIT on the llgraph backend")
    def task_jittest_lltype(self):
        """ Run with the JIT on top of the llgraph backend
        """
        # parent process loop: spawn a child, wait for the child to finish,
        # print a message, and restart
        from rpython.translator.goal import unixcheckpoint
        unixcheckpoint.restartable_point(auto='run')
        # load the module rpython/jit/tl/jittest.py, which you can hack at
        # and restart without needing to restart the whole translation process
        from rpython.jit.tl import jittest
        jittest.jittest(self)

    BACKENDOPT = 'backendopt_lltype'
    @taskdef([RTYPE, '??pyjitpl_lltype', '??jittest_lltype'], "lltype back-end optimisations")
    def task_backendopt_lltype(self):
        """ Run all backend optimizations - lltype version
        """
        from rpython.translator.backendopt.all import backend_optimizations
        backend_optimizations(self.translator, replace_we_are_jitted=True)

    STACKCHECKINSERTION = 'stackcheckinsertion_lltype'
    @taskdef(['?'+BACKENDOPT, RTYPE, 'annotate'], "inserting stack checks")
    def task_stackcheckinsertion_lltype(self):
        # Insert ll-level stack-overflow checks into all graphs.
        from rpython.translator.transform import insert_ll_stackcheck
        count = insert_ll_stackcheck(self.translator)
        self.log.info("inserted %d stack checks." % (count,))


    def possibly_check_for_boehm(self):
        # Early check (before any real work) that the Boehm GC headers are
        # available when gc=boehm was requested.
        if self.config.translation.gc == "boehm":
            from rpython.rtyper.tool.rffi_platform import configure_boehm
            from rpython.translator.platform import CompilationError
            try:
                configure_boehm(self.translator.platform)
            except CompilationError as e:
                i = 'Boehm GC not installed.  Try e.g. "translate.py --gc=minimark"'
                raise Exception(str(e) + '\n' + i)

    @taskdef([STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE, '?annotate'],
             "Creating database for generating c source",
             earlycheck = possibly_check_for_boehm)
    def task_database_c(self):
        """ Create a database for further backend generation
        """
        translator = self.translator
        if translator.annotator is not None:
            translator.frozen = True

        standalone = self.standalone
        get_gchooks = self.extra.get('get_gchooks', lambda: None)
        gchooks = get_gchooks()

        if standalone:
            from rpython.translator.c.genc import CStandaloneBuilder
            cbuilder = CStandaloneBuilder(self.translator, self.entry_point,
                                          config=self.config, gchooks=gchooks,
                                          secondary_entrypoints=
                                          self.secondary_entrypoints + annotated_jit_entrypoints)
        else:
            from rpython.translator.c.dlltool import CLibraryBuilder
            functions = [(self.entry_point, None)] + self.secondary_entrypoints + annotated_jit_entrypoints
            cbuilder = CLibraryBuilder(self.translator, self.entry_point,
                                       functions=functions,
                                       name='libtesting',
                                       config=self.config,
                                       gchooks=gchooks)
        if not standalone:     # xxx more messy
            cbuilder.modulename = self.extmod_name
        database = cbuilder.build_database()
        self.log.info("database for generating C source was created")
        self.cbuilder = cbuilder
        self.database = database

    @taskdef(['database_c'], "Generating c source")
    def task_source_c(self):
        """ Create C source files from the generated database
        """
        cbuilder = self.cbuilder
        database = self.database
        if self._backend_extra_options.get('c_debug_defines', False):
            defines = cbuilder.DEBUG_DEFINES
        else:
            defines = {}
        if self.exe_name is not None:
            exe_name = self.exe_name % self.get_info()
        else:
            exe_name = None
        c_source_filename = cbuilder.generate_source(database, defines,
                                                     exe_name=exe_name)
        self.log.info("written: %s" % (c_source_filename,))
        if self.config.translation.dump_static_data_info:
            from rpython.translator.tool.staticsizereport import dump_static_data_info
            targetdir = cbuilder.targetdir
            fname = dump_static_data_info(self.log, database, targetdir)
            dstname = self.compute_exe_name() + '.staticdata.info'
            shutil_copy(str(fname), str(dstname))
            self.log.info('Static data info written to %s' % dstname)

    def compute_exe_name(self, suffix=''):
        # Expand the exe_name template; an optional suffix replaces the last
        # extension (used e.g. for the '.staticdata.info' companion file).
        newexename = self.exe_name % self.get_info()
        if '/' not in newexename and '\\' not in newexename:
            newexename = './' + newexename
        if suffix:
            # Replace the last `.sfx` with the suffix
            newname = py.path.local(newexename.rsplit('.', 1)[0])
            newname = newname.new(basename=newname.basename + suffix)
            return newname
        return py.path.local(newexename)

    def create_exe(self):
        """ Copy the compiled executable into current directory, which is
            pypy/goal on nightly builds
        """
        if self.exe_name is not None:
            exename = self.c_entryp
            newexename = py.path.local(exename.basename)
            shutil_copy(str(exename), str(newexename))
            self.log.info("copied: %s to %s" % (exename, newexename,))
            if self.cbuilder.shared_library_name is not None:
                soname = self.cbuilder.shared_library_name
                newsoname = newexename.new(basename=soname.basename)
                shutil_copy(str(soname), str(newsoname))
                self.log.info("copied: %s to %s" % (soname, newsoname,))
                if hasattr(self.cbuilder, 'executable_name_w'):
                    # Copy pypyw.exe
                    exename_w = self.cbuilder.executable_name_w
                    newexename_w = py.path.local(exename_w.basename)
                    self.log.info("copied: %s to %s" % (exename_w, newexename_w,))
                    shutil_copy(str(exename_w), str(newexename_w))
                    # for pypy, the import library is renamed and moved to
                    # libs/python32.lib, according to the pragma in pyconfig.h
                    libname = self.config.translation.libname
                    oldlibname = soname.new(ext='lib')
                    if not libname:
                        libname = oldlibname.basename
                        libname = str(newsoname.dirpath().join(libname))
                    shutil.copyfile(str(oldlibname), libname)
                    self.log.info("copied: %s to %s" % (oldlibname, libname,))
                    # the pdb file goes in the same place as pypy(w).exe
                    ext_to_copy = ['pdb',]
                    for ext in ext_to_copy:
                        name = soname.new(ext=ext)
                        newname = newexename.new(basename=soname.basename)
                        shutil.copyfile(str(name), str(newname.new(ext=ext)))
                        self.log.info("copied: %s" % (newname,))
                    # HACK: copy libcffi-*.dll which is required for venvs
                    # At some point, we should stop doing this, and instead
                    # use the artifact from packaging the build instead
                    libffi = py.path.local.sysfind('libffi-8.dll')
                    if sys.platform == 'win32' and not libffi:
                        raise RuntimeError('could not find libffi')
                    elif libffi:
                        target = os.getcwd() + r'\libffi-8.dll'
                        if not os.path.exists(target):
                            # in tests, we can mock using windows without libffi
                            shutil.copyfile(str(libffi), target)
            self.c_entryp = newexename
        self.log.info("created: %s" % (self.c_entryp,))

    @taskdef(['source_c'], "Compiling c source")
    def task_compile_c(self):
        """ Compile the generated C code using either makefile or
        translator/platform
        """
        cbuilder = self.cbuilder
        kwds = {}
        if self.standalone and self.exe_name is not None:
            kwds['exe_name'] = self.compute_exe_name().basename
        cbuilder.compile(**kwds)

        if self.standalone:
            self.c_entryp = cbuilder.executable_name
            self.create_exe()
        else:
            self.c_entryp = cbuilder.get_entry_point()

    @taskdef([STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE], "LLInterpreting")
    def task_llinterpret_lltype(self):
        # Evaluate the entry point graph on the ll-interpreter instead of
        # compiling it; useful for debugging the translation result.
        from rpython.rtyper.llinterp import LLInterpreter

        translator = self.translator
        interp = LLInterpreter(translator.rtyper)
        bk = translator.annotator.bookkeeper
        graph = bk.getdesc(self.entry_point).getuniquegraph()
        v = interp.eval_graph(graph,
                              self.extra.get('get_llinterp_args',
                                             lambda: [])())

        log.llinterpret("result -> %s" % v)

    def proceed(self, goals):
        # Main entry: resolve goal names and run them (plus extra_goals),
        # skipping anything that depends on a disabled goal.
        if not goals:
            if self.default_goal:
                goals = [self.default_goal]
            else:
                self.log.info("nothing to do")
                return
        elif isinstance(goals, str):
            goals = [goals]
        goals.extend(self.extra_goals)
        goals = self.backend_select_goals(goals)
        result = self._execute(goals, task_skip = self._maybe_skip())
        self.log.info('usession directory: %s' % (udir,))
        return result

    @classmethod
    def from_targetspec(cls, targetspec_dic, config=None, args=None,
                        empty_translator=None,
                        disable=[],
                        default_goal=None):
        # Alternate constructor: build a driver from a target module's
        # 'target(driver, args)' spec, which may return the entry point alone
        # or together with input types and an annotation policy.
        if args is None:
            args = []

        driver = cls(config=config, default_goal=default_goal,
                     disable=disable)
        target = targetspec_dic['target']
        spec = target(driver, args)

        try:
            entry_point, inputtypes, policy = spec
        except TypeError:
            # not a tuple at all
            entry_point = spec
            inputtypes = policy = None
        except ValueError:
            policy = None
            entry_point, inputtypes = spec


        driver.setup(entry_point, inputtypes,
                     policy=policy,
                     extra=targetspec_dic,
                     empty_translator=empty_translator)
        return driver

    def prereq_checkpt_rtype(self):
        assert 'rpython.rtyper.rmodel' not in sys.modules, (
            "cannot fork because the rtyper has already been imported")
    prereq_checkpt_rtype_lltype = prereq_checkpt_rtype

    # checkpointing support
    def _event(self, kind, goal, func):
        # Task-engine hook: run early checks when a task is planned, and
        # create a restartable fork checkpoint just before 'fork_before'.
        if kind == 'planned' and func.task_earlycheck:
            func.task_earlycheck(self)
        if kind == 'pre':
            fork_before = self.config.translation.fork_before
            if fork_before:
                fork_before, = self.backend_select_goals([fork_before])
                if not fork_before in self.done and fork_before == goal:
                    prereq = getattr(self, 'prereq_checkpt_%s' % goal, None)
                    if prereq:
                        prereq()
                    from rpython.translator.goal import unixcheckpoint
                    unixcheckpoint.restartable_point(auto='run')
if os.name == 'posix':
    def shutil_copy(src, dst):
        # this version handles the case where 'dst' is an executable
        # currently being executed
        # (copy to a temp name, then rename over the original; the running
        # process keeps its old inode)
        shutil.copy(src, dst + '~')
        os.rename(dst + '~', dst)
else:
    shutil_copy = shutil.copy
| mozillazg/pypy | rpython/translator/driver.py | driver.py | py | 24,503 | python | en | code | 430 | github-code | 36 |
152948829 | import PyQt6
import pandas as pd
from PyQt6 import QtWidgets, QtGui, QtCore
from PyQt6.QtCore import pyqtSignal, pyqtSlot, Qt
from PyQt6.QtWidgets import QListWidget, QFileDialog
from matplotlib.backends.backend_qtagg import FigureCanvasQTAgg
from matplotlib.figure import Figure
from gui.HyperParamterWidget import HyperParameterWidget
from gui.HyperParamterWidgetBool import HyperParameterWidgetBool
from gui.Slider import Slider
from model.profiles.builder.data_readers import DataReaders
from model.profiles.builder.losses import Losses
from model.profiles.builder.models import Models
from model.profiles.builder.optimizers import Optimizers
from model.profiles.training_configuration import TrainingConfiguration
from model.profiles.training_profile import TrainingProfile
from model.profiles.training_session import Session
from utils.ConfigChangedArgs import ConfigChangedArgs
from utils.ListChangedArgs import ListChangedArgs
from utils.gui_tools import add_vlayout
from utils.stat_tools import calc_profile_f1, calc_data_stats
class CustomCanvas(FigureCanvasQTAgg):
    """Matplotlib canvas preconfigured with a single axes for loss curves."""
    def __init__(self, parent=None, width=5, height=4, dpi=200):
        figure = Figure(figsize=(width, height), dpi=dpi)
        # keep a handle to the axes so callers can redraw loss curves on it
        self.ax_loss = figure.add_subplot()
        super(CustomCanvas, self).__init__(figure)
class MainWindow(QtWidgets.QMainWindow):
    """Main application window: profile configuration, sessions, monitoring."""
    # --- signals -------------------------
    # emitted by the GUI toward the controller layer
    start_multi_fit = pyqtSignal(int, int)          # (session count, epoch count)
    train_signal = pyqtSignal()
    close_signal = pyqtSignal()                     # window is closing
    config_changed = pyqtSignal(ConfigChangedArgs)  # any config widget changed
    create_profile = pyqtSignal()
    profile_selection_changed = pyqtSignal(int)     # selected profile row
    select_session = pyqtSignal(int)                # selected session row
    export_model = pyqtSignal(int)                  # export mode (0=current, 1=best c, 2=best i, 3=best m)
    clear_session = pyqtSignal()
    signal_remove_session = pyqtSignal(int)
    signal_clone_model = pyqtSignal(str)            # checkpoint file path
    signal_clone_data = pyqtSignal(str)             # data file path
    signal_validate = pyqtSignal()
# --- slots ---------------------------
    @pyqtSlot(bool)
    def on_export_state_changed(self, running):
        # Disable the state group box while a model export is running.
        self.gb_state.setEnabled(not running)
    @pyqtSlot(list)
    def profiles_updates(self, profiles):
        # Redraw the accuracy plot whenever the profiles change.
        self.plot_accuracies(profiles)
@pyqtSlot(int, int)
def job_time_update(self, sec, epoch):
min = int(sec/60)
h = int(min/60)
min = min % 60
sec = sec % 60
self.time_label.setText(f"remaining: ~{h}h {min}min {sec}s (~{epoch} s per epoch)")
@pyqtSlot(bool)
def fit_status_changed(self, active):
if active:
self.fit_button.setText("Stop")
else:
self.fit_button.setText("Fit")
self.time_label.setText("")
@pyqtSlot(Session)
def session_changed(self, args):
if args.type == ListChangedArgs.ADDED:
self.session_list.addItem(args.data.get_name())
if args.type == ListChangedArgs.UPDATED:
self.update_session(args.data)
if args.index != -1 and self.session_list.count() >= args.index:
self.session_list.item(args.index).setText(args.data.get_name())
if args.type == ListChangedArgs.REMOVED:
self.session_list.takeItem(args.index)
if args.type == ListChangedArgs.RESET:
self.session_list.clear()
for s in args.data:
self.session_list.addItem(s.get_name())
@pyqtSlot(TrainingProfile)
def profiles_added(self, profile):
self.profile_list.addItem(profile.name)
checkbox = QtWidgets.QCheckBox(profile.name)
checkbox.setChecked(False)
checkbox.stateChanged.connect(self.acc_cb_state_changed)
self.acc_layout.addWidget(checkbox)
self.acc_cbs.append(checkbox)
    @pyqtSlot(TrainingConfiguration)
    def config_update(self, config):
        # Refresh every configuration category from the new configuration,
        # then update the summary label and enable "Create Profile" only for
        # complete configurations.
        self.refresh_config_category('opt', config.optimizer, self._opt_param_widgets, self.opt_layout, self.optimizer_cb)
        self.refresh_config_category('loss', config.loss, self._loss_param_widgets, self.loss_layout, self.loss_cb)
        self.refresh_config_category('model', config.model, self._model_param_widgets, self.model_layout, self.model_cb)
        self.refresh_config_category('reader', config.data, self._data_param_widgets, self.data_layout, self.data_cb)
        self.config_name_label.setText(config.get_name())
        self.create_profile_button.setEnabled(config.is_complete())
    @pyqtSlot(int, TrainingProfile)
    def profile_selected(self, i, profile):
        # Mirror the model's selection in the list widget.
        # NOTE(review): `profile` is unused here -- presumably kept for the
        # slot signature; confirm against the signal's emitter.
        self.profile_list.setCurrentRow(i)
@pyqtSlot(int, int, int, bool)
def on_batch_complete(self, i, cnt, remaining, training):
if training:
self.label_phase.setText('training')
else:
self.label_phase.setText('validation')
if i < 0:
self.label_batch.setText('preparing')
self.label_time.setText('')
elif i < cnt:
self.label_batch.setText(f'current batch: {i}/{cnt}')
self.label_time.setText(f'time remaining: ~{remaining} s')
else:
self.label_batch.setText('calculating metrics')
self.label_time.setText('')
# --- handler --------------------------
def button_clone_data_clicked(self):
dlg = QFileDialog()
# dlg.setFileMode(QFileDialog.AnyFile)
# dlg.setFilter("Numpy Data File (*.npy)")
if dlg.exec_():
filenames = dlg.selectedFiles()
if len(filenames) != 0:
self.signal_clone_data.emit(filenames[0])
    def button_clone_model_clicked(self):
        # Pick a checkpoint file and ask the controller to clone the model.
        # getOpenFileName returns (path, selected_filter); only the path is used.
        filename = QFileDialog.getOpenFileName(self, 'Open file', filter="Checkpoint (*.ckp)")
        self.signal_clone_model.emit(filename[0])
def remove_profile_clicked(self):
self.signal_remove_session.emit(self.profile_list.currentRow)
    def acc_cb_state_changed(self, checked):
        # Any accuracy checkbox toggled: redraw the accuracy plot.
        self.plot_accuracies(self._profiles)
    def session_selection_changed(self, index):
        self.select_session.emit(index)
    def model_selection_changed(self, txt):
        # Forward model combo-box changes as a 'model' config change.
        self.config_changed.emit(ConfigChangedArgs('model', txt, None, None))
    def opt_selection_changed(self, txt):
        self.config_changed.emit(ConfigChangedArgs('opt', txt, None, None))
    def data_selection_changed(self, txt):
        self.config_changed.emit(ConfigChangedArgs('reader', txt, None, None))
    def loss_selection_changed(self, txt):
        self.config_changed.emit(ConfigChangedArgs('loss', txt, None, None))
    def hp_changed(self, category, index, value):
        # A hyperparameter widget changed value within `category`.
        self.config_changed.emit(ConfigChangedArgs(category, None, index, value))
    def closeEvent(self, event):
        # Notify the controller so it can persist state before shutdown.
        self.close_signal.emit()
    def create_profile_clicked(self):
        self.create_profile.emit()
    def change_profile_selection(self):
        self.profile_selection_changed.emit(self.profile_list.currentRow())
    def click_fit(self):
        # NOTE(review): the custom Slider's `value` is accessed as an
        # attribute here (no call parentheses) -- confirm it is a property
        # on gui.Slider.
        self.start_multi_fit.emit(self.session_slider.value, self.epoch_slider.value)
# --- private methods ------------------
def update_session(self, session):
self.plot_session(session)
if session.epoch_cnt() == 0:
return
self.label_current.setText(f'cur: (c: {round(session.f1_crack[-1],3)}, i: {round(session.f1_inactive[-1],3)}, m: {round(session.f1[-1],3)})')
self.label_best_mean.setText(f'best m: ({round(session.best_f1_m[0],3)}, {round(session.best_f1_m[1],3)}, {round(session.best_f1_m[2],3)})')
self.label_best_crack.setText(f'best c: ({round(session.best_f1_c[0],3)}, {round(session.best_f1_c[1],3)}, {round(session.best_f1_c[2],3)})')
self.label_best_inactive.setText(f'best i: ({round(session.best_f1_i[0],3)}, {round(session.best_f1_i[1],3)}, {round(session.best_f1_i[2],3)})')
stats = calc_data_stats(session)
self.label_data_stat_t_nbr.setText(str(stats[0][0]))
self.label_data_stat_v_nbr.setText(str(stats[0][1]))
self.label_data_stat_t_f.setText(str(stats[1][0]))
self.label_data_stat_v_f.setText(str(stats[1][1]))
self.label_data_stat_t_c.setText(str(stats[2][0]))
self.label_data_stat_v_c.setText(str(stats[2][1]))
self.label_data_stat_t_i.setText(str(stats[3][0]))
self.label_data_stat_v_i.setText(str(stats[3][1]))
self.label_data_stat_t_b.setText(str(stats[4][0]))
self.label_data_stat_v_b.setText(str(stats[4][1]))
    def refresh_config_category(self, cat, descriptor, widgets, layout, combo_box):
        """Sync one configuration category's combo box and hyperparameter
        widgets with `descriptor` (rebuilding widgets only when needed)."""
        # remove hyperparameters if descriptor is None, doesn't contain hp or type selection was changed
        if descriptor is None or descriptor.hyperparams is None \
                or (combo_box is not None and combo_box.currentText() != descriptor.name)\
                or len(widgets) != len(descriptor.hyperparams):
            for w in widgets:
                w.value_changed.disconnect(self.hp_changed)
                layout.removeWidget(w)
            widgets.clear()
        # reset combo_box and return if descriptor is None
        if descriptor is None:
            combo_box.setCurrentIndex(-1)
            return
        if combo_box is not None:
            combo_box.setCurrentText(descriptor.name)
        if descriptor.hyperparams is None:
            return
        if len(descriptor.hyperparams) != len(widgets):
            # widget count changed: create a fresh widget per hyperparameter
            for i, param in enumerate(descriptor.hyperparams):
                if param.type == 'bool':
                    pw = HyperParameterWidgetBool(cat, i, param)
                else:
                    pw = HyperParameterWidget(cat, i, param)
                pw.value_changed.connect(self.hp_changed)
                layout.addWidget(pw)
                widgets.append(pw)
        else:
            # same widget set: only push the new values into existing widgets
            for i, param in enumerate(descriptor.hyperparams):
                widgets[i].set_value(param.get_value())
def plot_session(self, session):
self.canvas.ax_loss.clear()
self.last_session = session
tr_loss = session.training_loss
val_loss = session.eval_loss
if len(tr_loss) > 15:
tr_loss = tr_loss[5:]
val_loss = val_loss[5:]
#if len(tr_loss) > 10:
# tr_loss = tr_loss[10:]
# val_loss = val_loss[10:]
training_loss = pd.Series(tr_loss).rolling(self.rolling_average).mean()
eval_loss = pd.Series(val_loss).rolling(self.rolling_average).mean()
self.canvas.ax_loss.plot(training_loss, label='training loss')
self.canvas.ax_loss.plot(eval_loss, label='test loss')
self.canvas.ax_loss.legend()
self.canvas.draw()
    def plot_accuracies(self, profiles):
        """Plot rolling-averaged F1 curves for every checked profile."""
        if profiles is None:
            return
        self._profiles = profiles
        self.acc_ax.clear()
        # f1 mean
        once = False  # tracks whether at least one profile was plotted
        for i, p in enumerate(profiles):
            if not self.acc_cbs[i].isChecked():
                continue
            once = True
            f1, f1_c, f1_i = calc_profile_f1(p)
            ts = pd.Series(f1)
            data = ts.rolling(self.rolling_average).mean()
            self.acc_ax.plot(data, label=f'{p.name} (mean)')
            if not self.plot_only_mean:
                # also plot the per-class (crack / inactive) F1 curves
                ts = pd.Series(f1_c)
                data = ts.rolling(self.rolling_average).mean()
                self.acc_ax.plot(data, label=f'{p.name} (crack)')
                ts = pd.Series(f1_i)
                data = ts.rolling(self.rolling_average).mean()
                self.acc_ax.plot(data, label=f'{p.name} (inactive)')
        if once:
            # only add a legend when something was plotted (avoids warnings)
            self.acc_ax.legend()
        self.acc_canvas.draw()
def smooth_slider_value_changed(self, value):
self.rolling_average = int(value)
self.plot_accuracies(self._profiles)
if self.last_session is not None:
self.plot_session((self.last_session))
    def epoch_slider_value_changed(self, value):
        # NOTE(review): acc_cnt is updated here but plot_accuracies does not
        # read it in this file -- confirm where it is consumed.
        self.acc_cnt = int(value)
        self.plot_accuracies(self._profiles)
    # Export handlers: the int argument selects the checkpoint to export
    # (0=current, 1=best crack, 2=best inactive, 3=best mean).
    def button_export_current(self):
        self.export_model.emit(0)
    def button_export_best_m(self):
        self.export_model.emit(3)
    def button_export_best_c(self):
        self.export_model.emit(1)
    def button_export_best_i(self):
        self.export_model.emit(2)
    def button_clear_session_clicked(self):
        self.clear_session.emit()
    def click_validate(self):
        self.signal_validate.emit()
# --- construction ---------------------
    def load_profile_builder(self):
        # Populate the configuration combo boxes with all registered
        # losses, optimizers, models and data readers.
        self.loss_cb.addItems(Losses.losses)
        self.optimizer_cb.addItems(Optimizers.optimizers)
        self.model_cb.addItems(Models.models)
        self.data_cb.addItems(DataReaders.reader)
    def __init__(self):
        """Initialise all widget references, then build and wire the UI."""
        super(MainWindow, self).__init__()
        # plotting state
        self.plot_only_mean = False
        self.rolling_average = 1   # rolling-mean window for all curves
        self.acc_cnt = 50
        self._profiles = None
        # hyperparameter widgets per configuration category
        self._opt_param_widgets = []
        self._model_param_widgets = []
        self._data_param_widgets = []
        self._loss_param_widgets = []
        self.acc_figure = Figure()
        self.acc_ax = self.acc_figure.add_subplot()
        self.last_session = None
        # new configuration widgets
        self.model_layout = None
        self.loss_layout = None
        self.opt_layout = None
        self.data_layout = None
        self.config_name_label = None
        self.create_profile_button = None
        self.model_cb = None
        self.loss_cb = None
        self.optimizer_cb = None
        # profile widgets
        self.profile_list = None
        self.label_data_stat_t_nbr = None
        self.label_data_stat_t_f = None
        self.label_data_stat_t_c = None
        self.label_data_stat_t_i = None
        self.label_data_stat_t_b = None
        self.label_data_stat_v_nbr = None
        self.label_data_stat_v_f = None
        self.label_data_stat_v_c = None
        self.label_data_stat_v_i = None
        self.label_data_stat_v_b = None
        # session widgets
        self.session_list = None
        self.session_slider = None
        self.epoch_slider = None
        self.fit_button = None
        self.time_label = None
        self.label_current = None
        self.label_best_mean = None
        self.label_best_crack = None
        self.label_best_inactive = None
        self.gb_state = None
        self.label_batch = None
        self.label_phase = None
        self.label_time = None
        # monitoring widgets
        self.acc_canvas = None
        self.canvas = None
        self.profile_acc_check_gb = None
        self.acc_layout = None
        self.acc_cbs = []
        self.button_clone_model = None
        self.button_clone_data = None
        self.init_widgets()
        # --- horizontal main layout -----
        # ---------------------------------
        self.load_profile_builder()
    def init_config_widgets(self, layout):
        """Build the configuration column (model/loss/optimizer/data group
        boxes, each headed by a combo box) and wire its signals."""
        # config_layout
        # --- declarations
        gb_model = QtWidgets.QGroupBox("Model")
        self.model_layout = QtWidgets.QVBoxLayout()
        gb_loss = QtWidgets.QGroupBox("Loss")
        self.loss_layout = QtWidgets.QVBoxLayout()
        gb_opt = QtWidgets.QGroupBox("Optimizer")
        self.opt_layout = QtWidgets.QVBoxLayout()
        gb_data = QtWidgets.QGroupBox("Data")
        self.data_layout = QtWidgets.QVBoxLayout()
        placeholder = QtWidgets.QWidget()
        self.config_name_label = QtWidgets.QLabel()
        self.create_profile_button = QtWidgets.QPushButton("Create Profile")
        self.model_cb = QtWidgets.QComboBox()  # model selection
        self.loss_cb = QtWidgets.QComboBox()  # loss selection
        self.optimizer_cb = QtWidgets.QComboBox()  # optimizer selection
        self.data_cb = QtWidgets.QComboBox()  # model selection
        # --- layout
        layout.addWidget(gb_model)
        self.model_layout.addWidget(self.model_cb)
        layout.addWidget(gb_loss)
        self.loss_layout.addWidget(self.loss_cb)
        layout.addWidget(gb_opt)
        self.opt_layout.addWidget(self.optimizer_cb)
        layout.addWidget(gb_data)
        self.data_layout.addWidget(self.data_cb)
        layout.addWidget(placeholder)
        layout.addWidget(self.config_name_label)
        layout.addWidget(self.create_profile_button)
        # --- initialization
        gb_model.setLayout(self.model_layout)
        gb_loss.setLayout(self.loss_layout)
        gb_opt.setLayout(self.opt_layout)
        gb_data.setLayout(self.data_layout)
        # the placeholder soaks up extra vertical space above the button
        placeholder.setSizePolicy(QtWidgets.QSizePolicy.Policy.Maximum, QtWidgets.QSizePolicy.Policy.Expanding)
        self.create_profile_button.setEnabled(False)
        self.create_profile_button.clicked.connect(self.create_profile_clicked)
        self.model_cb.currentTextChanged.connect(self.model_selection_changed)
        self.loss_cb.currentTextChanged.connect(self.loss_selection_changed)
        self.optimizer_cb.currentTextChanged.connect(self.opt_selection_changed)
        self.data_cb.currentTextChanged.connect(self.data_selection_changed)
def init_profile_widgets(self, layout):
    """Populate the profile column: the list of training profiles, a
    remove button, and a grid showing per-split dataset statistics.

    :param layout: vertical layout of the profile panel.
    """
    # profile layout
    # --- declarations
    self.profile_list = QListWidget()
    button_remove = QtWidgets.QPushButton('Remove')
    label = QtWidgets.QLabel("Training Profiles")
    # self.acc_canvas = FigureCanvasQTAgg(self.acc_figure)
    gb_data = QtWidgets.QGroupBox('Session Data')
    data_layout = QtWidgets.QGridLayout(gb_data)
    # statistics grid headers: rows t/v = training/validation split; columns
    # are per-category counts.
    # NOTE(review): exact meanings of #f/#c/#i/#b are inferred from the
    # attribute names below (f/crack/inactive/b) — confirm against the code
    # that fills these labels.
    label_r0 = QtWidgets.QLabel('t')
    label_r1 = QtWidgets.QLabel('v')
    label_c0 = QtWidgets.QLabel('#')
    label_c1 = QtWidgets.QLabel('#f')
    label_c2 = QtWidgets.QLabel('#c')
    label_c3 = QtWidgets.QLabel('#i')
    label_c4 = QtWidgets.QLabel('#b')
    self.label_data_stat_t_nbr = QtWidgets.QLabel('0')
    self.label_data_stat_t_f = QtWidgets.QLabel('0')
    self.label_data_stat_t_c = QtWidgets.QLabel('0')
    self.label_data_stat_t_i = QtWidgets.QLabel('0')
    self.label_data_stat_t_b = QtWidgets.QLabel('0')
    self.label_data_stat_v_nbr = QtWidgets.QLabel('0')
    self.label_data_stat_v_f = QtWidgets.QLabel('0')
    self.label_data_stat_v_c = QtWidgets.QLabel('0')
    self.label_data_stat_v_i = QtWidgets.QLabel('0')
    self.label_data_stat_v_b = QtWidgets.QLabel('0')
    # --- layout
    layout.addWidget(label)
    layout.addWidget(self.profile_list)
    layout.addWidget(button_remove)
    layout.addWidget(gb_data)
    data_layout.addWidget(label_r0, 0, 1, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(label_r1, 0, 2, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(label_c0, 1, 0, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(label_c1, 2, 0, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(label_c2, 3, 0, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(label_c3, 4, 0, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(label_c4, 5, 0, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(self.label_data_stat_t_nbr, 1, 1, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(self.label_data_stat_t_f, 2, 1, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(self.label_data_stat_t_c, 3, 1, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(self.label_data_stat_t_i, 4, 1, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(self.label_data_stat_t_b, 5, 1, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(self.label_data_stat_v_nbr, 1, 2, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(self.label_data_stat_v_f, 2, 2, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(self.label_data_stat_v_c, 3, 2, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(self.label_data_stat_v_i, 4, 2, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    data_layout.addWidget(self.label_data_stat_v_b, 5, 2, alignment=QtCore.Qt.AlignmentFlag.AlignCenter)
    # --- initialization
    self.profile_list.setFixedWidth(300)
    self.profile_list.itemSelectionChanged.connect(self.change_profile_selection)
    button_remove.clicked.connect(self.remove_profile_clicked)
def init_session_widgets(self, layout, panel):
    """Populate the session column: the list of training sessions, the
    training controls (sliders, fit/validate/clear buttons) and the status
    box with best-score labels and export buttons.

    Fix: a redundant ``panel.setFixedWidth(250)`` was removed from the
    initialization section — it was unconditionally overridden by the final
    ``panel.setFixedWidth(350)`` below, which is the effective value.

    :param layout: vertical layout of the session panel.
    :param panel: containing widget whose width is fixed.
    """
    # --- declarations
    self.session_list = QListWidget()
    label = QtWidgets.QLabel("Training Sessions")
    gb_training = QtWidgets.QGroupBox('Training')
    self.session_slider = Slider('session #', 1, 100, 1)
    self.epoch_slider = Slider('epoch #', 1, 1000, 10)
    self.fit_button = QtWidgets.QPushButton('Fit')
    self.time_label = QtWidgets.QLabel("")
    clear_button = QtWidgets.QPushButton('Clear')
    self.gb_state = QtWidgets.QGroupBox('Status')
    status_grid = QtWidgets.QGridLayout(self.gb_state)
    self.label_current = QtWidgets.QLabel('cur: (0.000, 0.000, 0.000)')
    self.label_best_mean = QtWidgets.QLabel('best m: (0.000, 0.000, 0.000)')
    self.label_best_crack = QtWidgets.QLabel('best c: (0.000, 0.000, 0.000)')
    self.label_best_inactive = QtWidgets.QLabel('best i: (0.000, 0.000, 0.000)')
    button_export_current = QtWidgets.QPushButton('Export')
    button_export_best_m = QtWidgets.QPushButton('Export')
    button_export_best_c = QtWidgets.QPushButton('Export')
    button_export_best_i = QtWidgets.QPushButton('Export')
    self.label_batch = QtWidgets.QLabel()
    self.label_phase = QtWidgets.QLabel()
    self.label_time = QtWidgets.QLabel()
    val_button = QtWidgets.QPushButton('Validate')
    self.button_clone_model = QtWidgets.QPushButton('Set Checkpoint')
    self.button_clone_data = QtWidgets.QPushButton('Set Data')
    # --- layout
    layout.addWidget(label)
    layout.addWidget(self.session_list)
    layout.addWidget(gb_training)
    # NOTE(review): add_vlayout presumably attaches the new layout to a child
    # of `layout`; re-using it as gb_training's layout below looks suspicious
    # — confirm against add_vlayout's implementation.
    training_layout, _ = add_vlayout(layout)
    training_layout.addWidget(self.button_clone_model)
    training_layout.addWidget(self.button_clone_data)
    training_layout.addWidget(self.session_slider)
    training_layout.addWidget(self.epoch_slider)
    training_layout.addWidget(clear_button)
    training_layout.addWidget(val_button)
    training_layout.addWidget(self.fit_button)
    training_layout.addWidget(self.time_label)
    layout.addWidget(self.gb_state)
    status_grid.addWidget(self.label_current, 0, 0)
    status_grid.addWidget(self.label_best_mean, 1, 0)
    status_grid.addWidget(self.label_best_crack, 2, 0)
    status_grid.addWidget(self.label_best_inactive, 3, 0)
    status_grid.addWidget(button_export_current, 0, 1)
    status_grid.addWidget(button_export_best_m, 1, 1)
    status_grid.addWidget(button_export_best_c, 2, 1)
    status_grid.addWidget(button_export_best_i, 3, 1)
    layout.addWidget(self.label_phase)
    layout.addWidget(self.label_batch)
    layout.addWidget(self.label_time)
    # --- initialization
    self.session_list.currentRowChanged.connect(self.session_selection_changed)
    gb_training.setLayout(training_layout)
    self.fit_button.clicked.connect(self.click_fit)
    button_export_current.clicked.connect(self.button_export_current)
    button_export_best_c.clicked.connect(self.button_export_best_c)
    button_export_best_i.clicked.connect(self.button_export_best_i)
    button_export_best_m.clicked.connect(self.button_export_best_m)
    clear_button.clicked.connect(self.button_clear_session_clicked)
    self.button_clone_model.clicked.connect(self.button_clone_model_clicked)
    self.button_clone_data.clicked.connect(self.button_clone_data_clicked)
    val_button.clicked.connect(self.click_validate)
    panel.setFixedWidth(350)
def init_monitoring_widgets(self, layout):
    """Populate the monitoring column: a tab widget holding the session
    plot and the accuracy chart, plus sliders that control the accuracy
    view (epoch range and running-average window).

    :param layout: vertical layout of the monitoring panel.
    """
    # declarations
    tab = QtWidgets.QTabWidget()
    self.canvas = CustomCanvas()
    acc_panel = QtWidgets.QWidget()
    self.acc_layout = QtWidgets.QVBoxLayout()
    self.acc_canvas = FigureCanvasQTAgg(self.acc_figure)
    epoch_slider = Slider("max epoch", 10, 500, self.acc_cnt)
    smooth_slider = Slider("running average cnt", 1, 20, self.rolling_average)
    self.profile_acc_check_gb = QtWidgets.QGroupBox('Visible')
    # layout
    layout.addWidget(tab)
    tab.addTab(self.canvas, "Session Plot")
    tab.addTab(acc_panel, "Accuracies")
    self.acc_layout.addWidget(self.acc_canvas)
    self.acc_layout.addWidget(epoch_slider)
    layout.addWidget(smooth_slider)
    # initializations
    tab.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,
                      QtWidgets.QSizePolicy.Policy.Ignored)
    self.canvas.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,
                              QtWidgets.QSizePolicy.Policy.Expanding)
    acc_panel.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,
                            QtWidgets.QSizePolicy.Policy.Expanding)
    acc_panel.setLayout(self.acc_layout)
    epoch_slider.value_changed.connect(self.epoch_slider_value_changed)
    smooth_slider.value_changed.connect(self.smooth_slider_value_changed)
    smooth_slider.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,
                                QtWidgets.QSizePolicy.Policy.Fixed)
def init_widgets(self):
    """Build the central widget with a horizontal four-column layout
    (config | profiles | sessions | monitoring) and delegate population
    of each column to the dedicated init_* method.
    """
    main = QtWidgets.QWidget()
    self.setCentralWidget(main)
    main_layout = QtWidgets.QHBoxLayout()
    main.setLayout(main_layout)
    # one (layout, panel) pair per column
    config_layout, config_panel = add_vlayout(main_layout)
    profile_layout, profile_panel = add_vlayout(main_layout)
    session_layout, session_panel = add_vlayout(main_layout)
    monitoring_layout, monitoring_panel = add_vlayout(main_layout)
    self.init_config_widgets(config_layout)
    self.init_profile_widgets(profile_layout)
    self.init_session_widgets(session_layout, session_panel)
    self.init_monitoring_widgets(monitoring_layout)
    # the first three columns keep a fixed width; the monitoring column
    # absorbs any remaining horizontal space
    config_panel.setSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed,
                               QtWidgets.QSizePolicy.Policy.Expanding)
    profile_panel.setSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed,
                                QtWidgets.QSizePolicy.Policy.Expanding)
    session_panel.setSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed,
                                QtWidgets.QSizePolicy.Policy.Expanding)
    monitoring_panel.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,
                                   QtWidgets.QSizePolicy.Policy.Expanding)
| Falrach94/deeplearning_ex4 | gui/MainWindow.py | MainWindow.py | py | 26,881 | python | en | code | 0 | github-code | 36 |
43678975341 | # importing the requests library
import requests
import time

# defining the api-endpoint
URL = "http://127.0.0.1:5000/add"
# data to be sent to api: one sensor reading stamped with the current time
PARAMS = {
    'TimeStamp': time.time(),
    'Temp1': '24.00',
    'Temp2': '24.00',
    'TAmbiant': '23.00',
    'Humidity': '35'}
# sending post request and saving response as response object
# NOTE(review): despite the PARAMS name the dict is sent as form data
# (requests' `data=`), not as URL query params — confirm the server
# expects form encoding.
r = requests.post(url = URL, data = PARAMS)
print("The response is %s"%r)
# extracting response text
pastebin_url = r.text
print("The Response Body is:%s"%pastebin_url) | mh49/HSTM | test_tools/post.py | post.py | py | 548 | python | en | code | 0 | github-code | 36 |
9417381460 | """Lab_3.finite_automaton"""
import random
class State:
    """Determination of possible states of the finite automaton.

    Plain string constants (not an Enum) so states compare and print
    naturally in the simulation output.
    """
    SLEEP = "Sleep"
    EAT = "Eat"
    WORK = "Work"
    RELAX = "Relax"
    PLAY = "Play"
class FiniteStateMachine:
    """A class that implements a finite automaton simulating one day."""

    def __init__(self):
        """Start the day asleep."""
        self.state = State.SLEEP

    def run(self):
        """
        The run method simulates a day in the life using a finite state machine.
        Each hour is checked for current status and transitions are made
        into new states depending on conditions and random events.
        """
        for hour in range(24):
            if self.state == State.SLEEP:
                # Transition to EAT on a random event at 7:00
                if random.random() > 0.5 and hour == 7:
                    print("Ah..., good new day")
                    self.state = State.EAT
                # Transition to RELAX at 8:00 (overslept)
                elif hour == 8:
                    print("Oh god not, I did not wake up in time again..")
                    self.state = State.RELAX
                else:
                    print("Zzzz.....")
            elif self.state == State.EAT:
                # Transition to WORK at 9:00
                if hour == 9:
                    print("Good... breakfast was nice, now it is time to work")
                    self.state = State.WORK
            elif self.state == State.WORK:
                # Transition to RELAX on a random event at 14:00
                if random.random() > 0.8 and hour == 14:
                    print("I need a break, time to relax")
                    self.state = State.RELAX
                # Transition to PLAY at 18:00
                elif hour == 18:
                    print("Work is done, time to play")
                    self.state = State.PLAY
            elif self.state == State.RELAX:
                # Transition back to WORK at 19:00
                if hour == 19:
                    print("Relaxation time is over, back to work")
                    self.state = State.WORK
            elif self.state == State.PLAY:
                # Transition to SLEEP on a random event at 22:00
                if random.random() > 0.6 and hour == 22:
                    print("Tired, time to sleep")
                    self.state = State.SLEEP
# Run the simulation only when executed as a script, not on import.
if __name__ == "__main__":
    fsm = FiniteStateMachine()
    fsm.run()
| vbronetskyi/Lab_3.disctret.2023 | finite_automaton.py | finite_automaton.py | py | 2,751 | python | en | code | 0 | github-code | 36 |
22777212657 | #!/usr/bin/python
import threading
import Queue
import socket
import time
import struct
import subprocess
class ROAjobMaster(threading.Thread):
    """Main ROA worker thread (Python 2 code).

    Connects to the control PC over TCP, configures the modem FPGA via UDP
    (#INI / #CFG / #EMC packets), then receives data packets from the PC
    and hands them to the UDP sender thread through ``dataQueue``.

    Fixes applied:
      * ``sock.close`` in CFGPktGen/EMCPktGen was missing the call
        parentheses, so the UDP sockets were never closed.
      * the packet receive loop previously retried only once on a partial
        TCP read; it now loops until the full packet has arrived.
    """

    def __init__(self, dataQueue, timerQueue, loggerQueue, statEvent, stopEvent):
        super(ROAjobMaster, self).__init__()
        self.dataQueue = dataQueue      # received packets, consumed by UDPDataSend
        self.timerQueue = timerQueue    # transfer-period updates for UDPDataTimer
        self.loggerQueue = loggerQueue  # log messages, consumed by ROALogClient
        self.PCADDR = ('192.168.2.4', 10002)
        self.DFTFPGAADDR = ('10.10.1.100', 13108)
        self.FPGAADDR = ('10.10.1.102', 3202)
        # self.DFTFPGAADDR = ('192.168.2.4', 13108)
        # self.FPGAADDR = ('192.168.2.4', 3202)
        self.EMITTER_ID = '23'
        self.EMITTER_POW = '1'
        self.EMITTER_DIM = '1'
        self.recv_data = ''
        self.pktSize = 0        # payload size of one data packet (bytes)
        self.pktTotal = 0       # number of packets in a transfer
        self.xferPeriod = 0.0   # pacing period forwarded to the timer thread
        self.symrate = 3
        self.cfgType = ''
        self.lastRun = False    # set from the config header; ends the main loop
        self.idle = True
        self.statEvent = statEvent
        self.stopEvent = stopEvent

    def run(self):
        """Handshake with the PC, run transfers until the last one, then signal stop."""
        self.dataSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.dataSock.connect(self.PCADDR)
        self.alive = threading.Event()
        self.alive.set()
        self.INIPktGen()
        self.CFGPktGen()
        self.EMCPktGen()
        while self.lastRun == False:
            try:
                # wait for the PC to announce the next transfer
                while self.recv_data != 'START_XFER':
                    self.recv_data = self.dataSock.recv(10)
                    time.sleep(0.5)
                self.idle = False
                # Retrieve configuration
                self.pktSize, self.pktTotal, self.xferPeriod, self.symrate, self.lastRun = struct.unpack('!IIfI?', self.dataSock.recv(17))
                self.cfgType = self.dataSock.recv(3)
                self.loggerPut('ROA configuration received: ' + self.cfgType)
                # Forward configuration to UDPTimer
                self.timerQueue.put(self.xferPeriod)
                if self.cfgType == 'INI':
                    self.INIPktGen()
                    self.CFGPktGen()
                    self.loggerPut('ROA Config Done, wait 5s')
                    time.sleep(5)  # OM gets itself sorted out
                self.dataSock.send('ROAREADY')
                self.loggerPut('Transfer Start')
                for i in xrange(self.pktTotal):
                    self.recv_data = self.dataSock.recv(self.pktSize + 4)
                    # TCP may deliver the packet in several segments: keep
                    # reading until the whole packet has arrived (the old code
                    # retried only once, which could enqueue short packets).
                    while len(self.recv_data) < (self.pktSize + 4):
                        self.recv_data += self.dataSock.recv(self.pktSize + 4 - len(self.recv_data))
                    self.dataQueue.put(self.recv_data)
                # wait until the sender has drained the queue
                while self.idle != True:
                    self.idle = self.dataQueue.empty()
                self.loggerPut('Transfer Done')
                time.sleep(5)
                self.statEvent.set()
            except Queue.Empty as e:
                continue
        self.loggerPut('STOPPING!!!')
        self.dataSock.close()
        self.stopEvent.set()

    def join(self, timeout=None):
        self.alive.clear()
        threading.Thread.join(self, timeout)

    def INIPktGen(self):
        """Load the FPGA bitstream and send the #INI network-setup packet."""
        loadFpga = subprocess.call(['/root/load_fpga_WHOI', '/root/om3x_spartan6_b27.bit'], stdout=subprocess.PIPE)
        # time.sleep(5)
        # FPGA CONFIG
        FPGA_MAC1 = 0x12345678
        FPGA_MAC2 = 0x90AB
        FPGA_IP = 0x0A0A0166
        FPGA_PORT = 3202
        # ARMADEUS CONFIG
        ARMA_MAC1 = 0x32A7D885
        ARMA_MAC2 = 0x6ABF
        ARMA_IP = 0x0A0A0102
        ARMA_PORT = 3201
        MESSAGE = "#INI"
        MESSAGE += struct.pack("!H", 0x0001)  # RESET
        MESSAGE += struct.pack("!IH", FPGA_MAC1, FPGA_MAC2)
        MESSAGE += struct.pack("!IH", FPGA_IP, FPGA_PORT)
        MESSAGE += struct.pack("!IH", ARMA_MAC1, ARMA_MAC2)
        MESSAGE += struct.pack("!IH", ARMA_IP, ARMA_PORT)
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # UDP
        sock.sendto(MESSAGE, self.DFTFPGAADDR)
        sock.close()
        time.sleep(0.2)

    def CFGPktGen(self):
        """Send the #CFG modulation-settings packet to the FPGA."""
        # Modulation settings
        PREAMBLE = 512  # at 2*symbol rate
        MESSAGE = "#CFG"
        MESSAGE += struct.pack("!I", 0x0000050B)            # Settings
        MESSAGE += struct.pack("!BH", 0x00, 10000)          # Superframe period
        MESSAGE += struct.pack("!BH", 0x00, 8650)           # TX start
        MESSAGE += struct.pack("!BH", 0x00, 9450)           # TX end
        MESSAGE += struct.pack("!BH", 0x00, 0)              # RX start
        MESSAGE += struct.pack("!BH", 0x00, 8300)           # RX end
        MESSAGE += struct.pack("!BBB", 0x75, 0x00, PREAMBLE/4)  # AGC response(4) + HV level(12) + Preamble(8)
        MESSAGE += struct.pack("!BB", self.symrate, 0x00)   # TX symbol rate + carrier rate
        MESSAGE += struct.pack("!BB", self.symrate, 0x00)   # RX symbol rate + carrier rate
        MESSAGE += struct.pack("!I", 0x000F8001)            # Capture
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.sendto(MESSAGE, self.FPGAADDR)
        sock.close()  # fixed: was `sock.close` (never called)

    def EMCPktGen(self):
        """Send the #EMC emitter-configuration packet to the FPGA."""
        MESSAGE = "#EMC"
        MESSAGE += struct.pack("!H", 0)
        MESSAGE += struct.pack("!cccc", self.EMITTER_ID[0],
                               self.EMITTER_ID[1],
                               self.EMITTER_POW,
                               self.EMITTER_DIM)
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.sendto(MESSAGE, self.FPGAADDR)
        sock.close()  # fixed: was `sock.close` (never called)

    def loggerPut(self, msgIn):
        """Timestamp *msgIn* and queue it for the log client."""
        msg = 'ROAjobMaster - ' + str(time.time()) + ': ' + msgIn
        self.loggerQueue.put(msg)
class UDPDataSend(threading.Thread):
    """Forwards queued data packets to the MOA over UDP, one per timer tick."""

    def __init__(self, dataQueue, tickEvent):
        super(UDPDataSend, self).__init__()
        self.dataQueue = dataQueue
        self.MOAADDR = ('10.10.1.3', 10016)
        # self.MOAADDR = ('192.168.2.4', 10016)
        self.alive = threading.Event()
        self.alive.set()
        self.tick = tickEvent  # pacing event set by UDPDataTimer

    def run(self):
        data = ''
        dataSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        while self.alive.is_set():
            # wait for the pacing tick, then send exactly one queued packet
            self.tick.wait()
            self.tick.clear()
            # NOTE(review): Queue.get() blocks when the queue is empty, which
            # can delay join(); confirm the producer keeps the queue fed.
            data = self.dataQueue.get()
            dataSock.sendto(data, self.MOAADDR)
        dataSock.close()

    def join(self, timeout=None):
        self.alive.clear()
        threading.Thread.join(self, timeout)
class UDPDataTimer(threading.Thread):
    """Periodically sets the tick event that paces UDPDataSend; the sleep
    interval can be updated at runtime through ``timerQueue``."""

    def __init__(self, timerQueue, tickEvent):
        super(UDPDataTimer, self).__init__()
        self.timerQueue = timerQueue
        self.alive = threading.Event()
        self.alive.set()
        self.tick = tickEvent
        self.sleepValue = 1  # seconds between ticks until a period arrives on the queue

    def run(self):
        while self.alive.is_set():
            try:
                time.sleep(self.sleepValue)
                self.tick.set()
                # pick up a new period if one is available; otherwise keep the
                # current one (Queue.Empty -> continue)
                self.sleepValue = self.timerQueue.get_nowait()
            except Queue.Empty as e:
                continue

    def join(self, timeout=None):
        self.alive.clear()
        threading.Thread.join(self, timeout)
class ROAStatus(threading.Thread):
    """Listens for #STA status datagrams from the modem and, at the end of
    each transfer (signalled via ``xferEvent``), logs the number of bytes
    transmitted since the previous transfer."""

    def __init__(self, loggerQueue, xferEvent):
        super(ROAStatus, self).__init__()
        self.STATADDR = ('10.10.1.2', 3201)
        self.loggerQueue = loggerQueue
        self.alive = threading.Event()
        self.alive.set()
        self.xferEvent = xferEvent
        self.prev_TX = 0  # TX byte counter at the end of the previous transfer
        self.new_TX = 0   # most recent TX byte counter

    def run(self):
        statSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        statSock.bind(self.STATADDR)
        while self.alive.is_set():
            data = statSock.recv(63)
            tempList = []
            # time.sleep(1)
            if data[0:4] == '#STA':
                # TX byte counter lives at offset 46..49 of the status packet
                tempList = struct.unpack('!I', data[46:50])
                self.new_TX = tempList[0]
                # self.new_TX = time.time()
            if self.xferEvent.is_set():
                self.loggerPut('ROA end of test status')
                time.sleep(0.1)
                self.loggerPut(str(self.new_TX - self.prev_TX) + ' Bytes transmitted')
                self.xferEvent.clear()
                self.prev_TX = self.new_TX
        statSock.close()

    def join(self, timeout=None):
        self.alive.clear()
        threading.Thread.join(self, timeout)

    def loggerPut(self, msgIn):
        # timestamp the message and queue it for the log client
        msg = 'ROAStat - ' + str(time.time()) + ': ' + msgIn
        self.loggerQueue.put(msg)
class ROALogClient(threading.Thread):
    """Forwards queued log messages to the control PC over TCP."""

    def __init__(self, loggerQueue):
        super(ROALogClient, self).__init__()
        self.loggerQueue = loggerQueue
        self.PCADDR = ('192.168.2.4', 10012)
        self.alive = threading.Event()
        self.alive.set()

    def run(self):
        queue_data = ''
        logSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        logSock.connect(self.PCADDR)
        while self.alive.is_set():
            try:
                # NOTE(review): Queue.get() blocks, so after join() this loop
                # only exits once one more message arrives — confirm acceptable.
                queue_data = self.loggerQueue.get()
                logSock.send(queue_data)
            except Queue.Empty as e:
                continue
        logSock.close()

    def join(self, timeout=None):
        self.alive.clear()
        threading.Thread.join(self, timeout)
if __name__ == '__main__':
    # Shared queues and events wiring the worker threads together.
    timerQueue = Queue.Queue()
    loggerQueue = Queue.Queue()
    dataQueue = Queue.Queue(2048)  # bounded: back-pressures the receiver
    tickEvent = threading.Event()
    statEvent = threading.Event()
    stopEvent = threading.Event()
    stopEvent.clear()
    statEvent.clear()
    JOBMASTER = ROAjobMaster(dataQueue, timerQueue, loggerQueue, statEvent, stopEvent)
    UDPSEND = UDPDataSend(dataQueue, tickEvent)
    UDPTIME = UDPDataTimer(timerQueue, tickEvent)
    STATUS = ROAStatus(loggerQueue, statEvent)
    LOG = ROALogClient(loggerQueue)
    JOBMASTER.start()
    UDPSEND.start()
    UDPTIME.start()
    STATUS.start()
    LOG.start()
    # Block until the job master signals completion, then join all threads.
    stopEvent.wait()
    JOBMASTER.join()
    UDPSEND.join()
    UDPTIME.join()
    STATUS.join()
    LOG.join()
    exit()
| lpelletier/PY_OMTESTSUITE | OMTEST_ROA.py | OMTEST_ROA.py | py | 8,278 | python | en | code | 0 | github-code | 36 |
21749724281 | import unittest
from BaseClasses import MultiWorld
from worlds.AutoWorld import AutoWorldRegister
class TestBase(unittest.TestCase):
    """Cross-world sanity checks: item and location IDs must be unique
    across every registered game world."""
    world: MultiWorld
    _state_cache = {}

    def testUniqueItems(self):
        # Union all item IDs across every registered world. If the union grew
        # by exactly this world's own count, no ID was reused by another game.
        known_item_ids = set()
        for gamename, world_type in AutoWorldRegister.world_types.items():
            current = len(known_item_ids)
            known_item_ids |= set(world_type.item_id_to_name)
            self.assertEqual(len(known_item_ids) - len(world_type.item_id_to_name), current)

    def testUniqueLocations(self):
        # Same check as testUniqueItems, but for location IDs.
        known_location_ids = set()
        for gamename, world_type in AutoWorldRegister.world_types.items():
            current = len(known_location_ids)
            known_location_ids |= set(world_type.location_id_to_name)
            self.assertEqual(len(known_location_ids) - len(world_type.location_id_to_name), current)
| adampziegler/Archipelago | test/general/TestUniqueness.py | TestUniqueness.py | py | 878 | python | en | code | null | github-code | 36 |
def word_wrap3(adres):
    """Echo the text file *adres* to stdout, word-wrapping every line longer
    than 60 characters at word boundaries.

    Bug fixed: lines of 60 characters or fewer were printed with ``end=""``,
    which glued consecutive short lines together on one output row; they now
    end with a newline, like the wrapped output does.

    :param adres: path of the text file to print.
    """
    with open(adres) as f:
        icerik = f.readlines()
    icerik = [elem.replace('\n', '') for elem in icerik]
    for line in icerik:
        if len(line) <= 60:
            print(line)
        else:
            count = 0  # characters already emitted on the current output row
            for text in line.split(" "):
                if count + len(text) <= 60:
                    print(text, end=" ")
                    count += len(text) + 1
                else:
                    # word does not fit: start a new output row
                    print()
                    print(text, end=" ")
                    count = len(text) + 1
            print()
word_wrap3("any.txt")
| Bygokcen/codestepbystep_works | main.py | main.py | py | 725 | python | en | code | 0 | github-code | 36 |
40761385267 | import os
import sys
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# PROJ_DIR = FILE_DIR[:FILE_DIR.index('src')]
# sys.path.append(PROJ_DIR)
PROJ_DIR = os.path.abspath("..")
print(f"proj_dir is: {PROJ_DIR}, adding to sys.path")
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from torchmetrics.functional import bleu_score
from transformers import get_linear_schedule_with_warmup
from q_snippets.data import load_json, save_json
from data_utils import Seq2seqDataModule
from model import get_model_by_name
class Seq2seqGeneration(pl.LightningModule):
    """LightningModule wrapping a HuggingFace seq2seq LM (e.g. T5/BART)
    for conditional generation, with BLEU-based validation."""

    def __init__(self, config, model):
        super().__init__()
        self.config = config
        self.model = model
        # ignore model to avoid assigning model to Omegaconf when load_from_ckpt
        self.save_hyperparameters(ignore='model')
        # per-epoch buffers for validation decoding / BLEU computation
        self.val_pred_ids = []
        self.val_target_ids = []
        self.gold_corpus = []
        self.pred_corpus = []

    def forward(self, batch):
        def _custom_forward(batch):
            """for BART: shift the targets manually and mask pad tokens
            in the labels with -100 so they are ignored by the loss."""
            if batch.target_ids is not None:
                target_ids = batch.target_ids[:, :-1].contiguous()
                lm_labels = batch.target_ids[:, 1:].clone()
                lm_labels[batch.target_ids[:, 1:] == self.model.config.pad_token_id] = -100
            else:
                target_ids, lm_labels = None, None
            # print(batch.input_ids.size(), target_ids.size(), lm_labels.size())
            output = self.model(
                input_ids=batch.input_ids, attention_mask=batch.attention_mask,
                decoder_input_ids=target_ids,
                labels=lm_labels,
                # output_attentions=True # for copy mechanism
            )
            return output

        def _default_forward(batch):
            """for T5: during training the model derives decoder_input_ids
            automatically by right-shifting the `labels` argument."""
            return self.model(batch.input_ids, attention_mask=batch.attention_mask, labels=batch.target_ids)

        return _default_forward(batch)
        # return _custom_forward(batch)

    def training_step(self, batch, batch_idx):
        output = self(batch)
        self.log('train_loss', output.loss, prog_bar=True, sync_dist=True)
        return output.loss

    def validation_step(self, batch, batch_idx):
        output = self(batch)
        # self.val_pred_ids.extend(output.logits.argmax(-1).cpu().numpy().tolist())
        self.log('val_loss', output.loss, prog_bar=True, sync_dist=True)  # log the val_loss
        pred_ids = self.model.generate(
            input_ids=batch.input_ids, max_length=500, use_cache=True,
            num_beams=1, do_sample=False  # greedy search is the fastest
        )
        self.val_pred_ids.extend(pred_ids)
        # save gold ids for bleu computing (only on the first epoch; the
        # references do not change afterwards)
        if self.gold_corpus == [] and batch.target_ids is not None:
            self.val_target_ids.extend(batch.target_ids.cpu().numpy().tolist())

    def _save_val_result(self):
        """Dump per-sample (expected, generated) pairs for this validation
        epoch as JSON into the logger's directory."""
        self.gold_corpus = ["None" for _ in self.pred_corpus] if self.gold_corpus == [] else self.gold_corpus
        R = []
        for p, sample, g in zip(self.pred_corpus, self.trainer.datamodule.valset.samples, self.gold_corpus):
            R.append(dict(
                **sample.__dict__,
                **{
                    'expected': g,
                    'generated': p}
            ))
        # logdir = trainer.logger.log_dir if hasattr(trainer.logger, 'log_dir') else trainer.logger.save_dir
        logdir = self.trainer.logger.log_dir
        filename = os.path.join(logdir, f"val_epoch{self.current_epoch:02}.json")
        save_json(R, filename)

    def validation_epoch_end(self, outputs):
        """Decode the accumulated ids, compute corpus BLEU, persist the
        per-sample results, and reset the epoch buffers."""
        tokenizer = self.trainer.datamodule.tokenizer
        self.pred_corpus = tokenizer.batch_decode(self.val_pred_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        if self.gold_corpus == [] and self.val_target_ids != []:
            self.gold_corpus = tokenizer.batch_decode(self.val_target_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        print(len(self.pred_corpus), len(self.gold_corpus))
        # bleu_score expects a list of reference lists (one reference each)
        bleu = bleu_score(self.pred_corpus, [[_] for _ in self.gold_corpus])
        self.log('val_bleu', bleu, prog_bar=True, sync_dist=True)
        self._save_val_result()
        self.val_pred_ids, self.val_target_ids = [], []

    def _get_grouped_params(self):
        no_decay = ["bias", "LayerNorm.weight"]
        # Group parameters to those that will and will not have weight decay applied
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                "weight_decay": 0.01,
            },
            {
                "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        return optimizer_grouped_parameters

    def configure_optimizers(self):
        """AdamW plus a linear warmup/decay schedule stepped every batch."""
        optimizer = optim.AdamW(self._get_grouped_params(), lr=self.config.lr)
        # return optimizer
        total_steps = int(len(self.trainer.datamodule.train_dataloader()) // self.config.accumulate_grads) * self.config.max_epochs  # accumulate_grads
        warmup_step = int(total_steps * self.config.warmup_rate)
        # lr_scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_step, num_training_steps=steps_per_epoch*self.config.max_epochs)
        lr_scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_step, num_training_steps=total_steps)
        return [optimizer], [{'scheduler': lr_scheduler, 'interval': 'step', 'frequency': 1, 'strict': True, 'monitor': None}]

    def predict_step(self, batch, batch_idx):
        batch_pred_ids = self.model.generate(
            input_ids=batch.input_ids, max_length=500, use_cache=True)
        return batch_pred_ids.cpu().numpy().tolist()

    def on_predict_epoch_end(self, results):
        """results = [[ batch_result ]] with batch_result = [[], [], ...].

        Aggregates the per-step prediction ids, decodes them, and writes the
        result file configured at ``config.preds.result_path``.
        (The previous ``-> None`` annotation was wrong: this hook returns
        the decoded predictions.)
        """
        all_pred_ids = sum(results[0], [])
        preds = self.trainer.datamodule.tokenizer.batch_decode(all_pred_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        R = []
        for sample, p in zip(self.trainer.datamodule.testset.samples, preds):
            R.append(dict(
                **sample.__dict__,
                **{
                    'generated': p}
            ))
        save_json(R, self.config.preds.result_path)
        return preds
def train_model(config):
    """Build the model and datamodule, then run a full training loop with
    checkpointing on val_bleu and early stopping on train_loss."""
    _model = get_model_by_name(config)
    model = Seq2seqGeneration(config, _model)  # wrap the raw HF model in the Lightning module
    dm = Seq2seqDataModule(config=config)
    logger = TensorBoardLogger(
        save_dir="./lightning_logs/",
        name=None,  # experiment name: ./lightning_logs/exp_name/version_name
        version=config.version,  # version name: ./lightning_logs/version_name
    )
    # checkpoint directory and naming
    CUR_DIR = os.getcwd()
    dirname = os.path.join(CUR_DIR, "./lightning_logs/", config.version)
    ckpt_callback = ModelCheckpoint(
        dirpath=dirname,
        filename="{epoch}_{train_loss:.4f}_{val_bleu:.4f}",  # checkpoint name: epoch info plus validation scores
        monitor='val_bleu',
        mode='max',
        save_top_k=3,
        verbose=True,
    )
    es = EarlyStopping('train_loss', patience=10, mode='min')
    trainer = pl.Trainer(
        accumulate_grad_batches=config.accumulate_grads,
        logger=logger,
        num_sanity_val_steps=0,  # the sanity check would corrupt the size of self.gold_corpus during validation
        # limit_train_batches=64,  # cap the training set for quick debugging
        # limit_val_batches=64,    # usually validate on the full set; the validation hook may fail otherwise
        max_epochs=config.max_epochs,
        callbacks=[ckpt_callback, es],
        accelerator="gpu",
        devices=1,
        # resume_from_checkpoint="/home/qing/repos/demo/conditional_generation/lightning_logs/2rd/epoch=1_train_loss=2.0390_val_bleu=0.0197.ckpt"
    )
    # dm.setup(stage='fit')
    trainer.fit(model, dm)
def predict_ckpt(config):
    """Load a trained checkpoint and run prediction over the test set.

    The decoded results are written to disk by on_predict_epoch_end.
    """
    dm = Seq2seqDataModule(config)
    dm.setup(stage='test')
    _model = get_model_by_name(config)
    model = Seq2seqGeneration.load_from_checkpoint(config.preds.ckpt_path, config=config, model=_model)
    trainer = pl.Trainer(accelerator="gpu", devices=1)
    x = trainer.predict(model, dm)  # predictions are already saved in on_predict_epoch_end
    print(type(x))
def raw_generate(config):
    """Generate predictions for the test set directly (greedy search),
    without going through a pl.Trainer prediction loop."""
    from tqdm import tqdm
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    dm = Seq2seqDataModule(config)
    dm.setup(stage='test')
    _model = get_model_by_name(config)
    model = Seq2seqGeneration.load_from_checkpoint(config.preds.ckpt_path, config=config, model=_model).to(device)
    with torch.no_grad():
        R = []
        for i, batch in enumerate(tqdm(dm.test_dataloader())):
            batch = batch.to(device)
            pred_ids = model.model.generate(input_ids=batch.input_ids, max_length=500, use_cache=True, num_beams=1, do_sample=False)
            R.extend(pred_ids.cpu().numpy().tolist())
        x = dm.tokenizer.batch_decode(R)
        # NOTE(review): `x` is decoded but never used or saved here — the
        # function looks truncated; confirm against the full source.
| Qing25/demo | conditional_generation/frame.py | frame.py | py | 9,872 | python | en | code | 8 | github-code | 36 |
26932945307 | from EelemFringe import ElemFringe
from Fringe import Fringe
from Node import Node
from State import State
import labyrinth
def treeSearch():
    """Tree search over the labyrinth: repeatedly extract a node from the
    fringe, stop when it is a goal state, otherwise expand its neighbours
    (each scored with the heuristic table labyrinth.h)."""
    # initialization objects
    fringe = Fringe()
    state = State()
    node = Node(state, None, labyrinth.h[state.position])  # root node with its heuristic value
    elemFringe = ElemFringe(node)
    fringe.addElem(elemFringe)
    i = 0  # expansion counter, used only in the progress printout
    while not fringe.isEmpty():
        elem = fringe.extract()
        print("Passo " + str(i) + ": " + str(elem.node.state.position) + "\n")
        if elem.node.state.checkGoalState():
            # goal reached: print the path from the root and stop
            elem.node.printPath()
            break
        # expand: add every neighbour as a child node on the fringe
        for neighbor in elem.node.state.getNeighborhood():
            newNode = Node(State(neighbor), elem.node, labyrinth.h[neighbor])
            elem.node.addChild(newNode)
            fringe.addElem(ElemFringe(newNode))
        i += 1


treeSearch()
| aledigirm3/AI | ricercaEuristica/Astar/treeSearchLabirinto/main.py | main.py | py | 876 | python | en | code | 0 | github-code | 36 |
15826790722 | # Standard imports
import copy
import pandas as pd
import logging
import jsonpickle as jpickle
import sklearn.cluster as sc
# Our imports
import emission.storage.timeseries.abstract_timeseries as esta
import emission.analysis.modelling.tour_model.get_scores as gs
import emission.analysis.modelling.tour_model.get_users as gu
import emission.analysis.modelling.tour_model.label_processing as lp
import emission.analysis.modelling.tour_model.evaluation_pipeline as ep
import emission.analysis.modelling.tour_model.load_predict as load
import emission.analysis.modelling.tour_model.data_preprocessing as preprocess
def find_best_split_and_parameters(user, test_data):
    """Pick the highest-scoring split for *user*.

    Reads the per-user tuning summary CSV (``user_<user>.csv``, indexed by
    the 'split' column) and returns the corresponding test split together
    with the tuned parameters stored for it.

    :param user: user identifier used to locate the summary CSV.
    :param test_data: list of splits, positionally aligned with the CSV rows.
    :return: (best_split, best_split_idx, lower_boundary, distance_percentage)
    """
    summary = pd.read_csv(f"user_{user}.csv", index_col='split')
    score_list = summary['scores'].tolist()
    # index of the first occurrence of the maximum score
    winner = score_list.index(max(score_list))
    return (
        test_data[winner],
        winner,
        summary.loc[winner, 'lower boundary'],
        summary.loc[winner, 'distance percentage'],
    )
# def find_best_parameters(user,best_split_idx):
# tradeoff_filename = 'tradeoff_' + str(user)
# tradeoff_1user = load.loadModelStage(tradeoff_filename)
# best_parameters = tradeoff_1user[best_split_idx]
# return best_parameters
def save_models(obj_name, obj, user):
    """Serialise *obj* with jsonpickle and write it to a file named
    '<obj_name>_<user>' in the working directory."""
    target = "%s_%s" % (obj_name, user)
    with open(target, "w") as out_file:
        out_file.write(jpickle.dumps(obj))
def main():
    """Build and persist per-user trip-label models.

    For every user with enough valid labeled trips: pick the best tuning
    split, run two rounds of clustering (bin-based, then KMeans per bin),
    and save the resulting label distributions, fitted models and bin
    location features via save_models().
    """
    all_users = esta.TimeSeries.get_uuid_list()
    radius = 100
    for a in range(len(all_users)):
        user = all_users[a]
        trips = preprocess.read_data(user)
        filter_trips = preprocess.filter_data(trips, radius)
        # filter out users that don't have enough valid labeled trips
        if not gu.valid_user(filter_trips, trips):
            logging.debug("This user doesn't have enough valid trips for further analysis.")
            continue
        tune_idx, test_idx = preprocess.split_data(filter_trips)
        test_data = preprocess.get_subdata(filter_trips, tune_idx)
        # find the best split and parameters, and use them to build the model
        best_split, best_split_idx, low, dist_pct = find_best_split_and_parameters(user,test_data)
        # run the first round of clustering
        sim, bins, bin_trips, filter_trips = ep.first_round(best_split, radius)
        # It is possible that the user doesn't have common trips. Here we only build models for user that has common trips.
        # NOTE(review): 'is not 0' is an identity comparison that only works
        # because CPython caches small ints; this should read 'len(bins) != 0'.
        if len(bins) is not 0:
            gs.compare_trip_orders(bins, bin_trips, filter_trips)
            first_labels = ep.get_first_label(bins)
            first_label_set = list(set(first_labels))
            # second round of clustering
            model_coll = {}
            bin_loc_feat = {}
            # NOTE(review): 'fitst' is a typo for 'first' (kept for compatibility).
            fitst_round_labels = {}
            for fl in first_label_set:
                # store second round trips data
                second_round_trips = []
                for index, first_label in enumerate(first_labels):
                    if first_label == fl:
                        second_round_trips.append(bin_trips[index])
                x = preprocess.extract_features(second_round_trips)
                # collect location features of the bin from the first round of clustering
                # feat[0:4] are start/end coordinates
                bin_loc_feat[str(fl)] = [feat[0:4] for feat in x]
                # here we pass in features(x) from selected second round trips to build the model
                method = 'single'
                clusters = lp.get_second_labels(x, method, low, dist_pct)
                n_clusters = len(set(clusters))
                # build the model
                kmeans = sc.KMeans(n_clusters=n_clusters, random_state=0).fit(x)
                # collect all models, the key is the label from the 1st found
                # e.g.{'0': KMeans(n_clusters=2, random_state=0)}
                model_coll[str(fl)] = kmeans
                # get labels from the 2nd round of clustering
                second_labels = kmeans.labels_
                # NOTE(review): first_label_obj is re-created for every fl, so
                # fitst_round_labels[str(fl)] below is always a 1-element list.
                first_label_obj = []
                # save user labels for every cluster
                second_label_set = list(set(second_labels))
                sec_round_labels = {}
                for sl in second_label_set:
                    sec_sel_trips = []
                    sec_label_obj = []
                    for idx, second_label in enumerate(second_labels):
                        if second_label == sl:
                            sec_sel_trips.append(second_round_trips[idx])
                    user_label_df = pd.DataFrame([trip['data']['user_input'] for trip in sec_sel_trips])
                    user_label_df = lp.map_labels(user_label_df)
                    # compute the sum of trips in this cluster
                    sum_trips = len(user_label_df)
                    # compute unique label sets and their probabilities in one cluster
                    # 'p' refers to probability
                    unique_labels = user_label_df.groupby(user_label_df.columns.tolist()).size().reset_index(name='uniqcount')
                    unique_labels['p'] = unique_labels.uniqcount / sum_trips
                    labels_columns = user_label_df.columns.to_list()
                    for i in range(len(unique_labels)):
                        one_set_labels = {}
                        # e.g. labels_only={'mode_confirm': 'pilot_ebike', 'purpose_confirm': 'work', 'replaced_mode': 'walk'}
                        labels_only = {column: unique_labels.iloc[i][column] for column in labels_columns}
                        one_set_labels["labels"] = labels_only
                        one_set_labels['p'] = unique_labels.iloc[i]['p']
                        # e.g. one_set_labels = {'labels': {'mode_confirm': 'walk', 'replaced_mode': 'walk', 'purpose_confirm': 'exercise'}, 'p': 1.0}
                        # in case append() method changes the dict, we use deepcopy here
                        labels_set = copy.deepcopy(one_set_labels)
                        sec_label_obj.append(labels_set)
                    # put user labels from the 2nd round into a dict, the key is the label from the 2nd round of clustering
                    #e.g. {'0': [{'labels': {'mode_confirm': 'bus', 'replaced_mode': 'bus', 'purpose_confirm': 'home'}, 'p': 1.0}]}
                    sec_round_labels[str(sl)] = sec_label_obj
                sec_round_collect = copy.deepcopy(sec_round_labels)
                # collect all user labels from the 2nd round, the key is to the label from the 1st round
                # e.g. fitst_round_labels = {'0': [{'0': [{'labels': {'mode_confirm': 'drove_alone', 'purpose_confirm': 'work', 'replaced_mode': 'drove_alone'}, 'p': 1.0}]}]}
                first_label_obj.append(sec_round_collect)
                fitst_round_labels[str(fl)] = first_label_obj
            # wrap up all labels
            # e.g. all_labels = [{'first_label': [{'second_label': [{'labels': {'mode_confirm': 'shared_ride',
            # 'purpose_confirm': 'home', 'replaced_mode': 'drove_alone'}, 'p': 1.0}]}]}]
            all_labels = [fitst_round_labels]
            # save all user labels
            save_models('user_labels',all_labels,user)
            # save models from the 2nd round of clustering
            save_models('models',[model_coll],user)
            # save location features of all bins
            save_models('locations',[bin_loc_feat],user)
if __name__ == '__main__':
    # Verbose logging so each skipped/processed user is traceable.
    logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',
                        level=logging.DEBUG)
    main()
| e-mission/e-mission-server | emission/analysis/modelling/tour_model/build_save_model.py | build_save_model.py | py | 7,787 | python | en | code | 22 | github-code | 36 |
4833398186 | import re
import json
import numbers
import numpy as np
class Composition():
    """Elemental composition of a molecule with arithmetic support.

    A Composition can be built from an upper-case chemical formula string
    (a leading '-' negates every count), from a single non-formula key
    (count 1), or from a {element: count} dict.  Instances support +, -,
    integer *, ordering by monoisotopic mass, equality/hash by content.
    """
    # Monoisotopic masses in unified atomic mass units (u).
    __atom_mass = {
        # From NIST, "https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl?ele=&all=all&isotype=some"
        'neutron': 1.00866491595,
        'proton': 1.007276466621,
        'electron': 0.000548579909065,
        'H': 1.00782503223,
        'C': 12,
        'N': 14.00307400443,
        'O': 15.99491461957,
        'P': 30.97376199842,
        'S': 31.9720711744
    }
    def __init__(self, class_input):
        """Build from a formula string, a single key, or a dict.

        Raises TypeError for any other input type.
        """
        if type(class_input) == str:
            # An all-uppercase string is parsed as a chemical formula.
            if class_input.isupper():
                formular_string = class_input
                if formular_string[0] == '-':
                    # Leading '-' means "subtract this whole formula".
                    self.composition = {i[0]: -int(i[1]) if i[1] else -int(1) for i in
                                        re.findall("([A-Z][a-z]?)(\d*)", formular_string)}
                else:
                    self.composition = {i[0]: int(i[1]) if i[1] else int(1) for i in
                                        re.findall("([A-Z][a-z]?)(\d*)", formular_string)}
            else:
                # Non-formula strings (e.g. 'proton') count as a single unit.
                self.composition = {class_input:1}
        elif type(class_input) == dict:
            self.composition = class_input
        else:
            raise TypeError
        self.mass = self.mass_calculater()
    def __add__(self, other):
        # Element-wise sum of counts; zero-count entries are dropped.
        result = {}
        if isinstance(other, Composition):
            for k in self.composition:
                result.update({k: self.composition[k]})
            for k in other.composition:
                try:
                    result[k] += other.composition[k]
                    if result[k] == 0: result.pop(k)
                except KeyError:
                    result.update({k: other.composition[k]})
            return Composition(result)
        else:
            return NotImplemented
    def __sub__(self, other):
        # Element-wise difference of counts; zero-count entries are dropped.
        result = {}
        if isinstance(other, Composition):
            for k in self.composition:
                result.update({k: self.composition[k]})
            for k in other.composition:
                try:
                    result[k] -= other.composition[k]
                    if result[k] == 0: result.pop(k)
                except KeyError:
                    result.update({k: -other.composition[k]})
            return Composition(result)
        else:
            return NotImplemented
    def __mul__(self, other):
        # Scale every count by an integer (e.g. Composition('proton') * charge).
        # NOTE(review): only left-multiplication is defined; there is no __rmul__.
        if isinstance(other, numbers.Integral):
            result = {}
            for k in self.composition:
                result.update({k: other * self.composition[k]})
            return Composition(result)
        else:
            return NotImplemented
    def __eq__(self, other):
        # Equality compares the composition dicts, not the masses.
        if isinstance(other, Composition):
            return self.composition==other.composition
        else:
            return NotImplemented
    def __gt__(self, other):
        # Ordering comparisons are by total mass.
        if isinstance(other, Composition):
            return self.mass>other.mass
        else:
            return NotImplemented
    def __ge__(self, other):
        if isinstance(other, Composition):
            return self.mass>=other.mass
        else:
            return NotImplemented
    def __lt__(self, other):
        if isinstance(other, Composition):
            return self.mass<other.mass
        else:
            return NotImplemented
    def __le__(self, other):
        if isinstance(other, Composition):
            return self.mass<=other.mass
        else:
            return NotImplemented
    def __hash__(self):
        # Hash via a canonical (key-sorted) JSON dump so equal compositions
        # hash equally regardless of dict insertion order.
        return hash(json.dumps(self.composition,sort_keys=True))
    def __repr__(self):
        return 'Composition('+str(self.composition)+')'
    def __str__(self):
        return 'Composition('+str(self.composition)+')'
    def mass_calculater(self):
        """Return the total monoisotopic mass of this composition."""
        result = 0
        for k in self.composition:
            result += self.composition[k] * self.__atom_mass[k]
        return result
    def comp2formula(self):
        """Render the composition as a formula string like 'C2H4O1N1'."""
        seq=''
        for k in self.composition:
            seq+=k+str(self.composition[k])
        return seq
    @classmethod
    def output_neutron(cls):
        """Return the neutron mass constant (u)."""
        return cls.__atom_mass['neutron']
class Residual_seq():
    """An amino-acid residue sequence with per-prefix cumulative masses.

    The class-level table maps one-letter residue codes to Compositions;
    lowercase 'c' is carbamidomethylated Cys and 'm' is oxidized Met.
    NOTE(review): 'L' is commented out — presumably because Leu and Ile
    share a mass and are collapsed into 'I'; confirm before re-enabling.
    """
    __aa_residual_composition = {
        'A': Composition('C3H5ON'),
        'R': Composition('C6H12ON4'),
        'N': Composition('C4H6O2N2'),
        #'N(+.98)': Composition('C4H6O2N2') - Composition('NH3') + Composition('H2O'),
        'D': Composition('C4H5O3N'),
        #'C': Composition('C3H5ONS'),
        'c': Composition('C3H5ONS') - Composition('H') + Composition('C2H4ON'),
        'E': Composition('C5H7O3N'),
        'Q': Composition('C5H8O2N2'),
        #'Q(+.98)': Composition('C5H8O2N2') - Composition('NH3') + Composition('H2O'),
        'G': Composition('C2H3ON'),
        'H': Composition('C6H7ON3'),
        'I': Composition('C6H11ON'),
        #'L': Composition('C6H11ON'),
        'K': Composition('C6H12ON2'),
        'M': Composition('C5H9ONS'),
        'm': Composition('C5H9ONS') + Composition('O'),
        'F': Composition('C9H9ON'),
        'P': Composition('C5H7ON'),
        'S': Composition('C3H5O2N'),
        'T': Composition('C4H7O2N'),
        'W': Composition('C11H10ON2'),
        'Y': Composition('C9H9O2N'),
        'V': Composition('C5H9ON'),
    }
    def __init__(self, seqs):
        """Parse *seqs* (whitespace ignored) into residues.

        Sets .seq (list of codes), .composition, .mass (total residue mass)
        and .step_mass (numpy array of cumulative prefix masses, ending in
        the total mass).
        """
        seq = [i for i in seqs if not i.isspace()]
        self.step_mass = []
        tmp = self.__aa_residual_composition[seq[0]]
        for i in seq[1:]:
            # Record the mass of the prefix BEFORE adding the next residue.
            self.step_mass.append(tmp.mass)
            tmp += self.__aa_residual_composition[i]
        self.seq = seq
        self.composition = tmp
        self.mass = tmp.mass
        self.step_mass.append(self.mass)
        self.step_mass = np.array(self.step_mass)
    def __repr__(self):
        return str(self.seq)
    def __str__(self):
        return str(self.seq)
    @classmethod
    def reset_aadict(cls,newAAdict):
        """Replace the whole residue table."""
        cls.__aa_residual_composition = newAAdict
    @classmethod
    def remove_from_aadict(cls, keys):
        """Delete the given residue codes from the table."""
        for key in keys:
            cls.__aa_residual_composition.pop(key)
    @classmethod
    def add_to_aadict(cls, additional_AAcomps):
        """Merge a list of {code: Composition} dicts into the table."""
        for additional_AAcomp in additional_AAcomps:
            cls.__aa_residual_composition.update(additional_AAcomp)
    @classmethod
    def output_aalist(cls):
        """Return the list of known residue codes."""
        return list(cls.__aa_residual_composition.keys())
    @classmethod
    def output_aadict(cls):
        """Return the residue table itself (not a copy)."""
        return cls.__aa_residual_composition
    @classmethod
    def seqs2composition_list(cls,seq):
        """Map a residue-code iterable to its Composition objects."""
        return [cls.__aa_residual_composition[aa] for aa in seq]
    @classmethod
    def seqs2massmap(cls,seq):
        """Map a residue-code iterable to its residue masses."""
        return [cls.__aa_residual_composition[aa].mass for aa in seq]
class Ion():
    """Fragment-ion mass offsets and m/z conversion helpers.

    __ion_offset holds the composition offset of each fragment-ion type
    relative to the residue chain; __term_ion_offset additionally folds in
    the terminal atoms (H on the N-terminus for a/b/c ions, OH on the
    C-terminus for x/y/z ions).
    """
    # Ion offset design from http://www.matrixscience.com/help/fragmentation_help.html
    # Part: Formulae to Calculate Fragment Ion m/z values
    __ion_offset = {
        'a': Composition('-CHO'),
        'a-NH3': Composition('-CHO') + Composition('-NH3'),
        'a-H2O': Composition('-CHO') + Composition('-H2O'),
        'b': Composition('-H'),
        'b-NH3': Composition('-H') + Composition('-NH3'),
        'b-H2O': Composition('-H') + Composition('-H2O'),
        #'c': Composition('NH2'),
        #'x': Composition('CO') + Composition('-H'),
        'y': Composition('H'),
        'y-NH3': Composition('H') + Composition('-NH3'),
        'y-H2O': Composition('H') + Composition('-H2O'),
        #'z': Composition('-NH2')
    }
    # Same offsets with the default terminal atoms (H / OH) already added;
    # rebuilt by set_ionoffset_endterm() when the termini are modified.
    __term_ion_offset = {
        'a': Composition('-CHO') + Composition('H'),
        'a-NH3': Composition('-CHO') + Composition('-NH3') + Composition('H'),
        'a-H2O': Composition('-CHO') + Composition('-H2O') + Composition('H'),
        'b': Composition('-H') + Composition('H'),
        'b-NH3': Composition('-H') + Composition('-NH3') + Composition('H'),
        'b-H2O': Composition('-H') + Composition('-H2O') + Composition('H'),
        #'c': Composition('NH2') + Composition('H'),
        #'x': Composition('CO') + Composition('-H') + Composition('OH'),
        'y': Composition('H') + Composition('OH'),
        'y-NH3': Composition('H') + Composition('-NH3') + Composition('OH'),
        'y-H2O': Composition('H') + Composition('-H2O') + Composition('OH'),
        #'z': Composition('-NH2') + Composition('OH')
    }
    @classmethod
    def set_ionoffset_endterm(cls,nterm='H',cterm='OH'):
        """Rebuild __term_ion_offset for the given terminal compositions."""
        result = {}
        for k in cls.__ion_offset:
            # N-terminal ion series get the N-terminus, C-terminal series
            # get the C-terminus.
            if k[0] == 'a' or k[0] == 'b' or k[0] == 'c':
                result.update({k: cls.__ion_offset[k] + Composition(nterm)})
            elif k[0] == 'x' or k[0] == 'y' or k[0] == 'z':
                result.update({k: cls.__ion_offset[k] + Composition(cterm)})
        cls.__term_ion_offset = result
    @classmethod
    def peak2sequencemz(cls, peak_mz, ion, charge=None):
        """Convert an observed peak m/z back to the neutral sequence mass.

        If *charge* is None it is read from the leading digit of *ion*
        (e.g. '2b' means a doubly-charged b ion).
        """
        if charge==None:
            charge = int(ion[0])
            ion = ion[1:]
        return (peak_mz-Composition('proton').mass)*charge-cls.__term_ion_offset[ion].mass
    @classmethod
    def peptide2ionmz(cls, seq, ion, charge):
        """Compute the theoretical m/z of *ion* for peptide *seq* at *charge*."""
        ion_compsition = Residual_seq(seq).composition+cls.__term_ion_offset[ion]+Composition('proton')*charge
        ion_mass = ion_compsition.mass/charge
        return ion_mass
    @classmethod
    def sequencemz2ion(cls, seqmz, ion, charge=None):
        """Convert a neutral sequence mass to the observed ion m/z (inverse
        of peak2sequencemz; same leading-digit charge convention)."""
        if charge==None:
            charge = int(ion[0])
            ion = ion[1:]
        return (seqmz+cls.__term_ion_offset[ion].mass)/charge+Composition('proton').mass
    @classmethod
    def precursorion2mass(cls, precursor_ion_moverz, precursor_ion_charge):
        """Neutral peptide mass from the precursor ion's m/z and charge."""
        # Composition('H2O') is the sum of the N- and C-terminal atoms; this
        # must be adjusted if TMT or other N/C-terminal modifications are used.
        return precursor_ion_moverz*precursor_ion_charge-Composition('H2O').mass-precursor_ion_charge*Composition('proton').mass
    @classmethod
    def add_ion(cls,ion_comps):
        """Add ion-offset dicts and refresh the terminal table."""
        for ion_comp in ion_comps:
            cls.__ion_offset.update(ion_comp)
        cls.set_ionoffset_endterm()
    @classmethod
    def remove_ion(cls, keys):
        """Remove ion types and refresh the terminal table."""
        for key in keys:
            cls.__ion_offset.pop(key)
        cls.set_ionoffset_endterm()
    @classmethod
    def reset_ions(cls, ion_comps):
        """Replace the whole ion-offset table and refresh the terminal table."""
        cls.__ion_offset = ion_comps
        cls.set_ionoffset_endterm()
    @classmethod
    def output_ions(cls):
        """Return the list of currently defined ion-type names."""
        return list(cls.__ion_offset.keys())
| AmadeusloveIris/GraphNovo | genova/utils/BasicClass.py | BasicClass.py | py | 10,351 | python | en | code | 6 | github-code | 36 |
72053024743 | import numpy as np
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
from mars import Mars
# Physical parameters of the model (dimensionless unless noted).
# NOTE(review): "dessert" is presumably a misspelling of "desert" — the name
# is used consistently, so it is kept as-is.
emissivity_dessert = 0.5
emissivity_PV = 0.5
absorptivity_dessert = 0.5
absorptivity_PV = 0.5
delta_time = 24 * 60 * 60  # time step: one day in seconds
f = 0.15
cp = 1
T_Atmosphere = 200  # atmospheric temperature [K]
rho = 1
num_days = 700  # length of the simulated period
# formula not given. Just a placeholder. replace values in mars.py
x = 1e-5
# NOTE(review): this rebinding shadows the imported Mars class; the class
# cannot be instantiated again after this line.
Mars = Mars(absorptivity_PV, absorptivity_dessert, emissivity_dessert, emissivity_PV, delta_time, f,
            cp, T_Atmosphere, rho, x)
# Just any simulated values. Replace by real ones. Numpy vector and each entry is average of the given day
L_in = np.cos(np.linspace(0, 2 * np.pi, num_days)) * 5 + 5
S_in = np.cos(np.linspace(0, 2 * np.pi, num_days)) * 5 + 5
r_H_dessert = np.ones(shape=num_days)
r_H_PV = r_H_dessert / 2
# Initial guess: 273 K for both temperature series (PV and desert).
Temperature_init = np.ones(shape=(2 * num_days,)) * 273
# Solve the coupled energy-balance system for all days at once.
root = fsolve(lambda Temperature: Mars.system(Temperature, num_days=num_days, l_in=L_in, s_in=S_in,
                                              r_H_PV=r_H_PV, r_H_dessert=r_H_dessert),
              Temperature_init)
plt.figure()
# First half of the solution vector vs. second half (two temperature series).
plt.plot(np.arange(0, num_days), root[0:num_days])
plt.plot(np.arange(0, num_days), root[num_days:2 * num_days])
plt.show()
| muedavid/Mars | main.py | main.py | py | 1,207 | python | en | code | 0 | github-code | 36 |
3680795220 | import numpy as np
import cv2
import math
import subprocess
import shutil
import os
if not os.path.exists('/home/martin/fotos'):
os.makedirs('/home/martin/fotos')
image_sudoku_original = cv2.imread('/home/martin/sudoku/sudoku_recognition/testing3.jpeg')
cv2.imshow("Imagen original",image_sudoku_original)
cv2.waitKey(0)
img = cv2.GaussianBlur(image_sudoku_original,(5,5),0)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
cv2.imshow("Imagen en escala de grises",gray)
cv2.waitKey(0)
thresh1 = cv2.adaptiveThreshold(gray,255,0,1,19,2)
cv2.imshow("Imagen binarizada",thresh1)
cv2.waitKey(0)
contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#image_sudoku_candidates = image_sudoku_original.copy()
size_rectangle_max = 0;
biggest = None
max_area = 0
for i in contours:
area = cv2.contourArea(i)
if area > 100:
peri = cv2.arcLength(i, True)
approximation = cv2.approxPolyDP(i, 0.02 * peri, True)
if area > max_area and len(approximation) == 4:
biggest = approximation
max_area = area
for i in range(len(approximation)):
cv2.line(image_sudoku_original,
(biggest[(i % 4)][0][0], biggest[(i % 4)][0][1]),
(biggest[((i + 1) % 4)][0][0], biggest[((i + 1) % 4)][0][1]),
(255, 0, 0), 2)
cv2.imshow("Contorno principal",image_sudoku_original)
cv2.waitKey(0)
def rectify(h):
    """Order four corner points as [top-left, top-right, bottom-right, bottom-left].

    Classic heuristic: the top-left corner has the smallest x+y sum and the
    bottom-right the largest; the top-right has the smallest y-x difference
    and the bottom-left the largest.  Returns a float32 (4, 2) array.
    """
    pts = h.reshape((4, 2))
    ordered = np.zeros((4, 2), dtype=np.float32)
    sums = pts.sum(1)
    diffs = np.diff(pts, axis=1)
    ordered[0] = pts[np.argmin(sums)]
    ordered[2] = pts[np.argmax(sums)]
    ordered[1] = pts[np.argmin(diffs)]
    ordered[3] = pts[np.argmax(diffs)]
    return ordered
approx = rectify(biggest)
h = np.array([[0, 0], [449, 0], [449, 449], [0, 449]], np.float32)
retval = cv2.getPerspectiveTransform(approx, h)
warp_gray = cv2.warpPerspective(gray, retval, (450, 450))
h, w = warp_gray.shape[:2]
cv2.imshow("Imagen con cambio perspectiva",warp_gray)
cv2.waitKey(0)
var2 = cv2.adaptiveThreshold(warp_gray,255,0,1,19,2)
#close = cv2.morphologyEx(var2,cv2.MORPH_CLOSE,kernel1)
gauss = cv2.GaussianBlur(warp_gray, (5, 5), 0)
thresh = cv2.adaptiveThreshold(gauss,255,0,1,19,2)
kernel = np.ones((5, 5), np.uint8)
erosion = cv2.erode(thresh, kernel, iterations=1)
dilation = cv2.dilate(thresh, kernel, iterations=1)
closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
#opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
#
# close = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel1)
# div = np.float32(warp_gray)/(close)
# res = np.uint8(cv2.normalize(div,div,0,255,cv2.NORM_MINMAX))
# res2 = cv2.cvtColor(res,cv2.COLOR_GRAY2BGR)
#img = cv2.GaussianBlur(var2,(5,5),0)
cv2.imshow("Imagen con Closing",closing)
cv2.waitKey(0)
# cv2.imshow("Imagen ultimo",thresh)
# cv2.waitKey(0)
contours, hierarchy = cv2.findContours(closing, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
def nroCuadrado(x, y, w, h):
    """Map a bounding box to its (column, row) cell in the 9x9 sudoku grid.

    Classifies by the box center (x + w/2, y + h/2) against a 449x449 board
    split into nine equal strips per axis.  Returns None implicitly when the
    center falls outside the board.
    """
    cell_w = 449 / 9
    cell_h = 449 / 9
    center_x = x + (w / 2)
    center_y = y + (h / 2)
    for col in range(9):
        for row in range(9):
            in_col = col * cell_w <= center_x <= (col + 1) * cell_w
            in_row = row * cell_h <= center_y <= (row + 1) * cell_h
            if in_col and in_row:
                return col, row
sudoku_matrix = np.zeros((9,9))
squares = []
size_rectangle_max = 0;
biggest = None
max_area = 0
count = 0
area_total = 0
for i in contours:
area = cv2.contourArea(i)
if area > 100:
approximation = cv2.approxPolyDP(i, 0.04 * peri, True)
if len(approximation) == 4:
area = cv2.contourArea(approximation)
if area > 1000 and area <=3000:
squares.append(approximation)
area = cv2.contourArea(approximation)
area_total += area
count +=1
x, y, w, h = cv2.boundingRect(approximation)
#print("X: "+str(x)+" Y: "+str(y)+" W: "+str(w)+ " H: "+str(h))
cv2.rectangle(gauss, (y, x), (y + w, x + h), (0, 255, 0), 2)
new_image = gauss[x+7:x+h-7, y+7:y+w-7]
f, g = nroCuadrado(x, y,w,h)
var2 = cv2.adaptiveThreshold(new_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
cv2.THRESH_BINARY, 11, 2)
name = '/home/martin/fotos/var%s%d.jpg' % (f, g)
cv2.imwrite(name, var2)
non_black = cv2.countNonZero(var2)
total = var2.size
percent = (float(non_black)/float(total))*100
if percent > 90.0:
number = -1
else:
#number = predict.main(var2)
command = name
number = subprocess.check_output(['python', 'predict.py', command])
#var = 1
sudoku_matrix[f][g] = number
#print(number)
#cv2.imshow("Imagen perspectiva", var2)
#cv2.waitKey(0)
#name = '/home/lcorniglione/Documents/sudoku_recognition/fotos/var%s%d.jpg' %(f,g)
result = (area_total/count)
area_prom = math.sqrt(result)
print ("CANTIDAD RECONOCIDA:")
print (len(squares))
cant_squares = len(squares)
for i in range(0,9):
for j in range(0,9):
num = sudoku_matrix[i][j]
if num==(-1.0):
sudoku_matrix[i][j] = 0
if num==(0.0) and cant_squares<81:
im_number = gauss[i * (area_prom + 8):(i+1) * (area_prom + 8)][:,
j * (area_prom + 8):(j+1) * (area_prom + 8)]
var2 = cv2.adaptiveThreshold(im_number, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
cv2.THRESH_BINARY, 11, 2)
non_black = cv2.countNonZero(var2)
total = var2.size
percent = (float(non_black) / float(total)) * 100
name = '/home/martin/fotos/var%s%d.jpg' % (i, j)
cv2.imwrite(name, var2)
if percent > 85.0:
number = -1
else:
command = name
number = subprocess.check_output(['python', 'predict.py', command])
sudoku_matrix[i][j] = number
print ("FINALIZADO")
print (sudoku_matrix)
cv2.imshow("Imagen cuadrados", gauss)
cv2.waitKey(0)
shutil.rmtree('/home/martin/fotos')
| msampietro/sudoku_recognition | sudoku.py | sudoku.py | py | 6,869 | python | en | code | 0 | github-code | 36 |
19826403866 | """
--- Day 20: Grove Positioning System ---
https://adventofcode.com/2022/day/20
"""
from aoc import *
def solve(rep, multiplier):
    """Solve AoC 2022 day 20: scale the input by *multiplier*, mix it *rep*
    times, and return the sum of the grove coordinates (values 1000, 2000
    and 3000 positions after the zero entry)."""
    raw = ints(puzzle_input(20, 2022, sample=False), '\n')
    # Pair each value with its original index so duplicates stay distinct.
    mixing = [(idx, value * multiplier) for idx, value in enumerate(raw)]
    original_order = list(mixing)
    for _ in range(rep):
        for entry in original_order:
            pos = mixing.index(entry)
            mixing.pop(pos)
            mixing.insert((pos + entry[1]) % len(mixing), entry)
    zero_pos = next(p for p, (_, value) in enumerate(mixing) if value == 0)
    return sum(mixing[(zero_pos + offset) % len(mixing)][1] for offset in [1000, 2000, 3000])
print(f'Part 1: {solve(1, 1)}')
print(f'Part 2: {solve(10, 811589153)}')
| BricksAndPieces/AdventOfCode | 2022/days/day20.py | day20.py | py | 647 | python | en | code | 1 | github-code | 36 |
37779455706 | import json
from datetime import datetime as dt
from datetime import date as dto
import copy
class ProcessJsonPortfolio:
    """Helpers for post-processing a portfolio dict loaded from JSON."""

    def calculate_average_price(self, _dict):
        """Calculate the dollar-cost average per security.

        Args:
            _dict (dict): Portfolio loaded from json; the key 'test' is ignored.

        Returns:
            tuple: (annotated portfolio copy, list of securities, list of
            total holdings per security).  The copy gains 'Total Holdings',
            'Total Cost' and 'Average Cost' per security, and every lot is
            marked with Processed == "True"; the input dict is untouched.
        """
        annotated = copy.deepcopy(_dict)
        securities = []
        holdings = []
        for ticker in _dict:
            if ticker == 'test':
                continue
            share_count = 0
            cost_basis = 0
            for lot_index, lot in enumerate(_dict[ticker]['Lots']):
                share_count += lot['Shares']
                cost_basis += lot['Executed Price'] * lot['Shares']
                annotated[ticker]['Lots'][lot_index]['Processed'] = "True"
            annotated[ticker]['Total Holdings'] = share_count
            annotated[ticker]['Total Cost'] = cost_basis
            annotated[ticker]['Average Cost'] = cost_basis / share_count
            securities.append(ticker)
            holdings.append(share_count)
        return annotated, securities, holdings

    def get_earliest_date(self, _dict):
        """Return the earliest date among unprocessed lots.

        Args:
            _dict (dict): Portfolio object; the key 'test' is ignored.

        Returns:
            datetime.date: earliest 'Date' of any lot whose 'Processed'
            flag is not "True"; today's date when none qualifies.
        """
        earliest = dto.today()
        for ticker in _dict:
            if ticker == 'test':
                continue
            for lot in _dict[ticker]['Lots']:
                if lot.get('Processed') == "True":
                    continue
                lot_date = dt.strptime(lot['Date'], '%Y-%m-%d').date()
                if lot_date < earliest:
                    earliest = lot_date
        return earliest
| lzy7071/portfolio_tools | portfolio_tools/util/process_json_portfolio.py | process_json_portfolio.py | py | 1,929 | python | en | code | 0 | github-code | 36 |
15207404869 | # 最短距离算法
import networkx as nx
debug = False  # set True to print intermediate edges and paths
start_node = ('start', -1)  # virtual source node of the graph
end_node = ('end', -1)  # virtual sink node of the graph
def fmt_edges(points, max_score=1.):
    """Format a list of scored candidate points into a weighted edge list.

    Edge weights are max_score - score, so the highest-scoring selection
    becomes the shortest path; virtual candidates carry max_score.

    :param points: list of [left, right, score] triples
    :return edges: [(node_id1, node_id2, weight)] with integer node ids
    :return nodes: [(left, right)] node list (including the virtual
        start/end nodes); the ids in *edges* index into this list
    """
    grouped = dict()
    for left, right, score in points:
        # Every 'left' also gets a default virtual candidate (-1).
        grouped.setdefault(left, [(-1, max_score)]).append((right, max_score - score))
    edges = []
    nodes = [start_node]
    previous_layer = [start_node]
    for left, candidates in grouped.items():
        current_layer = []
        for right, weight in candidates:
            node = (left, right)
            current_layer.append(node)
            edges += init_edges(previous_layer, node, weight)
        nodes += current_layer
        previous_layer = current_layer
    # Terminal (virtual sink) node.
    nodes.append(end_node)
    if debug:
        print('edges:', [edge[:2] for edge in edges if edge[0] != start_node])
    edges += init_edges(previous_layer, end_node, 0.)
    node_ids = {node: idx for idx, node in enumerate(nodes)}
    edges = [(node_ids[src], node_ids[dst], weight) for src, dst, weight in edges]
    return edges, nodes
def init_edges(last_nodes, point, score):
    """Create weighted edges from each node in *last_nodes* to *point*.

    A pair is skipped when both 'right' indices are real (>= 0) but not
    strictly increasing, which keeps the path monotone over real nodes.
    """
    return [
        (last, point, score)
        for last in last_nodes
        if not (last[1] >= 0 and point[1] >= 0 and last[1] >= point[1])
    ]
def shortest_distance(edges, nodes, target=None, source=0):
    """Run Dijkstra over the weighted digraph defined by *edges*.

    :param target: node id of the sink; defaults to the last node
    :return path: list[(left, right)] — the shortest path mapped back to
        node tuples, excluding the virtual source and target
    """
    if target is None:
        target = len(nodes) - 1
    graph = nx.DiGraph()
    graph.add_weighted_edges_from(edges)
    raw_path = nx.dijkstra_path(graph, source=source, target=target)
    if debug:
        print('shortest path: ', raw_path)
    endpoints = {source, target}
    return [nodes[idx] for idx in raw_path if idx not in endpoints]
if __name__ == '__main__':
    # Ad-hoc smoke tests: print the chosen path for a few point sets.
    debug = True
    def test(points):
        edges, nodes = fmt_edges(points)
        path = shortest_distance(edges, nodes)
        print(path)
        print("-------")
    points = [[0, 0, 0.5]]
    test(points)
    points = [[0, 0, 0.7], [0, 1, 0.1], [1, 0, 0.2], [1, 1, 0.6]]
    test(points)
    points = [[0, 0, 0.7], [0, 1, 0.1], [1, 0, 0.8], [1, 1, 0.6]]
    test(points)
    points = [[0, 0, 0.7], [1, 0, 0.1], [1, 1, 0.2], [2, 1, 0.8]]
    test(points)
| ibbd-dev/python-ibbd-algo | ibbd_algo/shortest_distance.py | shortest_distance.py | py | 2,510 | python | en | code | 1 | github-code | 36 |
13279982119 | # -*- coding: utf-8 -*-
# Scrapy settings for news project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'news'
SPIDER_MODULES = ['news.spiders']
NEWSPIDER_MODULE = 'news.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.3964.2 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'news.middlewares.NewsSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
# 'news.middlewares.NewsDownloaderMiddleware': 543,
'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 110,
'news.middlewares.ProxyDownloaderMiddleware': 100
}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'news.pipelines.NewsPipeline': 300,
    # 'scrapy_redis.pipelines.RedisPipeline': 301
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Redis connection settings.
# NOTE(review): the password is hard-coded; move it to an environment
# variable or external config.
REDIS_HOST = '10.0.0.102'
REDIS_PORT = 6379
REDIS_PARAMS = {'password': 'xiaojiexiaojie'}
REDIS_ENCODING = 'utf-8'
DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'
# Enable the scrapy_redis scheduler.
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue' # default: PriorityQueue (sorted set); alternatives: FifoQueue (list), LifoQueue (list)
SCHEDULER_QUEUE_KEY = '%(spider)s:requests' # redis key under which the scheduler's requests are stored
SCHEDULER_SERIALIZER = "scrapy_redis.picklecompat" # serializer for data saved to redis (pickle by default)
SCHEDULER_PERSIST = False # keep scheduler and dedup records on close (True=keep, False=flush)
SCHEDULER_FLUSH_ON_START = True # flush scheduler and dedup records on start (True=flush, False=keep)
SCHEDULER_IDLE_BEFORE_CLOSE = 10 # max seconds to wait when fetching from an empty scheduler queue
SCHEDULER_DUPEFILTER_KEY = '%(spider)s:dupefilter' # redis key for the dedup fingerprints
SCHEDULER_DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter' # class implementing the dedup rule
REDIS_START_URLS_BATCH_SIZE = 1 # how many start urls the spider pops at a time
REDIS_START_URLS_AS_SET = False # True: start urls kept in a redis set; False: in a redis list
import os
BASE_DIRS = os.path.join(os.path.dirname(__file__))
DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
DOWNLOADER_CLIENTCONTEXTFACTORY = "scrapy.core.downloader.contextfactory.ScrapyClientContextFactory"
| xiaojie0202/web_spider | news(Script-redis应用)/settings.py | settings.py | py | 5,093 | python | en | code | 4 | github-code | 36 |
31279947642 | from ray import serve
from typing import List, Dict
import json
import numpy as np
from scipy.optimize import linprog
@serve.deployment(num_replicas=1, ray_actor_options={"num_cpus": 1, "num_gpus": 0})
class LinearProgrammingService(object):
    """Ray Serve deployment wrapping scipy.optimize.linprog."""
    # def __init__(self):

    def LinearProgramming(self, body: Dict):
        """Solve min/max c^T x subject to A x <= b within *bounds*.

        Expects keys 'MinOrMax' ('min'/'max'), 'target', 'A', 'b', 'bounds'.
        A 'max' problem is converted to minimization by negating the target.
        NOTE(review): print(123) looks like leftover debug output; on any
        exception the error is printed and None is returned (no re-raise);
        and res.x is a numpy array, which is not directly JSON-serializable.
        """
        print(123)
        try:
            event = body
            MinOrMax = event['MinOrMax']
            target = event['target']
            A = event['A']
            b = event['b']
            bounds = event['bounds']
            print("线性规划求解器:")
            if MinOrMax == 'min':
                pass
            elif MinOrMax == 'max':
                # Negate the objective so linprog's minimization solves the max problem.
                target = np.array(target) * (-1)
            # minimize
            res = linprog(target, A, b, bounds=bounds)
        except Exception as e:
            print(e)
            print(e.__traceback__.tb_frame.f_globals["__file__"]) # file where the exception occurred
            print(e.__traceback__.tb_lineno) # line number of the exception
        else:
            print("success")
            return {
                "OptimalValue": res.fun,
                "OptimalSolution": res.x
            }
| tju-hwh/Yet-Another-Serverless-Benchmark | solver/ray_stateful/so/service/linear_programming.py | linear_programming.py | py | 1,180 | python | en | code | 0 | github-code | 36 |
7615563038 | # -*- coding: utf-8 --
import re
import math
from multiprocessing import cpu_count, freeze_support
from multiprocessing.pool import Pool
import sys
from util import read_text_lines
from util import refine_line
from char2vec import load_model
# Numeric ids for the two spacing labels.  Note that raw2corpus/corpus2sent
# below work with the string tags 'B'/'I' rather than these ids.
B = 1  # 'B': character begins a word
I = 0  # 'I': character is inside a word
'''
1. Load the word2vec model (from char2vec)
'''
def is_hangul(ch):
    """Return True if *ch* is a precomposed Hangul syllable (U+AC00-U+D7A3).

    Accepts either a one-character string or an integer code point.
    """
    code = ord(ch) if isinstance(ch, str) else int(ch)
    return 0xac00 <= code <= 0xd7a3
def is_ascii(ch):
    """Return True if *ch* is a printable ASCII character (U+0020-U+007E).

    Accepts either a one-character string or an integer code point.
    """
    code = ord(ch) if isinstance(ch, str) else int(ch)
    return 0x20 <= code <= 0x7e
def ch2num(ch):
    """Map a character (or code point) to a compact integer feature id.

    Hangul syllables map to 256, 257, ... (offset from U+AC00); printable
    ASCII keeps its own code point; anything else maps to None.  The range
    checks are inlined from is_hangul()/is_ascii().
    """
    code = ord(ch) if isinstance(ch, str) else ch
    if 0xac00 <= code <= 0xd7a3:   # Hangul syllable block
        return code - ord('가') + 256
    if 0x20 <= code <= 0x7e:       # printable ASCII
        return code
    return None
def get_features(line_ch, i):
    """Return a 6-element context feature for the character at position *i*.

    X[0..4] hold ch2num() ids for the characters at offsets -2..+2 from
    position *i* (0 when the offset falls outside the line); X[5] is 1
    only for the first character of the line.
    """
    X = [0] * 6
    # BUG FIX: the original guards were `i > 2` and `i > 1`, which skipped
    # the valid left-context characters at i == 2 and i == 1 (compare the
    # `i >= 2` / `i >= 1` bounds used by generate_feature2 below).
    if i >= 2:
        X[0] = ch2num(line_ch[i - 2])
    if i >= 1:
        X[1] = ch2num(line_ch[i - 1])
    X[2] = ch2num(line_ch[i])
    if i < len(line_ch) - 1:
        X[3] = ch2num(line_ch[i + 1])
    if i < len(line_ch) - 2:
        X[4] = ch2num(line_ch[i + 2])
    # Record whether this character starts the sentence.
    if i == 0:
        X[5] = 1
    else:
        X[5] = 0
    return X
def raw2corpus(raw_sentence):
    """Convert a raw sentence into a space-tagged corpus line.

    Runs of spaces are collapsed and the text stripped; every non-space
    character then becomes "ch/B" when it starts a word (first character
    or preceded by a space) and "ch/I" otherwise.
    """
    text = re.sub(r'(\ )+', ' ', raw_sentence).strip()
    tagged = []
    for pos, ch in enumerate(text):
        if pos > 0 and ch == ' ':
            continue
        if pos == 0 or text[pos - 1] == ' ':
            tagged.append('{}/B'.format(ch))
        else:
            tagged.append('{}/I'.format(ch))
    return ' '.join(tagged)
def corpus2sent(line):
    """Parse a tagged corpus line ("ch/T ch/T ...") into [(ch, tag), ...].

    Tokens without a '/' separator are skipped.

    BUG FIX: splitting on the LAST '/' (rsplit with maxsplit=1) so that a
    tagged slash character ("//B", as emitted by raw2corpus for '/') parses
    correctly; the previous token.split('/') raised ValueError on it.
    """
    sent = []
    for token in line.split(' '):
        if '/' not in token:
            continue
        word, tag = token.rsplit('/', 1)
        sent.append((word, tag))
    return sent
# NOTE(review): the original (Korean) comment here said "this is not used",
# but char2vec_model is still referenced by index2feature() below — verify
# before removing it.
char2vec_model = load_model(r'./char2vec_Etri_d30.txt')
# One embedding model per n-gram length (1 to 3 characters); indexed by
# n - 1 in char2vec() below.
ngram2vec_models = []
for n in range(1, 4):
    # Alternative model files tried previously (kept for reference):
    #ngram2vec_models.append(load_model(r'./char2vec_Etri_d30_{}gram.txt'.format(n)))
    #ngram2vec_models.append(load_model(r'./char2vec_ted_d40_{}gram.txt'.format(n)))
    ngram2vec_models.append(load_model(r'./char2vec_MDM001_d40_{}gram.txt'.format(n)))
def char2vec(ch):
n = len(ch)
return [float(f) for f in ngram2vec_models[n-1][ch]]
# 조화 평균
def hmean(values):
top = float(len(values))
bottom = 0.0
for v in values:
top *= v
bottom += v
return top / bottom
# 산술 평균
def amean(values):
s = 0.0
for v in values:
s += v
return v / len(values)
# 기하 평균
def gmean(values):
m = 1
for v in values:
m *= v
r = m ** (1.0/float(len(values)))
return r
def index2feature(line, i, offsets):
'''
해당 offset에 위치한 글자의 word embedding 벡터를 가져온다.
offset이 여러 개 있으면 중간값이나 평균(산술, 조화 등)값으로 합쳐서 실험해보기
* 중간값: 다른 조합인데 같은 걸로 취급될 수 있는 경우가 있으므로 빼기
* 실험1 --> 산술평균: (a + b + c) / 3
* 실험2 --> 조화평균: 3*a*b*c / (a + b + c) --> 모두 양수일때만 의미있는 결과 나옴
* 실험3 --> 기하평균: sqrt3(a * b * c)
※ 기하평균 --> 곱해야 하는 값의 평균 구할때 사용(예: 은행 n년간 평균 이자 계산 등)
'''
vec = []
for off in offsets:
if i + off < 0 or i + off >= len(line):
return [0.0 for i in range(50)]
ch, _ = line[i + off]
vec.append(char2vec_model[ch])
result = []
for i in range(len(vec[0])):
v = []
for j in range(len(vec)):
v.append(float(vec[j][i]))
result.append(amean(v))
return result
# 다른 논문 참고한 자질에서 2개이상 글자에 해당하는 임베딩은 각 글자의
# 임베딩 정보를 평균낸걸로 만든 자질
def generate_feature(args):
line = args[0]
i = args[1]
feature = []
feature += index2feature(line, i, [-1])
feature += index2feature(line, i, [0])
feature += index2feature(line, i, [1])
feature += index2feature(line, i, [-2, -1])
feature += index2feature(line, i, [-1, 0])
feature += index2feature(line, i, [0, 1])
feature += index2feature(line, i, [-2, -1, 0])
feature += index2feature(line, i, [-1, 0, 1])
feature += index2feature(line, i, [0, 1, 2])
return feature
# 앞 2글자부터 뒤2글자까지 각 한글자씩의 임베딩 정보를 자질로 사용한 것
def generate_feature2(args):
line = args[0]
i = args[1]
feature = []
if i >= 2:
ch, _ = line[i - 2]
feature += char2vec(ch)
else:
feature += [0.0 for i in range(30)]
if i >= 1:
ch, _ = line[i - 1]
feature += char2vec(ch)
else:
feature += [0.0 for i in range(30)]
ch, _ = line[i]
feature += char2vec(ch)
if i < len(line) - 1:
ch, _ = line[i + 1]
feature += char2vec(ch)
else:
feature += [0.0 for i in range(30)]
if i < len(line) - 2:
ch, _ = line[i + 2]
feature += char2vec(ch)
else:
feature += [0.0 for i in range(30)]
return feature
# 타 논문 참고한 자질에서 여러 글자에 해당하는 임베딩 정보를 추가한 자질
def generate_feature3(args):
line = ''.join([l[0] for l in args[0]])
i = args[1]
dim = 40
feature = []
# 1-gram
feature += char2vec(line[i-1]) if i >= 1 else [0.0 for a in range(dim)]
feature += char2vec(line[i])
feature += char2vec(line[i+1]) if i < len(line)-1 else [0.0 for a in range(dim)]
# 2-gram
feature += char2vec(line[i-2:i]) if i >= 2 else [0.0 for a in range(dim)]
feature += char2vec(line[i-1:i+1]) if i >= 1 else [0.0 for a in range(dim)]
feature += char2vec(line[i:i+2]) if i < len(line)-1 else [0.0 for a in range(dim)]
# 3-gram
feature += char2vec(line[i-2:i+1]) if i >= 2 else [0.0 for a in range(dim)]
feature += char2vec(line[i-1:i+2]) if i >= 1 and i < len(line)-1 else [0.0 for a in range(dim)]
feature += char2vec(line[i:i+3]) if i < len(line)-2 else [0.0 for a in range(dim)]
return feature
def generate_feature4(args):
line = ''.join([l[0] for l in args[0]])
i = args[1]
dim = 40
feature = []
# 1-gram
feature += char2vec(line[i])
feature += char2vec(line[i-1]) if i >= 1 else [0.0 for a in range(dim)]
feature += char2vec(line[i+1]) if i < len(line)-1 else [0.0 for a in range(dim)]
# 2-gram
feature += char2vec(line[i-2:i]) if i >= 2 else [0.0 for a in range(dim)]
feature += char2vec(line[i-1:i+1]) if i >= 1 else [0.0 for a in range(dim)]
feature += char2vec(line[i:i+2]) if i < len(line)-1 else [0.0 for a in range(dim)]
feature += char2vec(line[i+1:i+3]) if i < len(line)-2 else [0.0 for a in range(dim)]
# 3-gram
feature += char2vec(line[i-2:i+1]) if i >= 2 else [0.0 for a in range(dim)]
feature += char2vec(line[i-1:i+2]) if i >= 1 and i < len(line)-1 else [0.0 for a in range(dim)]
feature += char2vec(line[i:i+3]) if i < len(line)-2 else [0.0 for a in range(dim)]
feature += char2vec(line[i+1:i+4]) if i < len(line)-3 else [0.0 for a in range(dim)]
return feature
def make_data(pool, fname):
lines = read_text_lines(fname)
lines = (refine_line(line) for line in lines)
corpus = (raw2corpus(line) for line in lines)
sent = (corpus2sent(line) for line in corpus)
X = []
Y = []
for line in sent:
X += pool.map(generate_feature4, [(line, i) for i in range(len(line))])
Y += [(1 if y == 'B' else 0) for _, y in line]
return X, Y
def make_data_divided(pool, fname):
lines = read_text_lines(fname)
lines = (refine_line(line) for line in lines)
corpus = (raw2corpus(line) for line in lines)
sent = (corpus2sent(line) for line in corpus)
line_cnt = 0
X = []
Y = []
for line in sent:
line_cnt += 1
x = pool.map(generate_feature4, [(line, i) for i in range(len(line))])
X += norm_many(pool, x)
Y += ((1 if y == 'B' else 0) for _, y in line)
if line_cnt == 100000:
yield X, Y
line_cnt = 0
X = []
Y = []
yield X, Y
# todo: 여러 글자의 워드벡터를 더한 것을 고려해서 수정하기
def norm(arr):
return [round(x*1000, 0) + 10000 for x in arr]
def norm_many(pool, X):
return list(pool.map(norm, X))
def main():
for i in range(1, 4):
char2vec_model = load_model(r'./char2vec_ted_d40_{}gram.txt'.format(i))
min_v = 0.0
max_v = 0.0
for k in char2vec_model.wv.vocab.keys():
vec = char2vec_model.wv[k]
for v in vec:
if v < min_v:
min_v = v
elif v > max_v:
max_v = v
print('#{}: min={}, max={}'.format(i, min_v, max_v))
#sys.exit(1)
pool = Pool(processes=cpu_count())
X, Y = make_data(pool, r'./ted_7_ErasePunc_FullKorean__train.txt')
print(X[:5])
print(Y[:5])
if __name__ == '__main__':
freeze_support()
main()
| kimwansu/autospacing_tf | make_data.py | make_data.py | py | 9,337 | python | en | code | 0 | github-code | 36 |
18200360642 | import json
import os
import random
import shutil
from predictor import get_predictor
from yolox.tracking_utils.timer import Timer
import cv2
import numpy as np
def get_gt_by_frame(bbox_file: str):
gtByFrames = {}
# convert to List[bboxes, List[int]]
with open(bbox_file) as f:
annot = json.load(f)
labels = annot['labels']
infos = annot['info']
frames_map = [int(u.split('_')[-1].split('.')[0]) for u in infos['url']]
for pIdx, player in enumerate(labels):
frames = player['data']['frames']
#group by frame
for frame in frames:
frame_idx = frames_map[frame['frame']]
if frame_idx not in gtByFrames:
gtByFrames[frame_idx] = [[], []]
gtByFrames[frame_idx][0].append(frame['points'] + [1])
gtByFrames[frame_idx][1].append(pIdx)
for k, v in gtByFrames.items():
gtByFrames[k] = (np.array(v[0]), np.array(v[1]))
return gtByFrames
def ensure_folder(path: str):
try:
os.makedirs(path)
except: pass
def remove_folder(path:str):
try:
shutil.rmtree(path)
except: pass
if __name__ == '__main__':
gt_labels = os.listdir('./input/bboxes')
detector = get_predictor()
OUTPUT_ROOT = './output'
remove_folder(OUTPUT_ROOT)
ensure_folder(OUTPUT_ROOT)
splits = ['train', 'val', 'test']
for split in splits:
ensure_folder(OUTPUT_ROOT + f'/{split}/negative')
ensure_folder(OUTPUT_ROOT + f'/{split}/positive')
for gt_label_file in gt_labels:
gt_bboxes = get_gt_by_frame(f'./input/bboxes/{gt_label_file}')
video_id = gt_label_file.split('.')[0]
print(video_id)
if not os.path.exists(f'./input/images/{video_id}'):
continue
# extract positive patches
for frame_idx, (player_bboxes, player_ids) in gt_bboxes.items():
img = cv2.imread(f'./input/images/{video_id}/{frame_idx}.jpg')
img_masked = img.copy()
for bbox in player_bboxes:
x1, y1, x2, y2, _ = bbox.astype(int)
img_masked[y1:y2, x1:x2] = 0
cv2.imwrite(OUTPUT_ROOT + '/masked.png', img_masked)
outputs, img_info = detector.inference(img_masked[:, :, :3], Timer())
output_results = outputs[0]
if output_results is None: break
imgH, imgW = img_info['height'], img_info['width']
# human_bboxes = []
output_results = output_results.cpu().numpy()
scores = output_results[:, 4] * output_results[:, 5]
human_bboxes = output_results[:, :4] # x1y1x2y2
remain_indx = scores > 0.6
scores = scores[remain_indx]
human_bboxes = human_bboxes[remain_indx]
img_size = (800, 1440)
scale = min(img_size[0] / float(imgH), img_size[1] / float(imgW))
human_bboxes /= scale
# negative samples
negative_samples = [(bIdx, b) for bIdx, b in enumerate(human_bboxes) if b.min() > 0]
negative_samples = random.sample(negative_samples, min(20, len(negative_samples)))
random.shuffle(negative_samples)
train_split_idx = int(len(negative_samples) * 0.7)
val_split_idx = int(len(negative_samples) * 0.8)
for idx, (bIdx, bbox) in enumerate(negative_samples):
split = 'train' if idx < train_split_idx else ('val' if idx < val_split_idx else 'test')
x1, y1, x2, y2 = bbox.astype(int)
cv2.imwrite(f'{OUTPUT_ROOT}/{split}/negative/{video_id}_{frame_idx}_{bIdx}.png', img[y1:y2, x1:x2])
positive_samples = list(zip(player_ids, player_bboxes))
random.shuffle(positive_samples)
train_split_idx = int(len(positive_samples) * 0.7)
val_split_idx = int(len(positive_samples) * 0.8)
for idx, (player_id, player_bboxe) in enumerate(positive_samples):
split = 'train' if idx < train_split_idx else ('val' if idx < val_split_idx else 'test')
x1, y1, x2, y2, _ = player_bboxe.astype(int)
x1 = max(x1, 0)
try:
cv2.imwrite(f'{OUTPUT_ROOT}/{split}/positive/{video_id}_{frame_idx}_{player_id}.png', img[y1:y2, x1:x2])
except:
print(f'{OUTPUT_ROOT}/{split}/positive/{video_id}_{frame_idx}_{player_id}.png', x1, y1, x2, y2)
| chenzhutian/nba-Player-classifier | generate_samples.py | generate_samples.py | py | 4,488 | python | en | code | 0 | github-code | 36 |
19022803935 | from aiogram.types import (
InlineKeyboardMarkup,
InlineKeyboardButton,
ReplyKeyboardMarkup,
KeyboardButton,
)
from main import admins_id
from utils.db_api.schemas.table_db import session, Contest
kb = ReplyKeyboardMarkup(resize_keyboard=True)
kb.add(KeyboardButton("Добавить конкурс")).add(KeyboardButton("Отмена"))
kb_auth = ReplyKeyboardMarkup(resize_keyboard=True)
kb_auth.add(KeyboardButton("Авторизация по телефону", request_contact=True))
def genmarkup(data: list) -> InlineKeyboardMarkup:
markup = InlineKeyboardMarkup()
markup.row_width = 1
for i in data:
markup.add(InlineKeyboardButton(i[0], callback_data=f"con_{i[0]}"))
return markup
def to_pay(user_id: int, contest: str, skip: bool = False):
markup = InlineKeyboardMarkup()
markup.row_width = 1
contest_query = session.query(Contest).filter(Contest.name == contest).first()
if not (contest_query.winner):
if str(user_id) in admins_id:
markup.add(
InlineKeyboardButton(
"Выбрать победителя и закончить конкурс",
callback_data=f"win_{contest}",
)
)
else:
if not skip:
markup.add(
InlineKeyboardButton(
"Внести плату",
callback_data=f"pay_{contest}"
)
)
return markup
| A-Sergey/TelegramBot_Contest | keyboards/buttons.py | buttons.py | py | 1,574 | python | en | code | 0 | github-code | 36 |
10018384107 | from list import student
import pickle
f=open("satya.db","wb")
rows=int(input("enter rows how many rows you want : "))
for i in range(rows):
print("----------------------------------")
print("enter "+str(i+1)+"student details")
print("-----------------------------------")
id=int(input("enter student number : "))
name=input("enter student number :")
s=student()
s.studentdetails(id,name)
pickle.dump(s,f)
print(i+1,"student details saved")
print("-----------------------")
s.dispaly()
print("successfully complete")
f.close()
| prasadnaidu1/django | Adv python practice/demo.py | demo.py | py | 571 | python | en | code | 0 | github-code | 36 |
30039556459 | import math
import numpy as np
from sympy import*
import matplotlib.pyplot as plt
class Solver:
def __init__(self, f, t0, y0, h, nsteps, inital_points):
self.f = f
self.t0 = t0
self.y0 = y0
self.h = h
self.nsteps = nsteps
self.inital_points = inital_points;
self.coef_ab = [
[1],
[1],
[3.0/2.0, 1.0/2.0],
[23.0/12.0, -4.0/3.0, 5.0/12.0],
[55.0/24.0, -59.0/24.0, 37.0/24.0, -3.0/8.0],
[1901.0/720.0, -1387.0/360.0, 109.0/30.0, -637.0/360.0, 251.0/720.0],
[4277.0/1440.0, -2641.0/480.0, 4991.0/720.0, -3649.0/720.0, 959.0/480.0, -95.0/288.0],
[198721.0/60480.0, 18367.0/2520.0, 235183.0/20160.0, 10754.0/945.0, 135713.0/20160.0, 5603.0/2520.0, 19087.0/60480.0],
[16083.0/4480.0, 1152169.0/120960.0, 242653.0/13440.0, 296053.0/13440.0, 2102243.0/120960.0, 115747.0/13440.0, 32863.0/13440.0, 5257.0/17280.0]
]
self.coef_am = [
[1],
[1.0/2.0, 1.0/2.0],
[5.0/12.0, 2.0/3.0, -1.0/12.0],
[3.0/8.0, 19.0/24.0, -5.0/24.0, 1.0/24.0],
[251.0/720.0, 323.0/360.0, -11.0/30.0, 53.0/360.0, 19.0/720.0],
[95.0/288.0, 1427.0/1440.0, -133.0/240.0, 241.0/720.0, -173.0/1440.0, 3.0/160.0],
[19087.0/60480.0, 2713.0/2520.0, -15487.0/20160.0, 586.0/945.0, -6737.0/20160.0, 263.0/2520.0, -863.0/60480.0],
[5257.0/17280.0, 139849.0/120960.0, -4511.0/4480.0, 123133.0/120960.0, -88547.0/120960.0, 1537.0/4480.0, -11351.0/120960.0, 275.0/24192.0]
]
self.coef_inv = [
[1],
[1, 1],
[2.0/3.0, 4.0/3.0, -1.0/3.0],
[6.0/11.0, 18.0/11.0, -9.0/11.0, 2.0/11.0],
[12.0/25.0, 48.0/25.0, -36.0/25.0, 16.0/25.0, -3.0/25.0],
[60.0/137.0, 300.0/137.0, -300.0/137.0, 200.0/137.0, -75.0/137.0, 12.0/137.0],
[60.0/147.0, 360.0/147.0, -450.0/147.0, 400.0/147.0, -225.0/147.0, 72.0/147.0, -10.0/147.0]
]
def get_ab(self, ans, idx, order):
value = ans[idx-1][1]
for i in range(1, order+1):
value += (self.h)*(self.coef_ab[order][i-1])*self.f(ans[idx-i][0], ans[idx-i][1])
return value
def euler(self):
ans = []
ans.append([self.t0, self.y0])
t, y = self.t0, self.y0
for i in range(1, self.nsteps+1):
y = y + self.h*self.f(t, y)
t = t + self.h
ans.append([t, y])
return ans
def inverse_euler(self):
ans = []
ans.append([self.t0, self.y0])
t, y = self.t0, self.y0
for i in range(1, self.nsteps+1):
k = y + self.h*self.f(t, y)
y = y + self.h*self.f(t + self.h, k)
t = t + self.h
ans.append([t, y])
return ans
def improved_euler(self):
ans = []
ans.append([self.t0, self.y0])
t, y = self.t0, self.y0
for i in range(1, self.nsteps+1):
k = y + self.h*self.f(t, y)
y = y + 0.5*self.h*(self.f(t + self.h, k) + self.f(t, y))
t = t + self.h
ans.append([t, y])
return ans
def runge_kutta(self):
ans = []
t, y = self.t0, self.y0
ans.append([t, y])
for i in range(1, self.nsteps+1):
k1 = f(t, y)
k2 = f(t + 0.5*h, y + 0.5*h*k1)
k3 = f(t + 0.5*h, y + 0.5*h*k2)
k4 = f(t + h, y + h*k3)
y = y + h*(k1 + 2*k2 + 2*k3 + k4)/6
t = t + h
ans.append([t, y])
return ans
def adam_bashforth_by_method(self, order, method):
if method == 'euler':
ans = self.euler()
elif method == 'inverse euler':
ans = self.inverse_euler()
elif method == 'improved euler':
ans = self.improved_euler()
elif method == 'runge kutta':
ans = self.runge_kutta()
elif method == 'list':
ans = self.inital_points
h, f = self.h, self.f
for i in range(order, self.nsteps+1):
if len(ans) == i:
ans.append([0, 0])
ans[i][1] = self.get_ab(ans, i, order)
ans[i][0] = ans[i-1][0] + h
return ans
def get_am(self, ans, idx, order):
value = ans[idx-1][1]
ans[idx][1] = self.get_ab(ans, idx, order)
ans[idx][0] = ans[idx-1][0] + self.h
for i in range(0, order+1):
value += self.h*self.coef_am[order][i]*self.f(ans[idx-i][0], ans[idx-i][1])
return value
def adam_multon_by_method(self, order, method):
if method == 'euler':
ans = self.euler()
elif method == 'inverse euler':
ans = self.inverse_euler()
elif method == 'improved euler':
ans = self.improved_euler()
elif method == 'runge kutta':
ans = self.runge_kutta()
elif method == 'list':
ans = self.inital_points
h, f = self.h, self.f
for i in range(order, self.nsteps+1):
if len(ans) == i:
ans.append([0, 0])
ans[i][1] = self.get_am(ans, i, order)
ans[i][0] = ans[i-1][0] + h
return ans
def get_inv(self, ans, idx, order):
ans[idx][1] = self.get_ab(ans, idx, order)
ans[idx][0] = ans[idx-1][0] + self.h
value = self.coef_inv[order][0]*self.h*self.f(ans[idx][0], ans[idx][1])
for i in range (1, order+1):
value += self.coef_inv[order][i]*ans[idx-i][1]
return value
def backward_diff(self, order, method):
if method == 'euler':
ans = self.euler()
elif method == 'inverse euler':
ans = self.inverse_euler()
elif method == 'improved euler':
ans = self.improved_euler()
elif method == 'runge kutta':
ans = self.runge_kutta()
elif method == 'list':
ans = self.inital_points
h, f = self.h, self.f
for i in range(order, self.nsteps+1):
if len(ans) == i:
ans.append([0, 0])
ans[i][1] = self.get_inv(ans, i, order)
ans[i][0] = ans[i-1][0] + h
return ans
#Main part of the code
#We wish to find an approximate solution to the equation dy/dt = f(t, y)
f = open("in.txt")
for line in f:
entrada = line.split()
method = entrada[0]
ini_pts = []
if method == 'adam_bashforth' or method == 'adam_multon' or method == 'formula_inversa':
order = int(entrada[-1])
expr = sympify(entrada[-2])
t, y = symbols("t y")
f = lambdify((t, y), expr, "numpy")
nsteps = int(entrada[-3])
h = float(entrada[-4])
t0, y0 = float(entrada[-5]), 0
for i in range(1, 1 + order):
ini_pts.append([t0 + (i-1)*h, float(entrada[i])])
else:
y0, t0 = float(entrada[1]), float(entrada[2])
h = float(entrada[3])
nsteps = int(entrada[4])
expr = sympify(entrada[5])
t, y = symbols("t y")
f = lambdify((t, y), expr, "numpy")
solver = Solver(f, t0, y0, h, nsteps, ini_pts)
pts = []
if method == "euler":
pts = solver.euler()
print("Metodo de Euler")
elif method == "euler_inverso":
pts = solver.inverse_euler()
print("Metodo de Euler Inverso")
elif method == "euler_aprimorado":
pts = solver.improved_euler()
print("Metodo de Euler Aprimorado")
elif method == "runge_kutta":
pts = solver.runge_kutta()
print("Metodo de Runge-Kutta")
elif method == "adam_bashforth_by_euler":
order = int(entrada[6])
print("Metodo de Adam-Bashforth por Euler")
pts = solver.adam_bashforth_by_method(order, 'euler')
elif method == 'adam_bashforth_by_euler_inverso':
order = int(entrada[6])
print("Metodo de Adam-Bashforth por Euler Inverso")
pts = solver.adam_bashforth_by_method(order, 'inverse euler')
elif method == 'adam_bashforth_by_euler_aprimorado':
order = int(entrada[6])
print("Metodo de Adam-Bashforth por Euler Aprimorado")
pts = solver.adam_bashforth_by_method(order, 'improved euler')
elif method == 'adam_bashforth_by_runge_kutta':
order = int(entrada[6])
print("Metodo de Adam-Bashforth por Runge Kutta")
pts = solver.adam_bashforth_by_method(order, 'runge kutta')
elif method == 'adam_bashforth':
print("Metodo de Adam-Bashforth")
pts = solver.adam_bashforth_by_method(order, 'list')
elif method == 'adam_multon':
print("Metodo de Adam-Multon")
pts = solver.adam_multon_by_method(order-1, 'list')
elif method == 'adam_multon_by_euler':
order = int(entrada[6])
print("Metodo de Adam-Multon por Euler")
pts = solver.adam_multon_by_method(order-1, 'euler')
elif method == 'adam_multon_by_euler_inverso':
order = int(entrada[6])
print("Metodo de Adam-Multon por Euler Inverso")
pts = solver.adam_multon_by_method(order-1, 'inverse euler')
elif method == 'adam_multon_by_euler_aprimorado':
order = int(entrada[6])
print("Metodo de Adam-Multon por Euler Aprimorado")
pts = solver.adam_multon_by_method(order-1, 'improved euler')
elif method == 'adam_multon_by_runge_kutta':
order = int(entrada[6])
print("Metodo de Adam-Multon por Runge Kutta")
pts = solver.adam_multon_by_method(order-1, 'runge kutta')
elif method == 'formula_inversa':
print("Metodo Formula Inversa de Diferenciacao")
pts = solver.backward_diff(order-1, 'list')
elif method == 'formula_inversa_by_euler':
order = int(entrada[6])
print("Metodo Formula Inversa de Diferenciacao por Euler")
pts = solver.backward_diff(order-1, 'euler')
elif method == 'formula_inversa_by_euler_inverso':
order = int(entrada[6])
print("Metodo Formula Inversa de Diferenciacao por Euler Inverso")
pts = solver.backward_diff(order-1, 'inverse euler')
elif method == 'formula_inversa_by_euler_aprimorado':
order = int(entrada[6])
print("Metodo Formula Inversa de Diferenciacao por Euler Aprimorado")
pts = solver.backward_diff(order-1, 'improved euler')
elif method == 'formula_inversa_by_runge_kutta':
order = int(entrada[6])
print("Metodo Formula Inversa de Diferenciacao por Runge Kutta")
pts = solver.backward_diff(order-1, 'runge kutta')
print("y(%.2f) = %.2f" %(pts[0][0], pts[0][1]))
print("h = %.2f" %h)
i = 0
for [x, y] in pts:
print("%d %.10lf" %(i, y))
i += 1
######################### ploting the solution #############################
####### comment the folowing lines to not plot solution ######
toplot = np.array(pts)
plt.plot(toplot[:, 0], toplot[:, 1], ls = '-', color = 'black', linewidth = 1)
plt.show()
#################################################################################
print("\n")
| vserraa/Numerical-Methods | solver.py | solver.py | py | 9,570 | python | en | code | 0 | github-code | 36 |
16835981819 | import pytest
from backend.utils.assertions import assert_equals, assert_true
from backend.utils.helper import Helper
from front.data_for_tests.calender_data_for_tests import DataForTests
from front.pages.page import CalendarPage, CalendarConfiguration
@pytest.mark.usefixtures("setup", "test_config")
class TestCalender:
@pytest.fixture(scope="class")
def calendar_page(self, setup, test_config):
driver = setup
calendar_page = CalendarPage(driver)
calendar_page.open_url(Helper.get_config_value_by_name(test_config, ["calender", "url"]))
return calendar_page
@pytest.fixture(scope="class")
def switch_to_infinite_scroll_and_month_view(self, setup, calendar_page):
driver = setup
calendar_config = CalendarConfiguration(driver)
calendar_config.enable_infinite_scroll()
calendar_page.switch_view("Month")
@pytest.mark.parametrize("test_data", DataForTests.switch_to_infinite_scroll_and_month_view())
def test_switch_to_infinite_scroll_and_month_view(self, setup, test_data, calendar_page,
switch_to_infinite_scroll_and_month_view):
driver = setup
calendar_config = CalendarConfiguration(driver)
calendar_configuration = calendar_config.get_calendar_configuration()
assert_equals(calendar_configuration, test_data["configuration"])
calendar_page.verify_requested_view_checked(test_data["view_type"])
def test_create_events_check_element_count_increased(self, calendar_page,
switch_to_infinite_scroll_and_month_view):
event_resource_id = calendar_page.create_event()
assert_true(calendar_page.verify_event_was_created(event_resource_id),
"Error: After creating an event, Event not found!")
event_resource_id = calendar_page.create_event()
assert_true(calendar_page.verify_event_was_created(event_resource_id),
"Error: After creating an event, Event not found!")
def test_create_event_and_go_ahead_one_month_and_check_dom_decrease(self, setup, calendar_page,
switch_to_infinite_scroll_and_month_view):
event_resource_id = calendar_page.create_event()
calendar_page.navigation_forward(1)
assert_true(not calendar_page.verify_event_was_created(event_resource_id),
"Error: After creating an event and going one month forward, Event found!")
@pytest.mark.skip("This test have bug, so it will failed")
def test_create_event_change_month_and_check_event_still_exist(self, setup, calendar_page,
switch_to_infinite_scroll_and_month_view):
event_resource_id = calendar_page.create_event()
calendar_page.navigation_forward(1)
assert_true(not calendar_page.verify_event_was_created(event_resource_id),
"Error: After creating an event and going one month forward, Event found!")
calendar_page.navigation_backward(1)
assert_true(calendar_page.verify_event_was_created(event_resource_id),
"Error: After creating an event, change month and go back.Event not found!") | RivkaTestGit/MoonActive | front/tests/test_calender.py | test_calender.py | py | 3,347 | python | en | code | 0 | github-code | 36 |
74834157225 | import numpy as np
import random
import copy
# utils
from extra.utils import trans_vector, get_cards_small_extend, calculate_score
class RunfastGameEnv():
def __init__(self, cards=[], position=0, next_player=0, pattern=0):
self.position = position
self.next_player = next_player
self.cards = cards
self.pattern = pattern
self.set_dict()
self.cards_used = np.zeros(13)
self.boom_success = 0
self.status = np.array([0])
self.current_pattern = 0
def get_state(self):
original_vec = np.array([4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 1])
cards_used = self.cards_used
cards_inhand = self.cards
cards_inhand_small = trans_vector(cards_inhand)
next_player = self.get_next_player()
next_next_player = next_player.get_next_player()
next_cards_used = next_player.cards_used
next_next_cards_used = next_next_player.cards_used
status = self.status
cards_left = original_vec - cards_used - next_cards_used - next_next_cards_used
state = np.concatenate((cards_inhand_small, cards_used, next_cards_used,
next_next_cards_used, cards_left, status), axis=0)
return state
def set_dict(self):
number2card = {}
card2number = {}
j = 0
for i in range(3, 11):
number2card.update({j: "♠" + str(i)})
card2number.update({"♠" + str(i): j})
j = j + 1
number2card.update({j: "♥" + str(i)})
card2number.update({"♠" + str(i): j})
j = j + 1
number2card.update({j: "♣" + str(i)})
card2number.update({"♠" + str(i): j})
j = j + 1
number2card.update({j: "♦" + str(i)})
card2number.update({"♠" + str(i): j})
j = j + 1
number2card.update({j: "♠J"})
card2number.update({"♠J": j})
j = j + 1
number2card.update({j: "♥J"})
card2number.update({"♥J": j})
j = j + 1
number2card.update({j: "♣J"})
card2number.update({"♣J": j})
j = j + 1
number2card.update({j: "♦J"})
card2number.update({"♦J": j})
j = j + 1
number2card.update({j: "♠Q"})
card2number.update({"♠Q": j})
j = j + 1
number2card.update({j: "♥Q"})
card2number.update({"♥Q": j})
j = j + 1
number2card.update({j: "♣Q"})
card2number.update({"♣Q": j})
j = j + 1
number2card.update({j: "♦Q"})
card2number.update({"♦Q": j})
j = j + 1
number2card.update({j: "♠K"})
card2number.update({"♠K": j})
j = j + 1
number2card.update({j: "♥K"})
card2number.update({"♥K": j})
j = j + 1
number2card.update({j: "♣K"})
card2number.update({"♣K": j})
j = j + 1
number2card.update({j: "♦K"})
card2number.update({"♦K": j})
j = j + 1
number2card.update({j: "♠A"})
card2number.update({"♠A": j})
j = j + 1
number2card.update({j: "♥A"})
card2number.update({"♥A": j})
j = j + 1
number2card.update({j: "♣A"})
card2number.update({"♣A": j})
j = j + 1
number2card.update({j: "♦2"})
card2number.update({"♦2": j})
self.number2card = number2card
self.card2number = card2number
def cards2vec(self):
card_vec = [0] * 13
for i in self.cards:
if i < 47:
index = int(i / 4)
card_vec[index] += 1
else:
card_vec[12] = 1
return card_vec
def set_cards(self, cards):
self.cards = cards
def get_cards(self):
return self.cards
def get_next_player(self):
return self.next_player
def get_position(self):
return self.position
def show_cards(self):
print(self.position + "'s cards:")
cards = []
for i in self.cards:
cards.append(self.number2card[i])
print(cards)
def update_position(self, new_position):
self.position = new_position
def update_next(self, new_next):
self.next_player = new_next
def check_long(self, lenth):
cards_vec = self.cards2vec()
current_len = 0
for i in range(len(cards_vec) - lenth):
current_len = 0
for j in range(lenth):
if cards_vec[i + j] >= 1:
current_len += 1
if current_len == lenth:
break
else:
current_len = 0
break
if current_len == lenth:
return True
return False
# 检查连对子、飞机等情况
def check_plane(self, lenth, width):
cards_vec = self.cards2vec()
current_len = 0
for i in range(len(cards_vec) - lenth):
current_len = 0
for j in range(lenth):
if cards_vec[i + j] >= width:
current_len += 1
if current_len == lenth:
break
else:
current_len = 0
break
if current_len == lenth:
return True
return False
def search_pattern(self):
pattern_to_playcards = []
# pattern 0 代表出单牌
pattern_to_playcards.append(0)
# pattern 1 代表出对子
cards_vec = self.cards2vec()
for i in cards_vec:
if i >= 2:
pattern_to_playcards.append(1)
break
# pattern 2或32 代表出三张
for i in cards_vec:
if i >= 3:
# 如果不想让三带二先出,则注释下面这个添加32的操作
# pattern_to_playcards.append(32)
pattern_to_playcards.append(2)
break
# pattern 3 代表炸弹
for i in cards_vec:
if i == 4:
pattern_to_playcards.append(3)
break
# pattern 4 代表五张牌的顺子
lenth = 5
if self.check_long(lenth):
pattern_to_playcards.append(4)
# pattern 5代表6张牌的顺子
lenth = 6
if self.check_long(lenth):
pattern_to_playcards.append(5)
# pattern 6 代表7张牌的顺子
lenth = 7
if self.check_long(lenth):
pattern_to_playcards.append(6)
# pattern 7 代表8张牌的顺子
lenth = 8
if self.check_long(lenth):
pattern_to_playcards.append(7)
# pattern 8 代表9张牌的顺子
lenth = 9
if self.check_long(lenth):
pattern_to_playcards.append(8)
# pattern 9 代表10张牌的顺子
lenth = 10
if self.check_long(lenth):
pattern_to_playcards.append(9)
# pattern 10 代11张牌的顺子
lenth = 11
if self.check_long(lenth):
pattern_to_playcards.append(10)
# pattern 11 代表12张牌的顺子
lenth = 12
if self.check_long(lenth):
pattern_to_playcards.append(11)
# pattern 12 代表2连对
lenth = 2
width = 2
if self.check_plane(lenth, width):
pattern_to_playcards.append(12)
# pattern 13 代表3连对
lenth = 3
width = 2
if self.check_plane(lenth, width):
pattern_to_playcards.append(13)
# pattern 14 代表4连对
lenth = 4
width = 2
if self.check_plane(lenth, width):
pattern_to_playcards.append(14)
# pattern 15 代表5连对
lenth = 5
width = 2
if self.check_plane(lenth, width):
pattern_to_playcards.append(15)
# pattern 16 代表6连对
lenth = 6
width = 2
if self.check_plane(lenth, width):
pattern_to_playcards.append(16)
# pattern 17 代表7连对
lenth = 7
width = 2
if self.check_plane(lenth, width):
pattern_to_playcards.append(17)
# pattern 18 代表8连对
lenth = 8
width = 2
if self.check_plane(lenth, width):
pattern_to_playcards.append(18)
# pattern 19 代表二连飞机
lenth = 2
width = 3
if self.check_plane(lenth, width):
pattern_to_playcards.append(19)
# pattern 20 代表三连飞机
lenth = 3
width = 3
if self.check_plane(lenth, width):
pattern_to_playcards.append(20)
# pattern 21 代表四连飞机
lenth = 4
width = 3
if self.check_plane(lenth, width):
pattern_to_playcards.append(21)
return pattern_to_playcards
# 得到连对和飞机等出法
def get_air_solution(self, the_lenth=2, the_width=2, biggest=[-1, -1, -1, -1, -1]):
lenth = the_lenth
width = the_width
current_len = 0
cash_card = []
cards_vec = self.cards2vec()
way_to_playcards = []
if biggest[0] == -1:
for i in range(len(cards_vec) - lenth):
current_len = 0
for j in range(lenth):
if cards_vec[i + j] >= width:
current_len += 1
if current_len == lenth:
break
else:
current_len = 0
break
if current_len == lenth:
base = i
count = 0
cash_card = []
for element in self.cards:
if int(element / 4) == base:
cash_card.append(element)
count += 1
if count == width:
base += 1
count = 0
if len(cash_card) == lenth * width:
way_to_playcards.append(cash_card)
break
else:
base_line = int(biggest[0] / 4) + 1
if base_line >= len(cards_vec) - lenth:
pass
else:
for i in range(base_line, len(cards_vec) - lenth):
current_len = 0
for j in range(lenth):
if cards_vec[i + j] >= width:
current_len += 1
if current_len == lenth:
break
else:
current_len = 0
break
if current_len == lenth:
base = i
cash_card = []
count = 0
for element in self.cards:
if int(element / 4) == base:
cash_card.append(element)
count += 1
if count == width:
base += 1
count = 0
if len(cash_card) == lenth * width:
way_to_playcards.append(cash_card)
break
if width == 2:
return way_to_playcards
elif width == 3:
if len(way_to_playcards) > 0:
if biggest[0] == -1:
if len(self.cards) <= 5 * lenth:
choice_cards = list(set(self.cards) - set(way_to_playcards[0]))
if len(choice_cards) > 0:
choice_cards.sort(reverse=False)
for element in choice_cards:
way_to_playcards[0].append(element)
return way_to_playcards[0]
else:
return way_to_playcards[0]
else:
return_result = []
for index in range(len(way_to_playcards)):
choice_cards = list(set(self.cards) - set(way_to_playcards[index]))
for i_i in range(10):
appendage = random.sample(choice_cards, lenth * 2)
appendage.sort(reverse=False)
a = copy.deepcopy(way_to_playcards[index])
for element in appendage:
a.append(element)
return_result.append(a)
return return_result
else:
if len(self.cards) < 5 * lenth:
return []
elif len(self.cards) == 5 * lenth:
choice_cards = list(set(self.cards) - set(way_to_playcards[0]))
if len(choice_cards) > 0:
choice_cards.sort(reverse=False)
for element in choice_cards:
way_to_playcards[0].append(element)
return way_to_playcards[0]
else:
return_result = []
for index in range(len(way_to_playcards)):
choice_cards = list(set(self.cards) - set(way_to_playcards[index]))
for i_i in range(10):
appendage = random.sample(choice_cards, lenth * 2)
appendage.sort(reverse=False)
a = copy.deepcopy(way_to_playcards[index])
for element in appendage:
a.append(element)
return_result.append(a)
return return_result
else:
return []
# 得到顺子的各种出法
def get_solution(self, the_lenth=5, biggest=[-1, -1, -1, -1, -1]):
lenth = the_lenth
current_len = 0
cash_card = []
cards_vec = self.cards2vec()
way_to_playcards = []
if biggest[0] == -1:
for i in range(len(cards_vec) - lenth):
current_len = 0
for j in range(lenth):
if cards_vec[i + j] >= 1:
current_len += 1
if current_len == lenth:
break
else:
current_len = 0
break
if current_len == lenth:
base = i
cash_card = []
for element in self.cards:
if int(element / 4) == base:
cash_card.append(element)
base += 1
if len(cash_card) == lenth:
way_to_playcards.append(cash_card)
break
else:
base_line = int(biggest[0] / 4) + 1
if base_line >= len(cards_vec) - lenth:
pass
else:
for i in range(base_line, len(cards_vec) - lenth):
current_len = 0
for j in range(lenth):
if cards_vec[i + j] >= 1:
current_len += 1
if current_len == lenth:
break
else:
current_len = 0
break
if current_len == lenth:
base = i
cash_card = []
for element in self.cards:
if int(element / 4) == base:
cash_card.append(element)
base += 1
if len(cash_card) == lenth:
way_to_playcards.append(cash_card)
break
return way_to_playcards
def update_pattern(self, new_pattern):
self.pattern = new_pattern
def search_play_methods(self, pattern=0, biggest=[-1, -1, -1, -1, -1]):
way_to_playcards = []
# 出单牌
if pattern == 0:
if len(self.get_next_player().get_cards()) == 1:
big = biggest[0]
if big == -1:
base = self.cards[0]
elif big > 43 and big < 47:
base = 47
else:
base = (int(big / 4) + 1) * 4
if self.cards[-1] >= base:
way_to_playcards.append([self.cards[-1]])
else:
big = biggest[0]
all_cards = self.cards
if big == -1:
base = self.cards[0]
elif big > 43 and big < 47:
base = 47
else:
base = (int(big / 4) + 1) * 4
for i in all_cards:
list_a = []
if i >= base:
list_a.append(i)
way_to_playcards.append(list_a)
if base > 43 and base < 47:
base = 47
else:
base = (int(base / 4) + 1) * 4
elif pattern == 1:
if biggest[0] == -1:
base = 0
else:
base = int(biggest[0] / 4) + 1
if len(self.cards) <= 1:
pass
else:
for i in range(len(self.cards) - 1):
if int(self.cards[i] / 4) >= base and int(self.cards[i] / 4) == int(self.cards[i + 1] / 4) and \
self.cards[i + 1] != 47:
way_to_playcards.append([self.cards[i], self.cards[i + 1]])
base = int(self.cards[i] / 4) + 1
# 出三张模式最高,之前是2,改为了32
elif pattern == 2:
# elif pattern==32 or pattern==2:
if biggest[0] == -1:
base = 0
else:
base = int(biggest[0] / 4) + 1
if len(self.cards) <= 2:
pass
elif biggest[0] == -1 and len(self.cards) >= 3:
for i in range(len(self.cards) - 2):
if int(self.cards[i] / 4) >= base and int(self.cards[i] / 4) == int(self.cards[i + 2] / 4) and \
self.cards[i + 2] != 47:
base_list = [self.cards[i], self.cards[i + 1], self.cards[i + 2]]
# choice_cards = list(set(self.cards) - set(base_list))
if len(self.cards) == 3:
way_to_playcards.append([self.cards[i], self.cards[i + 1], self.cards[i + 2]])
elif len(self.cards) == 4:
choice_cards = list(set(self.cards) - set(base_list))
way_to_playcards.append(
[self.cards[i], self.cards[i + 1], self.cards[i + 2], choice_cards[0]])
elif len(self.cards) == 5:
choice_cards = list(set(self.cards) - set(base_list))
if choice_cards[0] < choice_cards[1]:
way_to_playcards.append(
[self.cards[i], self.cards[i + 1], self.cards[i + 2], choice_cards[0],
choice_cards[1]])
else:
way_to_playcards.append(
[self.cards[i], self.cards[i + 1], self.cards[i + 2], choice_cards[1],
choice_cards[0]])
else:
choice_cards = list(set(self.cards) - set(base_list))
for i_i in range(10):
random.shuffle(choice_cards)
if choice_cards[0] < choice_cards[1]:
way_to_playcards.append(
[self.cards[i], self.cards[i + 1], self.cards[i + 2], choice_cards[0],
choice_cards[1]])
else:
way_to_playcards.append(
[self.cards[i], self.cards[i + 1], self.cards[i + 2], choice_cards[1],
choice_cards[0]])
# way_to_playcards.append([self.cards[i], self.cards[i+1], self.cards[i+2] ])
base = int(self.cards[i] / 4) + 1
elif biggest[0] != -1 and len(self.cards) < 5:
pass
elif biggest[0] != -1 and len(self.cards) >= 5:
base = int(biggest[0] / 4) + 1
for i in range(len(self.cards) - 2):
if int(self.cards[i] / 4) >= base and int(self.cards[i] / 4) == int(self.cards[i + 2] / 4) and \
self.cards[i + 2] != 47:
base_list = [self.cards[i], self.cards[i + 1], self.cards[i + 2]]
choice_cards = list(set(self.cards) - set(base_list))
for i_i in range(10):
random.shuffle(choice_cards)
if choice_cards[0] < choice_cards[1]:
way_to_playcards.append(
[self.cards[i], self.cards[i + 1], self.cards[i + 2], choice_cards[0],
choice_cards[1]])
else:
way_to_playcards.append(
[self.cards[i], self.cards[i + 1], self.cards[i + 2], choice_cards[1],
choice_cards[0]])
if len(self.cards) == 5:
break
# way_to_playcards.append([self.cards[i], self.cards[i+1], self.cards[i+2] ])
base = int(self.cards[i] / 4) + 1
# 是不是四张牌
elif pattern == 3:
# 是不是炸弹
if biggest[0] == -1:
Cards2vec = self.cards2vec()
for i in range(len(Cards2vec)):
if Cards2vec[i] == 4:
a = [i * 4 + j for j in range(4)]
way_to_playcards.append(a)
else:
base = int(biggest[0] / 4)
Cards2vec = self.cards2vec()
for i in range(len(Cards2vec)):
if i > base and i < 11:
if Cards2vec[i] == 4:
a = [i * 4 + j for j in range(4)]
way_to_playcards.append(a)
# 是不是顺子
elif pattern == 4:
lenth = 5
result = self.get_solution(lenth, biggest)
if len(result) > 0:
for element in result:
way_to_playcards.append(element)
# 六张牌的顺子
elif pattern == 5:
lenth = 6
result = self.get_solution(lenth, biggest)
if len(result) > 0:
for element in result:
way_to_playcards.append(element)
# 7张牌的顺子
elif pattern == 6:
lenth = 7
result = self.get_solution(lenth, biggest)
if len(result) > 0:
for element in result:
way_to_playcards.append(element)
# 8张牌的顺子
elif pattern == 7:
lenth = 8
result = self.get_solution(lenth, biggest)
if len(result) > 0:
for element in result:
way_to_playcards.append(element)
# 9张牌的顺子
elif pattern == 8:
lenth = 9
result = self.get_solution(lenth, biggest)
if len(result) > 0:
for element in result:
way_to_playcards.append(element)
# 10张牌的顺子
elif pattern == 9:
lenth = 10
result = self.get_solution(lenth, biggest)
if len(result) > 0:
for element in result:
way_to_playcards.append(element)
# 11张牌的顺子
elif pattern == 10:
lenth = 11
result = self.get_solution(lenth, biggest)
if len(result) > 0:
for element in result:
way_to_playcards.append(element)
# 12张牌的顺子
elif pattern == 11:
lenth = 12
result = self.get_solution(lenth, biggest)
if len(result) > 0:
for element in result:
way_to_playcards.append(element)
# 如果出的是连对
elif pattern > 11 and pattern < 19:
the_width = 2
if biggest[0] == -1:
the_lenth = pattern - 10
else:
the_lenth = int(len(biggest) / the_width)
result = self.get_air_solution(the_lenth, the_width, biggest)
if len(result) > 0:
for element in result:
way_to_playcards.append(element)
elif pattern >= 19:
the_width = 3
if biggest[0] == -1:
the_lenth = pattern - 17
else:
the_lenth = int(len(biggest) / the_width)
result = self.get_air_solution(the_lenth, the_width, biggest)
if len(result) > 0:
for element in result:
way_to_playcards.append(element)
if pattern != 3 and biggest[0] != -1:
Cards2vec = self.cards2vec()
for i in range(len(Cards2vec)):
if Cards2vec[i] == 4:
a = [i * 4 + j for j in range(4)]
way_to_playcards.append(a)
return way_to_playcards
def play_cards(self, cards_toplay, test=False):
if len(cards_toplay) == 0:
if test:
print(self.position + '要不起!')
return False
cards_toplay_zu = ''
for i in cards_toplay:
cards_toplay_zu = cards_toplay_zu + self.number2card[i] + ','
if test:
print(self.position + ' 出' + cards_toplay_zu)
self.cards = list(set(self.cards) - set(cards_toplay))
if len(self.cards) == 0:
if test:
print(self.position + " wins.")
return True
self.cards.sort(reverse=False)
return False
| zawnpn/RL_RunFast | GameEnv/RunFastGame.py | RunFastGame.py | py | 28,130 | python | en | code | 6 | github-code | 36 |
41240226723 | # Importar Librerias
import pandas as pd
import json
# Opening JSON file
f = open('orderbooks_05jul21.json')
print(f)
# Returns JSON object as a dictionary
orderbooks_data = json.load(f)
ob_data = orderbooks_data['bitfinex']
# Drop Keys with none values
ob_data = {i_key: i_value for i_key,i_value in ob_data.items() if i_value is not None}
# Convert to DataFrame and rearange columns
ob_data = {i_ob: pd.DataFrame(ob_data[i_ob])[['bid_size', 'bid', 'ask', 'ask_size']]
if ob_data[i_ob] is not None else None for i_ob in list(ob_data.keys())}
| if722399/Laboratorio-1-MySt- | dataa.py | dataa.py | py | 578 | python | en | code | 0 | github-code | 36 |
43375661698 | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the fibonacciModified function below.
def fibonacciModified(t1, t2, n):
seq=[t1,t2]
if n<=2:
print(seq[n-1])
else:
for i in range(n-2):
seq.append(seq[-2]+seq[-1]**2)
return seq[-1]
return
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t1T2n = input().split()
t1 = int(t1T2n[0])
t2 = int(t1T2n[1])
n = int(t1T2n[2])
result = fibonacciModified(t1, t2, n)
fptr.write(str(result) + '\n')
fptr.close() | emfreak/Competitive-Programming | Hackerrank/Algorithms/Dynamic Programming/fibonacci_modified.py | fibonacci_modified.py | py | 598 | python | en | code | 0 | github-code | 36 |
496184917 | # -*- coding: utf-8 -*-
import time
import click
from click.testing import CliRunner
from dagster_aws.cli.term import Spinner, Term
def test_term():
def term_helper(term_cmd, prefix, exit_code=0):
@click.command()
def fn():
term_cmd('foo bar')
runner = CliRunner()
result = runner.invoke(fn)
assert result.exit_code == exit_code
assert result.output == prefix + u'foo bar\n'
expected = [
(Term.error, Term.ERROR_PREFIX),
(Term.info, Term.INFO_PREFIX),
(Term.success, Term.SUCCESS_PREFIX),
(Term.waiting, Term.WAITING_PREFIX),
(Term.warning, Term.WARNING_PREFIX),
]
for term_cmd, prefix in expected:
term_helper(term_cmd, prefix)
term_helper(Term.fatal, Term.FATAL_PREFIX, exit_code=1)
def test_spinner(capsys):
with Spinner():
time.sleep(0.5)
captured = capsys.readouterr()
assert captured.out.encode('unicode-escape').startswith(
b'\\u280b\\x08\\u2819\\x08\\u2839\\x08\\u2838\\x08'
)
| helloworld/continuous-dagster | deploy/dagster_modules/libraries/dagster-aws/dagster_aws_tests/cli_tests/test_term.py | test_term.py | py | 1,054 | python | en | code | 2 | github-code | 36 |
35147097028 | import nltk
from functools import lru_cache
from nltk.corpus import stopwords
from nltk.stem.snowball import EnglishStemmer
import re
from bs4 import BeautifulSoup
class Preprocessor:
def __init__(self):
# Stemming is the most time-consuming part of the indexing process, we attach a lru_cache to the stemmer
# which will store upto 100000 stemmed forms and reuse them when possible instead of applying the
# stemming algorithm.
self.stem = lru_cache(maxsize=100000)(EnglishStemmer().stem)
self.tokenize = nltk.tokenize.WhitespaceTokenizer().tokenize
def __call__(self, text):
text = re.sub(r'[\.\?\!\,\:\;\"]', ' ', text)
# text = re.sub('[-]', ' ', text)
text = re.sub(r'<.?p>', '', text)
# text = BeautifulSoup(text, "lxml").text
tokens = nltk.tokenize.word_tokenize(text)
tokens = [token.lower() for token in tokens if
token.isalpha()] # removing punctuations from tokens and converting to lower case
stop_words = stopwords.words('english')
tokens = [token for token in tokens if not token in stop_words]
tokens = [self.stem(token) for token in tokens]
return tokens
| sidsachan/movie_sentiment | preprocessor.py | preprocessor.py | py | 1,221 | python | en | code | 0 | github-code | 36 |
32782552053 | import logging
import cabby
from events.stix import parse_stix_package, STIXPackage
def collect_indicator_packages(configuration: dict) -> STIXPackage:
for repository in configuration['repositories']:
yield from poll_repository(repository)
def poll_repository(repository: dict) -> list:
logging.debug("Connecting to %s", repository['name'])
client = cabby.create_client(**repository['client'])
collections = (c for c in client.get_collections()
if c.name not in repository.get('exclusions', ()))
for collection in collections:
yield from poll_collection(client, collection.name)
logging.info("Repository %s exhausted", repository['name'])
def poll_collection(client: cabby.Client11, collection_name: str) -> list:
packages = 0
indicators = 0
logging.debug("Polling from collection %s", collection_name)
for block in client.poll(collection_name=collection_name):
package = parse_stix_package(block.content)
if package is not None:
packages += 1
indicators += len(package.indicators)
yield package
logging.info("Collection %s: Packages %d - IOCs %d",
collection_name, packages, indicators)
| noxdafox/iocep | events/taxii.py | taxii.py | py | 1,252 | python | en | code | 0 | github-code | 36 |
17305255074 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 7 17:46:55 2021
@author: Administrator
"""
import SimpleITK as sitk
import numpy as np
import os
import cv2
from shutil import copyfile
import random
num=379
lists=['train_002_0000.nii.gz','train_019_0000.nii.gz','train_069_0000.nii.gz','train_101_0000.nii.gz','train_114_0000.nii.gz','train_127_0000.nii.gz','train_150_0000.nii.gz','train_134_0000.nii.gz','train_174_0000.nii.gz','train_195_0000.nii.gz']
for name in lists:
num=num+1
copyfile(os.path.join(r'.\data\Raw\TrainingImg',name),os.path.join(r'.\data\Raw\TrainingImg','train_'+str(num)+'_0000.nii.gz'))
copyfile(os.path.join(r'.\data\Raw\TrainingMask',name.replace('_0000','')),os.path.join(r'.\data\Raw\TrainingMask','train_'+str(num)+'.nii.gz'))
num=num+1
img=sitk.ReadImage(os.path.join(r'.\data\Raw\TrainingImg',name))
mask=sitk.ReadImage(os.path.join(r'.\data\Raw\TrainingMask',name.replace('_0000','')))
imgarr=sitk.GetArrayFromImage(img)
maskarr=sitk.GetArrayFromImage(mask)
imgarr1=imgarr.copy()
imgarr1[maskarr!=2]=0
imgarr2=imgarr1.copy()
imgarr2[imgarr2>50]=0
for i in range(imgarr2.shape[0]):
for j in range(imgarr2.shape[1]):
for k in range(imgarr2.shape[2]):
if imgarr2[i,j,k]!=0:
imgarr2[i,j,k]=imgarr2[i,j,k]+random.randint(100,150)
imgarr1[imgarr1<=50]=0
imgarr1=imgarr1+imgarr2
imgarr[maskarr==2]=0
imgarr=imgarr+imgarr1
saveimg=sitk.GetImageFromArray(imgarr)
saveimg.SetSpacing(img.GetSpacing())
saveimg.SetDirection(img.GetDirection())
saveimg.SetOrigin(img.GetOrigin())
sitk.WriteImage(saveimg,os.path.join(r'.\data\Raw\TrainingImg','train_'+str(num)+'_0000.nii.gz'))
sitk.WriteImage(mask,os.path.join(r'.\data\Raw\TrainingMask','train_'+str(num)+'.nii.gz')) | xyndameinv/FLARE21 | process0.py | process0.py | py | 1,837 | python | en | code | 2 | github-code | 36 |
39723292841 | #Find list of all sub_breed breed name
import requests
def get_json_dog_output_dict(url):
r = requests.get(url)
output = r.json()
return output
def get_breed_sub_breed_full_name():
dog_output = get_json_dog_output_dict(url = "https://dog.ceo/api/breeds/list/all")
dog_breed_output = dog_output["message"]
dog_full_name = dog_breed_output.items()
for breed, sub_breeds in dog_full_name:
# print(f"breed:{breed}; type:{sub_breed}")
# print(f"{sub_breed} {breed}")
if sub_breeds:
for sub_breed in sub_breeds:
print(f"{sub_breed}-{breed}")
else:
print(f"{breed}")
if __name__ == "__main__":
output = get_json_dog_output_dict(url = "https://dog.ceo/api/breeds/list/all")
dog_keys_fullname = get_breed_sub_breed_full_name()
#print(dog_keys_fullname)
| Swetha-Vootkuri/PythonSessions | dogs_api/breed_sub_breed_list.py | breed_sub_breed_list.py | py | 859 | python | en | code | 0 | github-code | 36 |
72312960103 | from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from mmcv.utils import ConfigDict
from mmdet.core import bbox2roi
from mmdet.models.builder import HEADS
from mmfewshot.detection.models.roi_heads.meta_rcnn_roi_head import MetaRCNNRoIHead
class VAE(nn.Module):
def __init__(self,
in_channels: int,
latent_dim: int,
hidden_dim: int) -> None:
super(VAE, self).__init__()
self.latent_dim = latent_dim
self.encoder = nn.Sequential(
nn.Linear(in_channels, hidden_dim),
nn.BatchNorm1d(hidden_dim),
nn.LeakyReLU()
)
self.fc_mu = nn.Linear(hidden_dim, latent_dim)
self.fc_var = nn.Linear(hidden_dim, latent_dim)
self.decoder_input = nn.Linear(latent_dim, hidden_dim)
self.decoder = nn.Sequential(
nn.Linear(hidden_dim, in_channels),
nn.BatchNorm1d(in_channels),
nn.Sigmoid()
)
def encode(self, input: Tensor) -> List[Tensor]:
result = self.encoder(input)
mu = self.fc_mu(result)
log_var = self.fc_var(result)
return [mu, log_var]
def decode(self, z: Tensor) -> Tensor:
z = self.decoder_input(z)
z_out = self.decoder(z)
return z_out
def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu, std + mu
def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
mu, log_var = self.encode(input)
z, z_inv = self.reparameterize(mu, log_var)
z_out = self.decode(z)
return [z_out, z_inv, input, mu, log_var]
def loss_function(self, input, rec, mu, log_var, kld_weight=0.00025) -> dict:
recons_loss = F.mse_loss(rec, input)
kld_loss = torch.mean(-0.5 * torch.sum(1 +
log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)
loss = recons_loss + kld_weight * kld_loss
return {'loss_vae': loss}
@HEADS.register_module()
class VFARoIHead(MetaRCNNRoIHead):
def __init__(self, vae_dim=2048, *args, **kargs) -> None:
super().__init__(*args, **kargs)
self.vae = VAE(vae_dim, vae_dim, vae_dim)
def _bbox_forward_train(self, query_feats: List[Tensor],
support_feats: List[Tensor],
sampling_results: object,
query_img_metas: List[Dict],
query_gt_bboxes: List[Tensor],
query_gt_labels: List[Tensor],
support_gt_labels: List[Tensor]) -> Dict:
"""Forward function and calculate loss for box head in training.
Args:
query_feats (list[Tensor]): List of query features, each item
with shape (N, C, H, W).
support_feats (list[Tensor]): List of support features, each item
with shape (N, C, H, W).
sampling_results (obj:`SamplingResult`): Sampling results.
query_img_metas (list[dict]): List of query image info dict where
each dict has: 'img_shape', 'scale_factor', 'flip', and may
also contain 'filename', 'ori_shape', 'pad_shape', and
'img_norm_cfg'. For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
query_gt_bboxes (list[Tensor]): Ground truth bboxes for each query
image with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y]
format.
query_gt_labels (list[Tensor]): Class indices corresponding to
each box of query images.
support_gt_labels (list[Tensor]): Class indices corresponding to
each box of support images.
Returns:
dict: Predicted results and losses.
"""
query_rois = bbox2roi([res.bboxes for res in sampling_results])
query_roi_feats = self.extract_query_roi_feat(query_feats, query_rois)
support_feat = self.extract_support_feats(support_feats)[0]
support_feat_rec, support_feat_inv, _, mu, log_var = self.vae(
support_feat)
bbox_targets = self.bbox_head.get_targets(sampling_results,
query_gt_bboxes,
query_gt_labels,
self.train_cfg)
(labels, label_weights, bbox_targets, bbox_weights) = bbox_targets
loss_bbox = {'loss_cls': [], 'loss_bbox': [], 'acc': []}
batch_size = len(query_img_metas)
num_sample_per_imge = query_roi_feats.size(0) // batch_size
bbox_results = None
for img_id in range(batch_size):
start = img_id * num_sample_per_imge
end = (img_id + 1) * num_sample_per_imge
# class agnostic aggregation
# random_index = np.random.choice(
# range(query_gt_labels[img_id].size(0)))
# random_query_label = query_gt_labels[img_id][random_index]
random_index = np.random.choice(
range(len(support_gt_labels)))
random_query_label = support_gt_labels[random_index]
for i in range(support_feat.size(0)):
if support_gt_labels[i] == random_query_label:
bbox_results = self._bbox_forward(
query_roi_feats[start:end],
support_feat_inv[i].sigmoid().unsqueeze(0))
single_loss_bbox = self.bbox_head.loss(
bbox_results['cls_score'], bbox_results['bbox_pred'],
query_rois[start:end], labels[start:end],
label_weights[start:end], bbox_targets[start:end],
bbox_weights[start:end])
for key in single_loss_bbox.keys():
loss_bbox[key].append(single_loss_bbox[key])
if bbox_results is not None:
for key in loss_bbox.keys():
if key == 'acc':
loss_bbox[key] = torch.cat(loss_bbox['acc']).mean()
else:
loss_bbox[key] = torch.stack(
loss_bbox[key]).sum() / batch_size
# meta classification loss
if self.bbox_head.with_meta_cls_loss:
# input support feature classification
meta_cls_score = self.bbox_head.forward_meta_cls(support_feat_rec)
meta_cls_labels = torch.cat(support_gt_labels)
loss_meta_cls = self.bbox_head.loss_meta(
meta_cls_score, meta_cls_labels,
torch.ones_like(meta_cls_labels))
loss_bbox.update(loss_meta_cls)
loss_vae = self.vae.loss_function(
support_feat, support_feat_rec, mu, log_var)
loss_bbox.update(loss_vae)
bbox_results.update(loss_bbox=loss_bbox)
return bbox_results
def _bbox_forward(self, query_roi_feats: Tensor,
support_roi_feats: Tensor) -> Dict:
"""Box head forward function used in both training and testing.
Args:
query_roi_feats (Tensor): Query roi features with shape (N, C).
support_roi_feats (Tensor): Support features with shape (1, C).
Returns:
dict: A dictionary of predicted results.
"""
# feature aggregation
roi_feats = self.aggregation_layer(
query_feat=query_roi_feats.unsqueeze(-1).unsqueeze(-1),
support_feat=support_roi_feats.view(1, -1, 1, 1))[0]
cls_score, bbox_pred = self.bbox_head(
roi_feats.squeeze(-1).squeeze(-1), query_roi_feats)
bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred)
return bbox_results
def simple_test_bboxes(
self,
query_feats: List[Tensor],
support_feats_dict: Dict,
query_img_metas: List[Dict],
proposals: List[Tensor],
rcnn_test_cfg: ConfigDict,
rescale: bool = False) -> Tuple[List[Tensor], List[Tensor]]:
"""Test only det bboxes without augmentation.
Args:
query_feats (list[Tensor]): Features of query image,
each item with shape (N, C, H, W).
support_feats_dict (dict[int, Tensor]) Dict of support features
used for inference only, each key is the class id and value is
the support template features with shape (1, C).
query_img_metas (list[dict]): list of image info dict where each
dict has: `img_shape`, `scale_factor`, `flip`, and may also
contain `filename`, `ori_shape`, `pad_shape`, and
`img_norm_cfg`. For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
proposals (list[Tensor]): Region proposals.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
rescale (bool): If True, return boxes in original image space.
Default: False.
Returns:
tuple[list[Tensor], list[Tensor]]: Each tensor in first list
with shape (num_boxes, 4) and with shape (num_boxes, )
in second list. The length of both lists should be equal
to batch_size.
"""
img_shapes = tuple(meta['img_shape'] for meta in query_img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in query_img_metas)
rois = bbox2roi(proposals)
query_roi_feats = self.extract_query_roi_feat(query_feats, rois)
cls_scores_dict, bbox_preds_dict = {}, {}
num_classes = self.bbox_head.num_classes
for class_id in support_feats_dict.keys():
support_feat = support_feats_dict[class_id]
support_feat_rec, support_feat_inv, _, mu, log_var = self.vae(
support_feat)
bbox_results = self._bbox_forward(
query_roi_feats, support_feat_inv.sigmoid())
cls_scores_dict[class_id] = \
bbox_results['cls_score'][:, class_id:class_id + 1]
bbox_preds_dict[class_id] = \
bbox_results['bbox_pred'][:, class_id * 4:(class_id + 1) * 4]
# the official code use the first class background score as final
# background score, while this code use average of all classes'
# background scores instead.
if cls_scores_dict.get(num_classes, None) is None:
cls_scores_dict[num_classes] = \
bbox_results['cls_score'][:, -1:]
else:
cls_scores_dict[num_classes] += \
bbox_results['cls_score'][:, -1:]
cls_scores_dict[num_classes] /= len(support_feats_dict.keys())
cls_scores = [
cls_scores_dict[i] if i in cls_scores_dict.keys() else
torch.zeros_like(cls_scores_dict[list(cls_scores_dict.keys())[0]])
for i in range(num_classes + 1)
]
bbox_preds = [
bbox_preds_dict[i] if i in bbox_preds_dict.keys() else
torch.zeros_like(bbox_preds_dict[list(bbox_preds_dict.keys())[0]])
for i in range(num_classes)
]
cls_score = torch.cat(cls_scores, dim=1)
bbox_pred = torch.cat(bbox_preds, dim=1)
# split batch bbox prediction back to each image
num_proposals_per_img = tuple(len(p) for p in proposals)
rois = rois.split(num_proposals_per_img, 0)
cls_score = cls_score.split(num_proposals_per_img, 0)
bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
# apply bbox post-processing to each image individually
det_bboxes = []
det_labels = []
for i in range(len(proposals)):
det_bbox, det_label = self.bbox_head.get_bboxes(
rois[i],
cls_score[i],
bbox_pred[i],
img_shapes[i],
scale_factors[i],
rescale=rescale,
cfg=rcnn_test_cfg)
det_bboxes.append(det_bbox)
det_labels.append(det_label)
return det_bboxes, det_labels
| csuhan/VFA | vfa/vfa_roi_head.py | vfa_roi_head.py | py | 12,474 | python | en | code | 56 | github-code | 36 |
35478284383 | import os
import glob
import h5py
import json
import copy
import torch
import librosa
import numpy as np
import soundfile as sf
import speech_recognition as sr
from jiwer import wer
from tqdm import tqdm
from scipy import signal
from trainer import Trainer
from hps.hps import hp, Hps
from torch.autograd import Variable
from preprocess import get_spectrograms
from model.tacotron.text.symbols import symbols
############
# CONSTANT #
############
# Minimum spectrogram length (in frames) that an utterance is zero-padded to
# before conversion/encoding (see convert()/encode()). get_trainer() overrides
# this module-level value with the model's seg_len when enc_mode == 'gumbel_t'.
MIN_LEN = 9
def griffin_lim(spectrogram):
    """Estimate phase for a magnitude spectrogram via Griffin-Lim.

    Runs hp.n_iter refinement passes, alternating ISTFT/STFT while
    keeping the given magnitudes, and returns the real waveform.
    """
    def _istft(mag):
        # mag: [f, t] -> time-domain signal
        return librosa.istft(mag, hp.hop_length, win_length=hp.win_length, window="hann")

    estimate = copy.deepcopy(spectrogram)
    for _ in range(hp.n_iter):
        waveform = _istft(estimate)
        stft_out = librosa.stft(waveform, hp.n_fft, hp.hop_length, win_length=hp.win_length)
        # Keep the target magnitudes, adopt the re-estimated phase.
        phase = stft_out / np.maximum(1e-8, np.abs(stft_out))
        estimate = spectrogram * phase
    return np.real(_istft(estimate))
def spectrogram2wav(mag):
    """Invert a normalized (T, F) magnitude spectrogram to a waveform.

    Undoes dB normalization and pre-emphasis, reconstructs phase with
    Griffin-Lim, and trims leading/trailing silence.
    """
    spec = mag.T  # back to [f, t]
    # De-normalize from [0, 1] back to dB, then dB -> linear amplitude.
    spec = (np.clip(spec, 0, 1) * hp.max_db) - hp.max_db + hp.ref_db
    spec = np.power(10.0, spec * 0.05)
    wav = griffin_lim(spec)
    # Undo the pre-emphasis filter applied at feature-extraction time.
    wav = signal.lfilter([1], [1, -hp.preemphasis], wav)
    wav, _ = librosa.effects.trim(wav)
    return wav.astype(np.float32)
def synthesis(f0, sp, ap, sr=16000):
    # Synthesize a waveform from WORLD vocoder features: f0 contour, spectral
    # envelope (sp) and aperiodicity (ap), at sample rate `sr`.
    # NOTE(review): `pw` (pyworld) is never imported in this file, so calling
    # this function raises NameError — confirm whether `import pyworld as pw`
    # was dropped or this function is dead code.
    y = pw.synthesize(f0.astype(np.float64), sp.astype(np.float64), ap.astype(np.float64), sr, pw.default_frame_period)
    return y
def convert_x(x, c, trainer, enc_only, verbose=False):
    """Convert spectrogram `x` to speaker index `c` through the trainer.

    Returns (converted_spectrogram, encoder_output), both squeezed back
    to 2-D and transposed to (time, feature) order.
    """
    target_id = Variable(torch.from_numpy(np.array([c]))).cuda()
    spec_batch = torch.from_numpy(np.expand_dims(x, axis=0)).type(torch.FloatTensor)
    converted, enc = trainer.test_step(spec_batch, target_id, enc_only=enc_only, verbose=verbose)
    converted = converted.squeeze(axis=0).transpose((1, 0))
    encoding = enc.squeeze(axis=0).transpose((1, 0))
    return converted, encoding
def encode_x(x, trainer):
    """Run only the encoder on spectrogram `x`.

    Returns the encoding squeezed to 2-D and transposed to
    (time, feature) order.
    """
    spec_batch = torch.from_numpy(np.expand_dims(x, axis=0)).type(torch.FloatTensor)
    encoding = trainer.encoder_test_step(spec_batch)
    return encoding.squeeze(axis=0).transpose((1, 0))
def get_trainer(hps_path, model_path, g_mode, enc_mode, clf_path):
    """Build a Trainer from a hyper-parameter file and load its weights.

    Side effect: overrides the module-level MIN_LEN with the model's
    segment length when the hyper-parameters use the 'gumbel_t' encoder.
    """
    hps = Hps(hps_path).get_tuple()
    global MIN_LEN
    if hps.enc_mode == 'gumbel_t':
        MIN_LEN = hps.seg_len
    trainer = Trainer(hps, None, g_mode, enc_mode)
    trainer.load_model(model_path, load_model_list=hps.load_model_list, clf_path=clf_path)
    return trainer
def asr(fname):
    """Transcribe a wav file to English text via Google's speech API."""
    recognizer = sr.Recognizer()
    with sr.WavFile(fname) as source:
        audio = recognizer.listen(source)
    return recognizer.recognize_google(audio, language='en')
def compare_asr(s_wav, t_wav):
    """Score a converted utterance against its source via ASR transcripts.

    Transcribes both wavs with `asr` and returns [WER, CER] (the CER is
    computed by treating each non-space character as a token).

    Returns:
        [1., 1.] when the converted audio is unintelligible to the
        recognizer; [-1., -1.] when recognition fails for any other
        reason (e.g. API/network error). Both sentinels match the
        filtering convention used by the callers.
    """
    try:
        reference = asr(s_wav)
        hypothesis = asr(t_wav)
        word_err = wer(reference, hypothesis)
        char_err = wer(' '.join(c for c in reference if c != ' '),
                       ' '.join(c for c in hypothesis if c != ' '))
        # Return a list (not a tuple) so all branches have the same type.
        return [word_err, char_err]
    except sr.UnknownValueError:
        return [1., 1.]
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed, while keeping best-effort behavior.
        return [-1., -1.]
def parse_encodings(encodings):
    """Format each encoding vector as a space-separated string of ints.

    The previous version enumerated each vector but never used the index;
    the enumerate has been dropped.
    """
    return [' '.join(str(int(e)) for e in enc) for enc in encodings]
def write_encodings(path, encodings):
    """Write one space-separated line of integer codes per encoding to `path`.

    Replaces the manual "space unless last element" loop with `str.join`,
    producing byte-identical output.
    """
    with open(path, 'w') as file:
        for enc in encodings:
            file.write(' '.join(str(int(e)) for e in enc) + '\n')
def convert(trainer,
            seg_len,
            src_speaker_spec,
            src_speaker,
            tar_speaker,
            utt_id,
            speaker2id,
            result_dir,
            enc_only=True,
            save=['wav', 'enc']):  # mutable default is only read, never mutated
    """Convert one utterance's spectrogram to the target speaker's voice.

    The spectrogram is zero-padded up to MIN_LEN frames if too short, then
    converted in a single pass (when it fits in one segment) or segment by
    segment of length `seg_len`, and finally vocoded with Griffin-Lim.

    Args:
        trainer: loaded Trainer whose test_step performs the conversion.
        seg_len: segment length (frames) the model converts at a time.
        src_speaker_spec: source linear spectrogram, indexed (time, freq).
        src_speaker: source speaker id string (used for encoding filenames).
        tar_speaker: target speaker id string (key into `speaker2id`).
        utt_id: utterance id, used in output filenames.
        speaker2id: dict mapping speaker id string -> integer class index.
        result_dir: directory where outputs are written (when saving).
        enc_only: forwarded to the trainer's test_step.
        save: subset of ['wav', 'enc'] selecting which artifacts to write.
            When empty, raw arrays are returned instead of file paths.

    Returns:
        (wav_path, num_converted_frames) when saving, otherwise
        (wav_data, encodings).
    """
    # pad spec to minimum len
    PADDED = False
    if len(src_speaker_spec) < MIN_LEN:
        padding = np.zeros((MIN_LEN - src_speaker_spec.shape[0], src_speaker_spec.shape[1]))
        src_speaker_spec = np.concatenate((src_speaker_spec, padding), axis=0)
        PADDED = True
    if len(src_speaker_spec) <= seg_len:
        # Short enough for a single forward pass.
        converted_results, encodings = convert_x(src_speaker_spec, speaker2id[tar_speaker], trainer, enc_only=enc_only)
        if PADDED:
            # MIN_LEN//8 suggests the encoder downsamples by 8 —
            # presumably one code per 8 frames; confirm against the model.
            encodings = encodings[:MIN_LEN//8] # truncate the encoding of zero paddings
    else:
        # Convert segment by segment and concatenate the results.
        converted_results = []
        encodings = []
        for idx in range(0, len(src_speaker_spec), seg_len):
            if idx + (seg_len*2) > len(src_speaker_spec):
                # Final (possibly longer) chunk: take everything remaining.
                # NOTE(review): the -1 drops the very last frame — confirm
                # this is intentional.
                spec_frag = src_speaker_spec[idx:-1]
            else:
                spec_frag = src_speaker_spec[idx:idx+seg_len]
            if len(spec_frag) >= seg_len:
                converted_x, enc = convert_x(spec_frag, speaker2id[tar_speaker], trainer, enc_only=enc_only)
                converted_results.append(converted_x)
                encodings.append(enc)
            elif idx == 0:
                # First fragment shorter than seg_len should be impossible
                # after the MIN_LEN padding above.
                raise RuntimeError('Please check if input is too short!')
        converted_results = np.concatenate(converted_results, axis=0)
        encodings = np.concatenate(encodings, axis=0)
    wav_data = spectrogram2wav(converted_results)
    if len(save) != 0:
        if 'wav' in save:
            # Converted waveform is named after the *target* speaker.
            wav_path = os.path.join(result_dir, f'{tar_speaker}_{utt_id}.wav')
            sf.write(wav_path, wav_data, hp.sr, 'PCM_16')
        if 'enc' in save:
            # Encodings are named after the *source* speaker.
            enc_path = os.path.join(result_dir, f'{src_speaker}_{utt_id}.txt')
            write_encodings(enc_path, encodings)
        # NOTE(review): wav_path is unbound if save is non-empty but lacks
        # 'wav' — callers always include 'wav'; confirm.
        return wav_path, len(converted_results)
    else:
        return wav_data, encodings
def encode(src_speaker_spec, trainer, seg_len, s_speaker=None, utt_id=None, result_dir=None, save=True):
    """Extract encoder codes for one utterance's spectrogram.

    Mirrors `convert` but runs only the encoder: the spectrogram is
    zero-padded up to MIN_LEN frames if too short, then encoded in one
    pass or segment by segment of length `seg_len`.

    When `save` is True the codes are written to
    `<result_dir>/<s_speaker>_<utt_id>.txt` and nothing is returned;
    otherwise the concatenated encoding array is returned.
    """
    if save:
        # These three are only needed to build the output filename.
        assert result_dir != None
        assert s_speaker != None
        assert utt_id != None

    # pad spec to minimum len
    PADDED = False
    if len(src_speaker_spec) < MIN_LEN:
        padding = np.zeros((MIN_LEN - src_speaker_spec.shape[0], src_speaker_spec.shape[1]))
        src_speaker_spec = np.concatenate((src_speaker_spec, padding), axis=0)
        PADDED = True

    if len(src_speaker_spec) <= seg_len:
        encodings = encode_x(src_speaker_spec, trainer)
        if PADDED:
            # MIN_LEN//8 suggests an 8x encoder downsampling — confirm.
            encodings = encodings[:MIN_LEN//8] # truncate the encoding of zero paddings
    else:
        encodings = []
        for idx in range(0, len(src_speaker_spec), seg_len):
            if idx + (seg_len*2) > len(src_speaker_spec):
                # Final chunk: take everything remaining (drops last frame —
                # same NOTE as in convert()).
                spec_frag = src_speaker_spec[idx:-1]
            else:
                spec_frag = src_speaker_spec[idx:idx+seg_len]
            if len(spec_frag) >= seg_len:
                enc = encode_x(spec_frag, trainer)
                encodings.append(enc)
            elif idx == 0:
                raise RuntimeError('Please check if input is too short!')

        encodings = np.concatenate(encodings, axis=0)

    if save:
        enc_path = os.path.join(result_dir, f"{s_speaker}_{utt_id}.txt")
        write_encodings(enc_path, encodings)
    else:
        return encodings
def test_from_list(trainer, seg_len, synthesis_list, data_path, speaker2id_path, result_dir, enc_only, flag='test', run_asr=False):
    """Resynthesize every (source utterance -> target speaker) pair in a list.

    Each line of `synthesis_list` has the form "<dir>/<sid>_<uttid>... <tid>".
    Source spectrograms are read from the HDF5 file at `data_path` (under
    'test/<sid>/<uttid>/lin') and converted wavs are written to
    `<result_dir>/<flag>/`. When `run_asr` is set, utterances at least 3 s
    long are transcribed (original vs converted) and mean WER/CER printed.
    """
    with open(speaker2id_path, 'r') as f_json:
        speaker2id = json.load(f_json)

    # Parse the synthesis list into (source speaker, utterance, target) dicts.
    feeds = []
    with open(synthesis_list, 'r') as f:
        file = f.readlines()
        for line in file:
            line = line.split('\n')[0].split(' ')
            feeds.append({'s_id' : line[0].split('/')[1].split('_')[0],
                          'utt_id' : line[0].split('/')[1].split('_')[1],
                          't_id' : line[1], })
    print('[Tester] - Number of files to be resynthesize: ', len(feeds))

    dir_path = os.path.join(result_dir, f'{flag}/')
    os.makedirs(dir_path, exist_ok=True)

    err_results = []
    with h5py.File(data_path, 'r') as f_h5:
        for feed in tqdm(feeds):
            # Only the wav is saved here (no encodings).
            conv_audio, n_frames = convert(trainer,
                                           seg_len,
                                           src_speaker_spec=f_h5[f"test/{feed['s_id']}/{feed['utt_id']}/lin"][()],
                                           src_speaker=feed['s_id'],
                                           tar_speaker=feed['t_id'],
                                           utt_id=feed['utt_id'],
                                           speaker2id=speaker2id,
                                           result_dir=dir_path,
                                           enc_only=enc_only,
                                           save=['wav'])
            # Overwrite with the *original* (unpadded/untruncated) frame count
            # so the duration check below reflects the source utterance.
            n_frames = len(f_h5[f"test/{feed['s_id']}/{feed['utt_id']}/lin"][()])
            if run_asr:
                # Only score clips of at least 3 seconds — presumably because
                # very short clips transcribe unreliably; confirm.
                if hp.frame_shift * (n_frames - 1) + hp.frame_length >= 3.0:
                    # Resynthesize the original as a temporary reference wav.
                    orig_audio = spectrogram2wav(f_h5[f"test/{feed['s_id']}/{feed['utt_id']}/lin"][()])
                    sf.write('orig_audio.wav', orig_audio, hp.sr, 'PCM_16')
                    err_results.append(compare_asr(s_wav='orig_audio.wav', t_wav=conv_audio))
                    os.remove(path='orig_audio.wav')

    if run_asr:
        err_mean = np.mean(err_results, axis=0)
        print('WERR: {:.3f} CERR: {:.3f}, computed over {} samples'.format(err_mean[0], err_mean[1], len(err_results)))
def cross_test(trainer, seg_len, data_path, speaker2id_path, result_dir, enc_only, flag):
    """Convert every utterance of every source speaker into every target speaker's voice.

    Sources are the speakers of the 'test' split (or 'S'-prefixed speakers of
    the 'train' split when flag == 'train'); targets are the 'V'-prefixed
    speakers of the train split. Results go to result_dir/<src>_to_<tar>/.
    """
    with h5py.File(data_path, 'r') as f_h5:
        with open(speaker2id_path, 'r') as f_json:
            speaker2id = json.load(f_json)
        if flag == 'test':
            source_speakers = sorted(list(f_h5['test'].keys()))
        elif flag == 'train':
            # 'S'-prefixed ids are source speakers within the training split.
            source_speakers = [s for s in sorted(list(f_h5['train'].keys())) if s[0] == 'S']
        # 'V'-prefixed ids are the target (voice) speakers.
        target_speakers = [s for s in sorted(list(f_h5['train'].keys())) if s[0] == 'V']
        print('[Tester] - Testing on the {}ing set...'.format(flag))
        print('[Tester] - Source speakers: %i, Target speakers: %i' % (len(source_speakers), len(target_speakers)))
        print('[Tester] - Converting all testing utterances from source speakers to target speakers, this may take a while...')
        for src_speaker in tqdm(source_speakers):
            for tar_speaker in target_speakers:
                assert src_speaker != tar_speaker
                dir_path = os.path.join(result_dir, f'{src_speaker}_to_{tar_speaker}')
                os.makedirs(dir_path, exist_ok=True)
                # NOTE(review): utterances are always read from the 'test' group,
                # even when flag == 'train' -- confirm this is intended.
                for utt_id in f_h5[f'test/{src_speaker}']:
                    src_speaker_spec = f_h5[f'test/{src_speaker}/{utt_id}/lin'][()]
                    convert(trainer,
                            seg_len,
                            src_speaker_spec,
                            tar_speaker,
                            utt_id=utt_id,
                            speaker2id=speaker2id,
                            result_dir=dir_path,
                            enc_only=enc_only)
def test_single(trainer, seg_len, speaker2id_path, result_dir, enc_only, s_speaker, t_speaker):
    """Convert one registered sample utterance of `s_speaker` into `t_speaker`'s voice.

    Writes the converted waveform (result.wav) and its discrete encodings
    (result.txt) into `result_dir`, then prints the ASR word/char error between
    the source recording and the converted audio.

    Raises:
        NotImplementedError: if no sample path is registered for `s_speaker`.
    """
    with open(speaker2id_path, 'r') as f_json:
        speaker2id = json.load(f_json)
    # Registry of the sample utterance used for each supported source speaker
    # (replaces the previous hard-coded if/elif chain; add entries here).
    sample_paths = {
        'S015': './data/english/train/unit/S015_0361841101.wav',
        'S119': './data/english/train/unit/S119_1561145062.wav',
        'S130': './data/english/test/S130_3516588097.wav',
        'S089': './data/english/test/S089_1810826781.wav',
        'S378': './data/surprise/test/S378_117437.wav',
    }
    if s_speaker not in sample_paths:
        raise NotImplementedError('Please modify path manually!')
    filename = sample_paths[s_speaker]
    _, spec = get_spectrograms(filename)
    wav_data, encodings = convert(trainer,
                                  seg_len,
                                  src_speaker_spec=spec,
                                  src_speaker=s_speaker,
                                  tar_speaker=t_speaker,
                                  utt_id='',
                                  speaker2id=speaker2id,
                                  result_dir=result_dir,
                                  enc_only=enc_only,
                                  save=[])
    sf.write(os.path.join(result_dir, 'result.wav'), wav_data, hp.sr, 'PCM_16')
    write_encodings(os.path.join(result_dir, 'result.txt'), encodings)
    err_result = compare_asr(filename, os.path.join(result_dir, 'result.wav'))
    print('Testing on source speaker {} and target speaker {}, output shape: {}'.format(s_speaker, t_speaker, wav_data.shape))
    print('Comparing ASR result - WERR: {:.3f} CERR: {:.3f}'.format(err_result[0], err_result[1]))
def test_encode(trainer, seg_len, test_path, data_path, result_dir, flag='test'):
    """Dump discrete encodings for every wav under `test_path`, reading the
    corresponding spectrograms from the h5 dataset at `data_path`."""
    wav_paths = sorted(glob.glob(os.path.join(test_path, '*.wav')))
    # A wav file is named "<speaker>_<utterance>.wav"; recover both ids.
    feeds = [{'s_id': base.split('_')[0],
              'utt_id': base.split('_')[1].split('.')[0]}
             for base in (p.split('/')[-1] for p in wav_paths)]
    print('[Tester] - Number of files to encoded: ', len(feeds))
    out_dir = os.path.join(result_dir, f'{flag}/')
    os.makedirs(out_dir, exist_ok=True)
    with h5py.File(data_path, 'r') as f_h5:
        for feed in tqdm(feeds):
            spec = f_h5[f"test/{feed['s_id']}/{feed['utt_id']}/lin"][()]
            encode(spec, trainer, seg_len, s_speaker=feed['s_id'], utt_id=feed['utt_id'], result_dir=out_dir)
def target_classify(trainer, seg_len, synthesis_list, result_dir, flag='test'):
    """Run the trainer's speaker classifier over previously converted wavs and
    print how often `seg_len`-frame segments are classified as the intended
    target speaker.

    Note: accuracy is accumulated per segment, not per utterance.
    """
    dir_path = os.path.join(result_dir, f'{flag}/')
    with open(synthesis_list, 'r') as f:
        file = f.readlines()
    acc = []
    for line in file:
        # get wav path
        line = line.split('\n')[0].split(' ')
        utt_id = line[0].split('/')[1].split('_')[1]
        tar_speaker = line[1]
        wav_path = os.path.join(dir_path, f'{tar_speaker}_{utt_id}.wav')
        # get spectrogram
        _, spec = get_spectrograms(wav_path)
        # padding spec (zero-pad short utterances up to one full segment)
        if len(spec) < seg_len:
            padding = np.zeros((seg_len - spec.shape[0], spec.shape[1]))
            spec = np.concatenate((spec, padding), axis=0)
        # classification: slice the spectrogram into seg_len windows and
        # classify each window independently
        logits = []
        for idx in range(0, len(spec), seg_len):
            # NOTE(review): the last window uses spec[idx:-1], dropping the
            # final frame -- confirm this off-by-one is intended.
            if idx + (seg_len*2) > len(spec):
                spec_frag = spec[idx:-1]
            else:
                spec_frag = spec[idx:idx+seg_len]
            if len(spec_frag) >= seg_len:
                x = torch.from_numpy(np.expand_dims(spec_frag[:seg_len, :], axis=0)).type(torch.FloatTensor)
                logit = trainer.classify(x)
                logits.append(logit)
            elif idx == 0:
                raise RuntimeError('Please check if input is too short!')
        logits = np.concatenate(logits, axis=0)
        #logits = np.sum(logits, axis = 0)
        for logit in logits:
            # argmax 0 -> V001, 1 -> V002; anything else is unrecognized.
            am = logit.argmax()
            if am == 0:
                clf_speaker = 'V001'
            elif am ==1:
                clf_speaker = 'V002'
            else:
                clf_speaker = 'None'
            if clf_speaker == tar_speaker:
                acc.append(1)
                #print('[info]: {} is classified to {}'.format(wav_path, clf_speaker))
            else:
                acc.append(0)
                #print('[Error]: {} is classified to {}'.format(wav_path, clf_speaker))
    print('Classification Acc: {:.3f}'.format(np.sum(acc)/float(len(acc))))
def encode_for_tacotron(target, trainer, seg_len, multi2idx_path, wav_path, result_path):
    """Encode all wavs of speaker `target`, map each unique discrete unit to a
    symbol, and write a Tacotron-style meta file "<sid>_<uid>|<symbols...>".

    Side effects: writes the encoding->symbol mapping to `multi2idx_path` and
    the meta file to `result_path` (with 'target' in the path replaced by the
    speaker id).
    """
    wavs = sorted(glob.glob(os.path.join(wav_path, '*.wav')))
    print('[Converter] - Number of wav files to encoded: ', len(wavs))
    names = []
    enc_outputs = []
    # NOTE(review): the loop variable shadows the `wav_path` parameter.
    for wav_path in tqdm(wavs):
        name = wav_path.split('/')[-1].split('.')[0]
        s_id = name.split('_')[0]
        u_id = name.split('_')[1]
        if s_id != target:
            continue
        y, sr = librosa.load(wav_path)
        d = librosa.get_duration(y=y, sr=sr)
        if d > 25:
            continue # --> this filter out too long utts, 3523/3533 for V001 and V002 together in the english dataset
        _, spec = get_spectrograms(wav_path)
        encodings = encode(spec, trainer, seg_len, save=False)
        encodings = parse_encodings(encodings)
        enc_outputs.append(encodings)
        names.append((s_id, u_id))
    # build encodings to character mapping
    idx = 0
    multi2idx = {}
    print('[Converter] - Building encoding to symbol mapping...')
    for encodings in tqdm(enc_outputs):
        for encoding in encodings:
            if str(encoding) not in multi2idx:
                multi2idx[str(encoding)] = symbols[idx]
                idx += 1
    print('[Converter] - Number of unique discret units: ', len(multi2idx))
    with open(multi2idx_path, 'w') as file:
        file.write(json.dumps(multi2idx))
    result_path = result_path.replace('target', target)
    print('[Converter] - Writing to meta file...')
    with open(result_path, 'w') as file:
        for i, encodings in enumerate(enc_outputs):
            # Oddly parenthesized but correct: u_id is a str, so u_id + '|' works.
            file.write(str(names[i][0]) + '_' + str(names[i][1] + '|'))
            for encoding in encodings:
                file.write(multi2idx[str(encoding)])
            file.write('\n')
| andi611/ZeroSpeech-TTS-without-T | convert.py | convert.py | py | 14,769 | python | en | code | 109 | github-code | 36 |
22565647008 | import numpy as np
import torch as th
from .gaussian_diffusion import GaussianDiffusion, mean_flat
class KarrasDenoiser:
    """Karras et al. (2022, "EDM") denoiser wrapper: preconditioning scalings,
    the weighted-MSE training loss, and the denoising transform."""
    def __init__(self, sigma_data: float = 0.5):
        # sigma_data: assumed std of the data distribution, used by the
        # preconditioning scalings below.
        self.sigma_data = sigma_data
    def get_snr(self, sigmas):
        # Signal-to-noise ratio of noise level sigma is sigma**-2.
        return sigmas**-2
    def get_sigmas(self, sigmas):
        # Identity: sigmas are already in Karras parameterization.
        return sigmas
    def get_scalings(self, sigma):
        # EDM preconditioning coefficients: the network sees c_in*x, and the
        # denoised estimate is c_skip*x + c_out*model_output.
        c_skip = self.sigma_data**2 / (sigma**2 + self.sigma_data**2)
        c_out = sigma * self.sigma_data / (sigma**2 + self.sigma_data**2) ** 0.5
        c_in = 1 / (sigma**2 + self.sigma_data**2) ** 0.5
        return c_skip, c_out, c_in
    def training_losses(self, model, x_start, sigmas, model_kwargs=None, noise=None):
        """Return a dict with 'mse' (preconditioned-target MSE), 'xs_mse'
        (denoised-vs-clean MSE, for monitoring) and 'loss' per sample."""
        if model_kwargs is None:
            model_kwargs = {}
        if noise is None:
            noise = th.randn_like(x_start)
        terms = {}
        dims = x_start.ndim
        # Forward process: add noise scaled by the per-sample sigma.
        x_t = x_start + noise * append_dims(sigmas, dims)
        c_skip, c_out, _ = [append_dims(x, dims) for x in self.get_scalings(sigmas)]
        model_output, denoised = self.denoise(model, x_t, sigmas, **model_kwargs)
        # The network regresses this preconditioned target, equivalent to
        # predicting x_start through the c_skip/c_out decomposition.
        target = (x_start - c_skip * x_t) / c_out
        terms["mse"] = mean_flat((model_output - target) ** 2)
        terms["xs_mse"] = mean_flat((denoised - x_start) ** 2)
        # NOTE(review): "vb" is never inserted into `terms` above, so this
        # branch is currently dead (kept for parity with other loss dicts).
        if "vb" in terms:
            terms["loss"] = terms["mse"] + terms["vb"]
        else:
            terms["loss"] = terms["mse"]
        return terms
    def denoise(self, model, x_t, sigmas, **model_kwargs):
        """Apply the preconditioned model; returns (raw model output, denoised x0 estimate)."""
        c_skip, c_out, c_in = [append_dims(x, x_t.ndim) for x in self.get_scalings(sigmas)]
        # Timestep conditioning: 0.25*log(sigma) scaled by 1000; 1e-44 guards log(0).
        rescaled_t = 1000 * 0.25 * th.log(sigmas + 1e-44)
        model_output = model(c_in * x_t, rescaled_t, **model_kwargs)
        denoised = c_out * model_output + c_skip * x_t
        return model_output, denoised
class GaussianToKarrasDenoiser:
    """Adapter that lets a discrete-timestep GaussianDiffusion model be driven
    by continuous Karras sigmas, via an alphas_cumprod -> t interpolation."""
    def __init__(self, model, diffusion):
        from scipy import interpolate
        self.model = model
        self.diffusion = diffusion
        # Invert the (monotone) alphas_cumprod schedule to map a cumulative
        # alpha back to a (fractional) timestep index.
        self.alpha_cumprod_to_t = interpolate.interp1d(
            diffusion.alphas_cumprod, np.arange(0, diffusion.num_timesteps)
        )
    def sigma_to_t(self, sigma):
        """Map a Karras sigma to the nearest diffusion timestep, clamping to
        the ends of the schedule (alpha_cumprod = 1/(sigma^2+1))."""
        alpha_cumprod = 1.0 / (sigma**2 + 1)
        if alpha_cumprod > self.diffusion.alphas_cumprod[0]:
            return 0
        elif alpha_cumprod <= self.diffusion.alphas_cumprod[-1]:
            return self.diffusion.num_timesteps - 1
        else:
            return float(self.alpha_cumprod_to_t(alpha_cumprod))
    def denoise(self, x_t, sigmas, clip_denoised=True, model_kwargs=None):
        """Return (None, pred_xstart) to mirror KarrasDenoiser.denoise's shape."""
        t = th.tensor(
            [self.sigma_to_t(sigma) for sigma in sigmas.cpu().numpy()],
            dtype=th.long,
            device=sigmas.device,
        )
        # Rescale the input to the variance-preserving parameterization.
        c_in = append_dims(1.0 / (sigmas**2 + 1) ** 0.5, x_t.ndim)
        out = self.diffusion.p_mean_variance(
            self.model, x_t * c_in, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
        )
        return None, out["pred_xstart"]
def karras_sample(*args, **kwargs):
    """Run the progressive Karras sampler to completion and return only the
    final sample tensor (arguments mirror karras_sample_progressive)."""
    final = None
    for step in karras_sample_progressive(*args, **kwargs):
        final = step["x"]
    return final
def karras_sample_progressive(
    diffusion,
    model,
    shape,
    steps,
    clip_denoised=True,
    progress=False,
    model_kwargs=None,
    device=None,
    sigma_min=0.002,
    sigma_max=80,  # higher for highres?
    rho=7.0,
    sampler="heun",
    s_churn=0.0,
    s_tmin=0.0,
    s_tmax=float("inf"),
    s_noise=1.0,
    guidance_scale=0.0,
):
    """Generator that yields intermediate sampler states ({"x": ..., ...})
    while sampling `shape`-shaped data with a Karras-style ODE sampler.

    Works with either a KarrasDenoiser or a plain GaussianDiffusion (wrapped
    via GaussianToKarrasDenoiser). `guidance_scale` enables classifier-free
    guidance by running cond/uncond batches side by side.
    """
    sigmas = get_sigmas_karras(steps, sigma_min, sigma_max, rho, device=device)
    # Start from pure noise at the highest sigma.
    x_T = th.randn(*shape, device=device) * sigma_max
    sample_fn = {"heun": sample_heun, "dpm": sample_dpm, "ancestral": sample_euler_ancestral}[
        sampler
    ]
    # The churn knobs only apply to the deterministic (non-ancestral) samplers.
    if sampler != "ancestral":
        sampler_args = dict(s_churn=s_churn, s_tmin=s_tmin, s_tmax=s_tmax, s_noise=s_noise)
    else:
        sampler_args = {}
    if isinstance(diffusion, KarrasDenoiser):
        def denoiser(x_t, sigma):
            _, denoised = diffusion.denoise(model, x_t, sigma, **model_kwargs)
            if clip_denoised:
                denoised = denoised.clamp(-1, 1)
            return denoised
    elif isinstance(diffusion, GaussianDiffusion):
        model = GaussianToKarrasDenoiser(model, diffusion)
        def denoiser(x_t, sigma):
            _, denoised = model.denoise(
                x_t, sigma, clip_denoised=clip_denoised, model_kwargs=model_kwargs
            )
            return denoised
    else:
        raise NotImplementedError
    if guidance_scale != 0 and guidance_scale != 1:
        # Classifier-free guidance: the batch is assumed to hold the
        # conditional half followed by the unconditional half.
        def guided_denoiser(x_t, sigma):
            x_t = th.cat([x_t, x_t], dim=0)
            sigma = th.cat([sigma, sigma], dim=0)
            x_0 = denoiser(x_t, sigma)
            cond_x_0, uncond_x_0 = th.split(x_0, len(x_0) // 2, dim=0)
            x_0 = uncond_x_0 + guidance_scale * (cond_x_0 - uncond_x_0)
            return x_0
    else:
        guided_denoiser = denoiser
    for obj in sample_fn(
        guided_denoiser,
        x_T,
        sigmas,
        progress=progress,
        **sampler_args,
    ):
        if isinstance(diffusion, GaussianDiffusion):
            yield diffusion.unscale_out_dict(obj)
        else:
            yield obj
def get_sigmas_karras(n, sigma_min, sigma_max, rho=7.0, device="cpu"):
    """Noise schedule of Karras et al. (2022): interpolate linearly between the
    rho-th roots of sigma_max and sigma_min, raise back to the rho-th power,
    and terminate the schedule with an exact zero."""
    t = th.linspace(0, 1, n)
    hi = sigma_max ** (1 / rho)
    lo = sigma_min ** (1 / rho)
    sigmas = (hi + t * (lo - hi)) ** rho
    # Equivalent to append_zero(sigmas): samplers expect a trailing sigma of 0.
    return th.cat([sigmas, sigmas.new_zeros([1])]).to(device)
def to_d(x, sigma, denoised):
    """Converts a denoiser output to a Karras ODE derivative:
    dx/dsigma = (x - D(x; sigma)) / sigma, broadcast over x's trailing dims."""
    return (x - denoised) / append_dims(sigma, x.ndim)
def get_ancestral_step(sigma_from, sigma_to):
    """Split an ancestral sampling step into a deterministic and a noisy part.

    Returns (sigma_down, sigma_up): step the ODE down to sigma_down, then add
    fresh Gaussian noise with std sigma_up, so that the resulting marginal
    variance matches sigma_to**2 (sigma_down**2 + sigma_up**2 == sigma_to**2).
    """
    var_from = sigma_from**2
    var_to = sigma_to**2
    sigma_up = (var_to * (var_from - var_to) / var_from) ** 0.5
    sigma_down = (var_to - sigma_up**2) ** 0.5
    return sigma_down, sigma_up
@th.no_grad()
def sample_euler_ancestral(model, x, sigmas, progress=False):
    """Ancestral sampling with Euler method steps.

    Yields one dict per step with the pre-step state and the denoised
    prediction, then a final {"x": ..., "pred_xstart": ...} dict.
    """
    # Per-sample sigma broadcast vector (batch size copies of 1).
    s_in = x.new_ones([x.shape[0]])
    indices = range(len(sigmas) - 1)
    if progress:
        from tqdm.auto import tqdm
        indices = tqdm(indices)
    for i in indices:
        denoised = model(x, sigmas[i] * s_in)
        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
        yield {"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "pred_xstart": denoised}
        d = to_d(x, sigmas[i], denoised)
        # Euler method
        dt = sigma_down - sigmas[i]
        x = x + d * dt
        # Ancestral part: re-inject noise up to sigma_up.
        x = x + th.randn_like(x) * sigma_up
    # NOTE(review): the final yield reports x itself as "pred_xstart",
    # unlike the other samplers which report the last denoised estimate.
    yield {"x": x, "pred_xstart": x}
@th.no_grad()
def sample_heun(
    denoiser,
    x,
    sigmas,
    progress=False,
    s_churn=0.0,
    s_tmin=0.0,
    s_tmax=float("inf"),
    s_noise=1.0,
):
    """Implements Algorithm 2 (Heun steps) from Karras et al. (2022).

    s_churn/s_tmin/s_tmax/s_noise control the optional stochastic "churn"
    that temporarily raises sigma before each step.
    """
    # Per-sample sigma broadcast vector (batch size copies of 1).
    s_in = x.new_ones([x.shape[0]])
    indices = range(len(sigmas) - 1)
    if progress:
        from tqdm.auto import tqdm
        indices = tqdm(indices)
    for i in indices:
        # gamma > 0 adds noise (churn) when sigma is inside [s_tmin, s_tmax].
        gamma = (
            min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
        )
        eps = th.randn_like(x) * s_noise
        sigma_hat = sigmas[i] * (gamma + 1)
        if gamma > 0:
            # Raise the noise level from sigmas[i] to sigma_hat.
            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
        denoised = denoiser(x, sigma_hat * s_in)
        d = to_d(x, sigma_hat, denoised)
        yield {"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "pred_xstart": denoised}
        dt = sigmas[i + 1] - sigma_hat
        if sigmas[i + 1] == 0:
            # Euler method
            x = x + d * dt
        else:
            # Heun's method
            x_2 = x + d * dt
            denoised_2 = denoiser(x_2, sigmas[i + 1] * s_in)
            d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
            d_prime = (d + d_2) / 2
            x = x + d_prime * dt
    yield {"x": x, "pred_xstart": denoised}
@th.no_grad()
def sample_dpm(
    denoiser,
    x,
    sigmas,
    progress=False,
    s_churn=0.0,
    s_tmin=0.0,
    s_tmax=float("inf"),
    s_noise=1.0,
):
    """A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)."""
    # Per-sample sigma broadcast vector (batch size copies of 1).
    s_in = x.new_ones([x.shape[0]])
    indices = range(len(sigmas) - 1)
    if progress:
        from tqdm.auto import tqdm
        indices = tqdm(indices)
    for i in indices:
        # gamma > 0 adds noise (churn) when sigma is inside [s_tmin, s_tmax].
        gamma = (
            min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
        )
        eps = th.randn_like(x) * s_noise
        sigma_hat = sigmas[i] * (gamma + 1)
        if gamma > 0:
            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
        denoised = denoiser(x, sigma_hat * s_in)
        d = to_d(x, sigma_hat, denoised)
        yield {"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised}
        # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
        sigma_mid = ((sigma_hat ** (1 / 3) + sigmas[i + 1] ** (1 / 3)) / 2) ** 3
        dt_1 = sigma_mid - sigma_hat
        dt_2 = sigmas[i + 1] - sigma_hat
        x_2 = x + d * dt_1
        denoised_2 = denoiser(x_2, sigma_mid * s_in)
        d_2 = to_d(x_2, sigma_mid, denoised_2)
        x = x + d_2 * dt_2
    yield {"x": x, "pred_xstart": denoised}
def append_dims(x, target_dims):
    """Right-pad the shape of `x` with singleton axes until x.ndim == target_dims.

    Raises ValueError if `x` already has more than `target_dims` dimensions.
    """
    extra = target_dims - x.ndim
    if extra < 0:
        raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less")
    # Indexing with None inserts a new singleton axis per None (no copy).
    index = (Ellipsis,) + (None,) * extra
    return x[index]
def append_zero(x):
    """Return `x` with a single zero (matching dtype/device) appended at the end."""
    zero = x.new_zeros([1])
    return th.cat([x, zero])
| openai/shap-e | shap_e/diffusion/k_diffusion.py | k_diffusion.py | py | 9,973 | python | en | code | 10,619 | github-code | 36 |
23642938118 | from flask import Flask,render_template,request,redirect,session,flash
app = Flask(__name__)
# Secret key signs the session cookie, which flash() relies on.
app.secret_key = 'Farn'
@app.route ('/')
def index():
    # Landing page containing the feedback form that posts to /result.
    return render_template('index.html')
@app.route('/result', methods=['POST'])
def result():
    """Validate the submitted form; on failure flash a message and return to
    the form, on success flash "Success!" and render the result page."""
    name = request.form['name']
    comment = request.form['comment']
    # Guard clauses: reject bad input and bounce the user back to the form.
    if not name:
        flash("Name cannot be empty!")
        return redirect('/')
    if not comment:
        flash("Comments cannot be empty!")
        return redirect('/')
    if len(comment) >= 120:
        flash("Comments cannot be longer than 120 char.!")
        return redirect('/')
    flash("Success!")
    return render_template('result.html', name=name, location=request.form['location'],
                           language=request.form['language'], comment=comment)
app.run(debug = True)
| bmcconchie/DojoAssignments | Python/Flask/python_stack/flask_fundamentals/dataform/server.py | server.py | py | 946 | python | en | code | 0 | github-code | 36 |
31734592611 | from modules import aws_sript, firestorage_code
import os
from flask import Flask, jsonify
from flask import render_template, request, redirect, url_for
from werkzeug.utils import secure_filename
import os, shutil
from flask_cors import CORS
from decorater_file import crossdomain
# NOTE(review): `global` at module level is a no-op; `app` is already global here.
global app
app = Flask(__name__)
CORS(app)
app.config['SECRET_KEY'] = 'the quick brown fox jumps over the lazy dog'
app.config['CORS_HEADERS'] = ['Content-Type']
# NOTE(review): CORS is applied twice (see CORS(app) above); one call should suffice.
cors = CORS(app, resources={r"/*": {"origins": "*"}})
# Create a directory in a known location to uploaded files to.
uploads_dir = os.path.join(app.instance_path, 'uploads')
if not os.path.exists(uploads_dir):
    os.makedirs(uploads_dir)
# Create a directory in a known location to processed files to.
ml_output_dir = os.path.join(app.instance_path, 'ml_output')
if not os.path.exists(ml_output_dir):
    os.makedirs(ml_output_dir)
@app.route('/upload', methods=['GET', 'POST'])
def upload():
    """Accept an uploaded image, run the face-detection model on it, upload
    the result to Firebase storage, and return the public URL as JSON.

    Fix: a GET request previously fell off the end of the function and
    returned None, which makes Flask raise "view function did not return a
    valid response"; non-POST requests now get an explicit JSON 405 reply.
    """
    if request.method == 'POST':
        # save the single "profile" file
        image = request.files['image']
        print(image)
        img_path = os.path.join(uploads_dir, secure_filename(image.filename))
        ml_out_img_path = os.path.join(ml_output_dir, secure_filename(image.filename))
        image.save(img_path)
        print("img_path",img_path)
        print("ml_out_img_path",ml_out_img_path)
        # ML model start
        print("Processing...")
        aws_sript.convert_image(img_path, ml_out_img_path)
        # ML model ends
        print("Image converted successfully")
        img_url = firestorage_code.upload_img(ml_out_img_path)
        print("Image uploaded to firebase storage successfully")
        print("Firebase Image URL ",img_url)
        # cleaning folders
        clean_folders()
        data = {
            "img_url": img_url
        }
        return jsonify(data), {'Access-Control-Allow-Origin': '*'}
    # GET (or any other non-POST) request: explain how to use the endpoint.
    return jsonify({"error": "POST an image file under the 'image' form field"}), 405
def clean_folders():
    """Empty both working folders (uploads and ML output), leaving the
    folders themselves in place; failures are logged, not raised."""
    for folder in (uploads_dir, ml_output_dir):
        for entry in os.listdir(folder):
            path = os.path.join(folder, entry)
            try:
                # Files and symlinks are unlinked; subdirectories are removed recursively.
                if os.path.isfile(path) or os.path.islink(path):
                    os.unlink(path)
                elif os.path.isdir(path):
                    shutil.rmtree(path)
            except Exception as e:
                print('Failed to delete %s. Reason: %s' % (path, e))
if __name__ == '__main__':
app.run(debug=True) | akhlaq1/flask-aws-face-detect-api | app.py | app.py | py | 2,784 | python | en | code | 0 | github-code | 36 |
2312965094 | import unittest
from adt_extension import SwitchDict
class SwitchDictTest(unittest.TestCase):
    """Tests for SwitchDict's default-case lookup behaviour."""
    def setUp(self):
        """Create a fresh SwitchDict before each test."""
        self.switch_dict = SwitchDict({
            'test1': 1,
            'test2': 2,
            'test3': 3,
        })
    def test_overload_getitem(self):
        """Check __getitem__: existing keys, missing keys with a default case,
        and missing keys without one (must raise KeyError)."""
        self.switch_dict.default_case = 'Default case here'
        # Exist index
        self.assertEqual(self.switch_dict['test1'], 1)
        # Default case
        self.assertEqual(self.switch_dict.default_case, 'Default case here')
        # Not exist index
        test_value = self.switch_dict['testtest']
        self.assertEqual(test_value, 'Default case here')
        # Not exist index and not exist default_case
        self.switch_dict.default_case = None
        with self.assertRaises(KeyError) as raises:
            test_value = self.switch_dict['testest']
| alvarofpp/python-adt-extension | tests/test_switchdict.py | test_switchdict.py | py | 926 | python | en | code | 4 | github-code | 36 |
18045830902 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def get_reward_curve(agents):
    """
    Extract the total reward of each episode's agent, in episode order.
    :param agents: list of agents used in training (one per episode)
    :return: numpy array of total rewards
    """
    totals = [agent.reward_total for agent in agents]
    return np.array(totals)
def choose_best(agents):
    """
    Find the agent from the episode with the highest total reward.
    :param agents: list of agents used in training
    :return: agent with highest reward (first one, in case of ties)
    """
    totals = [agent.reward_total for agent in agents]
    best_idx = int(np.argmax(totals))
    return agents[best_idx]
def moving_average(v, n):
    """
    Compute the n-sample moving average of a vector.
    :param v: vector of values
    :param n: window width (number of samples averaged per point)
    :return: tuple (averages, episode numbers n..len(v) aligned with each average)
    """
    kernel = np.full(n, 1.0 / n)
    averages = np.convolve(v, kernel, 'valid')
    episodes = np.arange(n, len(v) + 1)
    return averages, episodes
def plot_average_reward_curve(agents, n=50):
    """
    Plot the moving average of the reward curve (blocks on plt.show()).
    :param agents: list of agents corresponding to episodes
    :param n: number of samples across which to calculate average
    :return: None
    """
    rewards = get_reward_curve(agents)
    mov_ave, episodes = moving_average(rewards, n=n)
    plt.figure()
    plt.plot(episodes, mov_ave)
    plt.xlabel('Episode')
    plt.ylabel('Average Reward for Last {:d} Episodes'.format(n))
    plt.show()
def run_network(initial_state, env, model):
    """
    Deterministically run a network after training and plot the resulting
    state history (rewards are discarded).
    :param initial_state: initial state of agent
    :param env: RL environment used for training
    :param model: RL model generated by training
    :return: None
    """
    done = False
    obs = env.reset(initial_state=initial_state)
    while not done:
        action, _state = model.predict(obs, deterministic=True)
        obs, _, done, __ = env.step(action)
    env.agent.plot_state_history(style='segmented')
def run_network_for_shap(env, model, num_trials=100):
    """
    Run the trained network to collect transitions for SHAP analysis.
    :param env: RL environment used for training
    :param model: RL model generated by training
    :param num_trials: number of trajectories to generate
    :return: (observations, actions, rewards, done flags), each a flat list
             across all trials; obs/act are recorded before the step, rew/done after
    """
    obs_list = []
    act_list = []
    done_list = []
    rew_list = []
    for trial in range(num_trials):
        done = False
        obs = env.reset()
        while not done:
            action, _state = model.predict(obs, deterministic=True)
            # Record the pre-step observation/action pair...
            obs_list.append(obs)
            act_list.append(action)
            obs, rew, done, __ = env.step(action)
            # ...and the resulting reward/termination flag.
            rew_list.append(rew)
            done_list.append(done)
    return obs_list, act_list, rew_list, done_list
def run_network_stochastic(model, env, num_eps):
    """
    Stochastically run a network after training; print the success rate and
    plot histograms of the terminal time/altitude/velocity/FPA.
    :param num_eps: number of episodes to use
    :param env: RL environment used for training
    :param model: RL model generated by training
    :return: None
    """
    success_count = 0  # number of episodes within fpa tolerence and that reach target alt
    terminal_alt = []
    terminal_fpa = []
    terminal_time = []
    terminal_vel = []
    for x in range(0, num_eps):
        obs = env.reset()
        done = False
        while not done:
            # deterministic=False: sample the policy for stochastic rollouts.
            action, _states = model.predict(obs, deterministic=False)
            obs, rewards, done, info = env.step(action)
            if done:
                # obs layout (per the plot labels below): [time, altitude, velocity, fpa(rad)].
                terminal_time.append(obs[0])
                terminal_alt.append(obs[1])
                terminal_vel.append(obs[2])
                terminal_fpa.append(obs[3] * 180 / np.pi)
                if env.agent.success:
                    success_count += 1
    success_percentage = success_count / num_eps
    print("success percentage ", success_percentage)
    # TODO make this into a function for post processing
    num_bins = 20
    counts_t, bins_t = np.histogram(terminal_time, bins=num_bins)
    counts_alt, bins_alt = np.histogram(terminal_alt, bins=num_bins)
    counts_vel, bins_vel = np.histogram(terminal_vel, bins=num_bins)
    counts_fpa, bins_fpa = np.histogram(terminal_fpa, bins=num_bins)
    fig, axs = plt.subplots(2, 2)
    axs[0, 0].hist(bins_t[:-1], bins_t, weights=counts_t)
    axs[0, 0].set_title('Final Time (s)')
    axs[0, 1].hist(bins_alt[:-1], bins_alt, weights=counts_alt)
    axs[0, 1].set_title('Terminal Alt (m)')
    axs[1, 0].hist(bins_vel[:-1], bins_vel, weights=counts_vel)
    axs[1, 0].set_title('Terminal Vel (m/s)')
    axs[1, 1].hist(bins_fpa[:-1], bins_fpa, weights=counts_fpa)
    axs[1, 1].set_title('Terminal FPA (deg)')
    fig.tight_layout()
    plt.show()
def run_network_save(initial_state, env, model, file = None, dir = None):
    """
    Deterministically run a network after training and save run data via the
    agent's save_run_data hook.
    :param initial_state: initial state of agent (also passed as the `save` tag)
    :param env: RL environment used for training
    :param model: RL model generated by training
    :param file: filename for text file which contains trajectory, time, and control data
    :param dir: folder directory which contains saved run data
    :return: None
    """
    done = False
    obs = env.reset(initial_state=initial_state)
    while not done:
        action, _state = model.predict(obs, deterministic=True)
        obs, _, done, __ = env.step(action)
    env.agent.save_run_data(file = file, save = initial_state, dir = dir)
def run_network_control(initial_state, env, model, save = None):
    """
    Deterministically run a network after training and plot the control history.
    :param initial_state: initial state of agent
    :param env: RL environment used for training
    :param model: RL model generated by training
    :param save: forwarded to plot_control's `save` option
    :return: None
    """
    done = False
    obs = env.reset(initial_state=initial_state)
    while not done:
        action, _state = model.predict(obs, deterministic=True)
        obs, _, done, __ = env.step(action)
    env.agent.plot_control(style='segmented', save =save)
def network_excel(initial_state, env, model, filename):
    """
    Deterministically run a trained network and save the per-step data to a
    CSV file ('<filename>.csv').

    Bug fix: the per-step reward returned by env.step() was previously
    discarded ('obs, _, done, __ = ...'), so the 'reward' column was always
    the initial 0.0; the step reward is now recorded for every transition.

    :param initial_state: initial state of agent
    :param env: RL environment used for training
    :param model: RL model generated by training
    :param filename: name of the output file (without the .csv extension)
    :return: None
    """
    obs0 = []
    obs1 = []
    obs2 = []
    obs3 = []
    rewards = []
    dones = []
    actions = []
    done = False
    obs = env.reset(initial_state=initial_state)
    reward = 0.
    # Record the initial observation (no action or reward has occurred yet).
    obs0.append(obs[0])
    obs1.append(obs[1])
    obs2.append(obs[2])
    obs3.append(obs[3])
    dones.append(done)
    rewards.append(reward)
    while not done:
        action, _state = model.predict(obs, deterministic=True)
        # Keep the step reward (previously discarded as '_').
        obs, reward, done, __ = env.step(action)
        actions.append(action)
        obs0.append(obs[0])
        obs1.append(obs[1])
        obs2.append(obs[2])
        obs3.append(obs[3])
        dones.append(done)
        rewards.append(reward)
    d = {'Time': obs0, 'Altitude': obs1, 'Velocity': obs2, 'FPA': obs3, 'reward': rewards, 'done': dones}
    df = pd.DataFrame(data=d)
    # Keep only non-terminal rows; they align one-to-one with the actions taken.
    Xy = df[df['done'] == False]
    Xy = Xy[['Time', 'Altitude', 'Velocity', 'FPA']]
    Xy['Action'] = actions
    Xy.to_csv(filename+'.csv')
def run_network_success(initial_state, env, model):
    """
    Deterministically run a network after training and report a success flag
    for the terminal state.
    :param initial_state: initial state of agent
    :param env: RL environment used for training
    :param model: RL model generated by training
    :return: bool success flag
    """
    success = False
    ctr = 0  # NOTE(review): never used
    done = False
    obs = env.reset(initial_state=initial_state)
    while not done:
        action, _state = model.predict(obs, deterministic=True)
        obs, _, done, __ = env.step(action)
        if done == True:
            # NOTE(review): this condition looks broken. `&` binds tighter
            # than `<=`, and `(180 / np.pi) <= 0.25 * np.pi / 180` compares
            # two constants (always False), zeroing the right operand. The
            # intent was probably something like
            #   obs[1] <= 3000 and abs(obs[3]) * 180 / np.pi <= 0.25
            # (cf. run_network_stochastic's terminal-alt/FPA checks); confirm
            # the intended condition and whether the True/False assignments
            # below are inverted before relying on this function.
            if obs[0] <= 3000 & abs(obs[3]) * ((180 / np.pi) <= 0.25 * np.pi / 180):
                success = False
            else:
                success = True
    return success
| hmdmia/HighSpeedRL | backend/utils/analysis.py | analysis.py | py | 7,951 | python | en | code | 0 | github-code | 36 |
3302452099 | import telebot
import requests
import re
import os
from twilio.rest import Client
import pyrebase
# Telegram bot handle; the token should come from BotFather.
bot = telebot.TeleBot("Replace this with telegram bot father key", parse_mode=None)
# Firebase project credentials (filled in by the deployer).
config = {
    "apiKey": "",
    "authDomain": "",
    "databaseURL": "",
    "storageBucket": ""
}
# Staging slots for the Twilio credentials gathered via bot commands:
# x = account SID, y = auth token, z = "from" phone, q = "to" phone.
# NOTE(review): these module-level globals are shared across all chats.
x = 0
y = 0
z = 0
q = 0
firebase = pyrebase.initialize_app(config)
db = firebase.database()
@bot.message_handler(func=lambda m: True)
def echo_all(message):
    """Dispatch '/command@value' messages: collect Twilio credentials, persist
    them per chat id in Firebase, and relay '/send' texts to WhatsApp.

    NOTE(review): credentials are staged in shared module-level globals (races
    across concurrent users) and stored in Firebase in plaintext.
    """
    a = message.text
    # Commands arrive as "/cmd@value"; b[0] is the command, b[1] the value.
    b = a.split('@')
    print(b)
    global x
    global y
    global z
    global q
    chatt = message.chat.id
    if(b[0] == "/start"):
        bot.reply_to(message, "Welcome to tele2WA bot")
    elif(b[0] == "/setsid"):
        x = b[1]
        print(x)
        bot.reply_to(message, "SID added")
    elif(b[0] == "/settoken"):
        y = b[1]
        print(y)
        bot.reply_to(message, "token added")
    elif(b[0] == "/setfromphone"):
        z = b[1]
        print(z)
        bot.reply_to(message, "fromphone added")
    elif(b[0] == "/settophone"):
        q = b[1]
        print(q)
        # Final step of setup: commit all four staged values for this chat.
        data = {
            "sid":x,
            "token":y,
            "fromphone":z,
            "tophone":q
        }
        db.child("users").child(chatt).set(data)
        bot.reply_to(message, "details added")
    elif(b[0] == "/updatetophone"):
        data4 = {
            "tophone": b[1]
        }
        db.child("users").child(chatt).update(data4)
        bot.reply_to(message, "tophone updated")
    elif(b[0] == "/updatefromphone"):
        data5 = {
            "fromphone": b[1]
        }
        db.child("users").child(chatt).update(data5)
        bot.reply_to(message, "fromphone updated")
    elif(b[0] =="/send"):
        # Load the stored Twilio credentials for this chat and relay the text.
        test = db.child("users").child(chatt).get()
        p = test.val()['sid']
        d = test.val()['token']
        t = test.val()['fromphone']
        r = test.val()['tophone']
        client = Client(p,d)
        client.messages.create(body=b[1],from_="whatsapp:"+ str(t),to="whatsapp:"+str(r))
    else:
        pass
bot.polling()
| harishsg99/Telegram-to-WA-bot | app.py | app.py | py | 2,041 | python | en | code | 0 | github-code | 36 |
14298749677 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/12/23 19:30
# @Author : lingxiangxiang
# @File : demonWrite.py
# Prompt for a filename, collect lines from the user until the 'EOF'
# sentinel, write them to the file, then read the file back and echo it.
if __name__ == '__main__':
    filename = input("Please input the name of file: ")
    # Context managers guarantee the handles are closed even if input()
    # raises (previously the write handle leaked on any exception).
    with open(filename, "w", encoding="utf-8") as f:
        while True:
            context = input("please input context('EOF' will close file): ")
            if context == "EOF":
                break
            f.write(context)
            f.write("\n")
    with open(filename, encoding="utf-8") as f_read:
        read_context = f_read.read()
    print("################start#####################")
    print(read_context)
    print("################end#######################")
27045322842 | # File: RookBishopQueen.py
# By: Christopher Luey
# Date: 2/10/20
# Rook, Bishop, Queen classes
from Piece import *
class Rook(Piece):
    """Rook: slides any number of empty squares along a rank or file."""
    def __init__(self, coord, board, color, img):
        # Call superclass constructor
        super().__init__(coord, board, color, img)
    def getPossibleMovesNoCheck(self):
        """Return the (row, col) squares this rook may move to, ignoring
        whether the move would leave its own king in check."""
        # Change the x and y values to check which squares rook has available to move to
        # List: the list of possible moves for the rook
        # NOTE(review): the local name `list` shadows the builtin within this method.
        x, y, list, board = 1, 1, [], self.board.getBoardList()
        # Check whether the coord is within the board, if it is empty or occupied by an opponent piece, continue to increment x and y until the checking coord is off the board or occupied by same color piece
        # Horizontal sweep: i = -1 scans left, i = +1 scans right.
        for i in [-1, 1]:
            while (0 <= (self.coord[1] + i*x) <= 7) and (board[self.coord[0]][self.coord[1] + i*x] == None or board[self.coord[0]][self.coord[1] + i*x].getColor() != self.color):
                list.append((self.coord[0],self.coord[1]+i*x))
                # Exit loop if checking tile with piece on it
                if board[self.coord[0]][self.coord[1] + i*x] != None: break
                x+=1
            x=1
        # Vertical sweep: i = -1 scans up the array, i = +1 scans down.
        for i in [-1,1]:
            while (0<= (self.coord[0] + i*y) <= 7) and (board[self.coord[0] + i*y][self.coord[1]] == None or board[self.coord[0] + i*y][self.coord[1]].getColor() != self.color):
                list.append((self.coord[0] + i*y, self.coord[1]))
                if board[self.coord[0] + i*y][self.coord[1]]!= None: break
                y+=1
            y = 1
        return list
class Bishop(Piece):
    """Bishop: slides any number of empty squares along a diagonal."""
    def __init__(self, coord, board, color, img):
        # Call superclass constructor
        super().__init__(coord, board, color, img)
    def getPossibleMovesNoCheck(self):
        """Return the (row, col) squares this bishop may move to, ignoring
        whether the move would leave its own king in check."""
        # NOTE(review): the local name `list` shadows the builtin within this method.
        x, y, board, list = 1, 1, self.board.getBoardList(), []
        # Check all 4 diagonals by adjusting the x, y directions
        for i in [-1, 1]:
            for j in [-1, 1]:
                # Check whether the tile is not off the grid, and is empty or occupied by opponent piece
                while (0<=self.coord[1] + i*x <= 7) and (0 <=self.coord[0] + j*y <= 7) and (board[self.coord[0] + j*y][self.coord[1] + i*x] == None or board[self.coord[0] + j*y][self.coord[1] + i*x].getColor() != self.color):
                    list.append((self.coord[0]+j*y,self.coord[1]+i*x))
                    # Exit loop if hitting piece
                    if board[self.coord[0] + j*y][self.coord[1] + i*x] != None: break
                    x, y = x+1, y+1
                x, y = 1,1
        return list
class Queen(Rook, Bishop):
    """Queen: combines rook and bishop movement via multiple inheritance
    (MRO: Queen -> Rook -> Bishop -> Piece)."""
    def __init__(self, coord, board, color,img):
        # Call the superclass constructor
        super().__init__(coord, board, color,img)
    def getPossibleMovesNoCheck(self):
        """Union of rook moves and bishop moves (check-ignoring)."""
        # Get the possible moves list by calling superclasses
        rookMoves = super().getPossibleMovesNoCheck()
        # By passing Rook, it looks for function getPossibleMovesNoCheck() in Bishop class
        # (super(Rook, self) continues lookup along the MRO *after* Rook).
        bishopMoves = super(Rook, self).getPossibleMovesNoCheck()
        return rookMoves+bishopMoves
| clin155/chess-game | RookBishopQueen.py | RookBishopQueen.py | py | 3,106 | python | en | code | 0 | github-code | 36 |
20019809326 | #file used to take screenshot to baseline rectangle mappings off of
import cv2
# Grab a single frame from the default webcam for rectangle-mapping baselines.
cam = cv2.VideoCapture(0)
result, image = cam.read()
# Release the camera device as soon as the frame is captured
# (previously the handle was held for the life of the process).
cam.release()
if result:
    cv2.imshow("img_to_map", image)
    cv2.imwrite("img_to_map.png", image)
    cv2.waitKey(0)
    cv2.destroyWindow("img_to_map")
else:
    print("No image detected. Please! try again")
22700582787 | import pathlib
import json
import numpy as np
import pandas as pd
from scipy.interpolate import SmoothBivariateSpline, UnivariateSpline
import matplotlib as mpl
import matplotlib.pyplot as plt
from .common import Timer, Tools
from .sim_ctr import RgbGrid
from .sim_reduce import Steps, ReduceModel
class SynthGrid:
    """Synthesize stellar-model tracks on a (mass, [Fe/H]) output grid.

    Simulated tracks from ``rgb_grid`` (one per mixing-length alpha, mass and
    metallicity) are interpolated in (mass, [Fe/H]) per evolutionary step,
    then evaluated at a fitted alpha for each requested output model and
    written to ``synth_grid`` as CSV.  Construction runs the whole pipeline
    (see ``__call__``).
    """
    # calibration file holding the fitted aMLT-model coefficients
    POPT_PATH = 'rgb_calibr/Salaris-off_vary-both.json'
    AMLT_KEY = 'ms2_mt1'
    # aMLT as a function of x = [mass-1, FeH]; quadratic in mass, linear in FeH
    AMLT_MODEL = lambda x, a, b1, b2, c1: a + b1*x[0] + b2*x[0]**2 + c1*x[1]
    OUT_MASS_LIST = [0.9, 1. , 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8]
    OUT_FEH_LIST = [-0.4, -0.3, -0.2, -0.1, 0. , 0.1, 0.2, 0.3, 0.4]
    # subset of models that additionally get diagnostic plots
    OUT_MASS_MINI = [1. , 1.4, 1.8]
    OUT_FEH_MINI = [-0.4, 0. , 0.4]
    def __init__(self, aMLT_list: [float], mass_list: [float] = RgbGrid.MASS_LIST,
                 FeH_list: [float] = RgbGrid.FEH_LIST, **kwargs):
        """Set up paths, load the aMLT fit coefficients and run the pipeline.

        kwargs are forwarded to RgbGrid.Y_Z_calc (Ybirth, Zbirth, ...).
        """
        self.indir = pathlib.Path('rgb_grid')
        assert self.indir.exists(), 'rgb_grid does not exist'
        self.outdir = pathlib.Path('synth_grid')
        self.outdir.mkdir(exist_ok=True)
        popt_path = pathlib.Path(SynthGrid.POPT_PATH)
        assert popt_path.exists(), 'popt_path does not exist'
        with open(popt_path, 'r') as f:
            popt_dict = json.load(f)
        self.aMLT_popt = popt_dict[SynthGrid.AMLT_KEY]
        self.aMLT_list = aMLT_list
        self.mass_list = mass_list
        self.FeH_list = FeH_list
        self.kwargs = kwargs  # Ybirth, Zbirth, Z_over_X_sun, YBBN
        self.timer = Timer()
        self()
    def __call__(self):
        """Run the full pipeline: extract, interpolate, synthesize, clean up."""
        self.extract_sim_data()
        self.build_interps()
        for mass in SynthGrid.OUT_MASS_LIST:
            for FeH in SynthGrid.OUT_FEH_LIST:
                # evaluate the fitted mixing-length model at (mass-1, FeH)
                aMLT_fit = SynthGrid.AMLT_MODEL([mass-1, FeH], *self.aMLT_popt)
                self.synthesize_model(mass, FeH, aMLT_fit)
        self.clear()
        print(' > All models synthesized!', '@', self.timer(), flush=True)
    def extract_sim_data(self):
        """Load every simulated track into data_dict[qty][aMLT, step, mass, FeH].

        Missing models are recorded in the boolean ``existence`` mask so they
        can be excluded from the spline fits.
        """
        shape = (len(self.aMLT_list), Steps.end+1,
                 len(self.mass_list), len(self.FeH_list))
        self.existence = np.ones(shape[:1] + shape[2:], dtype=bool)
        self.data_dict = {qty: np.zeros(shape) for qty in ReduceModel.QTY_LIST}
        for k, aMLT in enumerate(self.aMLT_list):
            for j, mass in enumerate(self.mass_list):
                for i, FeH in enumerate(self.FeH_list):
                    Y, Z = RgbGrid.Y_Z_calc(FeH, **self.kwargs)
                    model = SynthModel(self, aMLT=aMLT, mass=mass, Z=Z, FeH=FeH)
                    if model.exists:
                        for qty in ReduceModel.QTY_LIST:
                            self.data_dict[qty][k, :, j, i] = model.data[qty]
                    else:
                        self.existence[k, j, i] = False
                    model.clear_data(); del model
    def build_interps(self):
        """Fit one 2-D spline in (mass, FeH) per quantity, aMLT and step."""
        self.interp_dict = {}
        for qty in ReduceModel.QTY_LIST:
            self.interp_dict[qty] = [[None for step in range(Steps.end+1)]
                                     for aMLT in self.aMLT_list]
            for k in range(len(self.aMLT_list)):
                for step in range(Steps.end+1):
                    # x = star_mass, y = surface_[Fe/H]; only existing models
                    self.interp_dict[qty][k][step] = SmoothBivariateSpline(
                        self.data_dict['star_mass']     [k, step][self.existence[k]],
                        self.data_dict['surface_[Fe/H]'][k, step][self.existence[k]],
                        self.data_dict[qty]             [k, step][self.existence[k]], kx=2, ky=2)
    def synthesize_model(self, mass: float, FeH: float, aMLT_fit: float):
        """Build one synthetic track at (mass, FeH, aMLT_fit) and write a CSV.

        Each quantity is first interpolated in (mass, FeH) for every tabulated
        aMLT, then linearly interpolated in aMLT to aMLT_fit.
        """
        Y, Z = RgbGrid.Y_Z_calc(FeH, **self.kwargs)
        model_name = f'{mass:.2f}M_Z={Z:.4f}_FeH={FeH:+.2f}'
        print(' > Synthesizing', model_name, '@', self.timer())
        pred = {}; data = {}
        for qty in ReduceModel.QTY_LIST:
            pred[qty] = np.zeros((len(self.aMLT_list), Steps.end+1))
            data[qty] = np.zeros(Steps.end+1)
        for qty in ReduceModel.QTY_LIST:
            for step in range(Steps.end+1):
                for k, aMLT in enumerate(self.aMLT_list):
                    pred[qty]   [k, step] = self.interp_dict[qty][k][step](mass, FeH)[0, 0]
                # k=1: linear interpolation across the tabulated aMLT values
                data[qty][step] = UnivariateSpline(self.aMLT_list, pred[qty][:, step], k=1)(aMLT_fit)
        if mass in SynthGrid.OUT_MASS_MINI and FeH in SynthGrid.OUT_FEH_MINI:
            self._visualize_data(model_name, pred, data, aMLT_fit)
        df = pd.DataFrame(data)
        df.to_csv(self.outdir / f'{model_name}.csv')
        pred.clear(); data.clear()
        del df, pred, data
    def _draw_curve(self, pred, data, ax, x, y, colors):
        """Plot quantity y against x: dashed per-aMLT tracks, solid synthesis,
        grey cross-aMLT connectors per step and colored markers at named EEPs."""
        for k, aMLT in enumerate(self.aMLT_list):
            ax.plot(pred[x][k], pred[y][k], '--', c=colors[k])
        ax.plot(data[x], data[y], '-', c=colors[-1])
        # astronomical convention: Teff and log g increase to the left/down
        if x in ['Teff', 'log_g']: ax.invert_xaxis()
        if y in ['Teff', 'log_g']: ax.invert_yaxis()
        for step in range(Steps.end+1):
            ax.plot([pred[x][k, step] for k in range(len(self.aMLT_list))],
                    [pred[y][k, step] for k in range(len(self.aMLT_list))],
                    ls='-', lw=0.5, c='lightgrey', zorder=-1)
        for EEP in ['mid_PMS', 'ZAMS', 'mid_MS', 'TAMS', 'mid_SGB',
                    'pre_FDU', 'post_FDU', 'pre_RGBB', 'post_RGBB']:
            idx = getattr(Steps, EEP)
            color = getattr(Steps, f'{EEP}_c', 'tab:cyan')
            ax.plot([pred[x][k, idx] for k in range(len(self.aMLT_list))],
                    [pred[y][k, idx] for k in range(len(self.aMLT_list))],
                    ls='-', lw=0.5, c=color, zorder=-1)
            ax.plot(data[x][idx], data[y][idx], 'o', c=color, ms=4)
        ax.set_xlabel(x)
        ax.set_ylabel(y)
        Tools.format_axis(ax)
    def _visualize_data(self, model_name, pred, data, aMLT_fit):
        """Produce diagnostic figures for one model and merge them into one file."""
        cmap = mpl.colormaps['summer_r']
        norm = mpl.colors.Normalize(vmin=self.aMLT_list[0],
                                    vmax=self.aMLT_list[-1])
        colors = [cmap(norm(a)) for a in self.aMLT_list + [aMLT_fit]]
        # draw evolutionary tracks
        fig, axs = plt.subplots(1, 2)
        self._draw_curve(pred, data, axs[0], 'Teff', 'log_L', colors)
        self._draw_curve(pred, data, axs[1], 'Teff', 'log_g', colors)
        for i in range(2):
            fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
                         ax=axs[i], orientation='horizontal', label=r'mixing length $\alpha$')
        Tools.save_figure(fig, 'tracks')
        # draw coordinates
        fig, axs = plt.subplots(2, 1)
        self._draw_curve(pred, data, axs[0], 'star_age', 'model_number', colors)
        self._draw_curve(pred, data, axs[1], 'model_number', 'star_age', colors)
        for i in range(2):
            fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
                         ax=axs[i], orientation='vertical', label=r'ML $\alpha$')
        Tools.save_figure(fig, 'coords')
        # draw histories
        for qty in ReduceModel.QTY_LIST[2:]:
            fig, axs = plt.subplots(2, 1)
            self._draw_curve(pred, data, axs[0], 'star_age', qty, colors)
            self._draw_curve(pred, data, axs[1], 'model_number', qty, colors)
            for i in range(2):
                fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
                             ax=axs[i], orientation='vertical', label=r'ML $\alpha$')
            Tools.save_figure(fig, qty.replace('/', '_'))
        Tools.merge_plots(self.outdir, model_name, ['tracks', 'coords'] \
            + [qty.replace('/', '_') for qty in ReduceModel.QTY_LIST[2:]])
    def clear(self):
        """Release the extracted arrays and spline tables."""
        for qty in ReduceModel.QTY_LIST:
            for k in range(len(self.aMLT_list)):
                self.interp_dict[qty][k].clear()
            self.interp_dict[qty].clear()
        self.data_dict.clear()
        self.interp_dict.clear()
        del self.existence, self.data_dict, self.interp_dict
class SynthModel:
    """Loader for one reduced-model CSV produced by the RGB grid run.

    ``exists`` tells whether the file was found; when it was, the track is
    available as the ``data`` DataFrame until ``clear_data`` is called.
    """
    def __init__(self, grid: SynthGrid, **kwargs) -> None:
        self.grid = grid
        self.model_name = (
            f'aMLT={kwargs["aMLT"]:.4f}_{kwargs["mass"]:.2f}M_'
            f'Z={kwargs["Z"]:.4f}_FeH={kwargs["FeH"]:+.2f}'
        )
        csv_path = grid.indir / f'{self.model_name}.csv'
        self.exists = csv_path.exists()
        if self.exists:
            self.data = pd.read_csv(csv_path, index_col=0)
        else:
            print(f' > Warning: {self.model_name} does not exist.')
    def clear_data(self):
        """Drop the loaded DataFrame (if any) to free memory."""
        if self.exists:
            del self.data
| kailicao/mesa_apokasc | sim_synth.py | sim_synth.py | py | 8,766 | python | en | code | 0 | github-code | 36 |
30280408206 | import os
def delete_empty_folders(path):
    """Recursively delete every empty folder below *path*.

    Walks bottom-up (``topdown=False``) so that a directory whose only
    contents were empty sub-directories is itself removed once they are
    gone — the original top-down walk missed those.  *path* itself is
    never removed.
    """
    if os.path.exists(path):
        for root_folder, folders, files in os.walk(path, topdown=False):
            for folder in folders:
                full_path = os.path.join(root_folder, folder)
                # the child may already have been removed on a deeper pass
                if os.path.isdir(full_path) and not os.listdir(full_path):
                    os.rmdir(full_path)
        print("Empty folders deleted successfully")
    else:
        print("Path doesn't exists")
if __name__ == '__main__':
    # interactive entry point: prompt for a directory and clean it
    path = input("Enter path: ")
    delete_empty_folders(path)
| hafeezulkareem/python_scripts | delete_empty_folders.py | delete_empty_folders.py | py | 496 | python | en | code | 0 | github-code | 36 |
29729372400 | import random
# Type aliases shared by the simulation.
# NOTE(review): 'User' is a forward reference to a class defined elsewhere in
# the project — confirm it is importable wherever these aliases are resolved.
BS_feedback = dict[int, set['User']]
Group = dict[int, set['User']]
ChannelSet = set[int]
BS_response = list[int]
def rand_gen(probability):
    """Bernoulli trial: return True with the given *probability*."""
    return random.random() <= probability
def calculate_average_delay(users):
    """Return the mean of each user's total delay (``user.sum()``)."""
    total_delay = sum(subscriber.sum() for subscriber in users)
    return total_delay / len(users)
71075249065 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def swapNodes(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:
        """Swap the values of the k-th node from the front and the k-th node
        from the back, in place, and return the head.

        Uses the classic gap technique: once ``front`` is k nodes in,
        advancing ``front`` and ``back`` together leaves ``back`` on the
        k-th node from the end when ``front`` reaches the tail.
        O(n) time, O(1) extra space — the original implementation copied
        every value into a Python list and rebuilt a brand-new linked list.
        """
        front = head
        for _ in range(k - 1):
            front = front.next
        first = front           # k-th node from the front
        back = head
        while front.next:
            front = front.next
            back = back.next
        second = back           # k-th node from the back
        first.val, second.val = second.val, first.val
        return head
9659107550 | #!/usr/bin/env python
# coding: utf-8
# @Author: lapis-hong
# @Date : 2018/4/12
"""Prob 167. Two Sum II - Input array is sorted
https://leetcode.com/problems/two-sum-ii-input-array-is-sorted/description/
Description:
Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.
The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2. Please note that your returned answers (both index1 and index2) are not zero-based.
You may assume that each input would have exactly one solution and you may not use the same element twice.
Input: numbers={2, 7, 11, 15}, target=9
Output: index1=1, index2=2
"""
# two-pointer
def twoSum(numbers, target):
    """Two-pointer scan over the sorted array; returns 1-based indices."""
    lo, hi = 0, len(numbers) - 1
    while lo < hi:
        total = numbers[lo] + numbers[hi]
        if total < target:
            lo += 1
        elif total > target:
            hi -= 1
        else:
            return [lo + 1, hi + 1]
# dictionary
def twoSum2(numbers, target):
    """Hash-map lookup of each value's complement; returns 1-based indices."""
    seen = {}
    for idx, num in enumerate(numbers):
        complement = target - num
        if complement in seen:
            return [seen[complement] + 1, idx + 1]
        seen[num] = idx
# binary search
def twoSum3(numbers, target):
    """For each element, binary-search its complement in the tail of the
    sorted array; returns 1-based indices.

    Fixed: ``xrange`` is Python 2 only and raised NameError under Python 3;
    replaced with ``range``.
    """
    for i in range(len(numbers)):
        l, r = i + 1, len(numbers) - 1
        tmp = target - numbers[i]
        while l <= r:
            mid = l + (r - l) // 2
            if numbers[mid] == tmp:
                return [i + 1, mid + 1]
            elif numbers[mid] < tmp:
                l = mid + 1
            else:
                r = mid - 1
if __name__ == '__main__':
    # Exercise all three implementations on the reference example, in order.
    for solver in (twoSum, twoSum2, twoSum3):
        print(solver([2, 7, 11, 15], 9))
25600927243 | from odoo import models, fields, api
class PurchaseOrderInh(models.Model):
    """Extension of purchase.order: order-level discount, net totals and tax.

    Adds computed discount/net-total/tax fields and keeps ``subtotal_amount``
    in sync with the sum of the line subtotals.
    """
    _inherit = 'purchase.order'
    perc_discount = fields.Float('Discount', compute='_compute_discount')
    net_total = fields.Float('Net Total', compute='_compute_net_total')
    perc = fields.Float(compute='compute_percentage')
    net_tax = fields.Float('Tax', compute='compute_taxes')
    note_picklist = fields.Char('Note')
    subtotal_amount = fields.Float('Subtotal Amount')
    @api.model
    def create(self, vals_list):
        # refresh subtotal_amount right after creation
        rec = super().create(vals_list)
        rec.action_po_update_subtotal()
        return rec
    def action_po_update_subtotal(self):
        """Recompute subtotal_amount as the sum of the line subtotals."""
        for rec in self:
            subtotal = 0
            for line in rec.order_line:
                subtotal = subtotal + line.subtotal
            rec.subtotal_amount = subtotal
    @api.depends('order_line')
    def compute_taxes(self):
        """Sum the VAT of lines carrying the hard-coded tax, less the discount.

        NOTE(review): tax id 19 is a magic database id (appears to be the
        'VAT 5%' record used elsewhere in this module) — confirm it is stable
        across databases.
        """
        for order in self:
            # amount_tax = 0.0
            # for line in order.order_line:
            #     print(line.price_tax)
            #     amount_tax += line.price_tax
            # order.net_tax = amount_tax
            amount = 0
            for rec in order.order_line:
                if rec.taxes_id:
                    # if rec.taxes_id.filtered(lambda i:i.name != 'Reverse Charge Provision'):
                    if rec.taxes_id.filtered(lambda i:i.id == 19):
                        amount += rec.vat_amount
            order.net_tax = amount - ((order.discount_rate / 100) * amount)
            # flag = False
            # amount = 0
            # for rec in order.order_line:
            #     if rec.taxes_id and rec.taxes_id.filtered(lambda i: i.id != 23) and rec.taxes_id.filtered(
            #             lambda i: i.amount != 0):
            #         flag = True
            #         amount = True
            # if flag:
            #     order.net_tax = (5 / 100) * order.net_total
            # else:
            #     order.net_tax = 0
    @api.depends('discount_rate', 'discount_type', 'subtotal_amount')
    def compute_percentage(self):
        """Express the discount as a percentage regardless of discount_type."""
        for rec in self:
            disc = 0
            if rec.discount_type == 'percent':
                disc = rec.discount_rate
            else:
                # fixed amount -> percentage of the subtotal
                disc = (rec.discount_rate / rec.subtotal_amount) * 100
            rec.perc = disc
    @api.depends('order_line.price_total', 'order_line.subtotal', 'discount_rate', 'discount_type', )
    def _amount_all(self):
        """
        Compute the total amounts of the SO.
        """
        for order in self:
            amount_untaxed = amount_tax = amount_discount = subtotal = 0.0
            for line in order.order_line:
                # amount_untaxed += line.price_subtotal
                # amount_tax += line.price_tax
                # amount_discount += (line.product_qty * line.price_unit * line.discount) / 100
                # amount_discount += (line.product_qty * line.price_unit) / 100
                subtotal = subtotal + line.subtotal
            order.update({
                'amount_untaxed': amount_untaxed,
                'amount_tax': amount_tax,
                'amount_discount': amount_discount,
                'amount_total': amount_untaxed + amount_tax,
                'subtotal_amount': subtotal,
                # 'net_total': subtotal - disc
            })
    @api.depends('order_line', 'discount_rate', 'discount_type', 'order_line.subtotal')
    def _compute_net_total(self):
        """net_total = subtotal - discount; also refresh tax and grand total."""
        for rec in self:
            # subtotal = 0
            # for line in rec.order_line:
            #     subtotal = subtotal + line.subtotal
            # rec.subtotal_amount = subtotal
            rec.net_total = rec.subtotal_amount - rec.perc_discount
            rec.amount_tax = rec.net_tax
            rec.amount_total = rec.net_total + rec.amount_tax
            # rec.total_amount_due = rec.amount_total
    @api.depends('discount_rate', 'discount_type')
    def _compute_discount(self):
        """Convert discount_rate into an absolute discount amount."""
        for rec in self:
            if rec.discount_type == 'percent':
                rec.perc_discount = (rec.discount_rate / 100) * rec.subtotal_amount
            else:
                rec.perc_discount = rec.discount_rate
    def action_show_sale_products(self):
        """Open the 'Sale Order Products' wizard in a dialog."""
        return {
            'type': 'ir.actions.act_window',
            'name': 'Sale Order Products',
            'view_id': self.env.ref('so_po_customization.view_sale_order_wizard_form', False).id,
            'target': 'new',
            'res_model': 'sale.order.wizard',
            'view_mode': 'form',
        }
class PurchaseOrderLineInh(models.Model):
    """Extension of purchase.order.line: per-line numbering, subtotal and VAT."""
    _inherit = 'purchase.order.line'
    remarks = fields.Char("Remarks")
    number = fields.Integer(compute='_compute_get_number', store=True)
    so_ref = fields.Integer('Ref')
    sale_order = fields.Char('Sale Order')
    vat_amount = fields.Float('VAT Amount', compute='_compute_vat_amount')
    subtotal = fields.Float('Subtotal', compute='_compute_subtotal')
    @api.depends('price_unit', 'product_qty', 'product_uom')
    def _compute_subtotal(self):
        """subtotal = quantity * unit price (before tax and discount)."""
        for rec in self:
            rec.subtotal = rec.product_qty * rec.price_unit
    @api.depends('taxes_id', 'price_unit', 'product_qty')
    def _compute_vat_amount(self):
        """VAT from the hard-coded tax record only.

        NOTE(review): tax id 19 is a magic database id — confirm it matches
        the intended VAT record in every deployment.
        """
        for rec in self:
            amount = 0
            for tax in rec.taxes_id:
                if tax.id == 19:
                    amount = amount + tax.amount
            rec.vat_amount = (amount * rec.product_qty / 100) * rec.price_unit
    @api.depends('sequence', 'order_id')
    def _compute_get_number(self):
        """Number the lines of each order sequentially starting at 1."""
        for order in self.mapped('order_id'):
            number = 1
            for line in order.order_line:
                line.number = number
                number += 1
    @api.onchange('product_id')
    def onchange_get_tax(self):
        """Default every line to the 5% purchase VAT tax."""
        tax = self.env['account.tax'].search(
            [('type_tax_use', '=', 'purchase'), ('amount', '=', 5), ('name', '=', 'VAT 5%')])
        for rec in self:
            rec.taxes_id = tax
    # def _compute_tax_id(self):
    #     for line in self:
    #         line = line.with_company(line.company_id)
    #         fpos = line.order_id.fiscal_position_id or line.order_id.fiscal_position_id.get_fiscal_position(line.order_id.partner_id.id)
    # filter taxes by company
    # taxes = line.product_id.supplier_taxes_id.filtered(lambda r: r.company_id == line.env.company)
    # line.taxes_id = fpos.map_tax(taxes, line.product_id, line.order_id.partner_id)
    def unlink(self):
        """Renumber the surviving lines, then delete the records.

        NOTE(review): the result of super().unlink() is assigned but never
        returned — Odoo's unlink() is expected to return it; a trailing
        ``return record`` looks missing here.  Confirm before changing.
        """
        for res in self:
            i = 1
            for rec in res.order_id.order_line:
                if rec.id != res.id:
                    rec.update({
                        'number': i
                    })
                    i = i + 1
        record = super(PurchaseOrderLineInh, self).unlink()
| Gidwani/CRA | so_po_customization/models/purchase.py | purchase.py | py | 6,995 | python | en | code | 0 | github-code | 36 |
10556749786 | import serial
import tkinter
from tkinter import*
TTY_DEVICE = "COM"  # Windows serial-port name prefix (COM1, COM2, ...)
s=serial.Serial()  # module-global port handle shared by all helpers below
def init(port,text2):
    """Open COM<port> at 115200 baud and log the result into *text2*.

    On failure the exception is swallowed silently and nothing is logged.
    """
    global s
    try:
        s=serial.Serial(TTY_DEVICE + str(port), 115200, timeout=10)
        print('connect com'+str(port))
        # the widget is kept read-only; enable it just long enough to insert
        text2.configure(state=tkinter.NORMAL)
        text2.insert(1.0,'connect COM'+str(port)+"\n")
        text2.configure(state=tkinter.DISABLED)
    except (OSError, serial.SerialException):
        pass
def disconnect(text2):
    """Close the global port and log the disconnect into *text2*."""
    global s
    # the widget is kept read-only; enable it just long enough to insert
    text2.configure(state=tkinter.NORMAL)
    text2.insert(1.0,'COM-port Disconnect'+"\n")
    text2.configure(state=tkinter.DISABLED)
    s.close();
    print('COM-port Disconnect')
def write(text):
    """Send *text* over the open global port; log if the port is gone."""
    global s
    try:
        s.write(text)
    except serial.SerialException:
        print('com port disconnect')
def read():
    """Read everything waiting in the input buffer and return it as a string.

    NOTE(review): str() on the bytes returned by serial.read() yields a
    "b'...'" representation, not the decoded text — confirm callers expect
    that, otherwise .decode() is probably intended.
    """
    global s
    try:
        text=str(s.read(s.inWaiting()));
        return text
    except serial.SerialException:
        print('com port disconnect')
def findCom():
    """Probe COM0..COM49 and return the list of port numbers that opened."""
    global s
    s.close()
    print('com close')
    comAvable = []
    for port in range(50):
        try:
            s = serial.Serial(TTY_DEVICE + str(port), 115200, timeout=10)
        except (OSError, serial.SerialException):
            continue
        comAvable.append(port)
        s.close()
        print('Serial close')
    # the original used a for/else with no break, so the else branch always
    # ran; returning after the loop is equivalent
    return comAvable
31050662738 | import re
import unicodedata
def slugify(value):
    """From django.utils.text: normalize *value* to an ASCII, URL-safe slug.

    Non-ASCII characters are transliterated/dropped via NFKD normalization,
    anything that is not alphanumeric/underscore/space/hyphen is removed,
    and runs of whitespace/hyphens collapse to a single hyphen.
    """
    value = unicodedata.normalize('NFKD', value).encode(
        'ascii', 'ignore').decode('ascii')
    # Raw strings: the original '[^\w\s-]' used invalid escape sequences
    # (DeprecationWarning today, a SyntaxError in future Python versions).
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return re.sub(r'[-\s]+', '-', value)
| ryankask/esther | esther/utils.py | utils.py | py | 284 | python | fa | code | 17 | github-code | 36 |
class IterInt(int):
    """An int whose decimal digits can be iterated, indexed and counted.

    Iteration yields each digit as an int; indexing/slicing works on the
    decimal string representation; addition returns another IterInt.

    NOTE(review): negative values break __iter__/__getitem__ because the
    '-' sign is not a digit — confirm only non-negative use is intended.
    """
    def __iter__(self):
        for digit in str(self):
            yield int(digit)

    def __getitem__(self, index):
        # slices return the selected digits re-parsed as an int
        return int(str(self)[index])

    def __len__(self):
        # number of decimal digits; the original counted them one by one
        # in a Python loop (and crashed on negatives with ValueError)
        return len(str(self))

    def __add__(self, other):
        # keep the subclass type instead of decaying to plain int
        return IterInt(super().__add__(other))
# Demo: iterate digits, slice, measure length, and add while keeping the type.
iter_num = IterInt(67545789)
for i in iter_num:
    print(i * 2)
print(iter_num[1:5])
print(len(iter_num))
new_num = iter_num + 10
for i in new_num:
    print(i)
| aanastasiyatuz/python23-lections | oop/iter_int.py | iter_int.py | py | 599 | python | en | code | 5 | github-code | 36 |
198794662 | import abc
from typing import Dict, List
from uuid import UUID
from moderation_ml_example.models import Post
class PostNotFoundError(Exception):
    """Raised when a post id is not present in the repository."""
    pass
class PostRepository(abc.ABC):
    """Abstract persistence interface for posts.

    Fixed: the original set ``__metaclass__ = abc.ABCMeta``, which is
    Python 2 syntax and has no effect in Python 3 — the class was a plain
    concrete class and nothing was abstract.  Inheriting ``abc.ABC`` and
    marking the operations abstract restores the intended contract
    (implementations such as InMemoryPostRepository provide all four).
    """

    @abc.abstractmethod
    async def save(self, post: Post) -> None:
        """Persist *post*, overwriting any existing post with the same id."""

    @abc.abstractmethod
    async def get(self, id: UUID) -> Post:
        """Return the post with *id*; raise PostNotFoundError if absent."""

    @abc.abstractmethod
    async def list(self) -> List[Post]:
        """Return all stored posts."""

    @abc.abstractmethod
    async def list_unmoderated(self) -> List[Post]:
        """Return posts still awaiting moderation."""
class InMemoryPostRepository(PostRepository):
    """Dict-backed PostRepository; stores and returns defensive copies."""

    def __init__(self):
        self._posts: Dict[UUID, Post] = {}

    async def save(self, post: Post) -> None:
        self._posts[post.id] = post.copy()

    async def get(self, id: UUID) -> Post:
        try:
            stored = self._posts[id]
        except KeyError as missing:
            raise PostNotFoundError(f"Post with id {id} cannot be found") from missing
        return stored.copy()

    async def list(self) -> List[Post]:
        return [stored.copy() for stored in self._posts.values()]

    async def list_unmoderated(self) -> List[Post]:
        return [
            stored.copy()
            for stored in self._posts.values()
            if stored.requires_moderation
        ]
| mikeyjkmo/post-moderation-example | moderation_ml_example/repository.py | repository.py | py | 1,156 | python | en | code | 0 | github-code | 36 |
8373131694 |
import jwt
JWT_SECRET = "this_is_just_for_testing"
def create_jwt(payload):
    """Sign *payload* as an HS256 JWT using the module's test secret."""
    return jwt.encode(payload, JWT_SECRET, algorithm="HS256")
def validate_jwt(token):
    """Decode and verify *token*; jwt exceptions propagate to the caller.

    The original wrapped the decode in ``try/except: raise`` — a bare
    except that immediately re-raises everything is a no-op (and bare
    ``except:`` also intercepts SystemExit/KeyboardInterrupt), so the
    wrapper was removed.  Behavior is unchanged.
    """
    return jwt.decode(token, JWT_SECRET, "HS256")
| walterbrunetti/playground | auth/core/jwt_utils.py | jwt_utils.py | py | 338 | python | en | code | 0 | github-code | 36 |
70857518504 | import copy
# Advent-of-Code style handheld-console repair: try flipping every single
# jmp<->nop instruction until one variant terminates instead of looping.
# NOTE(review): `input` shadows the builtin; kept as-is in this
# documentation-only pass.
with open('input.txt', 'r') as file:
    # each instruction is stored as [text, executed?] so a run can detect loops
    input = [[line.strip(), False] for line in file if line.strip()]

# build up list of instructions
programs_to_try = []
jmp_instruction_indices = [idx for idx, (operation, _) in enumerate(input) if 'jmp' in operation]
nop_instruction_indices = [idx for idx, (operation, _) in enumerate(input) if 'nop' in operation]
for jmp_instruction_index in jmp_instruction_indices:
    # flip the jmp to a nop and add it to the list of programs
    temp = copy.deepcopy(input)
    old_instruction = temp[jmp_instruction_index][0]
    temp[jmp_instruction_index][0] = old_instruction.replace('jmp', 'nop')
    programs_to_try.append(temp)
for nop_instruction_index in nop_instruction_indices:
    # flip the nop to a jmp and add it to the list of programs
    temp = copy.deepcopy(input)
    old_instruction = temp[nop_instruction_index][0]
    temp[nop_instruction_index][0] = old_instruction.replace('nop', 'jmp')
    programs_to_try.append(temp)
for program in programs_to_try:
    accumulator = 0
    index = 0
    broke_out = False
    # execute until we run off the end (success) or revisit an
    # instruction (infinite loop -> try the next variant)
    while index < len(program):
        instruction, executed = program[index]
        if executed:
            broke_out = True
            break
        program[index][1] = True
        operation, argument = instruction.split(' ')
        if operation == 'nop':
            index += 1
        elif operation == 'acc':
            accumulator += int(argument)
            index += 1
        elif operation == 'jmp':
            index += int(argument)
    if not broke_out:
        # normal termination: this was the correct single flip
        print('we found a program that worked')
        print(f'accumulator is {accumulator}')
        exit()
| davsucks/AdventOfCode | 2020/8/part-two.py | part-two.py | py | 1,666 | python | en | code | 0 | github-code | 36 |
1060549613 | import unittest
from paint_calculator import api
from paint_calculator.run import app
class APITestCase(unittest.TestCase):
    """Integration tests for the paint-calculator Flask API and its helpers."""
    def setUp(self):
        # fresh Flask test client per test
        app.testing = True
        self.app = app.test_client()

    def test_calculate(self):
        """
        Tests calculate function
        """
        room1 = {'length':20,'width':20,'height':20}
        room2 = {'length':10,'width':10,'height':10}
        response = self.app.post(
            '/api/v1/calculate',
            json={
                'room-1':room1,
                'room-2':room2
            }
        )
        # the endpoint's figures must agree with the helper functions
        ft_room1 = {}
        ft_room1['ft'] = api.calculate_feet(room1)
        gallons_room1 = api.calculate_gallons_required(ft_room1)
        self.assertEqual(response.json['room-1']['ft'], ft_room1['ft'], "total ft for room-1 should be calculated from calculate_feet function")
        self.assertEqual(response.json['room-1']['gallons'], gallons_room1, "total gallons for room-1 should be calculated from calculate_gallons_required function")
        self.assertEqual(response.json['room-1']['room'], '1', "room number should be 1")
        ft_room2 = {}
        ft_room2['ft'] = api.calculate_feet(room2)
        gallons_room2 = api.calculate_gallons_required(ft_room2)
        self.assertEqual(response.json['room-2']['ft'], ft_room2['ft'], "total ft for room-2 should be calculated from calculate_feet function")
        self.assertEqual(response.json['room-2']['gallons'], gallons_room2, "total gallons for room-2 should be calculated from calculate_gallons_required function")
        self.assertEqual(response.json['room-2']['room'], '2', "room number should be 2")
        total_gallons = gallons_room1 + gallons_room2
        self.assertEqual(response.json['total_gallons'], total_gallons, "total gallons for all rooms should be calculated by calculate function")

    def test_calculate_feet(self):
        """
        Tests calculate_feet function
        """
        room1 = {'length':20,'width':20,'height':20}
        room2 = {'length':10,'width':10,'height':10}
        self.assertEqual(api.calculate_feet(room1), 1600, "total ft for room-1 (20-20-20) should be 1600")
        self.assertEqual(api.calculate_feet(room2), 400, "total ft for room-2 (10-10-10) should be 400")

    def test_calculate_gallons_required(self):
        """
        Tests calculate_gallons_required function
        """
        room1 = {'length':20,'width':35,'height':20}
        room2 = {'length':10,'width':10,'height':10}
        room3 = {'length':25,'width':35,'height':45}
        ft_room1 = {}
        ft_room1['ft'] = api.calculate_feet(room1)
        ft_room2 = {}
        ft_room2['ft'] = api.calculate_feet(room2)
        ft_room3 = {}
        ft_room3['ft'] = api.calculate_feet(room3)
        self.assertEqual(api.calculate_gallons_required(ft_room1), 6, "total gallons for room-1 (20,35,20) should be 6")
        self.assertEqual(api.calculate_gallons_required(ft_room2), 1, "total gallons for room-2 (20,20,20) should be 1")
        self.assertEqual(api.calculate_gallons_required(ft_room3), 14, "total gallons for room-3 (25,35,45) should be 14")
if __name__ == '__main__':
    # allow running this module directly with `python test_api.py`
    unittest.main()
30954831491 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 19 04:39:33 2018
@author: hyeongyuy
"""
import numpy as np
class visGraph(object):
    """Build a Graphviz DOT description of a fitted decision tree.

    get_graph_tree() nests label strings into a dict-of-dicts mirror of the
    tree; tree_to_graph() then walks that structure recursively, emitting
    numbered node declarations and the edges between them.
    """
    def __init__(self):
        #node num
        self.NODE_NUM = 0
        #leaf node info in tree graph
        self.LEAF_BASE = \
    '[label=\"predict = {}\\nhomogeneity = {}\\ncoverage = {}\\nsamples/class = {}\"] ;'
        self.ROOT_LEAF_BASE = \
    '[label="Root_node\\npredict = {}\\nhomogeneity = {}\\nsamples = {}\\nsamples/class = {}\"] ;'
        self.INTER_NODE_BASE = \
        '[label=\"{} {} {}\\nsamples = {}\\nsamples/class = {}\"] ;'
    def node_info(self, cnt_list, n_data, root = False):
        """Format a leaf (or root-as-leaf) label from the class-count list."""
        base_string = self.ROOT_LEAF_BASE if root else self.LEAF_BASE
        return base_string.format(\
               np.argmax(cnt_list), np.round(max(cnt_list)/sum(cnt_list),3), \
               np.round(sum(cnt_list)/n_data,3), cnt_list)
    def get_graph_tree(self, feature, cut_val, cnt_list, condition_list, sub_tree_list):
        """Wrap the two subtrees under their internal-node label strings."""
        pprint_tree={}
        pprint_tree[self.INTER_NODE_BASE.\
                format(feature, condition_list[0], cut_val, sum(cnt_list), cnt_list)]\
                    = sub_tree_list[0]
        pprint_tree[self.INTER_NODE_BASE.\
                format(feature, condition_list[1], cut_val, sum(cnt_list), cnt_list)]\
                    = sub_tree_list[1]
        return pprint_tree
    def tree_to_graph(self, pprint_tree, node = 'digraph Tree {\nnode [shape=box] ;', \
                      edge ='', node_no=0):
        """Recursively emit DOT node/edge text for *pprint_tree*.

        Numeric splits are keyed by '>=' (with a paired '<' sibling);
        categorical splits by '==' (with a paired '!=' sibling).
        self.NODE_NUM supplies globally unique node ids across the recursion.
        """
        p_node_no = self.NODE_NUM
        if not isinstance(pprint_tree,dict):
            # leaf: its label string was prebuilt by node_info()
            node += '\n{} {}'.format(p_node_no, pprint_tree)
            return node, edge
        if isinstance(pprint_tree,dict):
            key = [k for k in pprint_tree.keys() if k.split()[1] == '>=']
            if len(key) == 1:
                key = key[0]
                node += '\n{} {}'.format(self.NODE_NUM, key)
                self.NODE_NUM += 1
                edge += '\n{} -> {} [labeldistance=2.5, labelangle=45] ;'\
                .format(p_node_no , self.NODE_NUM )
                left_sub_pprint_tree= pprint_tree[key.replace('>=', '<')]
                node, edge = self.tree_to_graph(left_sub_pprint_tree, node, edge)
                self.NODE_NUM += 1
                edge += '\n{} -> {};'.format(p_node_no , self.NODE_NUM)
                right_sub_pprint_tree = pprint_tree[key]
                node, edge = self.tree_to_graph(right_sub_pprint_tree, node, edge)
            else:
                key = [k for k in pprint_tree.keys() if k.split()[1] == '=='][0]
                node += '\n{} {}'.format(p_node_no , key)
                self.NODE_NUM += 1
                edge += '\n{} -> {} [labeldistance=2.5, labelangle=45] ;'\
                .format(p_node_no , self.NODE_NUM)
                left_sub_pprint_tree= pprint_tree[key.replace('==', '!=')]
                node, edge= self.tree_to_graph(left_sub_pprint_tree, node, edge)
                self.NODE_NUM += 1
                right_sub_pprint_tree = pprint_tree[key]
                edge += '\n{} -> {};'.format(p_node_no , self.NODE_NUM)
                node, edge = self.tree_to_graph(right_sub_pprint_tree, node, edge)
        return node, edge
| hyeongyuy/DecisionTree_python | modules/visgraph.py | visgraph.py | py | 3,298 | python | en | code | 1 | github-code | 36 |
5155695157 | from uagents.setup import fund_agent_if_low
from uagents import Agent, Context, Model
class Message(Model):
    """Simple one-field text message exchanged between the example agents."""
    message: str
# Address of the peer agent ("bob") that alice keeps messaging.
RECIPIENT_ADDRESS = "agent1q0lqc50tgunfr8zumuj8744fqd9wl8hmh3akq0ygyzud9cp5yju524d7gcw"

agent = Agent(
    name="alice",
    port=8000,
    seed="agent1 recovery seed phrase",
    endpoint={
        "http://127.0.0.1:8000/submit": {},
    },
)

# Top up the agent's wallet if its balance is below the funding threshold.
fund_agent_if_low(agent.wallet.address())
@agent.on_interval(period=2.0)
async def send_message(ctx: Context):
    # every 2 seconds, greet the recipient agent
    await ctx.send(RECIPIENT_ADDRESS, Message(message="hello there bob"))
@agent.on_message(model=Message)
async def on_message(ctx: Context, sender: str, msg: Message):
    # log every incoming Message with its sender address
    ctx.logger.info(f"Received message from {sender}: {msg.message}")
if __name__ == "__main__":
    agent.run()  # start the agent's event loop (blocks)
| cmaliwal/uAgents | examples/08-remote-agents-registration/agent1.py | agent1.py | py | 775 | python | en | code | null | github-code | 36 |
6031572605 | #this will be the personality quiz part of my program
from tkinter import*
from tkinter import messagebox as mb
# Main application window.
window = Tk()
window.title("Giftlab")
window.geometry("500x500")
# Let the single grid cell stretch with the window so frames fill it.
window.rowconfigure(0, weight = 1)
window.columnconfigure(0,weight = 1)
#creating different frames
picker = Frame(window) #this will pick who the gift is for, as it will change the results
quiz = Frame(window) #this will be the quiz
# Stack both frames in the same grid cell; show_frame() raises the active one.
for frame in (picker, quiz):
    frame.grid(row = 0, column = 0, sticky = "nsew")
def show_frame(frame):
    """Raise *frame* above its sibling so it becomes the visible screen."""
    frame.tkraise()
# Start on the picker screen.
show_frame(picker)
#creating the window frame picker:
#================= Picker Frame ================
#setting the font and backgrounds
font_header = ("Courier", 30)
bg_header = "#BEE3BA"
bg_other = "#DDF2D1"
font_other = ("Garamond", 22)
picker.configure(bg = bg_other)
titlelabel = Label(picker, text = "GIFTLAB", font = font_header, bg = bg_header)
titlelabel.place(x=200,y=10)
# Both choices currently lead to the same quiz frame.
friends_btn = Button(picker, text = "Friends", font = font_other, highlightbackground = bg_header, command = lambda: show_frame(quiz))
friends_btn.place(x=50, y =50)
parents_btn = Button(picker, text = "Parents", font = font_other, highlightbackground = bg_header, command = lambda: show_frame(quiz))
parents_btn.place(x=50, y=100)
quitbtn = Button(picker, text = "Quit", font = font_other, highlightbackground = bg_header, command = window.destroy)
quitbtn.place(x=250, y=450)
# Next step: create the question and answer lists.
# This personality quiz will have three options (a, b, c),
# implemented with radio buttons.
# Three sets of questions will be created.
| naviniii/giftlab | secondcomponent_v1.py | secondcomponent_v1.py | py | 1,614 | python | en | code | 0 | github-code | 36 |
35280214366 | # 숫자와 문자열의 다양한 기능
# Various features of numbers and strings
#
# The string format() function
# - a method provided by string objects
# - used in the form "{}".format(10)
# - the number of {} placeholders must match the number of arguments
String_a = "{}".format(10)
String_b = "{} {}".format(10, 20)
String_c = "{} {} {}".format(10, 20, 30)
print(String_a) # 10
print(String_b) # 10 20
print(String_c) # 10 20 30
print("----------------------")
print(type(String_a)) # str
print(type(String_b)) # str
print(type(String_c)) # str
String_d = "포르쉐 {}".format(911)
print(String_d) # 포르쉐 911
# Additional features of the format() function
output_a = "{:d}".format(52) #52
# print within a field of a given width
output_b = "{:5d}".format(52) #   52
output_c = "{:10d}".format(52) #        52
# fill the empty space with zeros
output_d = "{:05d}".format(52) #00052
output_e = "{:05d}".format(-52) #-0052
# print together with a sign
output_f = "{:+d}".format(52) #+52
output_g = "{:+d}".format(-52) #-52
output_h = "{: d}".format(52) # 52 # the blank is not zero-filled
output_i = "{: d}".format(-52) #-52
# combining the options
output_j = "{:+5d}".format(52) #  +52
output_k = "{:+5d}".format(-52) #  -52
output_l = "{:=+5d}".format(52) #+  52
output_m = "{:=-5d}".format(-52) #-  52
output_n = "{:+05d}".format(52) #+0052
output_o = "{:-05d}".format(-52) #-0052
# various forms of floating-point output
output_1 = "{:f}".format(52.273) #52.273000 # analogous to the integer form
output_2 = "{:15.3f}".format(52.273) # ... 52.273
output_3 = "{:15.2f}".format(52.273) # ... 52.27
output_4 = "{:15.1f}".format(52.273) # ... 52.2
# removing a meaningless trailing floating point (.0)
output_g1 = 52.0
output_g2 = "{:g}".format(output_g1)
print(output_g1) # 52.0
print(output_g2) # 52
| juneglee/Deep_Learning | python-basic/chapter02/ex04_1.py | ex04_1.py | py | 1,803 | python | ko | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.