# content stringlengths 0 1.05M | origin stringclasses 2
# values | type stringclasses 2
# values |
# |---|---|---|
import argparse
import cv2
from glob import glob
from itertools import product
import numpy as np
import os
from tqdm import tqdm
from scipy.special import erf
import torch
import torch.nn as nn
from model.model import CompModel
import arithmetic_coding as ac
MAX_N = 65536
TINY = 1e-10
def load_img(path):
    """Load an image as a normalized RGB tensor, zero-padded to multiples of 16.

    Args:
        path: Path to an image file readable by OpenCV.

    Returns:
        (img_, pad_h, pad_w): a (1, 3, H', W') float tensor scaled to [-1, 1]
        with zero padding on the bottom/right, plus the padding amounts.

    Raises:
        FileNotFoundError: if the image cannot be read.
    """
    img = cv2.imread(path)
    if img is None:
        # cv2.imread silently returns None on a missing/corrupt file; the
        # original then crashed with an opaque AttributeError on .astype.
        raise FileNotFoundError("Could not read image: {}".format(path))
    img = img.astype(np.float32)[..., ::-1]  # BGR -> RGB
    img = ((img / 255.) - 0.5) * 2.  # scale to [-1, 1]
    img = torch.from_numpy(img.transpose((2, 0, 1))).unsqueeze(0)
    _, _, h, w = img.size()
    h_, w_ = h, w
    # Round each dimension up to the next multiple of 16 (model stride).
    if h % 16 != 0:
        h_ = (h // 16 + 1) * 16
    if w % 16 != 0:
        w_ = (w // 16 + 1) * 16
    img_ = torch.zeros((1, 3, h_, w_))
    img_[:, :, :h, :w] = img
    return img_, h_ - h, w_ - w
def load_model(args):
    """Build a CPU CompModel and restore its weights from args.model_path."""
    args.device = 'cpu'
    model = CompModel(args)
    checkpoint = torch.load(args.model_path, map_location='cpu')
    model.load_state_dict(checkpoint['comp_model'])
    model.eval()
    return model
def compress(args):
    """Arithmetic-code every image under args.image_path into outputs/binary.

    For each image: run the model, fit a GMM over each latent, and encode the
    quantized latents symbol by symbol. Side information (image size, padding,
    latent offset) is written as a small fixed header before the bitstream.
    """
    comp_model = load_model(args)
    os.makedirs('outputs/binary', exist_ok=True)
    if os.path.isdir(args.image_path):
        pathes = glob(os.path.join(args.image_path, '*'))
    else:
        pathes = [args.image_path]
    for path in pathes:
        bitpath = "outputs/binary/{}.pth".format(os.path.basename(path).split('.')[0])
        img, pad_h, pad_w = load_img(path)
        _, _, H, W = img.size()
        with torch.no_grad():
            y_hat, p = comp_model.compress(img)
        _, yC, yH, yW = y_hat.size()
        # Offset so symbols are non-negative: y_hat + min_val lies in [0, 2*min_val].
        min_val = int(torch.max(torch.abs(y_hat)))
        p = p.detach().numpy()
        p = np.reshape(p, (1, args.gmm_K, args.bottleneck*3, yH, yW))
        y_mu = p[:, :, :args.bottleneck, :, :] + min_val
        y_std = np.abs(p[:, :, args.bottleneck:2*args.bottleneck, :, :])
        y_w = p[:, :, 2*args.bottleneck:, :, :]
        y_w = np.exp(y_w) / np.sum(np.exp(y_w), axis=1)  # softmax over the K mixtures
        # store side information (context-managed so the header is always closed;
        # the original left the handle dangling on an exception)
        with open(bitpath, mode='wb') as fileobj:
            img_size = np.array([W, H], dtype=np.uint16)
            img_size.tofile(fileobj)
            pad_size = np.array([pad_w, pad_h], dtype=np.uint8)
            pad_size.tofile(fileobj)
            min_value = np.array([min_val], dtype=np.uint8)
            min_value.tofile(fileobj)
        print('=============================================================')
        print(os.path.basename(path))
        with open(bitpath, 'ab+') as fout:
            bit_out = ac.CountingBitOutputStream(
                bit_out=ac.BitOutputStream(fout))
            enc = ac.ArithmeticEncoder(bit_out)
            # Candidate symbol values, one per row, for the CDF evaluation.
            samples = np.arange(0, min_val*2+1).reshape(-1, 1)
            with tqdm(product(range(yH), range(yW)), ncols=60, total=yH*yW) as qbar:
                for h, w in qbar:
                    for ch in range(yC):
                        weight = y_w[:, :, ch, h, w]
                        mean = y_mu[:, :, ch, h, w]
                        std = y_std[:, :, ch, h, w]
                        # Per-symbol probability mass from the Gaussian-mixture CDF.
                        high = weight * 0.5 * (1 + erf((samples + 0.5 - mean) / ((std + TINY) * 2 ** 0.5)))
                        low = weight * 0.5 * (1 + erf((samples - 0.5 - mean) / ((std + TINY) * 2 ** 0.5)))
                        pmf = np.sum(high - low, axis=1)
                        pmf_clip = np.clip(pmf, 1.0/MAX_N, 1.0)
                        pmf_clip = np.round(pmf_clip / np.sum(pmf_clip) * MAX_N).astype(np.uint32)
                        # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
                        symbol = int(y_hat[0, ch, h, w].item() + min_val)
                        freq = ac.SimpleFrequencyTable(pmf_clip)
                        enc.write(freq, symbol)
            enc.finish()
            bit_out.close()
        real_bpp = os.path.getsize(bitpath) * 8
        print('bitrate : {0:.4}bpp'.format(real_bpp / H / W))
        print('=============================================================\n')
if __name__ == "__main__":
    # CLI: positional model checkpoint and image (file or directory),
    # plus the model hyperparameters needed to rebuild CompModel.
    parser = argparse.ArgumentParser()
    parser.add_argument('model_path')
    parser.add_argument('image_path')
    parser.add_argument('--bottleneck', type=int, default=32)
    parser.add_argument('--main_channel', type=int, default=192)
    parser.add_argument('--gmm_K', type=int, default=3)
    args = parser.parse_args()
    # The original final line had dataset-export residue fused onto it
    # ("| nilq/baby-python | python"), which is a syntax error; removed.
    compress(args)
# This code is generated by [Atcoder_base64](https://github.com/kyomukyomupurin/AtCoder_base64)
from base64 import b85decode
import subprocess
from pathlib import Path
from zlib import decompress
binary = "c%0=~Z){uD6~DIg$7xf?EiD9ERs&&bS7PF}DJfls?ZoXfIB96o(v?4#7u$)w`A=p)mxLi^skEwB(nV7@`hiJB>;pr5fN4VefV!Xz#-wcuQ>P)pq>fQ7>Bh)3sImd^&VBEm=l8DPqZ`_{-N-rb{N6d|-h1xfbKbWivAs@*gJ5xyuMoyP+@kS8j-T78ivZR`THybUWD{uuzQG_T(q)I4y7hWAy&k8=i*yq)Q-^^z68lnaHB&--x*lt5YK?}b*7!{HIbQFQ%dF?*dS$Lx=JH4h*F%c^Yv8{Tj*GOPm}vv2Lzk2Ud;V~h#)~vY%oO$R1-&NnpBFEu2Y7vUemTqAixkJ<_Dp(o*UsBBiOx(qSDNgc?Ag`1Yp1`M_itC*<mIya1`n$8%&q7u_I^Fb*!!i(*iFwXr(dqV``Wh19Vh2MKKc9!ChH-!aU-M)o*Pt*mBR7Z#tmm%`|SF64u0z_@E91p*#iHyjr?5>Yy52+`4?>PlQ#G@Hug6YoPp+Yzm2@tMt+YCK4H@yHd;4>mxuU#!{x9I>Rn6PNzkiV^WsE+c~Ddm@on4K*zY2gj%V{ZT2$nMLMfrqkpY@W7Lw!XqLM6(4D@C4x#WmEnn`N!tlu0Nyk}VHjw@3W$!I*%lgQIY(urg)5@)Z5m7a9INJr&ja%VJ`R{8^xqks=9fnAg;sY3n{c$c7Yxu_gf<?}lxN{XgyG#l7K2Q<4h4Q3)-xO*&9DyF!}Ofd>}1M!qxph`hbD@9O@8~|(w)Sr&id{HSR<!m%QLL<Z7bm$(-CiY<ycgFK2h50Ow?ut*!R92FcX@xUm>71NNKMG!;Opd!UE2nd0Uo0B#quc%4{oUI0;lp%?f2ThXFtUWLG#CFhz|X0VqZ8JUwhA5a*X^jyo6-&G7ABSD<LXdlYx{(}{FIZe+$pZ3Z2hjA**nSQalgR*FUaT5gn)PPb7fM%372<!%{cP&Z5nMNwS9z@w+wiT0l#R#R~hgH1HRgTFB$Mw1HNp)*BEeI2kb0rGvH^rd^4%-m8_gI;4TCHoB?kz;4d0*<9YW}18&^!XAO9>0e{1QyA8N>?5}R=RO6*)ACZpFD$eS>bnNHuIa2K|093tIf&XorgYd-o6jNEeScP)y*BH;bwm7fyql{-mvN)^qM;On#xcH*VA7DJ|+TuBtznk%_ON%oqA7(u3%3@jN?*x8q2HP1Lk*2%%fO%=UcLv@ZWDQ^VJ_t&c*Q8TKIu%@qo%&(p637l5e@{6aDnHiILlj@A?Yz>_CsjUJ*sTixVj;vRbTTmeVCaFlSmn<P6p-kthK}=WhDg&J*k%s~yQJOo%4;eJQq}pk^Gknuc_9qKV>7DXrhBh-`^Z9&89dedQWN+P?1F)p+2eHgGBkAY-4!Muct2Ws^C7A7u5|3ZrJ<47bmI|L$@H2(sa5uFVtw9l2=qQ|+uW~0Q>u~10wcSZl?^aV{w8ghTB;Y@HkX-S<~RlUs8(s1k*zCIWl4JVZ+A(rF1sYh>(ZMm%6c%+$PKuw7su3kP`}(8g~pQ7?cmtn^NsK*RW2#5(5=hBEM%b*3n^&g>y0;ooa4bcq5j2hGPn5KHb0>b*Gy$;uEw=(CZYR6hoY6=hYp6I@3&rYvJsr_Tmd>9s(c!)T#0SHr26#LPhAV+pMpb=zpwZLZ=;D=<zKPNmHx^Hq3Zf~q+@dqY4;yWmzlfohfxeY7<wo~=VoeE{$q~MZFM$l`<Rr>Wb(K9?#mZ4iJKU<(X~h7>tPdAc?RgyKwkm+0?-AZUxtpahmpzvO#uG@bn^nx4A9#Ee+Q`I(C#s;I37Jf9Ftzh#@1%{jHB7haP{sA`r8ca&RU+34lZB*q*_%S>+$aOwBOyf`Vn`T+;zjAFYV~K8THwn@hy-uu4lF<Kfv^%KHOin5bd0Q1!PY%Anfrz>D=#WKjG^4_#R*7Y47uR!yb2NwPyEe`1zp!ekR-R@!scgt9AVZem!6}$!&bc8TPcFc11kC>4vap%ae_gr|U$M<moxqJm3iyJUt;#SIDy^?D2tISgk^B
-zV^U80;AL@Bf#A=OLbdcqkd;8YAuI4|o<W^GzMkpbK38dd}ndi(wj`+c$8E=PkxlICnl;spc8Q`#qjDL4FwF88hoNHJhB_?ZY#)ozo4(TyQRy`8kAR{~V{d9`MY%j?<xLGcGlm+wJD{qdT$wFA%fb$5pL=4i2qwewlZ)%6V+p66Y`Q_y6R)@ofHI7Cnv>$`^SDc5%9w(?gt&ae9=~6UNQpa}>O1?d$8i)3@c|Xep<Ze1R_i4u4l?pro>a#{)h7uAPquw(=KOv*`k__gE};b}t>V#9c(O#2fVaX~rA%d1=O*^!aPXoAq%u<8FOEnDG{U-kI@L`usHGtMzqi##@QkVtib%nrAF=4>@Ovx9RKNjC;vNOMI=KH!$PtNN}|kevQ75nDO=c{$a*9Sk7M;xmI8AW*k<x*9x!2m1g|<I*cv*2J-i+h_ijb34X)<)z9%8Rh@Qn?^>U(&&q8!cJAPIW?IbnZjR3ic#Pvj@bf6IcSxu=r#{#F^LyM*P_XliDqlY?cI}^4<=2tNTE#ck^1o64*Uytbs&?wf_p<u`n!5IX%yF@u9tX2iKOZ`5@)*5pe0d%53iIJfCyP_-=TE<ym#EEuT=v5@c^-C7tTU{y(;Uw;JI?F$eGtoIHtk$-GQ560KIO2^pZwUy&WuxQ59V9(uJ|h(`QNJY_4~|Pr`CSaZ#O!v?F`!BMW=P1=i5$gTtxq0VKN|pc&!<K%BkUEd!Dta_YL#|_gj+VZ#k{=S08fuc`mQ#7G2i%Z*f`M*=~c!Z0r!0i1Ekcgzk&&4~Jru7E7Zvs^w=`h9}lTX*$mmN+e!Tib`p0%pWJU6c|;qG|p08MM7yJPscO)Q8_~sO1@B}a%qyp^Vx|^Qb{J@fVGrh8KN|m3k7+KCUZ(*ii{QHY?3BQ+3Xak7&tYtWfG-y?}5-jghmGYS$dA{8$3uO5(lOJ1BCW}bucs#?SpqF<%Ahfvr@Vf_V3+092ub_p>QlhjXAeq(3qH_$%L%Pghuy+ej=TtrD8H6q`kNX&vJnSig}una|taiSWC|dmOmq7Xh^XM#y?+zPpFxW4D_KpuaaOiX|k-HP1aKE8_yex6*FhzK+w@*5udH|kD|7kT#Oe>YTG_LQCpK`Q?EWTDUMlv3fL~qYn972H<b8`Q&~kG1*#M@n!=}a4oprEe=e^i{h@HQQ<29xH=Zl`M@#8UqBET!DwmRrDdJB|<-ndsm4f!>XtGdD=W`~8!n;B;BQrt%G?7tQmEbJ)=pWC+1I#M;uX@E_$g8v4pG@&3m`W6CoTjR+9!&$Ep$0jdj>Chh4g*R2unA;g!CGxV|6O76Alt%mf3*9~hQ%T9w-7A`Lw)f(mQRouuiXJbAMf`_JFcS7@())a219-Ed(;+=<Mmy<HV+alMn!#$7m${DhuJmXVbI5T@(?k9yTZ5v=^4RcqW>NuT8xSM7=Ix33GEj?Cz7udEoMa?;}oPrq}F~Xsqg=zMDr=?V?3kB(++d}V*j7u`eDvv+=CS3c&2}?g+9Aq)xP1?@)$@l-p3-2e?`#8I0|W5koOAojG&M47E;k)Zh`(l(8qft((OY0={2+DSwSD~i%5lVqOSixBIfmr_gtjn9YxfCNzk|Zo@}T8GeIBYMx6>1Ui9ZLiS`{A@4>b2%Nj5GOZ5Nmi1vL2^)c?4Tw`6IsL$e<CWC(M`#lGE$C?B^Y}Z+$^<T_mmH9`X@>S};Ea>a^XRo;)Q6I|^$53CtmvhB-!T&))|07<%n5;qlP64;dM_T1;gIs@!J8mPlC9eM#L)(p|i!c0d2rtF`bFGd3Gd!`mBH*I_zX5t8jN<"
# SECURITY NOTE(review): the lines below materialize an embedded, opaque
# binary blob to disk, mark it executable, and run it. The blob cannot be
# audited from this file; treat it as untrusted code.
Path("077b5b43ed6ae0f2ad2b26d4f6fb1be45713b723ae814448a294f8d77118b1e9.bin").write_bytes(decompress(b85decode(binary)))
Path("077b5b43ed6ae0f2ad2b26d4f6fb1be45713b723ae814448a294f8d77118b1e9.bin").chmod(0o755)  # make executable
subprocess.run("./077b5b43ed6ae0f2ad2b26d4f6fb1be45713b723ae814448a294f8d77118b1e9.bin")
# Original source code:
"""
#include <iostream>
using namespace std;
int main() {
cout << "Hello, World!" << endl;
return 0;
}
"""
# | nilq/baby-python | python |
from django.db import models
from django.contrib.auth.models import User
class Duck(models.Model):
    """A duck product with an optional owning user."""

    color = models.CharField(max_length=30, default='yellow')
    model = models.CharField(max_length=30)
    price = models.FloatField(default=0)
    # on_delete is a required argument on Django >= 2.0; CASCADE matches the
    # implicit default of the Django versions that allowed omitting it.
    owner = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
# | nilq/baby-python | python |
from polyphony import testbench
def if29(p0, p1, p2):
    """Branch-heavy test subject: only p0 == 1 produces a non-zero result.

    Returns 10 for (1, 0, 0), -1 for p0 == 1 with p1 outside {0, 1, 2},
    and 0 in every other case.
    """
    if p0 != 1:
        return 0
    if p1 == 0:
        return 10 if p2 == 0 else 0
    if p1 in (1, 2):
        return 0
    return -1
@testbench
def test():
    """Drive if29 through one input per reachable branch."""
    assert if29(0, 0, 0) == 0
    assert if29(1, 0, 0) == 10
    assert if29(1, 0, 1) == 0
    assert if29(1, 1, 0) == 0
    assert if29(1, 2, 0) == 0
    assert if29(1, 3, 0) == -1
    assert if29(2, 3, 0) == 0
test()
# | nilq/baby-python | python |
import json
from django.core.management.base import BaseCommand
from ...models import Item
# BaseCommandを継承して作成
# Management command (invoked as `python manage.py import_item`), built by
# subclassing BaseCommand.
class Command(BaseCommand):
    # Message shown by `python manage.py help import_item`.
    help = 'Create Item from json file'

    def remove_null(self, value, default):
        """Return `default` when `value` is None, otherwise `value` unchanged."""
        if value is None:
            return default
        return value

    # Called when the command is executed.
    def handle(self, *args, **options):
        """Load `web.json` and create one Item row per record with a number."""
        # Open the file with an explicit encoding so the import does not
        # depend on the platform's locale default (original omitted it).
        with open('web.json', 'r', encoding='utf-8') as file:
            # Parse the JSON payload.
            data = json.load(file)
        count = 0
        # Process the fetched records one at a time.
        for item_obj in data:
            # Records without a set number are skipped.
            if not item_obj['number']:
                continue
            # Build and save the Item, defaulting any null fields.
            item = Item()
            item.set_number = item_obj['number']
            item.name = self.remove_null(item_obj['name'], '')
            item.image_url = self.remove_null(item_obj['image'], '')
            item.rate = self.remove_null(item_obj['rating'], 0.0)
            item.piece_count = self.remove_null(item_obj['pieces'], 0)
            item.minifig_count = self.remove_null(item_obj['minifigs'], 0)
            item.us_price = self.remove_null(item_obj['us_price'], 0.0)
            item.want_it_count = self.remove_null(item_obj['want_it'], 0)
            item.owner_count = self.remove_null(item_obj['owner'], 0)
            item.save()
            count += 1
            print('Create Item: {0}: {1}'.format(item.id, item.name))
        print('{} items have been created.'.format(count))
# | nilq/baby-python | python |
#!/usr/bin/python
# encoding: utf-8
from helper import *
import cv2
import numpy as np
import os
import pickle
# https://klassenresearch.orbs.com/Plotting+with+Python
#import matplotlib.rc
# Make use of TeX
#rc('text',usetex=True)
# Change all fonts to 'Computer Modern'
#rc('font',**{'family':'serif','serif':['Computer Modern']})
# Run identifier (not referenced elsewhere in this file).
fileName = "1"
# Source video for the color-tracking session.
cap = cv2.VideoCapture("danu1.mp4")
# dataLog = pickle.load( open( "cb.p", "rb" ) )
# dataLog2 = pickle.load( open( "cb.p", "rb" ) )
# Per-frame log; the placeholder strings are replaced with lists before
# the main loop starts (see the assignments below the window setup).
dataLog = {
    'videoTimestamp' : '',
    'pos' : ''
}
def nothing(x):
    """No-op callback; cv2.createTrackbar requires a callable."""
    return None
# Window hosting all of the HSV threshold sliders.
cv2.namedWindow('Trackbar')
#cap.set(3,320);
#cap.set(4,240);
# ilowH = 20
# ilowS = 110
# ilowV = 130
# ihighH = 48
# ihighS = 176
# ihighV = 255
# Initial HSV threshold bounds ("bawah" = lower, "atas" = upper in Indonesian).
H_bawah = 20
H_atas = 48
S_bawah = 110
S_atas = 176
V_bawah = 130
V_atas = 255
# Minimum bounding-box area (w*h) before a detection rectangle is drawn.
ukuran = 0
# One slider per threshold; `nothing` is the mandatory no-op callback.
cv2.createTrackbar('H_bawah','Trackbar',H_bawah,255,nothing)
cv2.createTrackbar('H_atas','Trackbar',H_atas,255,nothing)
cv2.createTrackbar('S_bawah','Trackbar',S_bawah,255,nothing)
cv2.createTrackbar('S_atas','Trackbar',S_atas,255,nothing)
cv2.createTrackbar('V_bawah','Trackbar',V_bawah,255,nothing)
cv2.createTrackbar('V_atas','Trackbar',V_atas,255,nothing)
cv2.createTrackbar('ukuran','Trackbar',ukuran,255,nothing)
def my_mouse_callback(event,x,y,flags,param):
    """Mouse handler for the "frame" window.

    Left-click: print the HSV value under the cursor and re-center the
    trackbar thresholds on it. Right-click: pause for ~2 seconds.
    """
    global hsv
    if event == cv2.EVENT_LBUTTONUP:
        print("warna:")  # Indonesian for "color:"
        print(hsv[y,x])
        # Center a +/-25 hue window on the clicked pixel.
        # NOTE(review): hsv[y,x][0] is a uint8 sample, so -25/+25 near the
        # range edges may wrap or go out of the 0..255 slider range — confirm.
        cv2.setTrackbarPos('H_bawah', 'Trackbar', hsv[y,x][0]-25)
        cv2.setTrackbarPos('H_atas', 'Trackbar', hsv[y,x][0]+25)
        cv2.setTrackbarPos('S_bawah', 'Trackbar', hsv[y,x][1])
        cv2.setTrackbarPos('V_bawah', 'Trackbar', hsv[y,x][2])
    if event == cv2.EVENT_RBUTTONUP:
        cv2.waitKey(2000)
# Main display window; clicks are routed to my_mouse_callback.
cv2.namedWindow("frame")
cv2.setMouseCallback("frame",my_mouse_callback)
tr2 = 0  # not referenced elsewhere in this file
# Replace the placeholder values with the lists the main loop appends to.
dataLog['videoTimestamp'] = []
dataLog['pos'] = []
first = True  # not referenced elsewhere in this file
# Frame loop: threshold each frame in HSV, box the matching region, and log
# its scaled position; exits when the video ends or Esc is pressed.
while True:
    # Position in the video, converted from ms to seconds.
    elapsedTime = cap.get(cv2.CAP_PROP_POS_MSEC)/1000.
    # NOTE(review): two reads per iteration mean frame2 is the *next* frame,
    # so the masked preview lags the detection frame by one — confirm intended.
    _, frame = cap.read()
    _, frame2 = cap.read()
    try :
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    except cv2.error:
        # cvtColor raises once the stream is exhausted (frame is None).
        break
    # Re-read every threshold each frame so slider changes apply live.
    H_bawah = cv2.getTrackbarPos('H_bawah','Trackbar')
    S_bawah = cv2.getTrackbarPos('S_bawah','Trackbar')
    V_bawah = cv2.getTrackbarPos('V_bawah','Trackbar')
    H_atas = cv2.getTrackbarPos('H_atas','Trackbar')
    S_atas = cv2.getTrackbarPos('S_atas','Trackbar')
    V_atas = cv2.getTrackbarPos('V_atas','Trackbar')
    ukuran = cv2.getTrackbarPos('ukuran','Trackbar')
    batas_atas = np.array([H_atas,S_atas,V_atas])
    batas_bawah = np.array([H_bawah,S_bawah,V_bawah])
    # Binary mask of pixels inside the HSV window.
    mask = cv2.inRange(hsv, batas_bawah, batas_atas)
    kernel = np.ones((10,10), np.uint8)
    # NOTE(review): named "dilasi" (dilation) but both calls are cv2.erode —
    # possibly a leftover rename or a bug; confirm the intended morphology.
    hasil_dilasi = cv2.erode(mask, kernel)
    kernel2 = np.ones((10,10), np.uint8)
    hasil_erosi = cv2.erode(hasil_dilasi, kernel2)
    # Bounding box of the remaining non-zero region.
    x, y, w, h = cv2.boundingRect(hasil_erosi)
    #print(x,y)
    if w*h>ukuran:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 1)
    try :
        res = cv2.bitwise_and(frame2,frame2, mask= hasil_dilasi)
    except cv2.error:
        break
    frame = cv2.resize(frame, (940,640))
    cv2.imshow('frame',frame)
    mask = cv2.resize(mask, (940,640))
    cv2.imshow('mask',mask)
    res = cv2.resize(res, (940,640))
    cv2.imshow('res',res)
    dataLog['videoTimestamp'].append(elapsedTime)
    titik_lantai = 1308  # floor reference (pixels); not used below
    skala_jarak = 7  # computed as: measured real distance / measured pixel distance
    hh = (y)/skala_jarak
    hh = int(hh)
    hi = (x)/skala_jarak
    hi= int(hi)
    dataLog['pos'].append(( hi, hh ))
    # Esc (27) exits the loop.
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break
# Tear down the UI and persist the collected tracking log.
cv2.destroyAllWindows()
cap.release()
# Use a context manager so the log file is flushed and closed deterministically
# (the original passed an anonymous open() handle straight to pickle.dump,
# leaking the file object).
with open("cbjd.p", "wb") as log_file:
    pickle.dump(dataLog, log_file)
#pickle.dump( dataLog2, open( "jadi2.p", "wb" ) )
# | nilq/baby-python | python |
"""
Developed by ThaumicMekanism [Stephan K.] - all credit goes to him!
"""
import contextlib
import sys
from typing import Callable, List
from tqdm.contrib import DummyTqdmFile
import examtool.api.download
from examtool.api.gradescope_upload import APIClient
from examtool.api.extract_questions import (
extract_groups,
extract_questions,
extract_public,
)
from fullGSapi.api.client import GradescopeClient
from fullGSapi.api.assignment_grader import (
GS_Crop_info,
GS_Outline,
GS_assignment_Grader,
GS_Outline_Question,
GS_Question,
GroupTypes,
RubricItem,
QuestionRubric,
)
import os
import time
from tqdm import tqdm
def_tqdm_args = {"dynamic_ncols": True}
@contextlib.contextmanager
def std_out_err_redirect_tqdm():
    """Temporarily route stdout/stderr through tqdm-aware writers.

    Yields the original stdout so tqdm bars can still target the real stream
    while ordinary prints go through DummyTqdmFile and don't break the bars.
    """
    orig_out_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = map(DummyTqdmFile, orig_out_err)
        yield orig_out_err[0]
    finally:
        # The original also had `except Exception as exc: raise exc`, which
        # was redundant (finally already restores on any exit path) and only
        # added a misleading frame to tracebacks; it has been removed.
        sys.stdout, sys.stderr = orig_out_err
class GradescopeGrader:
    """Drives the upload/group/grade workflow for an examtool exam on
    Gradescope, using both the scraped client and the official API client."""

    def __init__(
        self,
        email: str = None,
        password: str = None,
        gs_client: GradescopeClient = None,
        gs_api_client: APIClient = None,
    ):
        """Create (if needed) and authenticate both Gradescope clients.

        Raises:
            ValueError: when credentials are missing and a supplied client is
                not already logged in.
        """
        print(f"Setting up the Gradescope Grader...")
        if gs_client is None:
            gs_client = GradescopeClient()
        if gs_api_client is None:
            gs_api_client = APIClient()
        # Either full credentials or two pre-authenticated clients must be given.
        if (not email or not password) and (
            not gs_client.is_logged_in() or not gs_api_client.is_logged_in()
        ):
            raise ValueError(
                "You must supply the username and password if you are not already logged into the passed in clients!"
            )
        self.gs_client = gs_client
        self.gs_api_client = gs_api_client
        if email and password:
            # Only log in the clients that are not authenticated yet.
            if not gs_client.is_logged_in():
                print(f"Logging into the normal Gradescope API...")
                self.gs_client.log_in(email, password)
            if not self.gs_api_client.is_logged_in():
                print(f"Logging into the full Gradescope API...")
                self.gs_api_client.log_in(email, password)
        print(f"Finished setting up the Gradescope Grader")
    def main(
        self,
        exams: [str],
        out: str,
        name_question_id: str,
        sid_question_id: str,
        gs_class_id: str,
        gs_assignment_id: str = None,  # If none, we will create a class.
        gs_assignment_title: str = "Examtool Exam",
        emails: [str] = None,
        blacklist_emails: [str] = None,
        email_mutation_list: {str: str} = {},
        question_numbers: [str] = None,
        blacklist_question_numbers: [str] = None,
        custom_grouper_map: {
            str: Callable[[str, GS_Question, dict, dict], "QuestionGrouper"]
        } = None,
    ):
        """End-to-end pipeline: export exam data, create/reuse a Gradescope
        assignment, upload the outline and every student PDF, then group and
        grade each question.

        `emails` restricts the import; `blacklist_emails` drops students;
        `email_mutation_list` maps exam emails to Gradescope emails;
        `question_numbers`/`blacklist_question_numbers` filter which
        questions get grouped and graded.
        """
        if gs_assignment_title is None:
            gs_assignment_title = "Examtool Exam"
        if not exams:
            raise ValueError(
                "You must specify at least one exam you would like to upload!"
            )
        out = out or "out/export/" + exams[0]
        exam_json, email_to_data_map = self.fetch_and_export_examtool_exam_data(
            exams,
            out,
            name_question_id,
            sid_question_id,
            emails=emails,
            email_mutation_list=email_mutation_list,
        )
        # Remove blacklisted emails
        if blacklist_emails is not None:
            for bemail in blacklist_emails:
                email_to_data_map.pop(bemail, None)
        # Create assignment if one is not already created.
        if gs_assignment_id is None:
            print("Creating the gradescope assignment...")
            outline_path = f"{out}/OUTLINE.pdf"
            gs_assignment_id = self.create_assignment(
                gs_class_id, gs_assignment_title, outline_path
            )
            if not gs_assignment_id:
                raise ValueError(
                    "Did not receive a valid assignment id. Did assignment creation fail?"
                )
            print(f"Created gradescope assignment with id {gs_assignment_id}!")
        else:
            print(f"Using assignment ({gs_assignment_id}) which was already created!")
        # Lets now get the assignment grader
        grader: GS_assignment_Grader = self.get_assignment_grader(
            gs_class_id, gs_assignment_id
        )
        # Now that we have the assignment and outline pdf, lets generate the outline.
        print("Generating the examtool outline...")
        examtool_outline = ExamtoolOutline(
            grader, exam_json, [name_question_id, sid_question_id]
        )
        # Finally we need to upload and sync the outline.
        print("Uploading the generated outline...")
        self.upload_outline(grader, examtool_outline)
        # We can now upload the student submission since we have an outline
        print("Uploading student submissions...")
        failed_uploads = self.upload_student_submissions(
            out, gs_class_id, gs_assignment_id, emails=email_to_data_map.keys()
        )
        # Removing emails which failed to upload
        if failed_uploads:
            print(
                f"Removing emails which failed to upload. Note: These will NOT be graded! {failed_uploads}"
            )
            for email in tqdm(failed_uploads, **def_tqdm_args):
                email_to_data_map.pop(email)
        # For each question, group, add rubric and grade
        print("Setting the grade type for grouping for each question...")
        gs_outline = examtool_outline.get_gs_outline()
        self.set_group_types(gs_outline)
        # Fetch the student email to question id map
        print("Fetching the student email to submission id's mapping...")
        email_to_question_sub_id = grader.email_to_qids()
        # Check to see which emails may not be in the Gradescope roster and attempt to correct
        self.attempt_fix_unknown_gs_email(
            email_to_question_sub_id,
            email_to_data_map,
            name_question_id=name_question_id,
            sid_question_id=sid_question_id,
        )
        # Finally we can process each question
        print("Grouping and grading questions...")
        for qid, question in tqdm(
            list(gs_outline.questions_iterator()),
            desc="Questions Graded",
            unit="Question",
            **def_tqdm_args,
        ):
            # Skip anything filtered out by the whitelist/blacklist.
            if (
                question_numbers is not None
                and qid not in question_numbers
                or blacklist_question_numbers is not None
                and qid in blacklist_question_numbers
            ):
                tqdm.write(f"[{qid}]: Skipping!")
                continue
            tqdm.write(f"[{qid}]: Processing question...")
            # One question failing should not abort the rest of the run.
            try:
                self.process_question(
                    qid,
                    question.get_gs_question(),
                    email_to_data_map,
                    email_to_question_sub_id,
                    name_question_id,
                    sid_question_id,
                    custom_grouper_map,
                )
            except Exception as e:
                import traceback
                traceback.print_exc(file=tqdm)
                tqdm.write(str(e))
    def add_additional_exams(
        self,
        exams: [str],
        out: str,
        name_question_id: str,
        sid_question_id: str,
        gs_class_id: str,
        gs_assignment_id: str,
        emails: [str] = None,
        blacklist_emails: [str] = None,
        email_mutation_list: {str: str} = {},
        question_numbers: [str] = None,
        blacklist_question_numbers: [str] = None,
        custom_grouper_map: {
            str: Callable[[str, GS_Question, dict, dict], "QuestionGrouper"]
        } = None,
    ):
        """
        Add submissions from further exams to an *existing* assignment
        (unlike `main`, no assignment or outline is created here; the
        current outline is fetched and merged instead).

        If emails is None, we will import the entire exam, if it has emails in it, it will only upload submissions
        from the students in the emails list contained in the exams list. If the student has submissions in multiple exams,
        the tool will warn you and ask which exam you would like to use as the student submission.
        """
        if not exams:
            raise ValueError(
                "You must specify at least one exam you would like to upload!"
            )
        if email_mutation_list is None:
            email_mutation_list = {}
        out = out or "out/export/" + exams[0]
        exam_json, email_to_data_map = self.fetch_and_export_examtool_exam_data(
            exams,
            out,
            name_question_id,
            sid_question_id,
            emails=emails,
            email_mutation_list=email_mutation_list,
        )
        # Remove blacklisted emails
        if blacklist_emails is not None:
            for bemail in blacklist_emails:
                email_to_data_map.pop(bemail, None)
        # Lets now get the assignment grader
        grader: GS_assignment_Grader = self.get_assignment_grader(
            gs_class_id, gs_assignment_id
        )
        # Now that we have the assignment and outline pdf, lets generate the outline.
        print("Generating the examtool outline...")
        examtool_outline = ExamtoolOutline(
            grader, exam_json, [name_question_id, sid_question_id]
        )
        # Merge the outline with the existing one
        outline = grader.get_outline()
        if not outline:
            raise ValueError("Failed to fetch the existing outline")
        examtool_outline.merge_gs_outline_ids(outline)
        # We can now upload the student submission since we have an outline
        print("Uploading student submissions...")
        failed_uploads = self.upload_student_submissions(
            out, gs_class_id, gs_assignment_id, emails=email_to_data_map.keys()
        )
        # Removing emails which failed to upload
        if failed_uploads:
            print(
                f"Removing emails which failed to upload. Note: These will NOT be graded! {failed_uploads}"
            )
            for email in failed_uploads:
                email_to_data_map.pop(email)
        # Fetch the student email to question id map
        print("Fetching the student email to submission id's mapping...")
        email_to_question_sub_id = grader.email_to_qids()
        # Check to see which emails may not be in the Gradescope roster and attempt to correct
        self.attempt_fix_unknown_gs_email(
            email_to_question_sub_id,
            email_to_data_map,
            name_question_id=name_question_id,
            sid_question_id=sid_question_id,
        )
        # Finally we can process each question
        print("Grouping and grading questions...")
        gs_outline = examtool_outline.get_gs_outline()
        for qid, question in tqdm(
            list(gs_outline.questions_iterator()),
            desc="Questions Graded",
            unit="Question",
            **def_tqdm_args,
        ):
            # Skip anything filtered out by the whitelist/blacklist.
            if (
                question_numbers is not None
                and qid not in question_numbers
                or blacklist_question_numbers is not None
                and qid in blacklist_question_numbers
            ):
                tqdm.write(f"[{qid}]: Skipping!")
                continue
            tqdm.write(f"[{qid}]: Processing question...")
            # One question failing should not abort the rest of the run.
            try:
                self.process_question(
                    qid,
                    question.get_gs_question(),
                    email_to_data_map,
                    email_to_question_sub_id,
                    name_question_id,
                    sid_question_id,
                    custom_grouper_map,
                )
            except Exception as e:
                import traceback
                traceback.print_exc(file=tqdm)
                tqdm.write(str(e))
    def fetch_and_export_examtool_exam_data(
        self,
        exams: [str],
        out: str,
        name_question_id: str,
        sid_question_id: str,
        emails: [str] = None,
        email_mutation_list: {str: str} = {},
    ):
        """
        Fetches the submissions from the exams in the exams list.
        If the emails list is None, it will fetch all emails, if it has emails in it, it will only return data for those emails.
        The mutation step occurres after the specific emails selection stage if applicable.
        The mutation list comes in the form of current email to new email.
        Returns:
            exam_json - The json of the exam
            email_to_data_map - the mapping of emails to their data.
        """
        if not exams:
            raise ValueError(
                "You must specify at least one exam you would like to upload!"
            )
        if email_mutation_list is None:
            email_mutation_list = {}
        print("Downloading exams data...")
        exam_json = None
        email_to_data_map = {}
        # Tracks which exam currently "owns" each student's responses.
        email_to_exam_map = {}
        first_exam = True
        for exam in exams:
            tmp_exam_json, tmp_template_questions, tmp_email_to_data_map, tmp_total = examtool.api.download.download(
                exam
            )
            # Choose only the emails we want to keep.
            if emails:
                for email in list(tmp_email_to_data_map.keys()):
                    if email not in emails:
                        tmp_email_to_data_map.pop(email, None)
            # Next, we want to mutate any emails
            for orig_email, new_email in email_mutation_list.items():
                if orig_email not in tmp_email_to_data_map:
                    print(
                        f"WARNING: Could not perform mutation on email {orig_email} (to {new_email}) because it does not exist in the data map!"
                    )
                    continue
                if new_email in tmp_email_to_data_map:
                    print(
                        f"Could not mutate email {new_email} (from {orig_email}) as the original email is already in the data map!"
                    )
                    continue
                tmp_email_to_data_map[new_email] = tmp_email_to_data_map.pop(orig_email)
            # Finally, we should merge together the student responses.
            for email, data in tmp_email_to_data_map.items():
                if email in email_to_data_map:
                    print(
                        f"WARNING: Student with email {email} submitted to multiple exams!"
                    )
                    def prompt_q():
                        # Interactive duplicate resolution: True keeps the new
                        # exam's responses, False keeps the current ones.
                        # NOTE(review): a non-empty invalid answer falls out of
                        # the while loop and returns None (treated as "no") —
                        # confirm that is intended.
                        input_data = None
                        while not input_data:
                            print(
                                f"Student's current responses are from {email_to_exam_map[email]}, would you like to use {exam} instead?"
                            )
                            input_data = input("[y/n]> ")
                            if input_data.lower() in ["y", "yes"]:
                                return True
                            if input_data.lower() in ["n", "no"]:
                                return False
                            print("Please type yes or no!")
                    if not prompt_q():
                        continue
                email_to_exam_map[email] = exam
                email_to_data_map[email] = data
            print(f"[{exam}]: Exporting exam pdfs...")
            self.export_exam(
                tmp_template_questions,
                tmp_email_to_data_map,
                tmp_total,
                exam,
                out,
                name_question_id,
                sid_question_id,
                include_outline=first_exam,
            )
            # Set global data for the examtool
            if first_exam:
                first_exam = False
                exam_json = tmp_exam_json
        # Lets finally clean up the student responses
        self.cleanse_student_response_data(email_to_data_map)
        return exam_json, email_to_data_map
    def attempt_fix_unknown_gs_email(
        self,
        email_to_question_sub_id,
        email_to_data_map,
        name_question_id,
        sid_question_id,
    ):
        """Interactively reconcile exam emails that are missing from the
        Gradescope roster, either remapping them to a Gradescope email or
        removing them from `email_to_data_map` entirely."""
        def prompt_fix(old_email, name, sid):
            # Returns the replacement email, or False to skip the student.
            # NOTE(review): a non-empty answer without "@" that is not a skip
            # keyword exits the while loop and returns None (treated as skip)
            # — confirm that is intended.
            input_data = None
            while not input_data:
                print(
                    f"Could not find {old_email} (name: {name}; sid: {sid}) in Gradescope! Please enter the Gradescope email of the student or `skip` to remove this student from autograding."
                )
                input_data = input("> ")
                if "@" in input_data.lower():
                    return input_data
                if input_data.lower() in ["n", "no", "skip"]:
                    return False
                print(
                    "The input is not a valid email (you are missing the `@`)! If you would like to skip, type `skip` or `no`."
                )
        # Sentinel value forces at least one pass; the loop repeats until a
        # full pass makes no changes.
        remove_email = ["DUMMY"]
        map_email = {}
        while remove_email or map_email:
            remove_email = []
            map_email = {}
            for email, data in email_to_data_map.items():
                if email not in email_to_question_sub_id:
                    responses = data["responses"]
                    name = responses.get(name_question_id, None)
                    sid = responses.get(sid_question_id, None)
                    new_email = prompt_fix(email, name, sid)
                    if new_email:
                        map_email[email] = new_email
                    else:
                        print(
                            f"Skipping {email}! This will remove the email from the data map."
                        )
                        remove_email.append(email)
            # Apply remaps/removals after iteration to avoid mutating the
            # dict while looping over it.
            for email, new_email in map_email.items():
                email_to_data_map[new_email] = email_to_data_map.pop(email)
            for email in remove_email:
                email_to_data_map.pop(email)
def cleanse_student_response_data(self, email_to_data_map: dict):
for email, data in email_to_data_map.items():
std_questions = data["student_questions"]
std_responses = data["responses"]
for question in std_questions:
qid = question["id"]
if qid not in std_responses:
std_responses[qid] = (
[]
if question["type"] in ["multiple_choice", "select_all"]
else ""
)
    def export_exam(
        self,
        template_questions,
        email_to_data_map,
        total,
        exam,
        out,
        name_question_id,
        sid_question_id,
        include_outline=True,
    ):
        """Thin wrapper: render student PDFs (and optionally the outline PDF)
        into `out` via examtool's export routine."""
        examtool.api.download.export(
            template_questions,
            email_to_data_map,
            total,
            exam,
            out,
            name_question_id,
            sid_question_id,
            include_outline=include_outline,
        )
def create_assignment(self, gs_class_id: str, gs_title: str, outline_path: str):
assignment_id = self.gs_client.create_exam(gs_class_id, gs_title, outline_path)
if not assignment_id:
print("Failed to create the exam! Make sure it has a unique title.")
return
return assignment_id
    def get_assignment_grader(
        self, gs_class_id: str, assignment_id: str
    ) -> GS_assignment_Grader:
        """Return the grader handle for an existing assignment."""
        return self.gs_client.get_assignment_grader(gs_class_id, assignment_id)
    def upload_outline(
        self, grader: GS_assignment_Grader, examtool_outline: "ExamtoolOutline"
    ):
        """Push the generated outline to Gradescope, then merge the ids
        Gradescope assigned back into the examtool outline.

        Raises:
            ValueError: if the upload returns nothing.
        """
        outline = grader.update_outline(examtool_outline.get_gs_outline())
        if not outline:
            raise ValueError("Failed to upload or get the outline")
        examtool_outline.merge_gs_outline_ids(outline)
def upload_student_submissions(
self, out: str, gs_class_id: str, assignment_id: str, emails: [str] = None
):
failed_emails = []
email_files = []
for file_name in os.listdir(out):
if "@" not in file_name:
continue
student_email = file_name[:-4]
if emails and student_email not in emails:
continue
email_files.append((file_name, student_email))
with std_out_err_redirect_tqdm() as orig_stdout:
for file_name, student_email in tqdm(
email_files, file=orig_stdout, unit="Submission", **def_tqdm_args
):
if not self.gs_api_client.upload_submission(
gs_class_id,
assignment_id,
student_email,
os.path.join(out, file_name),
):
failed_emails.append(student_email)
return failed_emails
    def set_group_types(self, outline: GS_Outline, debug=True):
        """Apply a grouping mode to every question in the outline.

        Note: the `debug` parameter is currently unused; kept for
        interface compatibility.
        """
        questions = list(outline.questions_iterator())
        with std_out_err_redirect_tqdm() as orig_stdout:
            for qid, question in tqdm(
                questions, file=orig_stdout, unit="Question", **def_tqdm_args
            ):
                self.set_group_type(question)
def set_group_type(self, o_question: GS_Outline_Question):
question_type = o_question.data.get("type")
q = o_question.get_gs_question()
q_type = GroupTypes.complex
if question_type in ["select_all", "multiple_choice"]:
q_type = GroupTypes.mc
# if question_type in ["long_answer", "long_code_answer"]:
# q_type = GroupTypes.non_grouped
return q.set_group_type(q_type)
    def process_question(
        self,
        qid: str,
        question: GS_Question,
        email_to_data_map: dict,
        email_to_question_sub_id_map: dict,
        name_question_id: str,
        sid_question_id: str,
        custom_grouper_map: {
            str: Callable[[str, GS_Question, dict, dict], "QuestionGrouper"]
        },
    ):
        """Group one question's submissions, sync the groups and rubric to
        Gradescope, and apply grades. Identity questions (name/sid) are
        skipped entirely."""
        # Group questions
        if question.data.get("id") in [name_question_id, sid_question_id]:
            tqdm.write("Skipping grouping of an id question!")
            return
        tqdm.write(f"[{qid}]: Grouping...")
        groups = self.group_question(
            qid,
            question,
            email_to_data_map,
            email_to_question_sub_id_map,
            custom_grouper_map,
        )
        if groups:
            # Group answers
            tqdm.write(f"[{qid}]: Syncing groups on gradescope...")
            self.sync_groups_on_gradescope(qid, question, groups)
            tqdm.write(f"[{qid}]: Syncing rubric items...")
            rubric = self.sync_rubric(qid, question, groups)
            # in here, add check to see if qid is equal to either name or sid q id so we do not group those.
            tqdm.write(f"[{qid}]: Applying grades for each group...")
            self.grade_question(qid, question, rubric, groups)
        else:
            tqdm.write(f"[{qid}]: Failed to group question {qid}!")
def group_question(
self,
qid: str,
question: GS_Question,
email_to_data_map: dict,
email_to_question_sub_id_map: dict,
custom_grouper_map: {
str: Callable[[str, GS_Question, dict, dict], "QuestionGrouper"]
},
):
if custom_grouper_map is not None:
examtool_qid = question.data.get("id")
if examtool_qid:
return custom_grouper_map[qid](
qid, question, email_to_data_map, email_to_question_sub_id_map
)
if qid in custom_grouper_map:
return custom_grouper_map[qid](
qid, question, email_to_data_map, email_to_question_sub_id_map
)
# Default handler
qtype = question.data.get("type")
if qtype in ["multiple_choice", "select_all"]:
return self.group_mc_question(
qid, question, email_to_data_map, email_to_question_sub_id_map
)
elif qtype in ["short_answer", "short_code_answer"]:
return self.group_short_ans_question(
qid, question, email_to_data_map, email_to_question_sub_id_map
)
elif qtype in ["long_answer", "long_code_answer"]:
return self.group_long_ans_question(
qid, question, email_to_data_map, email_to_question_sub_id_map
)
else:
tqdm.write(
f"Unsupported question type {qtype} for question {question.data}!"
)
return None
    def group_mc_question(
        self,
        qid: str,
        question: GS_Question,
        email_to_data_map: dict,
        email_to_question_sub_id_map: dict,
        custom_rubric_weights_fn: Callable[
            [GS_Question, List[str], List[bool]], List[float]
        ] = None,
    ):
        """Group multiple-choice / select-all submissions by exact selection pattern.

        Builds one rubric item per answer option (plus "Blank" and
        "Student did not receive this question") and buckets each student's
        submission under a group keyed by the 0/1 string of their selections.
        Returns the populated QuestionGrouper.
        """
        data = question.data
        # This is a list of correct options from left (top) to right (bottom)
        correct_seq = []
        seq_name = []
        solution_options = data.get("solution", {})
        if solution_options is not None:
            solution_options = solution_options.get("options", [])
        if solution_options is None:
            solution_options = []
        all_options = [option.get("text") for option in data.get("options", [])]
        for option in all_options:
            correct_seq.append(option in solution_options)
            seq_name.append(option)
        # Add blank option
        correct_seq.append(None)
        seq_name.append("Blank")
        # Add student did not receive this question
        correct_seq.append(None)
        seq_name.append("Student did not receive this question")
        # Weights come from the default scorer unless a custom one is supplied.
        rubric_weights = (
            self.get_basic_rubric_scores(question, seq_name, correct_seq)
            if custom_rubric_weights_fn is None
            else custom_rubric_weights_fn(question, seq_name, correct_seq)
        )
        groups = QuestionGrouper(
            question,
            rubric=[
                RubricItem(description=item[0], weight=item[1])
                for item in zip(seq_name, rubric_weights)
            ],
        )
        def list_to_str(l):
            # Encode a boolean selection mask as a "0"/"1" digit string,
            # used as the group's name/key.
            s = ""
            for item in l:
                s += str(int(item))
            return s
        eqid = question.data["id"]
        for email, data in email_to_data_map.items():
            responses = data.get("responses", {})
            response = responses.get(eqid)
            selection = [False] * len(correct_seq)
            if response is None:
                # Student never received this question (last rubric slot).
                selection[-1] = True
            elif response == []:
                # Student saw the question but selected nothing (second-to-last slot).
                selection[-2] = True
            else:
                if not isinstance(response, list):
                    response = [response]
                for i, option in enumerate(all_options):
                    selection[i] = option in response
            s = list_to_str(selection)
            sid = email_to_question_sub_id_map[email][qid]
            if s not in groups:
                groups.add_group(QuestionGroup(s, selection))
            groups.get_group(s).add_sid(sid)
        return groups
    def group_short_ans_question(
        self,
        qid: str,
        question: GS_Question,
        email_to_data_map: dict,
        email_to_question_sub_id_map: dict,
        lower_check: bool = True,
        custom_rubric_weights_fn: Callable[
            [GS_Question, List[str], List[bool]], List[float]
        ] = None,
        strip_md_from_sol: bool = True,
    ):
        """Group short-answer submissions by their literal response text.

        Each distinct response string becomes its own group; the selection mask
        marks whether the response matched the solution ("Correct"),
        mismatched ("Incorrect"), was blank, or was never received.

        :param lower_check: compare response and solution case-insensitively.
        :param strip_md_from_sol: strip one layer of $, ``` or ` markdown
            delimiters from the solution before comparing.
        """
        data = question.data
        # This is a list of correct options from left (top) to right (bottom)
        solution = data.get("solution", {})
        if solution is not None:
            solution = solution.get("solution", {})
        if solution is not None:
            solution = solution.get("text")
        if not solution:
            tqdm.write(
                f"[{qid}]: No solution defined for this question! Only grouping blank and std did not receive."
            )
            solution = "Correct"
        correct_seq = [True]
        seq_name = [solution]
        # Add a wrong option
        correct_seq.append(None)
        seq_name.append("Incorrect")
        # Add blank option
        correct_seq.append(None)
        seq_name.append("Blank")
        # Add student did not receive this question
        correct_seq.append(None)
        seq_name.append("Student did not receive this question")
        rubric_weights = (
            self.get_basic_rubric_scores(question, seq_name, correct_seq)
            if custom_rubric_weights_fn is None
            else custom_rubric_weights_fn(question, seq_name, correct_seq)
        )
        groups = QuestionGrouper(
            question,
            rubric=[
                RubricItem(description=item[0], weight=item[1])
                for item in zip(seq_name, rubric_weights)
            ],
        )
        # Process solution
        if lower_check:
            sol = solution.strip().lower()
        else:
            sol = solution.strip()
        if strip_md_from_sol:
            def strip_part(text, boundary):
                # Remove a matching leading+trailing markdown delimiter pair.
                if text.startswith(boundary) and text.endswith(boundary):
                    blen = len(boundary)
                    return (text[blen:-blen], True)
                else:
                    return (text, False)
            sol, replaced = strip_part(sol, "$")
            if not replaced:
                sol, replaced = strip_part(sol, "```")
            if not replaced:
                sol, replaced = strip_part(sol, "`")
        eqid = question.data["id"]
        for email, data in email_to_data_map.items():
            responses = data.get("responses", {})
            response = responses.get(eqid)
            selection = [False] * len(correct_seq)
            if response is None:
                selection[-1] = True
                response = "Student did not receive this question"
            elif response == "":
                selection[-2] = True
                response = "Blank"
            else:
                # NOTE(review): the response is stripped/lowered but never
                # markdown-stripped, while the solution may be -- confirm that
                # asymmetry is intended.
                if solution is not None:
                    same = None
                    if lower_check:
                        same = response.lower().strip() == sol
                    else:
                        same = response.strip() == sol
                    if same:
                        selection[0] = True
                    else:
                        selection[1] = True
            sid = email_to_question_sub_id_map[email][qid]
            if response not in groups:
                groups.add_group(QuestionGroup(response, selection))
            groups.get_group(response).add_sid(sid)
        return groups
def group_long_ans_question(
self,
qid: str,
question: GS_Question,
email_to_data_map: dict,
email_to_question_sub_id_map: dict,
):
"""
We will only be grouping students who did not get the question or left it blank.
"""
data = question.data
# This is a list of correct options from left (top) to right (bottom)
correct_seq = [True]
seq_name = ["Correct"]
# Add blank option
correct_seq.append(None)
seq_name.append("Blank")
# Add student did not receive this question
correct_seq.append(None)
seq_name.append("Student did not receive this question")
rubric_weights = self.get_long_ans_rubric_scores(
question, seq_name, correct_seq
)
groups = QuestionGrouper(
question,
rubric=[
RubricItem(description=item[0], weight=item[1])
for item in zip(seq_name, rubric_weights)
],
)
group_blank = QuestionGroup("Blank", [False, True, False])
groups.add_group(group_blank)
group_sdnrtq = QuestionGroup(
"Student did not receive this question", [False, False, True]
)
groups.add_group(group_sdnrtq)
eqid = question.data["id"]
for email, data in email_to_data_map.items():
responses = data.get("responses", {})
response = responses.get(eqid)
if not response:
sid = email_to_question_sub_id_map[email][qid]
if response is None:
group_sdnrtq.add_sid(sid)
elif response == "":
group_blank.add_sid(sid)
return groups
def sync_groups_on_gradescope(
self, qid: str, question: GS_Question, groups: "QuestionGrouper"
):
"""
Groups is a list of name, submission_id, selected answers
"""
failed_groups_names = []
i = 1
failed = False
while not question.is_grouping_ready():
timeout = 5
tqdm.write(
f"[{qid}]: Question grouping not ready! Retrying in {timeout} seconds!"
)
time.sleep(timeout)
# print(f"[{qid}]: Question grouping not ready! Retrying in {timeout} seconds" + (" " * timeout), end="\r")
# for i in range (timeout):
# print(f"[{qid}]: Question grouping not ready! Retrying in {timeout} seconds" + ("." * (1 + i)), end="\r")
# time.sleep(1)
# failed = True
# if failed:
# print("")
gradescope_groups = question.get_groups()
def all_zeros(s: str):
return s and all(v == "0" for v in s)
def set_group(group, gs_group):
group.set_id(gs_group.get("id"))
for group in groups.get_groups():
g_name = group.get_name()
for gs_group in gradescope_groups:
if gs_group["question_type"] == "mc":
# The question type is mc so lets group by the internal mc
if g_name == "Blank":
# This is the blank group, lets use the internal label to group
if all_zeros(gs_group["internal_title"]):
set_group(group, gs_group)
else:
flip_g_name = g_name[:-2][::-1]
if gs_group["internal_title"] is not None:
if (
flip_g_name == gs_group["internal_title"]
and g_name[len(g_name) - 1] != "1"
):
set_group(group, gs_group)
else:
if g_name == gs_group["title"]:
set_group(group, gs_group)
else:
# The question type is not mc so we should group on title and internal title for blank.
# The internal title should only say Blank for default blank grouped submissions.
# We then check the normal title if this is not true
if (
g_name == gs_group["internal_title"]
or g_name == gs_group["title"]
):
set_group(group, gs_group)
max_attempts = 5
attempt = 1
for group in tqdm(
groups.get_groups(),
desc=f"[{qid}]: Syncing Groups",
unit="Group",
**def_tqdm_args,
):
g_name = group.get_name()
sids = group.get_sids()
if not sids:
# We do not want to create groups which no questions exist.
continue
group_id = group.get_id()
while attempt < max_attempts:
if not group_id:
group_id = question.add_group(g_name)
if group_id is None:
attempt += 1
time.sleep(1)
continue
if not question.group_submissions(group_id, sids):
tqdm.write(
f"[{qid}]: Failed to group submissions to {group_id}. SIDS: {sids}"
)
failed_groups_names.append(g_name)
break
else:
tqdm.write(f"[{qid}]: Failed to create group for {g_name}! ({groups})")
failed_groups_names.append(g_name)
# This is to decrease down stream errors
for failed_group_name in failed_groups_names:
groups.remove(failed_group_name)
@classmethod
def get_basic_rubric_scores(cls, question: GS_Question, group_names, correct_seq):
scores = []
num_correct = sum([1 for correct in correct_seq if correct])
num_choices = sum([1 for correct in correct_seq if correct is not None])
points = question.data.get("points", 1)
if points is None:
points = 1
rubric_weight = 0
if num_correct != 0:
rubric_weight = (1 / num_correct) * points
for correct in correct_seq:
if correct is None:
scores.append(0)
else:
if correct:
scores.append(rubric_weight)
else:
scores.append(-rubric_weight)
return scores
@classmethod
def get_long_ans_rubric_scores(
cls, question: GS_Question, group_names, correct_seq
):
return [0] * len(correct_seq)
    def sync_rubric(
        self, qid: str, question: GS_Question, groups: "QuestionGrouper"
    ) -> QuestionRubric:
        """Make Gradescope's rubric for `question` match the grouper's rubric.

        Reuses Gradescope's default "Correct" item for the first rubric entry
        when possible, then appends any rubric items whose description is not
        already present. Returns the (possibly updated) QuestionRubric.
        """
        rubric = QuestionRubric(question)
        if len(groups) == 0:
            return rubric
        qrubric: [RubricItem] = groups.get_rubric()
        if len(rubric) == 1:
            default_rubric_item = rubric.get_rubric_items()[0]
            if default_rubric_item.description == "Correct":
                first_item = qrubric[0]
                if not rubric.update_rubric_item(
                    default_rubric_item,
                    description=first_item.description,
                    weight=first_item.weight,
                ):
                    tqdm.write(
                        f'[{qid}]: Failed to update default "Correct" rubric item!'
                    )
                # qrubric.remove(first_item)
        # Only add items not already on the Gradescope rubric (matched by description).
        existing_rubric_items = rubric.get_rubric_items()
        existing_rubric_items_desc = [
            item.description for item in existing_rubric_items
        ]
        for rubric_item in tqdm(
            qrubric, desc=f"[{qid}]: Syncing Rubric", unit="Rubric", **def_tqdm_args
        ):
            if rubric_item.description not in existing_rubric_items_desc:
                rubric.add_rubric_item(rubric_item)
        return rubric
def grade_question(
self, qid: str, question: GS_Question, rubric: QuestionRubric, groups: dict
):
question_data = question.get_question_info()
sub_id_mapping = {str(sub["id"]): sub for sub in question_data["submissions"]}
for group in tqdm(
groups.get_groups(), desc=f"[{qid}]: Grading", unit="Group", **def_tqdm_args
):
group_sel = group.get_selected_items()
group_sids = group.get_sids()
if len(group_sids) > 0:
sid = group_sids[0]
if not sub_id_mapping[str(sid)]["graded"]:
if not rubric.grade(sid, group_sel, save_group=True):
tqdm.write(f"[{qid}]: Failed to grade group {group_name}!")
class ExamtoolOutline:
    """Builds a Gradescope outline from an examtool exam JSON.

    Maps Gradescope question numbers (e.g. "2.1") back to the examtool
    question dicts so grades can be applied later.
    """
    # Fixed crop regions (page, x1, y1, x2, y2 in percent) for name and SID.
    name_region = GS_Crop_info(1, 2.4, 11.4, 99, 18.8)
    sid_region = GS_Crop_info(1, 2.4, 18.9, 99, 28.7)
    def __init__(
        self, grader: GS_assignment_Grader, exam_json: dict, id_question_ids: [str]
    ):
        self.exam_json = exam_json
        self.gs_number_to_exam_q, self.gs_outline = self.generate_gs_outline(
            grader, exam_json, id_question_ids
        )
    def get_gs_crop_info(self, page, question=None):
        # Every question uses a near-full-page crop on its own page.
        return GS_Crop_info(page, 2, 2, 98, 98)
    def question_to_gso_question(
        self, grader: GS_assignment_Grader, page, question: dict
    ) -> GS_Outline_Question:
        """Convert one examtool question dict into a Gradescope outline question."""
        weight = question.get("points")
        if not weight:
            weight = 0
        return GS_Outline_Question(
            grader,
            None,
            [self.get_gs_crop_info(page, question=question)],
            title=question.get("name", ""),
            weight=weight,
        )
    def generate_gs_outline(
        self, grader: GS_assignment_Grader, exam_json: dict, id_question_ids: [str]
    ):
        """Walk the exam JSON and produce (gs_number -> question, GS_Outline).

        Pages advance one per question; id questions are skipped but still
        consume a page since they appear on the exam PDF.
        """
        gs_number_to_exam_q = {}
        questions = []
        page = 2  # Page 1 is an info page
        qid = 1
        if exam_json.get("public"):
            # NOTE(review): prev_page is set to 1 while page starts at 2, so
            # `page != prev_page` below is always true -- confirm intended.
            prev_page = 1
            pg = GS_Outline_Question(
                grader,
                None,
                [self.get_gs_crop_info(page, exam_json.get("public"))],
                title="Public",
                weight=0,
            )
            sqid = 1
            for question in extract_public(exam_json):
                question_id = question.get("id")
                if question_id in id_question_ids:
                    print(f"Skipping {question_id} as it is an id question.")
                    page += (
                        1
                    )  # Still need to increment this as it is still on the exam pdf.
                    continue
                pg.add_child(self.question_to_gso_question(grader, page, question))
                gs_number_to_exam_q[f"{qid}.{sqid}"] = question
                sqid += 1
                page += 1
            if page != prev_page and len(pg.children) > 0:
                questions.append(pg)
                qid += 1
        for group in extract_groups(exam_json):
            prev_page = page
            # NOTE(review): the default here is the *string* "0", which is
            # truthy and would pass through as a str weight -- confirm the
            # "points" key is always present or numeric.
            weight = group.get("points", "0")
            if not weight:
                weight = 0
            g = GS_Outline_Question(
                grader,
                None,
                [self.get_gs_crop_info(page, group)],
                title=group.get("name", ""),
                weight=weight,
            )
            sqid = 1
            for question in extract_questions(
                group, extract_public_bool=False, top_level=False
            ):
                g.add_child(self.question_to_gso_question(grader, page, question))
                gs_number_to_exam_q[f"{qid}.{sqid}"] = question
                sqid += 1
                page += 1
            if page != prev_page:
                questions.append(g)
                qid += 1
        outline = GS_Outline(self.name_region, self.sid_region, questions)
        return (gs_number_to_exam_q, outline)
    def get_gs_outline(self):
        """Return the generated GS_Outline."""
        return self.gs_outline
    def merge_gs_outline_ids(self, outline: GS_Outline):
        """Adopt an outline returned by Gradescope and re-attach examtool data."""
        self.gs_outline = outline
        for qnum, q in outline.questions_iterator():
            q.data = self.gs_number_to_exam_q[qnum]
    def questions_iterator(self):
        yield from self.gs_outline.questions_iterator()
class QuestionGroup:
    """A named set of submissions that all receive the same rubric selection."""
    def __init__(self, name: str, selected_rubric_items: [bool], gid: str = None):
        self.name = name
        # Boolean mask over the rubric items applied to every member.
        self.selected_rubric_items = selected_rubric_items
        self.gid = gid
        self.sids = set()
    def get_name(self):
        """Return the group's display name."""
        return self.name
    def get_id(self):
        """Return the Gradescope group id (None until synced)."""
        return self.gid
    def set_id(self, gid: str):
        """Record the Gradescope group id."""
        self.gid = gid
    def get_sids(self):
        """Return the member submission ids as a list."""
        return list(self.sids)
    def add_sid(self, sid: str):
        """Add one submission id to the group."""
        self.sids.add(sid)
    def add_sids(self, sids: [str]):
        """Add many submission ids at once."""
        self.sids = self.sids | set(sids)
    def get_selected_items(self):
        """Return the rubric-selection mask."""
        return self.selected_rubric_items
class QuestionGrouper:
    """Holds a question's rubric plus its QuestionGroups keyed by group name."""
    def __init__(
        self,
        question: "GS_Question",
        rubric: "List[RubricItem]",  # This is a list of rubric items.
        groups: "Dict[str, QuestionGroup]" = None,
    ):
        # Default of None (not {}) avoids a shared mutable default argument.
        self.groups = groups
        if not self.groups:
            self.groups = {}
        self.question = question
        self.rubric = rubric
    def get_groups(self):
        """Return a view of all groups."""
        return self.groups.values()
    def get_group(self, name):
        """Return the group named `name`, or None if absent."""
        return self.groups.get(name)
    def add_group(self, group: "QuestionGroup"):
        """Insert (or replace) a group, keyed by its name."""
        self.groups[group.get_name()] = group
    def remove(self, group_name):
        """Remove the group named `group_name` if present.

        BUGFIX: the original iterated the dict's keys (strings), called
        .get_name() on them, and used the nonexistent dict.remove --
        raising AttributeError for any non-empty grouper.
        """
        self.groups.pop(group_name, None)
    def __len__(self):
        return len(self.groups)
    def get_rubric(self) -> "List[RubricItem]":
        """Return the rubric item list."""
        return self.rubric
    def __contains__(self, key):
        return key in self.groups
from flask_wtf import FlaskForm
from wtforms.validators import InputRequired
from dmutils.forms.fields import DMEmailField
class EmailAddressForm(FlaskForm):
    """Single-field form for collecting a contributor's email address."""
    # DMEmailField also validates the address format; InputRequired only
    # guards against an empty submission.
    email_address = DMEmailField(
        "Email address",
        hint="An invite will be sent asking the recipient to register as a contributor.",
        validators=[
            InputRequired(message="Email address must be provided")
        ]
    )
import RPi.GPIO as GPIO

# Mirror the digital input on BCM pin 15 onto output pin 14 until interrupted.
GPIO.setmode(GPIO.BCM)
GPIO.setup(14, GPIO.OUT)
GPIO.setup(15, GPIO.IN)
try:
    while True:
        GPIO.output(14, GPIO.input(15))
finally:
    # Drive the output low and release the pins even on Ctrl-C.
    # BUGFIX: stray dataset residue fused onto the final line made the
    # file unparsable; it has been removed.
    GPIO.output(14, 0)
    GPIO.cleanup()
# -*- coding: utf-8 -*-
"""
Sony .spimtx LUT Format Input / Output Utilities
================================================
Defines *Sony* *.spimtx* *LUT* Format related input / output utilities objects.
- :func:`colour.io.read_LUT_SonySPImtx`
- :func:`colour.io.write_LUT_SonySPImtx`
"""
from __future__ import division, unicode_literals
import numpy as np
import os
import re
from colour.io.luts import Matrix
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2018 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['read_LUT_SonySPImtx', 'write_LUT_SonySPImtx']
def read_LUT_SonySPImtx(path):
    """Read a Sony *.spimtx* file and return its 3x4 matrix as a *Matrix*."""
    matrix = np.loadtxt(path).reshape(3, 4)
    # TODO: Update with "develop" generic function.
    basename = os.path.splitext(os.path.basename(path))[0]
    title = re.sub('_|-|\\.', ' ', basename)
    return Matrix(matrix, title)
def write_LUT_SonySPImtx(matrix, path, decimals=6):
    """Write a *Matrix*-like object (3x3 or 3x4 ``array`` attribute) to a
    *.spimtx* file.

    A 3x3 matrix is padded with a zero offset column to the 3x4 layout the
    format requires.

    :param decimals: number of decimal places for each written value.
    """
    if matrix.array.shape == (3, 4):
        array = matrix.array
    else:
        array = np.hstack([matrix.array, np.zeros((3, 1))])
    # BUGFIX: numpy's `fmt` argument must be str on Python 3; the previous
    # `.encode('utf-8')` produced bytes and made savetxt raise.
    np.savetxt(path, array, fmt='%.{0}f'.format(decimals))
import jwt
from os import environ
from datetime import datetime, timedelta
from flask_restful import (Resource, request)
from models.user import UserModel, UserSchema
from webargs.flaskparser import use_args
from webargs import fields
from flask import flash, redirect, url_for
user_args ={'username': fields.Str(required=True), 'password': fields.Str(required=True), 'name': fields.Str(), 'email': fields.Str()}
class UserRegister(Resource):
    """REST resource for user sign-up (POST) and profile update (PUT).

    Form-encoded requests are answered with redirects; JSON requests with
    API-style payloads.
    """
    @use_args(user_args, locations=('json', 'form'))
    def post(self, user_args):
        """Create a new user after validating uniqueness of username and email.

        NOTE(review): no password hashing is visible in this handler --
        confirm UserModel hashes before persisting.
        """
        user_schema=UserSchema()
        user_data = user_args
        error = user_schema.validate(user_data)
        if error:
            if request.content_type == 'application/x-www-form-urlencoded':
                # Flatten the last validation message for the redirect query arg.
                for message, message_value in error.items():
                    error_message = ''.join(message_value)
                return redirect(url_for('signup', error=error_message))
            else:
                return {'status': 'fail', 'message': error}, 400
        username_exist = UserModel.filter_and_find_first(username=user_data['username'])
        if username_exist:
            if request.content_type == 'application/x-www-form-urlencoded':
                return redirect(url_for('signup', error='username already exist'))
            else:
                return {'status': 'fail', 'message': 'username already exist'}, 409
        email_exist = UserModel.filter_and_find_first(email=user_data['email'])
        if email_exist:
            if request.content_type == 'application/x-www-form-urlencoded':
                return redirect(url_for('signup', error='email already exist'))
            else:
                return {'status': 'fail', 'message': 'email already exist'}, 409
        new_user = UserModel(**user_data)
        new_user.save_to_db()
        new_user_json = user_schema.dump(new_user).data
        if request.content_type == 'application/x-www-form-urlencoded':
            return redirect(url_for('index', error=request.args.get('error')))
        else:
            return {'status': 'success', 'data': new_user_json}, 201
    @staticmethod
    def put():
        """Update an existing user's name and email (JSON only); password optional."""
        user_schema=UserSchema(partial={'password'})
        user_data = request.get_json()
        error = user_schema.validate(user_data)
        if error:
            return {'status': 'fail','message': error}, 400
        user_result = UserModel.filter_and_find_first(username=user_data['username'])
        if user_result:
            user_result.email = user_data['email']
            user_result.name = user_data['name']
            user_result.save_to_db()
            new_user_json = user_schema.dump(user_result).data
            return {'status': 'success', 'data': new_user_json}, 200
        return {'status': 'fail', 'message': 'user does not exist'}, 404
class UserLogin(Resource):
    """REST resource that authenticates a user and issues a 30-minute JWT."""
    @use_args(user_args, locations=('json', 'form'))
    def post(self, args):
        """Validate credentials and return a signed token (JSON) or redirect (form)."""
        user_schema=UserSchema(partial=('name', 'email'))
        user_data = args
        error = user_schema.validate(user_data)
        if error:
            if request.content_type == 'application/x-www-form-urlencoded':
                return redirect(url_for('index', error=error.get('username')))
            else:
                return {'status': 'fail', 'message': error}, 400
        # NOTE(review): credentials are compared in plaintext and the password
        # is lowercased, which shrinks the credential space -- confirm this is
        # intended and that passwords are not meant to be hashed.
        user_exist = UserModel.filter_and_find_first(username=user_data['username'].lower(),
                                                     password=user_data['password'].lower())
        if not user_exist:
            if request.content_type == 'application/x-www-form-urlencoded':
                return redirect(url_for('index', error='username and password does not exist'))
            else:
                return {'status': 'fail', 'message': 'username and password does not exist'}, 409
        # Token payload excludes the password and expires after 30 minutes.
        user_data_json = UserSchema(exclude=('password',)).dump(user_exist).data
        key = environ.get('SECRET')
        payload = {'user': user_data_json, 'exp': datetime.utcnow() + timedelta(minutes=30)}
        token = jwt.encode(payload, key=key, algorithm='HS256').decode('utf-8')
        if request.content_type == 'application/x-www-form-urlencoded':
            return redirect(url_for('books_page'))
        else:
            return {'status': 'success', 'data': {'token': str(token), 'user': user_data_json}}
"""bsp methods"""
import base64
import json
import pprint
import pyodata
import sap.cli.core
import sap.cli.helpers
from sap import get_logger
from sap.errors import ResourceAlreadyExistsError
class CommandGroup(sap.cli.core.CommandGroup):
    """Management for BSP Applications.

    Registers the `bsp` sub-command group with the sap CLI framework.
    """
    def __init__(self):
        super().__init__('bsp')
@CommandGroup.argument('--bsp', type=str, required=True, help='BSP ID')
@CommandGroup.argument('--package', type=str, required=True, help='ABAP Package')
@CommandGroup.argument('--app', type=str, required=True, help='Path to application packed in zip archive')
@CommandGroup.argument('--corrnr', type=str, required=True,
                       help='Transport Request to be used for application upload')
@CommandGroup.command()
def create(connection, args):
    """Creates the requested BSP application.
    Important: Target ABAP system needs following setup
    * update trnspace set editflag = 'X' role = 'P' license = '' sscrflag = 'X'
    where namespace = '/0CUST/' or namespace = '/0SAP/'.
    * table /IWFND/C_CONFIG je 'GATEWAY_VIRUSCAN_PROFILE'='-'

    Raises ResourceAlreadyExistsError when the BSP already exists, and
    re-raises any non-404 OData HTTP error.
    """
    # load zipped application from filesystem
    with open(args.app, 'rb') as file:
        app_data_archive = file.read()
    # convert raw zipped data to base54 encoding
    app_data_b64 = base64.b64encode(app_data_archive)
    # check if application exists
    try:
        connection.client.entity_sets.Repositories.get_entity(Name=args.bsp).execute()
        raise ResourceAlreadyExistsError
    except pyodata.exceptions.HttpError as ex:
        # 404 means the BSP does not exist yet, which is the happy path here.
        if ex.response.status_code != 404:
            raise ex
    app_data = {
        'Name': args.bsp,
        'Package': args.package,
        'ZipArchive': app_data_b64.decode("utf-8"),
    }
    create_request = connection.client \
        .entity_sets \
        .Repositories \
        .create_entity() \
        .custom('CodePage', 'UTF8') \
        .custom('TransportRequest', args.corrnr) \
        .custom('client', args.client)
    create_request.set(**app_data)
    try:
        create_request.execute()
    except pyodata.exceptions.HttpError as ex:
        # Log the server's JSON error body before re-raising for diagnostics.
        res = json.loads(ex.response.text)
        get_logger().info(pprint.pformat(res))
        raise ex
    get_logger().info('BSP application successfully created and uploaded')
import pandas as pd
from datetime import datetime
from log import Log
class Asset:
    """A price series for a single asset, read from / written to a CSV file
    with an optional three-line "Key: Value" metadata header."""

    def __init__(self):
        self.data = None          # pandas DataFrame indexed by 'Date'
        self.name = None          # human-readable asset name
        self.symbol = None        # ticker symbol
        self.exchange = None      # exchange identifier
        self.header_lines = None  # number of metadata lines preceding the CSV data

    def read_header(self, filename):
        """Parse up to three leading "Key: Value" lines (Symbol/Name/Exchange).

        Sets self.header_lines to the number of metadata lines found so that
        read_csv() can skip them.
        """
        self.header_lines = 0
        head = []
        # BUGFIX: the original used [next(file) for n in range(3)], which
        # raised StopIteration for files shorter than three lines.
        with open(filename) as file:
            for _ in range(3):
                line = file.readline()
                if not line:
                    break
                head.append(line)
        for nr, line in enumerate(head, start=1):
            parts = line.strip().split(":")
            if len(parts) != 2:
                # First non "Key: Value" line ends the header.
                break
            self.header_lines = nr
            key, value = [part.strip() for part in parts]
            if key == "Symbol":
                self.symbol = value
            elif key == "Name":
                self.name = value
            elif key == "Exchange":
                self.exchange = value

    def read_csv(self, filename):
        """Load the asset from `filename`, honoring the metadata header."""
        self.read_header(filename)
        self.data = pd.read_csv(
            filename,
            skiprows=self.header_lines,
            sep=";",
            converters={0: lambda x: datetime.strptime(x, "%Y-%m-%d")},
        )
        self.data = self.data.set_index('Date')

    def write_csv(self, filename):
        """Write the metadata header followed by the data as ';'-separated CSV."""
        # BUGFIX: use a context manager so the file is flushed and closed, and
        # avoid the pandas `line_terminator` keyword, which was renamed and
        # later removed; newline="\n" keeps the original '\n' line endings.
        with open(filename, "w", newline="\n") as outfile:
            if self.symbol is not None:
                outfile.write("Symbol: %s\n" % self.symbol)
            if self.name is not None:
                outfile.write("Name: %s\n" % self.name)
            if self.exchange is not None:
                outfile.write("Exchange: %s\n" % self.exchange)
            self.data.to_csv(outfile, sep=";")

    def append(self, col, series: pd.Series):
        """Attach `series` as a new column named `col`."""
        self.data[col] = series
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`ranges`
==================
.. module:: ranges
:platform: Unix, Windows
:synopsis:
.. moduleauthor:: hbldh <henrik.blidh@nedomkull.com>
Created on 2015-06-04, 15:12
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from operator import itemgetter
from itertools import groupby
import numpy as np
def int_array_to_ranges(array):
    """Converts a monotonically increasing (or decreasing) array of integers into
    a list of index ranges with identical value.

    :param array: The array to segment.
    :type array: :py:class:`numpy.ndarray` or list
    :return: The list of ranges as index tuples.
    :rtype: list
    """
    n = len(array)
    # Indices after which the value changes.
    change_points = np.where(np.diff(array))[0]
    if len(change_points) == 0:
        return [(0, n)]
    if len(change_points) == 1:
        split = change_points[0] + 1
        return [(0, split), (split, n)]
    interior = [
        (a + 1, b + 1) for a, b in zip(change_points[:-1], change_points[1:])
    ]
    return [(0, interior[0][0])] + interior + [(interior[-1][1], n)]
def bool_array_to_ranges(array):
    """Converts a boolean array into a list of segments where it is ``True``

    :param array: A boolean array to segment.
    :type array: :py:class:`numpy.ndarray`
    :return: A list of tuples with start and stop index of the ranges.
    :rtype: list
    """
    ranges = []
    true_indices = np.where(array > 0)[0]
    # Consecutive indices share the same (position - value) key, so groupby
    # splits the index stream into runs of consecutive True values.
    # BUGFIX: the original used Python 2 tuple-parameter unpacking in the
    # lambda (`lambda (i, x): ...`), a SyntaxError on Python 3, and indexed
    # the result of map(), which is a lazy iterator on Python 3.
    for _, run in groupby(enumerate(true_indices), lambda pair: pair[0] - pair[1]):
        indices = [index for _, index in run]
        ranges.append((indices[0], indices[-1]))
    return ranges
from setuptools import setup

# Minimal packaging stub: installs a distribution named "sup".
# NOTE(review): no version, packages, or entry points are declared --
# confirm this is intentional before publishing.
setup(
    name="sup",
    author="Richard Liaw"
)
#!/usr/bin/env python
"""
Monitor system calls with dockit.
"""
import os
import sys
import atexit
import logging
import traceback
import lib_uris
import lib_util
import lib_common
from lib_properties import pc
os.environ["PYTHONUNBUFFERED"] = "1"
if True:
# TODO: Make this cleaner.
# FIXME: This does not work yet because scripts/cim_objects_definitions.py needs survol/lib_event.py
# FIXME: ... which cannot be imported due to path issues.
if ".." not in sys.path:
sys.path.append("..")
if "../.." not in sys.path:
sys.path.append("../..")
try:
from scripts import dockit
except Exception as exc:
logging.error("exc=%s" % exc)
raise
else:
dockit = None
def Snapshot():
    """Snapshot mode: emit a single RDF triple describing the target process."""
    logging.info("Snapshot mode")
    cgiEnv = lib_common.ScriptEnvironment()
    process_id = cgiEnv.GetId()
    logging.debug("Snapshot process_id=%s" % process_id)
    # This just returns one triple.
    grph = cgiEnv.GetGraph()
    process_node = lib_uris.gUriGen.PidUri(process_id)
    grph.add((process_node, pc.property_pid, lib_util.NodeLiteral(process_id)))
    cgiEnv.OutCgiRdf()
# FIXME: Must finish this.
if dockit:
dockit_dirname = lib_util.standardized_file_path(os.path.dirname(dockit.__file__))
logging.debug("File=" + __file__ + " dockit_dirname=" + dockit_dirname)
def _atexit_handler_detach(process_id):
"""This is called when this CGI script leaves for any reason.
Its purpose is to detach from the target process."""
logging.info("_atexit_handler process_id=%d" % process_id)
def SendEvents():
    """This is called in a subprocess started by the Python module supervisor.

    Attaches dockit to the target process and forwards each captured system
    call, as an RDF triple, into the global events graph via OutCgiRdf().
    """
    logging.info("SendEvents")
    # FIXME:
    if not dockit:
        # dockit could not be imported at module load time; nothing to do.
        logging.error("dockit not available")
        return
    logging.info("dockit available")
    cgiEnv = lib_common.ScriptEnvironment()
    process_id = cgiEnv.GetId()
    logging.info("process_id=%s" % process_id)
    atexit.register(_atexit_handler_detach, process_id)
    logging.info("atexit handler set")
    # This is called by dockit with one of event to be inserted in the global events graph.
    def dockit_events_callback(rdf_triple):
        grph = cgiEnv.ReinitGraph()
        logging.info("dockit_events_callback rdf_triple=%s" % rdf_triple)
        grph.add(rdf_triple)
        cgiEnv.OutCgiRdf()
    class DockitParameters:
        """
        We want to monitor all system calls of the target process.
        This class and its static values pass all parameters of the process
        to the module "dockit" which monitors the calls by attaching to the
        process given its pid.
        """
        verbose = 1
        with_warning = 1
        map_params_summary = dockit.full_map_params_summary
        with_dockerfile = True
        input_process_id = int(process_id)
        command_line = []
        output_format = "TXT"
        summary_format = None
        input_log_file = None
        output_files_prefix = "dockit_output"
        tracer = dockit.default_tracer(input_log_file, None)
        G_UpdateServer = dockit_events_callback
        aggregator = None
        duplicate_input_log = False
        output_makefile = None
    logging.debug("SendEvents process_id=%s DockitParameters (s) created" % process_id)
    # TODO: How to release the target process when this leaves ?
    try:
        dockit.start_processing(DockitParameters)
    except Exception as exc:
        logging.error("SendEvents caught (stderr): %s" % exc)
    logging.info("SendEvents after processing")
def Main():
    """Entry point: dispatch between snapshot mode and the event-feed mode."""
    if lib_util.is_snapshot_behaviour():
        logging.debug("system calls snapshot")
        Snapshot()
        return
    logging.debug("system calls events")
    try:
        SendEvents()
    except Exception as err:
        logging.error("Caught:%s" % err)
        raise
if __name__ == '__main__':
Main()
from .video_resnet_triplet_attention import encoder as encoder_attention
from .video_resnet_triplet_bilinear import encoder as encoder_bilinear
from .video_resnet_triplet_gap import encoder as encoder_gap
from .video_resnet_triplet_mxp import encoder as encoder_mxp
from .video_resnet_triplet_frame_wise import encoder as encoder_frame_wise

# Public re-exports: one encoder variant per pooling/aggregation strategy.
# BUGFIX: stray dataset residue fused onto the closing bracket line made the
# module unparsable; it has been removed.
__all__ = [
    'encoder_attention', 'encoder_bilinear', 'encoder_gap', 'encoder_mxp', 'encoder_frame_wise'
]
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module for creating clang-tidy builds."""
from __future__ import print_function
from chromite.cbuildbot import manifest_version
from chromite.cbuildbot.builders import generic_builders
from chromite.cbuildbot.stages import artifact_stages
from chromite.cbuildbot.stages import build_stages
from chromite.cbuildbot.stages import chrome_stages
from chromite.cbuildbot.stages import sync_stages
class ClangTidyBuilder(generic_builders.Builder):
  """Builder that creates builds for clang-tidy warnings in Chrome OS."""

  def GetVersionInfo(self):
    """Returns the CrOS version info from the chromiumos-overlay."""
    return manifest_version.VersionInfo.from_repo(self._run.buildroot)

  def GetSyncInstance(self):
    """Returns an instance of a SyncStage that should be run."""
    return self._GetStageInstance(sync_stages.ManifestVersionedSyncStage)

  def RunStages(self):
    """Run stages for clang-tidy builder."""
    # This builder only supports single-board configs.
    assert len(self._run.config.boards) == 1
    board = self._run.config.boards[0]
    self._RunStage(build_stages.UprevStage)
    self._RunStage(build_stages.InitSDKStage)
    self._RunStage(build_stages.SetupBoardStage, board)
    self._RunStage(chrome_stages.SyncChromeStage)
    self._RunStage(build_stages.BuildPackagesStage, board)
    # Final stage collects the clang-tidy warnings produced by the build.
    self._RunStage(artifact_stages.GenerateTidyWarningsStage, board)
from django.shortcuts import render
from rest_framework import status, generics, viewsets, views
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django_filters.rest_framework import DjangoFilterBackend
from .models import People, Companies
from . import serializers
class FruitsAndVegetablesViewset(viewsets.ReadOnlyModelViewSet):
    """
    Given a person index (id, name or guid) returns a list of fruits and vegetables they like.
    """
    queryset = People.objects.all()
    serializer_class = serializers.FruitsVegetablesSerializer
    filter_backends = (DjangoFilterBackend,)
    # Query-string parameters a client may filter on, e.g. ?name=Alice.
    filter_fields = ('_id', 'name', 'guid', 'index')
class CompanyEmployeesViewset(viewsets.ReadOnlyModelViewSet):
    """
    Given a company index (or name) returns all its employees.
    """
    queryset = Companies.objects.all()
    serializer_class = serializers.CompaniesEmployeesSerializer
    filter_backends = (DjangoFilterBackend,)
    # Query-string parameters a client may filter on, e.g. ?company=Acme.
    filter_fields = ('index', 'company')
class TwoPeopleView(views.APIView):
    """
    Given 2 people, provides their information and the list of their
    friends in common which have brown eyes and are still alive.
    """
    def get(self, request, pk1, pk2, format=None):
        """Return both people plus their filtered common friends.

        Responds with an empty object unless exactly the two requested
        people are found.
        """
        people = People.objects.filter(index__in=(pk1, pk2))
        if people.count() != 2:
            return Response({})
        common_friends = people[0].friends.all().intersection(people[1].friends.all())
        # BUGFIX: QuerySets are immutable -- filter() returns a new queryset.
        # The original discarded this result, so the eye-colour/alive
        # constraints were never applied.
        common_friends = common_friends.filter(eyeColor='brown', has_died=False)
        # NOTE(review): `people` ordering comes from the DB, so person1/person2
        # are not guaranteed to correspond to pk1/pk2 -- confirm callers do not
        # rely on the order.
        twopeople = {
            'person1': people[0],
            'person2': people[1],
            'common_friends': common_friends
        }
        serializer = serializers.TwoPeopleSerializer(twopeople)
        return Response(serializer.data)
from django.db import models
# Create your models here.
class Locker(models.Model):
    """A physical locker; `is_using` is True while it is occupied."""
    # Whether the locker is currently in use.
    is_using = models.BooleanField(default=False)
# coding=utf-8
# Copyright 2014 Janusz Skonieczny
"""
Gra w kółko i krzyżyk
"""
import pygame
import pygame.locals
import logging
# Logging-module configuration; an element for advanced users.
logging_format = '%(asctime)s %(levelname)-7s | %(module)s.%(funcName)s - %(message)s'
logging.basicConfig(level=logging.DEBUG, format=logging_format, datefmt='%H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
class Board(object):
    """
    Game board. Responsible for drawing the game window.
    """

    def __init__(self, width):
        """
        Board constructor; prepares the game window.
        :param width: width in pixels (the board is square)
        """
        self.surface = pygame.display.set_mode((width, width), 0, 32)
        pygame.display.set_caption('Tic-tac-toe')
        # PyGame's font machinery must be initialised before rendering text.
        pygame.font.init()
        font_path = pygame.font.match_font('arial')
        self.font = pygame.font.Font(font_path, 48)
        # 3x3 marker grid stored as a flat list; None marks an empty cell.
        self.markers = [None] * 9

    def draw(self, *args):
        """
        Draw the game window.
        :param args: extra drawables; each must expose draw_on(surface)
        """
        background = (0, 0, 0)
        self.surface.fill(background)
        self.draw_net()
        self.draw_markers()
        self.draw_score()
        for drawable in args:
            drawable.draw_on(self.surface)
        # Only here does the actual rendering happen; everything above merely
        # declared what should be drawn.
        pygame.display.update()

    def draw_net(self):
        """
        Draw the grid lines on the board.
        """
        color = (255, 255, 255)
        width = self.surface.get_width()
        for i in range(1, 3):
            pos = width / 3 * i
            # horizontal line
            pygame.draw.line(self.surface, color, (0, pos), (width, pos), 1)
            # vertical line
            pygame.draw.line(self.surface, color, (pos, 0), (pos, width), 1)

    def player_move(self, x, y):
        """
        Place player X's marker based on pixel coordinates.
        """
        cell_size = self.surface.get_width() / 3
        x /= cell_size
        y /= cell_size
        self.markers[int(x) + int(y) * 3] = player_marker(True)

    def draw_markers(self):
        """
        Draw the players' markers.
        """
        box_side = self.surface.get_width() / 3
        for x in range(3):
            for y in range(3):
                marker = self.markers[x + y * 3]
                if not marker:
                    continue
                # convert cell coordinates to the pixel centre of the cell
                center_x = x * box_side + box_side / 2
                center_y = y * box_side + box_side / 2
                self.draw_text(self.surface, marker, (center_x, center_y))

    def draw_text(self, surface, text, center, color=(180, 180, 180)):
        """
        Render the given text centred at the given point.
        """
        text = self.font.render(text, True, color)
        rect = text.get_rect()
        rect.center = center
        surface.blit(text, rect)

    def draw_score(self):
        """
        Check whether the game has finished and draw the proper message.
        """
        if check_win(self.markers, True):
            score = u"Wygrałeś(aś)"
        # FIX: this branch previously re-checked the X player (True), so a
        # computer (O) win was never reported; it must check player O.
        elif check_win(self.markers, False):
            score = u"Przegrałeś(aś)"
        elif None not in self.markers:
            score = u"Remis!"
        else:
            return
        i = self.surface.get_width() / 2
        self.draw_text(self.surface, score, center=(i, i), color=(255, 26, 26))
class TicTacToeGame(object):
    """
    Ties all the game elements together.
    """
    def __init__(self, width, ai_turn=False):
        """
        Prepare the game settings.
        :param width: board width measured in pixels
        :param ai_turn: True if the computer should move first
        """
        pygame.init()
        # clock used to control the speed of drawing
        # consecutive frames of the game
        self.fps_clock = pygame.time.Clock()
        self.board = Board(width)
        self.ai = Ai(self.board)
        self.ai_turn = ai_turn
    def run(self):
        """
        Main game loop.
        """
        while not self.handle_events():
            # keep looping until we receive the quit signal
            self.board.draw()
            if self.ai_turn:
                self.ai.make_turn()
                self.ai_turn = False
            self.fps_clock.tick(15)
    def handle_events(self):
        """
        Handle system events; mouse moves/clicks are interpreted here.
        :return: True if pygame reported a quit-game event
        """
        for event in pygame.event.get():
            if event.type == pygame.locals.QUIT:
                pygame.quit()
                return True
            if event.type == pygame.locals.MOUSEBUTTONDOWN:
                if self.ai_turn:
                    # ignore events while the computer's move is in progress
                    continue
                # current cursor position on the board, measured in pixels
                x, y = pygame.mouse.get_pos()
                self.board.player_move(x, y)
                self.ai_turn = True
class Ai(object):
    """
    Drives the computer's moves based on analysing the marker positions.
    """
    def __init__(self, board):
        self.board = board

    def make_turn(self):
        """
        Make the computer's move on the board (no-op if the board is full).
        """
        if None not in self.board.markers:
            # no moves available
            return
        logging.debug("Plansza: %s" % self.board.markers)
        move = self.next_move(self.board.markers)
        self.board.markers[move] = player_marker(False)

    @classmethod
    def next_move(cls, markers):
        """
        Pick the computer's next move for the given board.
        :param markers: flat 3x3 game board
        :return: index in the flat board array where to place the O marker
        """
        # collect all available moves together with their scores
        moves = cls.score_moves(markers, False)
        # choose the best-scored move
        score, move = max(moves, key=lambda m: m[0])
        logging.info("Dostępne ruchy: %s", moves)
        logging.info("Wybrany ruch: %s %s", move, score)
        return move

    @classmethod
    def score_moves(cls, markers, x_player):
        """
        Recursively score the possible moves (minimax-style exploration).
        A move that wins scores +1 for the computer and -1 for the human;
        otherwise the move's score is the sum of the scores of all follow-up
        moves played by the opposite player.
        :param markers: board used as the basis for exploring next moves
        :param x_player: True when scoring player X's move, False for player O
        """
        # FIX (idiom/perf): the import used to sit inside the loop body and
        # was re-executed for every candidate move; hoisted to the top.
        from copy import copy
        # every free cell is a candidate move
        available_moves = (i for i, m in enumerate(markers) if m is None)
        for move in available_moves:
            # play the candidate move on a copy of the board so it can be scored
            proposal = copy(markers)
            proposal[move] = player_marker(x_player)
            # does the player whose move we are testing win outright?
            if check_win(proposal, x_player):
                # award points when *we* (the computer, i.e. not x_player) win
                score = -1 if x_player else 1
                yield score, move
                continue
            # neutral move: recursively score the follow-ups for the other player
            next_moves = list(cls.score_moves(proposal, not x_player))
            if not next_moves:
                yield 0, move
                continue
            # separate scores from moves and sum them up as this move's score
            scores, moves = zip(*next_moves)
            yield sum(scores), move
def player_marker(x_player):
    """
    Helper returning the marker symbol for a player.
    :param x_player: True for player X, False for player O
    :return: the player's marker character
    """
    if x_player:
        return "X"
    return "O"
def check_win(markers, x_player):
    """
    Check whether the given marker layout is a win for the indicated player.
    :param markers: flat one-dimensional sequence of 9 markers
    :param x_player: True for player X, False for player O
    :return: True when the player has three in a row, False otherwise
    """
    win = [player_marker(x_player)] * 3
    seq = range(3)

    # helper fetching the marker at board coordinates (xx, yy)
    def marker(xx, yy):
        return markers[xx + yy * 3]

    # check every row
    for x in seq:
        row = [marker(x, y) for y in seq]
        if row == win:
            return True
    # check every column
    for y in seq:
        col = [marker(x, y) for x in seq]
        if col == win:
            return True
    # check both diagonals
    diagonal1 = [marker(i, i) for i in seq]
    diagonal2 = [marker(i, abs(i - 2)) for i in seq]
    if diagonal1 == win or diagonal2 == win:
        return True
    # FIX: previously fell through returning None; return False explicitly
    # (backward compatible -- all callers only test truthiness).
    return False
# This part should always sit at the end of the module (this file is a module):
# we want to launch the game only after all the classes have been declared.
if __name__ == "__main__":
    game = TicTacToeGame(300)
    game.run()
| nilq/baby-python | python |
from django.db import models
# Create your models here.
# With this file I set up what is needed so that another file can later be imported, ultimately creating a DB in postgres
# The __str__ methods are what will be displayed on the page in django
class Domicilio(models.Model):
    """Street address: street name, street number and country."""
    calle = models.CharField(max_length=255)  # street name
    no_calle = models.IntegerField()  # street number
    pais = models.CharField(max_length=255)  # country
    def __str__(self):
        # Human-readable form shown e.g. in the Django admin.
        cadena = f'Domicilio {self.id}: {self.calle} {self.no_calle} {self.pais}'
        return cadena
class Persona(models.Model):
    """Person with name, e-mail and an optional address (FK to Domicilio)."""
    nombre = models.CharField(max_length=255)  # first name
    apellido = models.CharField(max_length=255)  # last name
    email = models.CharField(max_length=255)
    # SET_NULL keeps the person when their address row is deleted.
    domicilio = models.ForeignKey(Domicilio, on_delete=models.SET_NULL, null=True)
    #domicilio = models.ForeignKey(Domicilio, on_delete=models.CASCADE(), null=True) # Alternative: deleting from one table would cascade-delete from the other
    def __str__(self):
        # Human-readable form shown e.g. in the Django admin.
        cadena = f'Persona {self.id}: {self.nombre} {self.apellido} {self.email}'
        return cadena
from PyObjCTools.TestSupport import *
from Foundation import *
# Python 2/3 compatibility shim: `unicode` does not exist on Python 3,
# where `str` is already the unicode string type.
try:
    unicode
except NameError:
    unicode = str
class TestNSLinguisticTagger (TestCase):
    """Verify that the NSLinguisticTagger constants and method signatures
    exposed by the PyObjC bridge match the Foundation API (10.7+)."""
    @min_os_level('10.7')
    def testConstants(self):
        # Every tag-scheme and tag identifier must be bridged as a string.
        self.assertIsInstance(NSLinguisticTagSchemeTokenType, unicode)
        self.assertIsInstance(NSLinguisticTagSchemeLexicalClass, unicode)
        self.assertIsInstance(NSLinguisticTagSchemeNameType, unicode)
        self.assertIsInstance(NSLinguisticTagSchemeNameTypeOrLexicalClass, unicode)
        self.assertIsInstance(NSLinguisticTagSchemeLemma, unicode)
        self.assertIsInstance(NSLinguisticTagSchemeLanguage, unicode)
        self.assertIsInstance(NSLinguisticTagSchemeScript, unicode)
        self.assertIsInstance(NSLinguisticTagWord, unicode)
        self.assertIsInstance(NSLinguisticTagPunctuation, unicode)
        self.assertIsInstance(NSLinguisticTagWhitespace, unicode)
        self.assertIsInstance(NSLinguisticTagOther, unicode)
        self.assertIsInstance(NSLinguisticTagNoun, unicode)
        self.assertIsInstance(NSLinguisticTagVerb, unicode)
        self.assertIsInstance(NSLinguisticTagAdjective, unicode)
        self.assertIsInstance(NSLinguisticTagAdverb, unicode)
        self.assertIsInstance(NSLinguisticTagPronoun, unicode)
        self.assertIsInstance(NSLinguisticTagDeterminer, unicode)
        self.assertIsInstance(NSLinguisticTagParticle, unicode)
        self.assertIsInstance(NSLinguisticTagPreposition, unicode)
        self.assertIsInstance(NSLinguisticTagNumber, unicode)
        self.assertIsInstance(NSLinguisticTagConjunction, unicode)
        self.assertIsInstance(NSLinguisticTagInterjection, unicode)
        self.assertIsInstance(NSLinguisticTagClassifier, unicode)
        self.assertIsInstance(NSLinguisticTagIdiom, unicode)
        self.assertIsInstance(NSLinguisticTagOtherWord, unicode)
        self.assertIsInstance(NSLinguisticTagSentenceTerminator, unicode)
        self.assertIsInstance(NSLinguisticTagOpenQuote, unicode)
        self.assertIsInstance(NSLinguisticTagCloseQuote, unicode)
        self.assertIsInstance(NSLinguisticTagOpenParenthesis, unicode)
        self.assertIsInstance(NSLinguisticTagCloseParenthesis, unicode)
        self.assertIsInstance(NSLinguisticTagWordJoiner, unicode)
        self.assertIsInstance(NSLinguisticTagDash, unicode)
        self.assertIsInstance(NSLinguisticTagOtherPunctuation, unicode)
        self.assertIsInstance(NSLinguisticTagParagraphBreak, unicode)
        self.assertIsInstance(NSLinguisticTagOtherWhitespace, unicode)
        self.assertIsInstance(NSLinguisticTagPersonalName, unicode)
        self.assertIsInstance(NSLinguisticTagPlaceName, unicode)
        self.assertIsInstance(NSLinguisticTagOrganizationName, unicode)
        # Tagger option constants form a bitmask.
        self.assertEqual(NSLinguisticTaggerOmitWords, 1 << 0)
        self.assertEqual(NSLinguisticTaggerOmitPunctuation, 1 << 1)
        self.assertEqual(NSLinguisticTaggerOmitWhitespace, 1 << 2)
        self.assertEqual(NSLinguisticTaggerOmitOther, 1 << 3)
        self.assertEqual(NSLinguisticTaggerJoinNames, 1 << 4)
    @min_os_level('10.7')
    def testMethods(self):
        # Out (by-reference) parameters must be declared as 'o^' pointers
        # so the bridge returns them instead of requiring a C pointer.
        self.assertArgHasType(NSLinguisticTagger.orthographyAtIndex_effectiveRange_,
                1, b'o^' + NSRange.__typestr__)
        self.assertArgIsBlock(NSLinguisticTagger.enumerateTagsInRange_scheme_options_usingBlock_,
                3, b'v@' + NSRange.__typestr__ + NSRange.__typestr__ + b'o^' + objc._C_NSBOOL)
        self.assertArgHasType(NSLinguisticTagger.tagAtIndex_scheme_tokenRange_sentenceRange_,
                2, b'o^' + NSRange.__typestr__)
        self.assertArgHasType(NSLinguisticTagger.tagAtIndex_scheme_tokenRange_sentenceRange_,
                3, b'o^' + NSRange.__typestr__)
        self.assertArgHasType(NSLinguisticTagger.tagsInRange_scheme_options_tokenRanges_,
                3, b'o^@')
        self.assertArgHasType(NSLinguisticTagger.possibleTagsAtIndex_scheme_tokenRange_sentenceRange_scores_,
                2, b'o^' + NSRange.__typestr__)
        self.assertArgHasType(NSLinguisticTagger.possibleTagsAtIndex_scheme_tokenRange_sentenceRange_scores_,
                3, b'o^' + NSRange.__typestr__)
        self.assertArgHasType(NSLinguisticTagger.possibleTagsAtIndex_scheme_tokenRange_sentenceRange_scores_,
                4, b'o^@')
        self.assertArgIsOut(NSString.linguisticTagsInRange_scheme_options_orthography_tokenRanges_, 4)
        self.assertArgIsBlock(NSString.enumerateLinguisticTagsInRange_scheme_options_orthography_usingBlock_,
                4, b'v@' + NSRange.__typestr__ + NSRange.__typestr__ + b'o^' + objc._C_NSBOOL)
# `main` comes from PyObjCTools.TestSupport via the wildcard import above.
if __name__ == "__main__":
    main()
| nilq/baby-python | python |
# Written by Jeremy Lee, 2020-10-30
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a `viewers` relation to Map and extend the share-status choices
    (adds 'viewers and editors' and 'blocked'); also renames the reverse
    accessors for viewers/editors."""
    dependencies = [
        ('umap', '0007_auto_20190416_1757'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # New share-status values; existing rows keep their integer codes.
        migrations.AlterField(
            model_name='map',
            name='share_status',
            field=models.SmallIntegerField(choices=[(1, 'everyone (public)'), (2, 'anyone with link'), (3, 'editors only'), (4, 'viewers and editors'), (5, 'authenticated'), (9, 'blocked')], default=1, verbose_name='share status'),
        ),
        # Users allowed to *view* a restricted map.
        migrations.AddField(
            model_name='map',
            name='viewers',
            field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL, verbose_name='viewers', related_name='map_viewers'),
        ),
        # Explicit related_name avoids clashing with the new viewers relation.
        migrations.AlterField(
            model_name='map',
            name='editors',
            field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL, verbose_name='editors', related_name='map_editors'),
        ),
    ]
| nilq/baby-python | python |
from pyomo.core import *
class Model:
    """Container for an abstract Pyomo optimisation model with PV and load
    forecast parameters."""
    model = AbstractModel()
    model.T = Set()  # index set for the time steps of the optimization horizon
    model.S = Set()  # NOTE(review): original comment duplicated T's; P_PV is indexed over S x T, so S presumably indexes scenarios -- confirm
    ################################## PARAMETERS #################################
    ################################################################################################
    # definition of the PV
    model.P_PV = Param(model.S, model.T, within=NonNegativeReals)  # PV PMPP forecast, per scenario and time step
    model.P_Load = Param(model.T, within=NonNegativeReals)  # load forecast per time step
"""
This module contains event handlers related to the restart options
after a Codenames game ends.
"""
from flask_socketio import emit, leave_room
from flask_login import current_user
from app import socketio, db
from app.models.user_data import UserData
from app.games.codenames.models import CodenamesTeams
from app.utils import is_admin
from .constants import NAMESPACE, STATE_KEYS, TEAMS, STATES, SPYMASTER
from .utils import is_codenames_player, player_distribution_is_valid, create_word_list
#########################################################################
# #
# EVENT HANDLERS #
# ================== #
# #
# EVENTS EXPLANATION #
# #
# restart Fired when the room admin chooses the #
# "Start a new game" option. #
# #
# restart_with_same_teams Fired when the room admin chooses the #
# restart with same teams option. #
# #
#########################################################################
@socketio.on("restart", namespace=NAMESPACE)
@is_codenames_player
@is_admin
def on_restart():
    """
    Handles the restart event. This is fired when the room admin chooses
    to start a new game: every player is moved back to the neutral team
    and the room returns to the JOIN state.
    """
    # Only a finished game may be restarted.
    if not current_user.room.codenames_room.state == STATES.GAME_OVER:
        return
    # Tell every client in the room to switch back to the JOIN screen.
    emit("set_state", {STATE_KEYS.GAME_STATE: STATES.JOIN}, room=current_user.room_id)
    team = CodenamesTeams.query.filter_by(
        room_id=current_user.room_id, team_name=TEAMS.NEUTRAL
    ).first()
    current_user.room.codenames_room.state = STATES.JOIN
    current_user.room.codenames_room.state_details = None
    team_list = dict()
    team_list["players"] = []
    users = UserData.query.filter_by(room_id=current_user.room_id)
    for user in users:
        # Leave the team-specific socket.io room and rejoin as neutral.
        leave_room(user.room_id + user.codenames_player.team.team_name)
        user.codenames_player.team = team
        team_list["players"].append(
            {"id": user.id, "user": user.username, "team": TEAMS.NEUTRAL}
        )
        # Clear any spymaster assignment left over from the finished game.
        if user.codenames_player.spymaster_of is not None:
            user.codenames_player.spymaster_of.spymaster = None
    db.session.commit()
    team_list["currentTeam"] = TEAMS.NEUTRAL
    team_list["state"] = STATES.JOIN
    team_list[TEAMS.BLUE + SPYMASTER] = None
    team_list[TEAMS.RED + SPYMASTER] = None
    emit("team_list", team_list, room=current_user.room_id)
@socketio.on("restart_with_same_teams", namespace=NAMESPACE)
@is_codenames_player
@is_admin
def on_restart_with_same_teams():
    """
    Handles the restart_with_same_teams event. This is fired when the room admin choses
    to start a new game with the same teams.
    """
    # Only a finished game may be restarted.
    if not current_user.room.codenames_room.state == STATES.GAME_OVER:
        return
    # Refuse while the stored state detail still points at a spymaster slot.
    if (
        current_user.room.codenames_room.state_details == TEAMS.BLUE + SPYMASTER
        or current_user.room.codenames_room.state_details == TEAMS.RED + SPYMASTER
    ):
        return
    # FIX: the validator must be *called* -- the bare function object is
    # always truthy, so this guard previously never rejected an invalid
    # player distribution. (TODO confirm it takes no arguments.)
    if not player_distribution_is_valid():
        return
    current_user.room.codenames_room.state = STATES.STARTED
    current_user.room.codenames_room.turns_left = 0
    db.session.commit()
    emit(
        "set_state", {STATE_KEYS.GAME_STATE: STATES.STARTED}, room=current_user.room_id
    )
    create_word_list()
| nilq/baby-python | python |
from pyg_base import is_num, is_ts, df_concat
import pandas as pd
import numpy as np
import numba
@numba.njit
def _p(x, y, vol = 0):
    """
    Soft "probability that x < y" comparison.

    With vol == 0 this is a hard three-way comparison returning +1/-1/0;
    otherwise the result is smoothed with an exponential tail of width vol.
    """
    if vol != 0:
        one_sided_tail = 0.5 * np.exp(-abs(y - x) / vol)
        if x < y:
            return 1 - one_sided_tail
        return one_sided_tail
    if x < y:
        return 1.
    if x > y:
        return -1.
    return 0.0
@numba.njit
def _xrank(a, w, b, vol, scale = 0 , reweight = False):
    """
    Cross-sectional rank of the vector *a* into (-1, 1).

    Per the original design notes, the rank r satisfies (over valid entries):
      i)   a[i] < a[j]  ==>  r[i] < r[j]
      ii)  r[i] in (-1, 1)
      iii) \sum w[i] r[i] = 0

    :param a: values to rank; NaN entries are masked out and stay NaN
    :param w: per-entry weights; zero-weight entries are skipped
    :param b: per-entry betas, combined with w into the ranking weights wb
    :param vol: smoothing vol for the pairwise comparison _p; a negative
        value means "estimate it from the wb-weighted variance of a"
    :param scale: 0 = raw ranks; 1/2/3 = divide by alternative std estimates
    :param reweight: if True, multiply the resulting ranks by the weights
    :return: array of ranks, or an all-NaN array when the total weight is 0
    """
    # Mask NaNs: zero them out of a, b and w so they cannot contribute.
    not_ok = np.isnan(a)
    ok = ~not_ok
    if np.max(not_ok):
        a = a.copy(); w = w.copy(); b = b.copy()
        a[not_ok] = 0.0
        b[not_ok] = 0.0
        w[not_ok] = 0.0
    wb = w * b
    total_wb = np.sum(wb)
    if total_wb == 0:
        # No usable weight at all: nothing can be ranked.
        return np.full_like(a, np.nan)
    else:
        r = np.zeros_like(a)
        wb = wb / total_wb
        if vol < 0:
            # Estimate vol as the wb-weighted cross-sectional std of a.
            wba = wb * a
            m1 = np.sum(wba)
            m2 = np.sum(wba * a)
            vol = (m2 - m1**2) ** 0.5
        # Pairwise comparison: each pair (i, j) pushes the two ranks in
        # opposite directions, weighted by the other entry's wb.
        for i in range(a.shape[0]):
            if ok[i] and w[i]!=0:
                for j in range(i):
                    if ok[j] and w[j]!=0:
                        qq = _p(a[i], a[j], vol)
                        pp = 1-qq
                        r[i] += (2*pp-1) * wb[j]
                        r[j] += (2*qq-1) * wb[i]
        if scale == 0:
            std = 1
        elif scale == 1:  # normalise weights to sum 1, then std adjusted by (1 - b^2)
            total_w = np.sum(w)
            w = w / total_w
            std = np.sum((w*r)**2*(1-b**2)) ** 0.5
            r = r/std
        elif scale == 2:  # unweighted std of the ranks
            std = (np.sum(r**2) - np.sum(r)**2) ** 0.5
            r = r/std
        elif scale == 3:  # weight-averaged second moment of the ranks
            total_w = np.sum(w)
            w = w / total_w
            std = np.sum(w*(r**2)) ** 0.5
            r = r/std
        # Restore NaNs where the input was NaN.
        r[not_ok] = np.nan
        if reweight:
            r = r * w
        return r
@numba.njit
def _xrank_2d(a, w, b, vol, scale, reweight):
    """Apply _xrank row by row over a 2-d array (one row per date);
    vol is a per-row vector while scale/reweight are shared."""
    res = np.empty_like(a)
    for i in range(a.shape[0]):
        res[i] = _xrank(a = a[i], w = w[i], b = b[i], vol = vol[i], scale = scale , reweight = reweight)
    return res
def xrank(a, weight = None, beta = None, vol = True, scale = 0 , reweight = False, columns = None):
    """
    Cross-sectional rank of each date (row) of *a* into (-1, 1), delegating
    the per-row work to _xrank via _xrank_2d.

    :param a: timeseries (or list of timeseries) to rank cross-sectionally
    :param weight: per-column weights: None (equal), a number, or a timeseries
    :param beta: per-column betas: None (1.0), a number, or a timeseries
    :param vol: True (or any negative number) to let _xrank estimate the
        smoothing vol per row, a fixed number, or a timeseries of vols
    :param scale: normalisation mode forwarded to _xrank (0..3)
    :param reweight: if True, multiply the resulting ranks by the weights
    :param columns: optional column list used when concatenating *a*
    :return: pd.DataFrame of ranks with the same index/columns as *a*

    :Example:
    ---------
    >>> a = pd.DataFrame(np.random.normal(0,1,(1000,20)), drange(-999))
    >>> res = xrank(cumsum(a))
    """
    a = df_concat(a, columns).ffill()
    index = a.index
    cols = a.columns
    a_ = a.values
    # Broadcast the weights to the shape of a.
    if weight is None:
        w = np.full_like(a_, 1.)
    elif is_num(weight):
        w = np.full_like(a_, weight)
    else:
        w = df_concat(weight, columns).reindex(index, method = 'ffill')
    # Broadcast the betas to the shape of a.
    if beta is None:
        b = np.full_like(a_, 1.)
    elif is_num(beta):
        b = np.full_like(a_, beta)
    else:
        b = df_concat(beta, columns).reindex(index, method = 'ffill')
    if vol is True:
        vol = -1  # sentinel: _xrank estimates the vol cross-sectionally
    if is_ts(vol):
        # FIX: this previously read `vol - vol.reindex(...)` -- a no-op
        # subtraction whose result was discarded, leaving vol unaligned
        # with a's index. The reindexed series must be assigned back.
        vol = vol.reindex(index, method = 'ffill')
        if isinstance(vol, pd.DataFrame) and vol.shape[1] == 1:
            vol = vol.iloc[:,0]
    else:
        vol = np.full(a_.shape[0], vol)
    # Convert any remaining timeseries to raw numpy arrays for numba.
    b, w, vol = [df.values if is_ts(df) else df for df in (b,w,vol)]
    res = _xrank_2d(a_, w, b, vol, scale, reweight)
    return pd.DataFrame(res, index, cols)
from .data import *
from .selector import *
from .utils import *
from .dataset import * | nilq/baby-python | python |
# Search for lines that start 'X' followed by any non whitespace
# characters and ':' then output the first group of non whitespace
# characters that follows
import re
# FIX: use a context manager so the file handle is always closed
# (the original left `hand` open for the lifetime of the process).
with open('mbox-short.txt') as hand:
    for line in hand:
        line = line.rstrip()
        # Capture the first run of non-whitespace after e.g. "X-DSPAM-Confidence: ".
        x = re.findall('^X\S*: (\S+)', line)
        if not x:
            continue
        print(x)
| nilq/baby-python | python |
from datetime import datetime
import numpy as np
from multiprocessing import Pool, cpu_count
import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from .utils.preprocess_conllu import *
from .utils.helpers import *
from .utils.tools import *
from .models import *
# TODO put this in a config file
# Pre-trained utf8 codebook (overfit variant, 64-dim embeddings).
fcodebook = "/home/leo/projects/minibrain/predictors/sequence/text/utf8-codes/utf8_codebook_overfit_matrix_2seg_dim64.npy"
# Full utf8 code matrix (2-segment encoding).
utf8codematrix = "/home/leo/projects/minibrain/predictors/sequence/text/utf8-codes/utf8_code_matrix_2seg.npy"
# Pre-batched UD-treebank training data (batches x 3 x 1024, uint16).
dataset_train = "/home/leo/projects/Datasets/text/UniversalDependencies/ud-treebanks-v2.5/traindev_np_batches_779000x3x1024_uint16.npy"
BASE_DATA_DIR_UD_TREEBANK = "/home/leo/projects/Datasets/text/UniversalDependencies/ud-treebanks-v2.5"
# cuda seems to reverse the GPU ids with CUDA id so ... mess
# Cuda maps cuda:0 to my RTX 2080ti (GPU#1) and
# Cuda maps cuda:1 to my GTX 1080 (GPU#0)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
def train_test(model, checkpoint_path, base_name, max_seq_len=384, test_loss=True, test_accuracy=False, max_data=45):
    """
    Train *model* on the pre-batched UD-treebank dataset, checkpointing and
    optionally evaluating after every epoch.

    :param model: checkpointable network (exposes save_checkpoint)
    :param checkpoint_path: directory where checkpoints are written
    :param base_name: base file name for the checkpoints
    :param max_seq_len: maximum sequence length forwarded to train/test
    :param test_loss: if True, compute the test loss after each epoch
    :param test_accuracy: if True, compute the test accuracy after each epoch
    :param max_data: maximum number of test datasets to load
    """
    model = model.to(device)
    data_train = np.load(dataset_train)
    # optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-4)
    optimizer = torch.optim.AdamW(model.parameters())
    loss_function = pos_loss_function
    epoch_size = 10000
    batch_size = 50
    data = data_train
    test_data = None
    if test_loss:
        test_data = load_test_data(BASE_DATA_DIR_UD_TREEBANK, max_data)
    epochs = chunks(data, epoch_size, dim=0)
    epoch_count = 1
    for e in epochs:
        batches = chunks(e, batch_size, dim=0)
        train(model, optimizer, loss_function, batches, epoch_count, epoch_size, device, max_seq_len)
        torch.cuda.empty_cache()
        # checkpoint after every epoch
        cid = f"{epoch_count:04}"
        model.save_checkpoint(checkpoint_path, base_name, cid)
        # test loss/accuracy: with batches bigger than 50 the GPU runs out of memory
        if test_loss:
            test(model, loss_function, test_data, epoch_count, device, max_data, max_seq_len)
            torch.cuda.empty_cache()
        if test_accuracy:
            # FIX: the boolean parameter `test_accuracy` shadows the helper
            # function of the same name (imported via the wildcard import and
            # used in test_async/test_acc_async); the original called the
            # bool itself, raising TypeError whenever it was True. Look the
            # function up in the module namespace instead.
            globals()["test_accuracy"](model, test_data, epoch_count, device, max_data)
            torch.cuda.empty_cache()
        epoch_count += 1
def test_async(checkpoint_path, test_data_path, epoch_count, device, max_data, test_acc=False):
    """
    Worker entry point for Pool.apply_async: rebuild the model from a
    checkpoint and run the loss (and optionally accuracy) tests on CPU.

    :param checkpoint_path: checkpoint file to load the model weights from
    :param test_data_path: base directory of the UD-treebank test data
    :param epoch_count: epoch number (used for reporting/logging)
    :param device: torch device to run the evaluation on
    :param max_data: maximum number of test datasets to load
    :param test_acc: if True, also run the accuracy test
    """
    # load checkpoint
    # model is hardcoded for the moment
    utf8codes = np.load(fcodebook)
    utf8codes = utf8codes.reshape(1987, 64)
    model = GatedConv1DPoS(utf8codes).to(device)
    model.load_checkpoint(checkpoint_path)
    test_data = load_test_data(test_data_path)
    print("launching test in CPU")
    test(model, pos_loss_function, test_data, epoch_count, device, max_data)
    if test_acc:
        print("launching Accuracy test in CPU")
        test_accuracy(model, test_data, epoch_count, device, max_data)
def test_acc_async(checkpoint_path, test_data_path, epoch_count, device, max_data):
    """
    Worker entry point for Pool.apply_async: rebuild the model from a
    checkpoint and run only the accuracy test on CPU.
    """
    # load checkpoint
    # model is hardcoded for the moment
    utf8codes = np.load(fcodebook)
    utf8codes = utf8codes.reshape(1987, 64)
    model = GatedConv1DPoS(utf8codes).to(device)
    model.load_checkpoint(checkpoint_path)
    test_data = load_test_data(test_data_path)
    print("launching Accuracy test in CPU")
    test_accuracy(model, test_data, epoch_count, device, max_data)
def err_ckb(err):
    """Error callback for ``Pool.apply_async``: report the subprocess failure."""
    print("error with the subprocess ", err)
# Note this is TOO slow, GPU test is 30-50 times faster than in CPU, so CPU not useful for practical purposes
def train_cputest(model, checkpoint_path, base_name, test_accuracy=True, max_data=45):
    """
    Same training loop as train_test, but loss/accuracy evaluation is
    offloaded to a CPU worker pool after each checkpoint so the GPU keeps
    training. Kept for reference -- see the speed note above.
    """
    pool = Pool(cpu_count() - 2)
    model = model.to(device)
    data_train = np.load(dataset_train)
    # optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-4)
    optimizer = torch.optim.AdamW(model.parameters())
    # loss_function = F.nll_loss
    loss_function = pos_loss_function
    epoch_size = 10000
    batch_size = 50
    # TODO this is for testing purposes
    data = data_train
    # data = data_train[-1000*batch_size:, :, :] # just for the trials, use the last 1000 batches only
    epochs = chunks(data, epoch_size, dim=0)
    epoch_count = 1
    for e in epochs:
        batches = chunks(e, batch_size, dim=0)
        train(model, optimizer, loss_function, batches, epoch_count, epoch_size, device)
        torch.cuda.empty_cache()
        # checkpoint
        cid = f"{epoch_count:04}"  # cid = str(epoch_count).zfill(4)
        fchkpoint = model.save_checkpoint(checkpoint_path, base_name, cid)
        # test loss and accuracy to be measured in CPU (or another GPU)
        # with batches bigger than less than 50 my GPU is out of memory
        # NOTE(review): the AsyncResult handles below are never waited on and
        # the pool is never closed/joined -- results may be lost on exit.
        res_test = pool.apply_async(test_async,
                                    [fchkpoint, BASE_DATA_DIR_UD_TREEBANK, epoch_count, device,
                                     max_data],
                                    error_callback=err_ckb)
        if test_accuracy:
            res_acc = pool.apply_async(test_acc_async,
                                       [fchkpoint, BASE_DATA_DIR_UD_TREEBANK, epoch_count, device,
                                        max_data],
                                       error_callback=err_ckb)
        torch.cuda.empty_cache()
        epoch_count += 1
    # model.network.save_model("./trained_models/conv1dcol", "conv1dcol_nll-loss_epoch-{}".format(epoch_count))
def old_main_conv1d():
    """Train/evaluate the legacy Conv1D PoS model with the overfit codebook."""
    utf8codes = np.load(fcodebook)
    utf8codes = utf8codes.reshape(1987, 64)
    model = OLD_Conv1DPoS(utf8codes)
    path = "./trained_models/conv1dcol"
    base_name = "conv1dcol_nll-loss"
    train_test(model, path, base_name)
def old_main_gatedconv1d():
    """Train/evaluate the gated Conv1D PoS model with the overfit codebook."""
    utf8codes = np.load(fcodebook)
    utf8codes = utf8codes.reshape(1987, 64)
    model = GatedConv1DPoS(utf8codes)
    path = "./trained_models/GatedConv1DCol"
    base_name = "GatedConv1DPoS_nll-loss"
    train_test(model, path, base_name)
def main_conv1dcolnet():
    """Train/evaluate the Conv1DColNet encoder + linear UPOS/DEPREL decoder."""
    utf8codes = np.load(utf8codematrix)
    # utf8codes = utf8codes.reshape(1987, 324)
    encoder = Conv1DColNet(transpose_output=True)  # use default parameters
    decoder = LinearUposDeprelDecoder(transpose_input=False)
    model = NetContainer(utf8codes, encoder, decoder)
    path = "./trained_models/Conv1dColNet_try3"
    base_name = "Conv1dColNet_nll-loss"
    train_test(model, path, base_name)
# Default checkpoint intended to seed the convolutional encoder in main_convattnet.
CONV1D_PRETRAIN_FILE = "/home/leo/projects/minibrain/predictors/sequence/text/trained_models/Conv1dColNet/Conv1dColNet_nll-loss_0078.state_dict.pth"
def main_convattnet(conv1d_pretrain_file=CONV1D_PRETRAIN_FILE):
    """Train/evaluate ConvAttColNet built on top of a (frozen) Conv1DColNet
    encoder; prints parameter counts before training.

    :param conv1d_pretrain_file: checkpoint meant to initialise the encoder
        (currently unused -- see the review note below).
    """
    utf8codes = np.load(utf8codematrix)
    # utf8codes = utf8codes.reshape(1987, 324)
    # the convolutional encoder must NOT be retrained (that is what I'm trying to test)
    with torch.no_grad():
        conv1d_encoder = Conv1DColNet(transpose_output=False)  # use default parameters
        conv1d_decoder = LinearUposDeprelDecoder(transpose_input=False)
        conv1d_model = NetContainer(utf8codes, conv1d_encoder, conv1d_decoder)
        # load pre-trained conv1dcolnet
        # conv1d_model.load_checkpoint(conv1d_pretrain_file)
        # NOTE(review): the checkpoint load above is commented out, so the
        # "pretrained" encoder is actually randomly initialised -- confirm intent.
        # cleanup things that we'll not use, we just need the encoder
        del conv1d_model
        del conv1d_decoder
        torch.cuda.empty_cache()
    # conv1d_encoder = Conv1DColNet(transpose_output=False) # use default parameters
    encoder = ConvAttColNet(conv1d_encoder, transpose_output=False)
    decoder = LinearUposDeprelDecoder(transpose_input=False)
    model = NetContainer(utf8codes, encoder, decoder)
    print("Starting training for model with column type ConvAttNetCol and pretrained Conv1dColNet")
    print("Parameter model details: ")
    # NOTE(review): the line below passes conv1d_encoder twice; the second
    # argument was presumably meant to be the *trainable* parameter count.
    print("conv1d_encoder parameters {} from which {} are trainable ".
          format(count_parameters(conv1d_encoder), count_parameters(conv1d_encoder)))
    print("ConvAttColNet parameters {} from which {} are trainable ".
          format(count_parameters(encoder), count_parameters(encoder)))
    print("decoder parameters {} from which {} are trainable ".
          format(count_parameters(decoder), count_parameters(decoder)))
    print("Total model parameters {} from which {} are trainable ".
          format(count_parameters(model), count_parameters(model)))
    path = "./trained_models/ConvAttNet"
    base_name = "ConvAttNet_nll-loss"
    train_test(model, path, base_name, max_seq_len=384, max_data=60)
| nilq/baby-python | python |
import sys, re
from mk_yaml_ontology import ont_node, dump_yaml
def replace_token(pattern, replacement, s):
    """Replace space-delimited occurrences of *pattern* in *s*.

    The token is matched with a space boundary on either side (the space is
    optional at the string edges); the replacement is always padded with
    single spaces, mirroring the boundary that was consumed.
    """
    padded = f' {replacement} '
    for boundary_re in (f' ?{pattern} ', f' {pattern} ?'):
        s = re.sub(boundary_re, padded, s)
    return s
def mk_ont_node(line_string):
    """Build an ontology node from one tab-separated flat-file line.

    Column 0 is the variable name, column 3 the description; chemical
    shorthand tokens in the description are expanded to full names.
    """
    fields = line_string.split("\t")
    assert(len(fields) >= 4)
    var_name = fields[0].strip()
    description = fields[3].strip()
    # Expansion order matters only for readability here: the space-bounded
    # matching keeps e.g. "C" from touching "CO2" or "CH2O".
    expansions = (
        ("C", "carbon"),
        ("CO2", "carbon dioxide"),
        ("CH2O", "formaldehyde"),
        ("N", "nitrogen"),
        ("NH3", "ammonia"),
        ("NH4", "ammonium"),
        ("NO3", "nitrate"),
        ("P", "phosphorus"),
    )
    for token, full_name in expansions:
        description = replace_token(token, full_name, description)
    return ont_node(var_name, [description], None, add_name = False)  # the name isn't in a format we can use
def main():
    """CLI entry point: convert a flat variable file into a YAML ontology.

    argv: 1) input flat file, 2) output ontology YAML path, 3) ontology name.
    """
    flat_file = sys.argv[1]
    ont_file = sys.argv[2]
    ont_name = sys.argv[3]
    with open(flat_file, "r") as f:
        _ = f.readline() # skip the header line
        lines = [line.rstrip() for line in f.readlines()]
    nodes = [mk_ont_node(line) for line in lines]
    dump_yaml(nodes, ont_file, ont_name)

# NOTE(review): runs on *import* as well -- consider guarding with
# `if __name__ == "__main__":`.
main()
import os
import pytest
from aztk.models.plugins import PluginConfiguration
from aztk.models.plugins.internal import PluginManager
from aztk.error import InvalidPluginReferenceError
# Directory of this test module; used to locate the fake plugin fixtures.
dir_path = os.path.dirname(os.path.realpath(__file__))
fake_plugin_dir = os.path.join(dir_path, "fake_plugins")
def RequiredArgPlugin(req_arg):
    """Test fixture: a plugin factory with one required argument."""
    return PluginConfiguration(name="required-arg")
def test_missing_plugin():
    """Looking up an unknown plugin name raises InvalidPluginReferenceError."""
    plugin_manager = PluginManager()
    message = "Cannot find a plugin with name .*"
    with pytest.raises(InvalidPluginReferenceError, match=message):
        plugin_manager.get_plugin("non-existing-plugin")
def test_extra_args_plugin():
    """Passing an argument the plugin does not declare raises an error."""
    plugin_manager = PluginManager()
    message = "Plugin JupyterPlugin doesn't have an argument called 'invalid'"
    with pytest.raises(InvalidPluginReferenceError, match=message):
        plugin_manager.get_plugin("jupyter", args=dict(invalid="foo"))
def test_missing_required_arg():
    """Omitting a plugin's required argument raises an error."""
    plugin_manager = PluginManager()
    # Register the fixture plugin directly on the manager for this test.
    plugin_manager.plugins["required-arg"] = RequiredArgPlugin
    message = "Missing a required argument req_arg for plugin RequiredArgPlugin"
    with pytest.raises(InvalidPluginReferenceError, match=message):
        plugin_manager.get_plugin("required-arg")
| nilq/baby-python | python |
from abt.cli import main
| nilq/baby-python | python |
import asyncio
import logging
from rsp1570serial.discovery import discover_source_aliases
if __name__ == "__main__":
    # Timestamped INFO logging so discovery progress is visible on the console.
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s %(levelname)s:%(message)s"
    )
    # A serial-over-TCP URL may be passed explicitly instead, e.g.:
    # asyncio.run(discover_source_aliases("socket://192.168.50.211:50002"))
    asyncio.run(discover_source_aliases())
| nilq/baby-python | python |
from copy import deepcopy
class ensemble:
    """A guarded multiplexer over alternative values.

    Candidates are keyed by ``id(obj)`` together with the guard under which
    each applies; the ``None`` key holds the unguarded default.  Calling the
    ensemble collapses it to a single value by chaining
    ``guard.if_else(candidate, current)`` over all guarded candidates.
    NOTE(review): guards are presumably secure/conditional values providing
    ``|`` and ``if_else`` -- confirm against the guard type used by callers.
    """
    def __init__(self, obj):
        # Start with only the unguarded default; key None marks the fallback.
        self.vals = {None: (None,obj)}
    def get(self):
        """Return the plain value when unambiguous, else the ensemble itself."""
        if self.vals is None: return self.val  # already collapsed by __call__
        if len(self.vals)==1:
            # Only the default entry exists -- no branching is needed.
            for nm in self.vals.values():
                return nm[1]
        return self
    def add(self, guard, obj):
        """Record *obj* as valid under *guard*, OR-merging guards per object."""
        if id(obj) in self.vals:
            # The same object was added under another guard: combine with |.
            (g,obj) = self.vals[id(obj)]
            self.vals[id(obj)] = (g|guard,obj)
        else:
            self.vals[id(obj)] = (guard,obj)
    def __call__(self):
        """Collapse to a single value; cache it in self.val and drop self.vals."""
        if self.vals is not None:
            self.val = self.vals[None][1]  # start from the unguarded default
            for (g,obj) in self.vals.values():
                if g is None: continue  # skip the default entry itself
                if isinstance(obj,ensemble): obj=obj()  # collapse nested ensembles first
                self.val = g.if_else(obj,self.val)
            self.vals = None  # mark as collapsed
        return self.val
class values:
    """Dict-like variable scope whose entries may be unresolved ensembles.

    ``__getitem__`` forces (collapses) an ensemble entry in place, while
    ``get`` returns the best available plain value without forcing.
    """
    def __init__(self):
        self.dic = {}
    def __getitem__(self, var):
        """Return the resolved value of *var*; collapses an ensemble in place."""
        if not var in self.dic: raise NameError("name '" + var + "' is not always set")
        if isinstance(self.dic[var], ensemble):
            # print("ifthenelsing", var)
            # if self.dic[var].guard is not None: print("* ifthenelse", var, self.dic[var].guard, "?", self.dic[var].ifval, ":", self.dic[var].elseval)
            self.dic[var]=self.dic[var]()
        return self.dic[var]
    def get(self, var):
        """Return *var*'s value without collapsing (uses ensemble.get)."""
        if isinstance(self.dic[var], ensemble):
            return self.dic[var].get()
        else:
            return self.dic[var]
    def __setitem__(self, var, val):
        self.dic[var] = val
    def __delitem__(self, var):
        del self.dic[var]
    def __iter__(self):
        # Iterating a values object yields its variable names.
        return self.dic.__iter__()
    def clear(self):
        self.dic = {}
    def copy(self):
        """Shallow copy: new values object sharing the stored objects."""
        ret = values()
        ret.dic = dict(self.dic)
        return ret
    def __repr__(self):
        return repr(self.dic)
def apply_to_label(vals, orig):
    """Merge branch-scope *vals* back into label-scope *orig*.

    For every name in *orig*: if the branch left the bound object identical
    (compared by identity), keep it; otherwise extend or create an ensemble
    so the branch guard ``vals["__guard"]`` selects between the branch value
    and the original at resolution time.  Returns *vals* unchanged when
    there is no original scope to merge into.
    """
    if orig is None: return vals
    ifguard = vals["__guard"]  # guard under which the branch executed
    ret = values()
    for nm in orig:
        if nm in vals:
            if (vif:=vals.get(nm)) is (velse:=orig.get(nm)):
                # Identity match: the branch did not rebind this name.
                ret[nm] = vif
            elif isinstance(velse, ensemble):
                # Extend the existing ensemble with the branch's value.
                velse.add(ifguard, vif)
                ret[nm] = velse
            else:
                # Fresh ensemble choosing between the original and branch values.
                ret[nm] = ensemble(velse)
                ret.dic[nm].add(ifguard, vif)
    return ret
def apply_to_labels(vals, orig1, orig2, cond):
    """Split branch scope *vals* across two join labels according to *cond*.

    A Python-constant condition short-circuits to a single merge; otherwise
    the current guard is split into ``guard&cond`` / ``guard&(1-cond)`` and
    each half is merged into its respective label scope.
    NOTE(review): *cond* is presumably a secure/conditional value supporting
    ``&`` and ``1 - cond`` -- confirm against the caller's types.
    """
    if cond is True:
        return [apply_to_label(vals, orig1), orig2]
    elif cond is False:
        return [orig1, apply_to_label(vals, orig2)]
    guard = vals["__guard"]
    guard1 = guard&cond        # guard for the taken branch
    guard2 = guard&(1-cond)    # guard for the not-taken branch
    vals["__guard"] = guard1
    ret1 = apply_to_label(vals, orig1)
    # When both labels are new, keep the two results independent.
    if orig1 is None and orig2 is None: vals = vals.copy()
    vals["__guard"] = guard2
    ret2 = apply_to_label(vals, orig2)
    ret1["__guard"] = guard1 # because may be overwritten to guard2 if we do not copy vals
    return [ret1,ret2]
def values_new():
    """Factory: return a fresh, empty ``values`` scope."""
    return values()
'''
Created on May 2, 2016
@author: damianpa
'''
| nilq/baby-python | python |
import yaml
import os
import git
import logging
from .i_repository_parser import IRepositoryParser
class RosdistroRepositoryParser(IRepositoryParser):
    """
    Pulls the rosdistro-package and gets all urls from the rosdistro files.
    """

    def __init__(self, settings: dict):
        """
        Creates a new instance of the RosdistroRepositoryParser class
        :param settings: Settings containing information about rosdistro_workspace and rosdistro_url
        """
        self.__settings = settings

    def __get_rosdistro_repository(self) -> None:
        """
        Clones the repository from rosdistro_url into rosdistro_workspace (defined in settings)
        :return: None
        """
        if not os.path.exists(self.__settings["rosdistro_workspace"]):
            os.makedirs(self.__settings["rosdistro_workspace"])
        try:
            logging.info("[RosdistroRepositoryParser]: Cloning rosdistro repository...")
            git.Repo.clone_from(self.__settings["rosdistro_url"], self.__settings["rosdistro_workspace"])
        except git.exc.GitCommandError:
            # Clone fails when the workspace already holds a checkout; update it instead.
            logging.warning("[RosdistroRepositoryParser]: Repository already exists, pulling changes...")
            repo = git.Repo(self.__settings["rosdistro_workspace"])
            repo.remotes.origin.pull()
        logging.info("[RosdistroRepositoryParser]: Rosdistro up-to-date...")

    def __get_urls_from_file(self, file_path: str, repository_dict: dict) -> None:
        """
        Gets the URLs from a distribution.yaml that adheres to rosdistro-specs.
        :param file_path: path to a distribution.yaml file
        :param repository_dict: dictionary with repository-type (git, svn, hg, ...) as key and a set of repo-urls as value
        :return: None
        """
        # Context manager closes the handle (the old code leaked it);
        # safe_load avoids constructing arbitrary objects from YAML tags.
        with open(file_path, 'r') as file:
            rosdistro = yaml.safe_load(file)
        # Iterate repositories and add them to the repository_dict.
        for repository in rosdistro["repositories"]:
            entry = rosdistro["repositories"][repository]
            # "doc" and "source" sections each carry their own type/url pair.
            # (The old code paired the "source" url with the "doc" vcs type,
            # which looked like a copy-paste slip.)
            for section in ("doc", "source"):
                try:
                    vcs_type = str(entry[section]["type"])
                    url = str(entry[section]["url"])
                    repository_dict[vcs_type].add(url)
                except KeyError:
                    pass  # this section is absent for this repository
            try:
                # This has to be a git repository (required by bloom)
                repository_dict["git"].add(entry["release"]["url"])
            except KeyError:
                pass

    def parse_repositories(self, repository_dict: dict) -> None:
        """
        Collects repository urls from every distribution listed in index.yaml.
        :param repository_dict: dictionary with repository-type as key and a set of repo-urls as value
        :return: None
        """
        # Actually get the repository
        self.__get_rosdistro_repository()
        workspace = self.__settings["rosdistro_workspace"]
        # os.path.join tolerates a workspace path without a trailing slash,
        # unlike the previous string concatenation.
        with open(os.path.join(workspace, "index.yaml"), "r") as index_file:
            index_yaml = yaml.safe_load(index_file)
        # Get all urls from all distribution.yaml files
        for distribution in index_yaml["distributions"]:
            rel_path = index_yaml["distributions"][distribution]["distribution"][0]
            logging.info("Parsing distribution " + rel_path)
            self.__get_urls_from_file(os.path.join(workspace, rel_path), repository_dict)
import sklearn as sk
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
# Downloads/loads the Olivetti faces dataset and prints its description and
# the shapes of the images, data and target arrays.
faces = fetch_olivetti_faces()

# Function-call print syntax works under both Python 2 and Python 3;
# the original bare `print x` statements were Python-2-only.
print("DESCR")
print(faces.DESCR)
print("images.shape")
print(faces.images.shape)
print("data.shape")
print(faces.data.shape)
print("target.shape")
print(faces.target.shape)
| nilq/baby-python | python |
import argparse
if __name__ == "__main__":
    # CLI: compute simple and compound interest from principal, rate and time.
    parser = argparse.ArgumentParser()
    # `type=` already converts the argument strings and `required=True`
    # makes the old `default=` values unreachable, so the redundant
    # re-casts and defaults were removed.
    parser.add_argument("-p", "--principal", action="store", type=float,
                        help="Principal", required=True)
    parser.add_argument("-r", "--rate", action="store", type=float,
                        help="rate of interest", required=True)
    parser.add_argument("-t", "--time", action="store", type=int,
                        help="time in years", required=True)
    args = parser.parse_args()

    principal = args.principal
    rate = args.rate
    time = args.time

    si = principal * time * rate / 100
    # Compound interest excludes the principal; the old code printed the
    # full accumulated amount P*(1+r/100)**t instead of the interest.
    ci = principal * (1 + (rate / 100)) ** time - principal
    print(f"Simple Interest = {si} \nCompound Interest = {ci}")
| nilq/baby-python | python |
from ASTModels import Node
memory_size = 30000
def execute(ast: [Node]) -> ([int],int):
    """Run a parsed Brainfuck program and return the final (memory, pointer)."""
    tape = [0] * memory_size
    pointer = 0
    for instruction in ast:
        tape, pointer = _evaluate(instruction, tape, pointer)
    print()  # terminate any program output with a newline
    return tape, pointer
def _evaluate(node: Node, memory: [int], mp: int) -> ([int],int):
if node.node_type == "INCREMENT":
memory[mp] += node.val
if memory[mp] >= 256:
memory[mp] -= 256
elif node.node_type == "DECREMENT":
memory[mp] -= node.val
if memory[mp] < 0:
memory[mp] += 255
elif node.node_type == "INCREMENT_POINTER":
mp += node.val
if mp > memory_size:
mp -= memory_size-1
elif node.node_type =="DECREMENT_POINTER":
mp -= node.val
if mp < 0:
mp += memory_size-1
elif node.node_type == "OUTPUT":
print(chr(memory[mp]),end='')
elif node.node_type == "INPUT":
i = ''
while i == '':
i = input()
memory[mp] = ord(i[0])
elif node.node_type =="LOOP":
while memory[mp] != 0:
for block_node in node.nodes:
memory, mp = _evaluate(block_node,memory,mp)
return memory, mp
| nilq/baby-python | python |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import sys
import tensorflow as tf
import numpy as np
from tensorflow.python.client import timeline
# Pin four variables to four distinct (virtual) CPU devices.
with tf.device("/cpu:0"):
    a = tf.Variable([1],)
with tf.device("/cpu:1"):
    b = tf.Variable([2],)
with tf.device("/cpu:2"):
    c = tf.Variable([3],)
with tf.device("/cpu:3"):
    d = tf.Variable([4],)
# Partial sums placed on different devices to force cross-device traffic.
with tf.device("/cpu:0"):
    total_a = tf.add_n([a, b])
with tf.device("/cpu:1"):
    total_b = tf.add_n([a, b, c])
with tf.device("/cpu:2"):
    total_c = tf.add_n([b, c, d])
with tf.device("/cpu:3"):
    total_d = tf.add_n([c, d])
# Final reduction over the partial sums (device chosen by TF placement).
graph = tf.add_n([total_a, total_b, total_c, total_d])
# Expose 4 CPU devices to the session so the placements above are honoured.
config = tf.ConfigProto(device_count={"CPU": 4})
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    # Full tracing so per-op device/timing data is recorded.
    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    sess.run([total_a, total_b, total_c, total_d, graph], options=options, run_metadata=run_metadata)
    # Export the trace in Chrome's about:tracing JSON format.
    fetched_timeline = timeline.Timeline(run_metadata.step_stats)
    chrome_trace = fetched_timeline.generate_chrome_trace_format()
    with open('timeline_01.json', 'w') as f:
        f.write(chrome_trace)
| nilq/baby-python | python |
class Vehicle:
    """Base class for anything identified by a VIN (vehicle identification number)."""

    def __init__(self, vin):
        self.vin = vin

    def GetVin(self):
        """Return the VIN (accessor kept for backward compatibility; the
        ``vin`` attribute is public and may be read directly)."""
        return self.vin

    def __repr__(self):
        return f"{type(self).__name__}(vin={self.vin!r})"
class Car(Vehicle):
    """Concrete Vehicle; Accelerate() is the polymorphic hook exercised in main()."""
    def Accelerate(self):
        print("Car accelerating...")
class Truck(Vehicle):
    """Concrete Vehicle; Accelerate() is the polymorphic hook exercised in main()."""
    def Accelerate(self):
        print("Truck accelerating...")
def main():
    """Build a mixed fleet and call Accelerate() on each element."""
    fleet = [
        Car("A123456890"),
        Car("B123456890"),
        Truck("C123456890"),
        Truck("D123456890"),
        Car("E123456890"),
    ]
    for vehicle in fleet:
        vehicle.Accelerate()  # polymorphic site: resolved per concrete type

if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
import webapp2
from webapp2_extras import routes
import json
from api import routes as apiRoutes
from fetch import routes as fetchRoutes
class MainPage(webapp2.RequestHandler):
    """Health-check handler for the site root."""
    def get(self):
        # Plain text so monitoring can match the literal body.
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write('QCurrency is working.')
# Route table: /api/* and /fetch/* delegate to the imported sub-route lists;
# the bare root serves the health check.
app = webapp2.WSGIApplication([
    routes.PathPrefixRoute('/api', apiRoutes),
    routes.PathPrefixRoute('/fetch', fetchRoutes),
    ('/', MainPage),
], debug=True)
| nilq/baby-python | python |
# -*- coding:utf-8 -*-
import unittest
from simple_ml.classify_data import DataCollector, get_iris
import numpy as np
class TestDataCollector(unittest.TestCase):
    """Checks the iris loaders exposed by ``simple_ml.classify_data``."""

    def test_get_iris(self):
        collector = DataCollector()
        data = collector.fetch_handled_data("iris")
        self.assertIsInstance(data, np.ndarray)
        self.assertEqual(data.shape[0], 150)
        self.assertEqual(data.shape[1], 6)

    def test_build_in_get_iris(self):
        features, labels = get_iris()
        self.assertEqual(len(features.shape), 2)
        self.assertEqual(len(labels.shape), 1)
if __name__ == '__main__':
unittest.main()
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#################
# Module-Import #
#################
#eegpy-modules
try:
import eegpy
from eegpy.events import EventTable
from eegpy.misc import FATALERROR
from eegpy.ui.widgets.windowwidgets import EegpyBaseWin
from eegpy.ui.icon import image_from_eegpy_stock, eegpy_logo
except ImportError:
raise FATALERROR('Your installation of EegPy seems to be incomplete.\nMaybe you need to set the PYTHONPATH environment-variable adequatly.')
#from eegpy.filter.filt_misc import filterRecursively
#Third-party
try:
import numpy
from scipy.signal import lfilter, butter
except ImportError:
raise FATALERROR('SciPy or NumPy not found!\nPlease visit www.scipy.org or numeric.scipy.org for more information.')
try:
import pygtk
pygtk.require('2.0')
import gobject
import gtk
except ImportError:
raise FATALERROR('GTK cannot be imported.')
#try:
# from matplotlib.axes import Subplot
# # uncomment to select /GTK/GTKAgg/GTKCairo
# from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas
# from matplotlib.backends.backend_gtk import NavigationToolbar2GTK as NavigationToolbar
# import matplotlib
# #from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg, NavigationToolbar
# from matplotlib.figure import Figure, SubplotParams
# from matplotlib.axis import Axis
# import matplotlib.cm
#except ImportError:
# raise FATALERROR('Error while importing matplotib. Please visit http://matplotlib.sf.net for more information.')
#native python
import sys
import os
import pickle
class EventManager(gtk.Frame):
    """GTK frame for loading, editing and saving an eegpy EventTable.

    Left pane lists the table's trigger-type keys; right pane lists the
    trigger timepoints of the selected key.  A toolbar offers load/save and
    add operations; triggers can be dragged between managers and deleted
    with the Delete key.
    """
    _et = None       # the EventTable being edited
    _fn = None       # filename the table was loaded from / last saved to
    _keylist = None  # NOTE(review): never assigned in this class -- apparently vestigial
    def __init__(self, label=""):
        gtk.Frame.__init__(self,label)
        self.vbox=gtk.VBox()
        self.tb_box = gtk.HBox()
        self.add(self.vbox)
        self.vbox.pack_start(self.tb_box,expand=False)
        self.tb = gtk.Toolbar()
        self.tooltips = gtk.Tooltips()
        self.tb.set_style(gtk.TOOLBAR_ICONS)
        self.add_toolbutton_from_stock(gtk.STOCK_OPEN, 'Load', 'Load an EventTable from a file', 'Private', self.load_et)
        self.add_toolbutton_from_stock(gtk.STOCK_SAVE, 'Save', 'Save the EventTable back to the original file', 'Private', self.save_et, False)
        self.add_toolbutton_from_stock(gtk.STOCK_SAVE_AS, 'Save to', 'Save the EventTable to a file, choose new file', 'Private', self.save_et, True)
        self.tb.insert(gtk.SeparatorToolItem(),-1)
        self.add_toolbutton_eegpy("add_trigger_type", "Add type", "Add a new trigger type", 'Private', self.cb_add_trigger_type, None)
        self.add_toolbutton_eegpy("add_trigger", "Add trigger", "Add a new trigger", 'Private', self.cb_add_trigger, None)
        self.tb_box.pack_start(self.tb,expand=True)
        # Label showing the (possibly abbreviated) current filename.
        self.lb_fn = gtk.Label("New EventTable...")
        self.lb_fn.set_max_width_chars(50)
        self.lb_fn.set_justify(gtk.JUSTIFY_RIGHT)
        self.tb_box.pack_end(self.lb_fn, expand=False)
        # Paned container for the key list / trigger list.
        self.pane_kl = gtk.HPaned()
        self.vbox.pack_end(self.pane_kl)
        self.setup_trees()
        self._et = EventTable()
    def setup_trees(self):
        """Build the two TreeViews: trigger-type keys (left), timepoints (right)."""
        #First: Keys
        self.tvsw_keys = gtk.ScrolledWindow()
        self.tvsw_keys.set_policy(gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
        self.tree_keys = gtk.TreeStore(gobject.TYPE_STRING)
        #self.treeS = gtk.TreeModelSort(self.tree)
        self.tv_keys = gtk.TreeView(self.tree_keys)
        self.tv_keys.get_selection().connect("changed",self.key_selected)
        #self.tv_keys.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
        #renderer = gtk.CellRendererText()
        #self.col1 = gtk.TreeViewColumn("File ...", renderer,text=0)
        self.tv_keys.append_column(gtk.TreeViewColumn("Key", gtk.CellRendererText(),text=0))
        #self.tv_keys.show()
        self.tvsw_keys.add(self.tv_keys)
        self.pane_kl.add1(self.tvsw_keys)
        #Second: Triggers
        self.tvsw_tr = gtk.ScrolledWindow()
        self.tvsw_tr.set_policy(gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC)
        self.tree_tr = gtk.TreeStore(gobject.TYPE_INT)
        #self.treeS = gtk.TreeModelSort(self.tree)
        self.tv_tr = gtk.TreeView(self.tree_tr)
        self.tv_tr.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
        #renderer = gtk.CellRendererText()
        #self.col1 = gtk.TreeViewColumn("File ...", renderer,text=0)
        self.tv_tr.append_column(gtk.TreeViewColumn("Timepoint", gtk.CellRendererText(),text=0))
        #self.tv_keys.show()
        # Set up drag'n'drop so triggers can be moved between managers.
        self.tv_tr.enable_model_drag_source( gtk.gdk.BUTTON1_MASK,
                                [('INT',0,0)],
                                gtk.gdk.ACTION_DEFAULT|
                                gtk.gdk.ACTION_MOVE)
        self.tv_tr.enable_model_drag_dest([('INT',0,0)],
                    gtk.gdk.ACTION_DEFAULT)
        self.tv_tr.connect("drag_data_get", self.tr_drag_get)
        self.tv_tr.connect("drag_data_received", self.tr_drag_received)
        self.tv_keys.connect("key_press_event", self.cb_key_pressed)
        self.tv_tr.connect("key_press_event", self.cb_key_pressed)
        self.tvsw_tr.add(self.tv_tr)
        self.pane_kl.add2(self.tvsw_tr)
    def add_toolbutton_eegpy(self, icon_name, text, tip_text, tip_private, clicked_function, clicked_param1=None):
        """Append a toolbar button using an eegpy stock icon."""
        iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
        iconw = eegpy.ui.icon.image_from_eegpy_stock(icon_name)
        toolitem = gtk.ToolButton(iconw, text)
        #toolitem = gtk.ToolButton(iconw)
        toolitem.set_icon_widget(iconw)
        toolitem.show_all()
        toolitem.set_tooltip(self.tooltips, tip_text, tip_private)
        toolitem.connect("clicked", clicked_function, clicked_param1)
        #toolitem.connect("scroll_event", clicked_function)
        self.tb.insert(toolitem, -1)
    def add_toolbutton_from_stock(self, icon_name, text, tip_text, tip_private, clicked_function, clicked_param1=None):
        """Append a toolbar button using a GTK stock icon."""
        iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
        iconw = gtk.Image()
        iconw.set_from_stock(icon_name, iconSize)
        toolitem = gtk.ToolButton(iconw, text)
        #toolitem = gtk.ToolButton(iconw)
        toolitem.set_icon_widget(iconw)
        toolitem.show_all()
        toolitem.set_tooltip(self.tooltips, tip_text, tip_private)
        toolitem.connect("clicked", clicked_function, clicked_param1)
        #toolitem.connect("scroll_event", clicked_function)
        self.tb.insert(toolitem, -1)
    def load_et(self,event,data):
        """Toolbar callback: pick a file via dialog and load it as EventTable."""
        dialog = gtk.FileChooserDialog("Open EventTable from file..", None, gtk.FILE_CHOOSER_ACTION_OPEN, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        dialog.set_default_response(gtk.RESPONSE_OK)
        filter = gtk.FileFilter()
        filter.set_name("eegpy EventTable or similar")
        filter.add_pattern("*.evt")
        filter.add_pattern("*.vmrk")
        dialog.add_filter(filter)
        filter = gtk.FileFilter()
        filter.set_name("All files")
        filter.add_pattern("*")
        dialog.add_filter(filter)
        response = dialog.run()
        if response == gtk.RESPONSE_OK:
            self.set_filename(dialog.get_filename())
            #print dialog.get_filename(), 'selected'
        elif response == gtk.RESPONSE_CANCEL:
            print 'Closed, no files selected'
        dialog.destroy()
    def save_et(self, event, do_save_as = True):
        """Save the EventTable; with do_save_as=True a target file is chosen via dialog."""
        if do_save_as == False:
            self._et.save(self._fn)
        else:
            dialog = gtk.FileChooserDialog("Save EventTable to file...", None, gtk.FILE_CHOOSER_ACTION_SAVE, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
            dialog.set_default_response(gtk.RESPONSE_OK)
            filter = gtk.FileFilter()
            filter.set_name("eegpy EventTable")
            filter.add_pattern("*.evt")
            dialog.add_filter(filter)
            filter = gtk.FileFilter()
            filter.set_name("All files")
            filter.add_pattern("*")
            dialog.add_filter(filter)
            response = dialog.run()
            if response == gtk.RESPONSE_OK:
                fn = dialog.get_filename()
                print fn, 'selected'
                dialog.destroy()
                self._fn = fn
                #Now save...
                self._et.save(self._fn)
                # Abbreviate long paths for the filename label.
                lbtext = ""
                if len(fn)>40:
                    lbtext = "..."+fn[-37:]
                self.lb_fn.set_text(lbtext)
                #fh.close()
            else:# response == gtk.RESPONSE_CANCEL:
                dialog.destroy()
                print 'Closed, no files selected'
                pass
    def set_filename(self,fn):
        """Load EventTable from *fn*; on failure reset state and return False."""
        print fn, "selected for opening"
        #success = False
        try:
            if not os.path.exists(fn):
                raise ValueError("File doesn't exist")
            self._et = EventTable(fn)
            if len(self._et.keys())==0:
                print self._et.keys()
                raise ValueError("EventTable empty!")
            self._fn = fn
        except ValueError, e:
            print "Error opening EventTable", e
            self._et=None
            self._fn=None
            return False
        # Abbreviate long paths for the filename label.
        lbtext = ""
        if len(fn)>40:
            lbtext = "..."+fn[-37:]
        self.lb_fn.set_text(lbtext)
        self.setup_keylist()
    def setup_keylist(self):
        """Refill the key TreeStore from the EventTable's sorted keys."""
        #if self._tv!=None:
        #    try:
        #        self._keylist.hide()
        #        self._keylist.destroy()
        #    except Exception,e:
        #        print "Cannot destroy keylist"
        #TODO: Real functionalityself.tvsw_keys = gtk.ScrolledWindow()
        keys = self._et.keys()
        keys.sort()
        self.tree_keys.clear()
        for k in keys:
            iter = self.tree_keys.append(None)
            self.tree_keys.set(iter, 0, k)
        self.tree_keys.set_sort_column_id(0,gtk.SORT_ASCENDING)
        self.show_all()
    def setup_triggerlist(self, key):
        """Refill the trigger TreeStore with the timepoints stored under *key*."""
        self.tree_tr.clear()
        for tr in self._et[key]:
            #print tr
            iter = self.tree_tr.append(None)
            self.tree_tr.set(iter, 0, int(tr))
        self.tree_tr.set_sort_column_id(0,gtk.SORT_ASCENDING)
    def key_selected(self,treeselection,*args):
        """Selection-changed callback: show the triggers of the selected key."""
        #print tv, path, col, args, self.tree_keys.get(self.tree_keys.get_iter(path),0)[0]
        self.tv_tr.get_selection().unselect_all()
        #self.tree_tr.clear()
        paths = treeselection.get_selected_rows()[1]
        if len(paths)>0:
            iter = self.tree_keys.get_iter(paths[0])
            key = self.tree_keys.get(iter,0)[0]
            self.setup_triggerlist(key)
    def cb_add_trigger_type(self,event,data):
        """Toolbar callback: ask for a name and add an empty trigger type."""
        dialog_label = gtk.Dialog("Choose name...", None, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, gtk.STOCK_OK, gtk.RESPONSE_OK))
        entry1 = gtk.Entry()
        entry1.set_text("Trigger")
        dialog_label.vbox.pack_start(entry1)
        entry1.show()
        response = dialog_label.run()
        print response
        if response == gtk.RESPONSE_OK:
            trig_name = entry1.get_text()
            print trig_name
        else:
            print "Adding trigger-type aborted by user."
            dialog_label.destroy()
            return False
        dialog_label.destroy()
        self.add_trigger_type(trig_name, [])
    def cb_add_trigger(self,event,data):
        """Toolbar callback: ask for a timepoint and add it to the selected key."""
        dialog_label = gtk.Dialog("Add trigger...", None, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, gtk.STOCK_OK, gtk.RESPONSE_OK))
        dialog_label.vbox.pack_start(gtk.Label("Timepoint:"))
        sb_time = gtk.SpinButton(gtk.Adjustment(0,0,100000000,1,1000))
        dialog_label.vbox.pack_start(sb_time)
        dialog_label.vbox.show_all()
        response = dialog_label.run()
        print response
        if response == gtk.RESPONSE_OK:
            time = sb_time.get_value()
            print time
        else:
            print "Adding trigger aborted by user."
            dialog_label.destroy()
            return False
        dialog_label.destroy()
        self.add_trigger(time)
    def add_trigger_type(self,key,ts=[]):
        """Add trigger type *key* (with timepoints *ts*) unless it already exists."""
        if not self._et.has_key(key):
            self._et.add_trigger_type(key, ts)
            self.setup_keylist()
            self.tree_tr.clear()
    def add_trigger(self,time):
        """Add timepoint *time* to the currently selected trigger type."""
        #find out key
        path = self.tv_keys.get_selection().get_selected_rows()[1][0]
        iter = self.tree_keys.get_iter(path)
        k = self.tree_keys.get(iter,0)[0]
        if self._et.has_key(k):
            self._et.add_trigger(k, time)
            self.setup_triggerlist(k)
    def tr_drag_get(self, treeview, context, selection, target_id, etime):
        """DnD source callback: pickle the selected timepoints into the selection."""
        pathlist = treeview.get_selection().get_selected_rows()[1]
        model = treeview.get_model()
        iterlist = [model.get_iter(row) for row in pathlist]
        datalist = [model.get(iter,0)[0] for iter in iterlist]
        #print datalist
        selection.set(selection.target,8,pickle.dumps(datalist))
        #print "Drag_get: ", treeview, context, selection, target_id, etime
    def tr_drag_received(self, treeview, context, x, y, selection, info, etime):
        """DnD destination callback: unpickle and add the first dropped timepoint."""
        #print pickle.loads(selection.data)
        datalist = pickle.loads(selection.data)
        self.add_trigger(datalist[0])
        #print "Drag_received:", treeview, context, x, y, selection, info, etime
    def cb_key_pressed(self, widget, event, data=None):
        """Key handler: Delete removes the selected key or the selected triggers."""
        keyname = gtk.gdk.keyval_name(event.keyval)
        #print "Key %s (%d) was pressed in widget %s" % (keyname, event.keyval, str(widget))
        if keyname == "Delete":
            #find out key
            path = self.tv_keys.get_selection().get_selected_rows()[1][0]
            iter = self.tree_keys.get_iter(path)
            k = self.tree_keys.get(iter,0)[0]
            if widget==self.tv_keys:
                self._et.remove(k)
                self.setup_keylist()
                self.tv_keys.get_selection().unselect_all()
                self.tree_tr.clear()
            if widget==self.tv_tr:
                pathlist = self.tv_tr.get_selection().get_selected_rows()[1]
                iterlist = [self.tree_tr.get_iter(row) for row in pathlist]
                datalist = [self.tree_tr.get(iter,0)[0] for iter in iterlist]
                for tr in datalist:
                    self._et.remove(k,tr)
                self.setup_triggerlist(k)
class EventTableEditorWin(EegpyBaseWin):
    """Main window hosting two EventManager panes plus a plot of their marks."""
    programName = "eegpy: Frequency-Filtering"
    # Konstruktor
    def __init__(self):
        EegpyBaseWin.__init__(self)
        self.inner_pane.set_position(300)
        # Two managers side by side; selection changes in either redraw the plot.
        self.em1 = EventManager("EventTable 1")
        self.em1.tv_tr.get_selection().connect("changed",self.cb_plot_marks)#, "blue")
        self.em2 = EventManager("EventTable 2")
        self.em2.tv_tr.get_selection().connect("changed",self.cb_plot_marks)#, "red")
        self.pane_edit = gtk.HPaned()
        self.upper_hbox.pack_start(self.pane_edit)
        self.pane_edit.add1(self.em1)
        self.pane_edit.pack2(self.em2,False)
        self.pane_edit.set_position(self.get_size()[0]/2)
        #self.setupOptions()
        self.show_all()
        #self.setupGUI()
    def setupGUI(self):
        EegpyBaseWin.setupGUI(self)
    def cb_plot_marks(self, treeselection, *args):
        """Redraw vertical mark lines: table 1 in blue (top half), table 2 in red (bottom half)."""
        #print "Color", color
        self.a.cla()
        pathlist = self.em1.tv_tr.get_selection().get_selected_rows()[1]
        iterlist = [self.em1.tree_tr.get_iter(row) for row in pathlist]
        datalist1 = [self.em1.tree_tr.get(iter,0)[0] for iter in iterlist]
        pathlist = self.em2.tv_tr.get_selection().get_selected_rows()[1]
        iterlist = [self.em2.tree_tr.get_iter(row) for row in pathlist]
        datalist2 = [self.em2.tree_tr.get(iter,0)[0] for iter in iterlist]
        #print datalist1, datalist2
        for i in datalist1:
        #    print i,
            self.a.axvline(i, lw=1, color="blue", ymin=0.5, ymax=1)
        #self.a.plot(datalist1,numpy.zeros(len(datalist1)),"bD")
        #self.a.plot(datalist2,numpy.ones(len(datalist2)),"rD")
        #print ""
        for i in datalist2:
        #    print i,
            self.a.axvline(i, lw=1, color="red", ymin=0, ymax=0.5)
        #print ""
        #    if len(datalist1) == 1:
        #        self.a.set_xlim(datalist1[0]-1000,datalist1[0]+1000)
        #    elif len(datalist2)==1:
        #        self.a.set_xlim(datalist2[0]-1000,datalist2[0]+1000)
        #    else:
        #        self.a.autoscale_view()
        #    elif:
        #        xlim0 = max(min(datalist1),min(datalist2))-500
        #        xlim1 = min(max(datalist1),max(datalist2))+500
        #        if xlim1<xlim0:
        #            xlim0 = min(min(datalist1),min(datalist2))-500
        #            xlim1 = max(max(datalist1),max(datalist2))+500
        #        self.a.set_xlim(xlim0,xlim1)
        #self.a.set_xlim(numpy.array(datalist1+datalist2).min()-1000,numpy.array(datalist1+datalist2).max()+1000)
        self.a.set_ylim(0,1)
        self.a.set_yticks([])
        self.canvas.draw()
def main():
    """Enter the GTK main loop; returns 0 once the loop exits."""
    gtk.main()
    return 0

if __name__ == "__main__":
    etew = EventTableEditorWin()
    main()
#!/usr/bin/env python3
"""
Prepares the test environment prior to starting hyperglass.
"""
import os
import glob
import shutil
from logzero import logger
working_directory = os.path.dirname(os.path.abspath(__file__))
parent_directory = os.path.dirname(working_directory)
def ci_copy_config():
    """Copies test configuration files to usable config files.

    Deletes any existing *.toml under hyperglass/configuration/, then copies
    the *.toml fixtures from this directory in.  Returns True if at least
    one file was migrated, False otherwise; re-raises on copy failure.
    """
    logger.info("Migrating test config files...")
    config_dir = os.path.join(parent_directory, "hyperglass/configuration/")
    test_files = glob.iglob(os.path.join(working_directory, "*.toml"))
    config_files = glob.iglob(os.path.join(config_dir, "*.toml"))
    logger.debug(config_dir)
    logger.debug(working_directory)
    logger.debug(parent_directory)
    status = False
    # Remove stale config files so the fixtures fully replace them.
    for file in config_files:
        if os.path.exists(file):
            logger.debug(f"{file} already exists")
            os.remove(file)
            logger.info(f"Deleted {file}")
    for file in test_files:
        try:
            shutil.copy(file, config_dir)
            logger.debug(f"Copied {file}")
            logger.debug(os.listdir(config_dir))
            logger.info("Successfully migrated test config files")
            status = True
        except (OSError, shutil.Error):
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed before the re-raise.
            logger.error(f"Failed to migrate {file}")
            raise
    return status
if __name__ == "__main__":
    # Allow running the migration step directly as a script.
    ci_copy_config()
| nilq/baby-python | python |
# Version: @VERSIONEER-VERSION@
"""The Versioneer - like a rocketeer, but for versions.
@README@
"""
# pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring
# pylint:disable=missing-class-docstring,too-many-branches,too-many-statements
# pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error
# pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with
# pylint:disable=attribute-defined-outside-init,too-many-arguments
import configparser
import errno
import json
import os
import re
import subprocess
import sys
from typing import Callable, Dict
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Instances carry ad-hoc attributes (VCS, style, versionfile_source,
    versionfile_build, tag_prefix, parentdir_prefix, verbose) assigned by
    get_config_from_root(); the class itself is just a namespace.
    """
def get_root():
    """Get the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .

    Raises VersioneerBadRootError when neither the current directory nor
    the directory of sys.argv[0] contains setup.py/versioneer.py.
    """
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        err = ("Versioneer was unable to run the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        my_path = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(my_path)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(my_path), versioneer_py))
    except NameError:
        # __file__ is undefined e.g. when run via exec(); nothing to warn about.
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config.

    Returns a populated VersioneerConfig; only "VCS" is mandatory, all
    other [versioneer] keys fall back to None (or "" for style).
    """
    # This might raise OSError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    setup_cfg = os.path.join(root, "setup.cfg")
    parser = configparser.ConfigParser()
    with open(setup_cfg, "r") as cfg_file:
        parser.read_file(cfg_file)
    VCS = parser.get("versioneer", "VCS")  # mandatory
    # Dict-like interface for non-mandatory entries
    section = parser["versioneer"]
    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = section.get("style", "")
    cfg.versionfile_source = section.get("versionfile_source")
    cfg.versionfile_build = section.get("versionfile_build")
    cfg.tag_prefix = section.get("tag_prefix")
    # An explicitly-quoted empty prefix in setup.cfg means "no prefix".
    if cfg.tag_prefix in ("''", '""'):
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = section.get("parentdir_prefix")
    cfg.verbose = section.get("verbose")
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Presumably raised by the VCS-specific strategies (not shown here) so a
    caller can fall through to the next version-discovery method -- confirm
    against the full versioneer source.
    """
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method):  # decorator
    """Return a decorator that records the wrapped function in HANDLERS[vcs][method]."""
    def decorate(f):
        """Register f under (vcs, method) and hand it back unchanged."""
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = f
        return f
    return decorate
| nilq/baby-python | python |
from dj_rest_auth.serializers import PasswordResetSerializer
from django.conf import settings
class PasswordResetSerializerFrontendHost(PasswordResetSerializer):
    """
    Serializer for requesting a password reset e-mail.

    Variant of dj-rest-auth's PasswordResetSerializer that passes
    request=None to the reset form so the e-mail links use the host from
    the Django Site object (the frontend host) instead of the API host.
    """
    def save(self):
        """Send the reset e-mail via the underlying PasswordResetForm."""
        # Prefer allauth's token generator when allauth is installed.
        if "allauth" in settings.INSTALLED_APPS:
            from allauth.account.forms import default_token_generator
        else:
            from django.contrib.auth.tokens import default_token_generator
        request = self.context.get("request")
        # Set some values to trigger the send_email method.
        opts = {
            "use_https": request.is_secure(),
            "from_email": getattr(settings, "DEFAULT_FROM_EMAIL"),
            "request": None,  # None triggers to use the host from site object
            "token_generator": default_token_generator,
        }
        opts.update(self.get_email_options())
        self.reset_form.save(**opts)
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#2018-05-29 08-49
# Standard Modules
import logging
# Extra Modules
dependencies_missing = False
try:
import teradata
except ImportError:
dependencies_missing = True
from metasploit import module, login_scanner
# Metasploit Metadata
metadata = {
'name': 'Teradata ODBC Login Scanner Module',
'description': '''
Login scanner module for ODBC connections to Teradata databases.
Port specification (TCP 1025 by default) is not necessary for ODBC connections.
Blank passwords are not supported by ODBC connections.
Requires ODBC driver and Python Teradata module.
''',
'authors': [
'Ted Raffle (actuated)'
],
'date': '2018-03-30',
'license': 'MSF_LICENSE',
'references': [
{'type': 'url', 'ref': 'https://developer.teradata.com/tools/reference/teradata-python-module'},
{'type': 'url', 'ref': 'https://downloads.teradata.com/download/connectivity/odbc-driver/linux'}
],
'type': 'single_host_login_scanner',
'options': {
'rhost': {'type': 'address', 'description': 'Host to target', 'required': True},
'rport': {'type': 'port', 'description': 'Port to target, ignored by the ODBC driver', 'required': True, 'default': 1025},
'userpass': {'type': 'string', 'description': 'A list of username/password combinations to try', 'required': False},
'sleep_interval': {'type': 'float', 'description': 'Time in seconds to wait between login attempts', 'required': False}
},
'service_name': 'teradata',
'notes': {
'AKA': ['Teradata ODBC Login Scanner']
}
}
def valid_login(udaExec, host, user, password):
    """Return True when an ODBC connection to *host* succeeds with the
    given credentials; False when Teradata rejects the attempt."""
    try:
        udaExec.connect(method="odbc", system=host,
                        username=user, password=password)
    except teradata.api.Error:
        return False
    return True
def run(args):
    """Metasploit entry point: validate dependencies and start the scan.

    args -- dict of datastore options supplied by the framework
            (rhost, rport, userpass, sleep_interval).
    """
    if dependencies_missing:
        # BUG FIX: `level=error` referenced an undefined name (NameError);
        # the module API expects the level as a string.
        module.log('Python Teradata module missing, cannot continue', level='error')
        return
    # Define UdaExec ODBC connection "application" globally, must be before LogHandler
    udaExec = teradata.UdaExec(appName="Auth", version="1.0", logConsole=False, configureLogging=False)
    # NOTE(review): the prefix hard-codes 1025 rather than args['rport'];
    # rport is ignored by the ODBC driver anyway (matches the default).
    module.LogHandler.setup(msg_prefix='{}:{} - '.format(args['rhost'], 1025))
    scanner = login_scanner.make_scanner(
        lambda host, port, username, password: valid_login(udaExec, host, username, password))
    scanner(args)
if __name__ == '__main__':
module.run(metadata, run)
| nilq/baby-python | python |
import itertools
import discord
from discord.ext import commands
from bot.constants import Colours
with open('bot/resources/evergreen/python_facts.txt') as file:
FACTS = itertools.cycle(list(file))
COLORS = itertools.cycle([Colours.python_blue, Colours.python_yellow])
class PythonFacts(commands.Cog):
    """Cog that replies with a rotating fun fact about Python."""

    def __init__(self, bot: commands.Bot) -> None:
        self.bot = bot

    @commands.command(name='pythonfact', aliases=['pyfact'])
    async def get_python_fact(self, ctx: commands.Context) -> None:
        """Sends a Random fun fact about Python."""
        # FACTS and COLORS are module-level itertools.cycle iterators,
        # so each invocation advances to the next fact/colour.
        fact_embed = discord.Embed(
            title='Python Facts',
            description=next(FACTS),
            colour=next(COLORS),
        )
        fact_embed.add_field(
            name='Suggestions',
            value="Suggest more facts [here!](https://github.com/python-discord/meta/discussions/93)")
        await ctx.send(embed=fact_embed)
def setup(bot: commands.Bot) -> None:
    """Load PythonFacts Cog."""
    # discord.py extension entry point, called by bot.load_extension().
    bot.add_cog(PythonFacts(bot))
| nilq/baby-python | python |
from django.core.management.base import BaseCommand, CommandError
from ghu_main.email import EmailAPI
class Command(BaseCommand):
    """Management command that relays a message through ghu_main.email.EmailAPI."""

    def __init__(self):
        super(Command, self).__init__()

    def add_arguments(self, parser):
        # Three positional string arguments, in this exact order;
        # `recipients` is a single comma-separated string.
        for arg_name in ('subject', 'body', 'recipients'):
            parser.add_argument(arg_name, type=str)

    def handle(self, *args, **options):
        recipient_list = options['recipients'].split(',')
        EmailAPI.send_email(options['subject'], options['body'], recipient_list)
| nilq/baby-python | python |
#!/usr/bin/python
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
import os
import sys
import argparse
import ConfigParser
import requests
from netaddr.ip import IPNetwork
from vnc_api.vnc_api import *
class ProvisionVgwInterface(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
headers = {'content-type': 'application/json'}
url = "http://localhost:9091/gateway"
if self._args.oper == "create":
print "Creating virtual-gateway ..."
with open("/proc/sys/net/ipv4/ip_forward", "w") as file:
file.write("1")
vif_command = '/usr/bin/vif --create ' + self._args.interface
vif_command += ' --mac 00:00:5e:00:01:00'
self.execute_command(vif_command)
ifconfig_command = 'ifconfig ' + self._args.interface + ' up'
self.execute_command(ifconfig_command)
for subnet in self._args.subnets:
route_command = 'route add -net ' + subnet
route_command += ' dev ' + self._args.interface
self.execute_command(route_command)
subnet_list = []
first = True
subnets_str = "\"subnets\":["
for subnet in self._args.subnets:
net = IPNetwork(subnet)
if not first:
subnets_str += ","
first = False
subnets_str += "{\"ip-address\":\"%s\", \"prefix-len\":%d}" % (str(net.ip), net.prefixlen)
subnets_str += "]"
route_list = []
first = True
routes_str = "\"routes\":["
for subnet in self._args.routes:
net = IPNetwork(subnet)
if not first:
routes_str += ","
first = False
routes_str += "{\"ip-address\":\"%s\", \"prefix-len\":%d}" % (str(net.ip), net.prefixlen)
routes_str += "]"
gw_str = "[{\"interface\":\"%s\", \"routing-instance\":\"%s\", %s, %s}]" %(self._args.interface, self._args.vrf, subnets_str, routes_str)
try:
r = requests.post(url, data=gw_str, headers=headers)
except ConnectionError:
print "Error: Error adding VGW interface"
return
if r.status_code != 200:
print "Failed to Add VGW interface"
return
print "Done creating virtual-gateway..."
else:
print "Deleting virtual-gateway ..."
gw_str = "[{\"interface\":\"%s\"}]" % (self._args.interface)
try:
r = requests.delete(url, data=gw_str, headers=headers)
except ConnectionError:
print "Error: Error deleting VGW interface"
return
if r.status_code != 200:
print "Failed to Delete VGW interface"
return
for subnet in self._args.subnets:
route_command = 'route del -net ' + subnet
route_command += ' dev ' + self._args.interface
self.execute_command(route_command)
ifconfig_command = 'ifconfig ' + self._args.interface + ' down'
self.execute_command(ifconfig_command)
interface_index = self.get_interface_index(self._args.interface)
if interface_index != -1:
vif_command = '/usr/bin/vif --delete ' + interface_index
self.execute_command(vif_command)
del_cmd = 'ip link del ' + self._args.interface
self.execute_command(del_cmd)
print "Done deleting virtual-gateway..."
# end __init__
def execute_command(self, cmd):
print cmd
out = os.system(cmd)
if out != 0:
print "Error executing : " + cmd
#end execute_command
def get_interface_index(self, interface):
import subprocess
proc = subprocess.Popen(["/usr/bin/vif", "--list"], stdout=subprocess.PIPE)
vif_list, err = proc.communicate()
vif_match = 'OS: ' + interface
lines = [line for line in vif_list.split('\n') if line.endswith(vif_match)]
for line in lines:
lineitems = line.split(' ')
first = lineitems[0]
index = first.split('/')
return index[1]
return -1
#end get_interface_index
def _parse_args(self, args_str):
'''
Eg. python provision_vgw_interface.py
--oper <create | delete>
--interface vgw1
--subnets 1.2.3.0/24 7.8.9.0/24
--routes 8.8.8.0/24 9.9.9.0/24
--vrf default-domain:admin:vn1:vn1
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'oper': 'create',
'interface': '',
'subnets': [],
'routes': [],
'vrf': '',
}
# Don't surpress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(**defaults)
parser.add_argument(
"--oper", help="Operation : create / delete")
parser.add_argument(
"--interface", help="Name of the gateway interface")
parser.add_argument(
"--subnets", nargs='+',
help="List of subnets in virtual-network configured for gateway (Ex: 1.1.1.0/24 2.2.2.0/24)")
parser.add_argument(
"--routes", nargs='+',
help="List of public routes injected into virtual-network routing-instance (Ex: 8.8.8.0/24 9.9.9.0/24)")
parser.add_argument(
"--vrf",
help="Routing instance for virtual-network configured for gateway (as FQDN)")
self._args = parser.parse_args(remaining_argv)
if not self._args.interface:
parser.error('Missing argument interface')
if not self._args.subnets:
parser.error('Missing argument subnets')
if self._args.oper == "create":
if not self._args.routes:
parser.error('Missing argument routes')
if not self._args.vrf:
parser.error('Missing argument vrf')
# end _parse_args
# end class ProvisionVgwInterface
def main(args_str=None):
    """Script entry point: the provisioner does all its work in __init__."""
    ProvisionVgwInterface(args_str)
# end main
if __name__ == "__main__":
main()
| nilq/baby-python | python |
#!/usr/bin/env python
def plain_merge(array_a: list, array_b: list) -> list:
    """Merge two pre-sorted lists into a single sorted list.

    Stable with respect to the inputs: on ties, elements of *array_a*
    are taken first.
    """
    merged = []
    idx_a = idx_b = 0
    while idx_a < len(array_a) and idx_b < len(array_b):
        if array_b[idx_b] < array_a[idx_a]:
            merged.append(array_b[idx_b])
            idx_b += 1
        else:
            merged.append(array_a[idx_a])
            idx_a += 1
    # At most one of these tails is non-empty once the loop exits.
    merged.extend(array_a[idx_a:])
    merged.extend(array_b[idx_b:])
    return merged
| nilq/baby-python | python |
class RuleWriterMount(type):
    """Metaclass that auto-registers every concrete rule-writer subclass.

    The first class created with this metaclass becomes the mount point
    and receives the shared ``rule_writers`` registry; each subsequent
    subclass is instantiated once and stored under its ``rule_name``.
    """

    def __init__(cls, name, bases, attrs):
        if hasattr(cls, 'rule_writers'):
            cls.register_rule_writer(cls)
        else:
            # Base (mount-point) class: create the shared registry.
            cls.rule_writers = {}

    def register_rule_writer(cls, rule_writer):
        writer = rule_writer()
        cls.rule_writers[writer.rule_name] = writer
class RuleWriter(metaclass=RuleWriterMount):
    # Mount point: subclasses that define `rule_name` are instantiated and
    # auto-registered into RuleWriter.rule_writers by the metaclass.
    pass
#!/usr/bin/env python
import os
import re
import shutil
import subprocess
import sys
toplevel = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
re_setup = re.compile(r'setup\(')
re_version = re.compile(r'(?<=\bversion=[\'"])([0-9a-zA-Z._+-]+)')
def update_version(gitversion, foundversion):
    """Chooses version string to write to setup.py.

    The git-derived version always wins; *foundversion* (the version
    currently present in setup.py) is intentionally ignored.
    """
    return gitversion
def make_pkg():
    """Build the sdist (and wheel, when available) for the reprozip project.

    The version written into setup.py comes from ``git describe``; build
    artifacts are copied into the top-level dist/ directory.
    """
    # Get version from git describe.
    # BUG FIX: check_output() returns bytes on Python 3; decode before
    # mixing with the str regexes (re_setup/re_version) and file lines.
    version = subprocess.check_output(['git', 'describe',
                                       '--always', '--tags'],
                                      cwd=toplevel).decode('utf-8').strip()
    dest = os.path.join(toplevel, 'dist')
    if not os.path.exists(dest):
        os.mkdir(dest)
    #for project in ('reprozip', 'reprounzip', 'reprounzip-docker',
    #                'reprounzip-vagrant', 'reprounzip-vistrails'):
    project = 'reprozip'
    pdir = os.path.join(toplevel, project)
    setup_py = os.path.join(pdir, 'setup.py')
    # Update setup.py file.
    # BUG FIX: read/write as text (newline='' preserves the original line
    # endings) so the str regexes can be applied; 'rb'/'wb' yielded bytes.
    with open(setup_py, 'r', newline='') as fp:
        lines = fp.readlines()
    i = 0
    setup_found = False
    while i < len(lines):
        line = lines[i]
        if not setup_found and re_setup.search(line):
            setup_found = True
        if setup_found:
            m = re_version.search(line)
            if m is not None:
                # Rewrite the first version= inside the setup(...) call.
                version = update_version(version, m.group(1))
                lines[i] = re_version.sub(version, line)
                break
        i += 1
    with open(setup_py, 'w', newline='') as fp:
        for line in lines:
            fp.write(line)
    # Run sdist
    subprocess.check_call([sys.executable, setup_py, 'sdist'])
    # Run bdist_wheel only when the wheel package is installed
    try:
        __import__('wheel')
    except ImportError:
        pass
    else:
        subprocess.check_call([sys.executable, setup_py, 'bdist_wheel'])
    # Move output to top-level dist/
    for f in os.listdir(os.path.join(pdir, 'dist')):
        shutil.copyfile(os.path.join(pdir, 'dist', f),
                        os.path.join(dest, f))
if __name__ == '__main__':
make_pkg()
| nilq/baby-python | python |
"""
Merge the tools
Consider the following:
A string, s, of length n.
An integer, k, where k is a factor of n.
We can split s into n/k subsegments where each subsegment, t(i), consists of a contiguous block of k characters in s.
Then, use each t(i) to create string u(i) such that:
The characters in u(i) are a subsequence of the characters in t(i).
Any repeat occurrence of a character is removed from the string such that each character in u(i) occurs exactly once.
In other words, if the character at some index j in t(i) occurs at a previous index < j in t(i), then do not include the
character in string u(i).
Given s and k, print n/k lines where each line i denotes string u(i).
Input Format
The first line contains a single string denoting s.
The second line contains an integer, k, denoting the length of each subsegment.
Output Format
Print n/k lines where each line i contains string u(i).
Sample Input
AABCAAADA
3
Sample Output
AB
CA
AD
"""
import textwrap
def merge_the_tools(string, k):
    """Print each length-k segment of *string* with duplicate characters
    removed (first occurrences kept, order preserved)."""
    for segment in textwrap.wrap(string, k):
        seen = set()
        unique_chars = []
        for ch in segment:
            if ch not in seen:
                seen.add(ch)
                unique_chars.append(ch)
        print(''.join(unique_chars))
if __name__ == '__main__':
string, k = input(), int(input())
merge_the_tools(string, k)
| nilq/baby-python | python |
import string
def is_pangram(sentence: str) -> bool:
    """
    Determine if a given string contains all the characters from a to z.
    sentence -- Any string.
    returns -- true/false for if string contains all letters from a to z.
    """
    # A pangram leaves no lowercase letter unaccounted for.
    missing = set(string.ascii_lowercase) - set(sentence.lower())
    return not missing
| nilq/baby-python | python |
import numpy as np
from scipy.linalg import solve
from js.geometry.quaternion import Quaternion
# Convert each dataset's Leica scanner poses (rows of flattened 4x4
# homogeneous transforms in pose_scanner_leica.csv, after one header line)
# into a "quaternion + translation" text file per scan (pose_<i>.csv).
# NOTE: Python 2 script (print statement below).
for path in [ "../data/gazebo_winter/", "../data/mountain_plain/", "../data/gazebo_summer/" ]:
#for path in [ "../data/stairs/", "../data/apartment/", "../data/wood_summer/" ]:
    with open(path+"pose_scanner_leica.csv") as f:
        f.readline()  # skip the CSV header line
        x = np.loadtxt(f,delimiter=",")
        for i in range(x.shape[0]):
            # Columns 2.. hold the row-major 4x4 transform; first two
            # columns are presumably timestamps/ids -- TODO confirm.
            T_wc = np.reshape(x[i,2:],(4,4))
            R_wc = T_wc[:3,:3]
            q_wc = Quaternion()
            q_wc.fromRot3(R_wc)
            t_wc = T_wc[:3,3]
            print t_wc, q_wc
            with open(path+"pose_{}.csv".format(i),"w") as fout:
                fout.write("q_w q_x q_y q_z t_x t_y t_z\n")
                fout.write("{} {} {} {} {} {} {}".format(q_wc.q[0],\
                    q_wc.q[1],q_wc.q[2],q_wc.q[3],t_wc[0],t_wc[1],t_wc[2]))
| nilq/baby-python | python |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.template.defaultfilters import title
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from horizon.utils.filters import replace_underscores
from trove_dashboard import api
from django.core import urlresolvers
LOG = logging.getLogger(__name__)
ACTIVE_STATES = ("COMPLETED", "FAILED")
def date(string):
    """Strip off the T from the datetime string"""
    return ' '.join(string.split('T'))
class LaunchLink(tables.LinkAction):
    """Table action linking to the backup-creation modal form."""
    name = "create"
    verbose_name = _("Create Backup")
    url = "horizon:project:database_backups:create"
    classes = ("btn-launch", "ajax-modal")

    def allowed(self, request, datum):
        return True  # The action should always be displayed
class RestoreLink(tables.LinkAction):
    """Row action that opens the database-launch form for this backup."""
    name = "restore"
    verbose_name = _("Restore Backup")
    url = "horizon:project:databases:launch"
    classes = ("btn-launch", "ajax-modal")

    def get_link_url(self, datam):
        # Append the backup id as a query parameter so the launch form
        # can preselect this backup.
        base_url = urlresolvers.reverse(self.url)
        return '{}?backup={}'.format(base_url, datam.id)
class DeleteBackup(tables.BatchAction):
    """Batch/row action that schedules deletion of the selected backups."""
    name = "delete"
    action_present = _("Delete")
    action_past = _("Scheduled deletion of")
    data_type_singular = _("Backup")
    data_type_plural = _("Backups")
    classes = ('btn-danger', 'btn-terminate')

    def allowed(self, request, instance=None):
        # Deletion is always offered, regardless of backup state.
        return True

    def action(self, request, obj_id):
        api.trove.backup_delete(request, obj_id)
class UpdateRow(tables.Row):
    """Ajax-refreshed row: re-fetches one backup (and its instance) by id."""
    ajax = True

    def get_data(self, request, backup_id):
        backup = api.trove.backup_get(request, backup_id)
        try:
            backup.instance = api.trove.instance_get(request,
                                                     backup.instance_id)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.  A failed lookup stays
            # best-effort (the instance may have been deleted), but only
            # ordinary exceptions are ignored now.
            pass
        return backup
def db_link(obj):
    """URL of the detail page for the backup's database, or None when the
    instance was not resolved or has no name."""
    if hasattr(obj, 'instance') and hasattr(obj.instance, 'name'):
        return reverse(
            'horizon:project:databases:detail',
            kwargs={'instance_id': obj.instance_id})
def db_name(obj):
    """Display name for the backup's database: the instance name when the
    instance was resolved, otherwise the raw instance id."""
    # BUG FIX: guard against a missing `instance` attribute (UpdateRow only
    # sets it when the lookup succeeds).  Previously this raised
    # AttributeError instead of falling back to the id, unlike db_link.
    if hasattr(obj, 'instance') and hasattr(obj.instance, 'name'):
        return obj.instance.name
    return obj.instance_id
class BackupsTable(tables.DataTable):
    """Horizon table listing database backups with ajax status refresh."""
    # Status -> row-complete mapping: True/False stop polling, None keeps
    # the ajax row refreshing until a terminal state is reached.
    STATUS_CHOICES = (
        ("BUILDING", None),
        ("COMPLETED", True),
        ("DELETE_FAILED", False),
        ("FAILED", False),
        ("NEW", None),
        ("SAVING", None),
    )
    name = tables.Column("name",
                         link=("horizon:project:database_backups:detail"),
                         verbose_name=_("Name"))
    created = tables.Column("created", verbose_name=_("Created At"),
                            filters=[date])  # strips the 'T' separator
    location = tables.Column(lambda obj: _("Download"),
                             link=lambda obj: obj.locationRef,
                             verbose_name=_("Backup File"))
    instance = tables.Column(db_name, link=db_link,
                             verbose_name=_("Database"))
    status = tables.Column("status",
                           filters=(title, replace_underscores),
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES)

    class Meta:
        name = "backups"
        verbose_name = _("Backups")
        status_columns = ["status"]
        row_class = UpdateRow
        table_actions = (LaunchLink, DeleteBackup)
        row_actions = (RestoreLink, DeleteBackup)
| nilq/baby-python | python |
# Ask for a name and report whether it contains "SILVA".
# Note: print() receives two arguments here -- the label f-string and the
# boolean result of the membership test.
nome = str(input('Digite seu nome: ')).strip().upper()
print(f'Seu nome tem SILVA: ','SILVA' in nome)
from .clip import *
from .esresnet import *
from .audioclip import AudioCLIP
from .audioclip_finetune import AudioCLIPFinetune | nilq/baby-python | python |
# Fixed-rate loan payment calculator (standard amortization formula):
#   payment = r * P / (1 - (1 + r) ** -n)
# NOTE(review): `r` is used directly as the per-period rate even though the
# prompt says "Annual Interest Rate" -- confirm whether it should be r/12.
# SECURITY: eval() on raw user input executes arbitrary code; prefer
# float()/int() for numeric input.
loan_amount = eval(input('Loan Amount: '))
r = eval(input('Annual Interest Rate: '))
n = eval(input('Loan Duration in Months: '))
payment = (r*loan_amount)/(1-((1+r)**-n))
print('$', payment)
| nilq/baby-python | python |
from .settings import *
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
]
# Uncomment if you want to use a mysql/mariadb database. Don't forget to change docker-compose.yml!
# DATABASES = {
# 'default': {
# 'NAME': 'mydjango',
# 'ENGINE': 'django.db.backends.mysql',
# 'USER': 'root',
# 'PASSWORD': 'root',
# 'HOST': 'db',
# 'PORT': 3306,
# 'OPTIONS': {
# 'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
# },
# 'CONN_MAX_AGE': 550,
# }
# }
# ... add more settings, will override default settings.py
| nilq/baby-python | python |
# coding:utf-8
""" 以tushare为源的市场数据表
"""
import loguru
import random
import time
import datetime
import tushare as ts
import pandas as pd
import matplotlib.pyplot as plt
from .sqldata import SqlDayManager, SqlBaseManager
import sys,os
sys.path.append(os.path.abspath("../timedata"))
import settings
from loguru import logger
class StockBasics(SqlBaseManager):
    """Fetch the list of currently meaningful stocks.

    Based on tushare's stock_basic; intended to exclude suspended,
    delisted and risk-warning stocks.
    """
    def stock_basic_fun(self):
        """Pull the stock_basic table from the tushare pro API.

        Returns the DataFrame, or None when the call failed or came back
        empty (failures are logged; log messages are in Chinese).
        """
        pro = ts.pro_api()
        data = pro.stock_basic()
        if type(data) != pd.DataFrame:
            logger.info('从tushare获取stock_basic数据更新失败')
            return None
        if data.empty:
            logger.info('数据为空,从tushare获取stock_basic数据更新失败')
            return None
        return data
    def __init__(self):
        SqlBaseManager.__init__(self)
        self.table_name = 'stock_basics'  # destination SQL table name
        self.data_fun = self.stock_basic_fun  # data-source callback used by the base class
# class StockMeaning(SqlBaseManager):
# """日常有用的stock,运行正常的stock
# """
# def stock_meaning_fun(self, THRESHOLD=50):
# sb = StockBasics()
# sb_data = sb.read()
# filter_stock = []
# # 过滤规则
# # 近2个月有交易,最后一个交易日价格在50以下
# start_day = datetime.datetime.now() - datetime.timedelta(days=14)
# start_day_str = start_day.strftime('%Y-%m-%d')
# hd = HistData()
# for code in sb_data.code:
# temp = hd.read(code, start=start_day_str)
# if not temp.empty:
# if 5 < temp.iloc[0]['high'] < THRESHOLD:
# filter_stock.append(code)
# print code
# result = sb_data[sb_data.code.isin(filter_stock)]
# return result
# def __init__(self):
# SqlBaseManager.__init__(self)
# self.table_name = 'stock_meaning'
# self.data_fun = self.stock_meaning_fun
class HistData(SqlDayManager):
    """Daily historical market data with tushare as the source
    (the underlying data comes from pro.daily / hist data).
    """
    def __init__(self):
        SqlDayManager.__init__(self)
        self.table_name = 'hist_data'  # destination SQL table name
        pro = ts.pro_api()
        self.get_data_fun = pro.daily  # per-day fetch callback used by the base class
    def add_all(self):
        """Iterate over every known ts_code and insert all of its data.

        Returns the list of codes that had no data (or no new data).
        """
        sb = StockBasics()
        AllStocks = sb.read()
        no_data_code = []  # codes with no data, or whose data was not updated
        for code in AllStocks.ts_code:
            logger.debug(u"add %s" % code)
            is_success = self.add(code)
            if not is_success:
                no_data_code.append(code)
            # Random pause between requests, presumably to respect
            # tushare rate limits -- TODO confirm.
            sleeptime=random.randint(0, 15)
            time.sleep(sleeptime)
        return no_data_code
# def plot_code_box(self, code, start='2015-11-01',end=None,):
# """画出code的时间蜡烛图
# Args:
# code: str| 代码code
# flag: str or list of str| code返回数据中指定的列名
# start_day: str|样式'2017-01-01'|开始时间
# end_day: str|样式'2017-01-01'|结束时间
# eg:
# dm = DataManager()
# dm.plot_code_line('300254')
# """
# data = self.read(code, start, end)
# data.get(['open','high','close','low']).T.plot.box()
# plt.show()
# class IndustryClassified( SqlBaseManager):
# """工业分类的类
# """
# def __init__(self):
# SqlBaseManager.__init__(self)
# self.table_name = 'industry_classified'
# self.data_fun = ts.get_industry_classified
| nilq/baby-python | python |
from localstack.dashboard import infra
from localstack.config import USE_SSL
def test_infra_graph_generation():
    """Smoke-test that the dashboard can build the infrastructure graph."""
    try:
        graph = infra.get_graph()
    except Exception as e:
        if USE_SSL:
            print('TODO: the Web UI in combination with USE_SSL=true is currently broken.')
            return
        # NOTE(review): when USE_SSL is False this falls through with
        # `graph` unbound, so the asserts below raise NameError instead of
        # surfacing the original error `e` -- consider re-raising here.
    assert 'nodes' in graph
    assert 'edges' in graph
    # TODO add more tests/assertions
| nilq/baby-python | python |
from typing import Optional
from ..helpers.const import *
class ConfigData:
    """Typed container for the integration's configuration: connection
    details, credentials, polling intervals, monitored entities and
    logging options.  Defaults come from helpers.const (wildcard import).
    """
    # Declared attribute types; values are populated in __init__.
    name: str
    host: str
    port: int
    username: Optional[str]
    password: Optional[str]
    password_clear_text: Optional[str]
    unit: int
    update_entities_interval: int
    update_api_interval: int
    monitored_devices: list
    monitored_interfaces: list
    device_trackers: list
    log_level: str
    log_incoming_messages: bool
    consider_away_interval: int
    def __init__(self):
        self.name = DEFAULT_NAME
        self.host = ""
        self.port = 0
        self.username = None
        self.password = None
        self.password_clear_text = None
        self.unit = ATTR_BYTE
        self.update_entities_interval = DEFAULT_UPDATE_ENTITIES_INTERVAL
        self.update_api_interval = DEFAULT_UPDATE_API_INTERVAL
        self.monitored_devices = []
        self.monitored_interfaces = []
        self.device_trackers = []
        self.log_level = ""
        self.log_incoming_messages = False
        self.store_debug_files = False  # not declared above; set here only
        self.consider_away_interval = DEFAULT_CONSIDER_AWAY_INTERVAL
    @property
    def unit_size(self):
        # Numeric multiplier for the configured unit (lookup table).
        return ALLOWED_UNITS[self.unit]
    @property
    def has_credentials(self):
        # True when at least one of username/cleartext-password is non-empty.
        has_username = self.username and len(self.username) > 0
        has_password = self.password_clear_text and len(self.password_clear_text) > 0
        has_credentials = has_username or has_password
        return has_credentials
    @property
    def url(self):
        url = API_URL_TEMPLATE.format(self.host)
        return url
    def __repr__(self):
        # Note: exposes the stored password value in the repr output.
        obj = {
            CONF_NAME: self.name,
            CONF_HOST: self.host,
            CONF_USERNAME: self.username,
            CONF_PASSWORD: self.password,
            CONF_UNIT: self.unit,
            CONF_UPDATE_API_INTERVAL: self.update_api_interval,
            CONF_UPDATE_ENTITIES_INTERVAL: self.update_entities_interval,
            CONF_MONITORED_DEVICES: self.monitored_devices,
            CONF_MONITORED_INTERFACES: self.monitored_interfaces,
            CONF_TRACK_DEVICES: self.device_trackers,
            CONF_LOG_LEVEL: self.log_level,
            CONF_LOG_INCOMING_MESSAGES: self.log_incoming_messages,
            CONF_CONSIDER_AWAY_INTERVAL: self.consider_away_interval,
        }
        to_string = f"{obj}"
        return to_string
| nilq/baby-python | python |
# Generated by Django 3.2.8 on 2021-11-30 17:55
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the customer app: Address, Contact and Customer
    models, with Contact.employer added last (via AddField) to break the
    Contact<->Customer circular dependency.

    NOTE: auto-generated by Django (makemigrations); edit with care.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('active', models.CharField(choices=[(0, 'Active'), (1, 'Inactive'), (2, 'Archived')], max_length=1, verbose_name='Activate Account')),
                ('role', models.CharField(choices=[('shipping', 'Shipping Address'), ('billing', 'Billing Address'), ('both', 'Billing/Shipping')], max_length=10, verbose_name='Role')),
                ('created_on', models.DateField(auto_now_add=True, verbose_name='Created Date')),
                ('last_modified', models.DateTimeField(auto_now_add=True, verbose_name='Last Modified Date')),
                ('address_1', models.CharField(max_length=50, verbose_name='Address 1')),
                ('address_2', models.CharField(max_length=50, verbose_name='Address 2')),
                ('city', models.CharField(max_length=50, verbose_name='City')),
                ('state', models.CharField(choices=[('AL', 'Alabama'), ('AK', 'Alaska'), ('AS', 'American Samoa'), ('AZ', 'Arizona'), ('AR', 'Arkansas'), ('CA', 'California'), ('CO', 'Colorado'), ('CT', 'Connecticut'), ('DE', 'Delaware'), ('DC', 'District of Columbia'), ('FL', 'Florida'), ('GA', 'Georgia'), ('GU', 'Guam'), ('HI', 'Hawaii'), ('ID', 'Idaho'), ('IL', 'Illinois'), ('IN', 'Indiana'), ('IA', 'Iowa'), ('KS', 'Kansas'), ('KY', 'Kentucky'), ('LA', 'Louisiana'), ('ME', 'Maine'), ('MD', 'Maryland'), ('MA', 'Massachusetts'), ('MI', 'Michigan'), ('MN', 'Minnesota'), ('MS', 'Mississippi'), ('MO', 'Missouri'), ('MT', 'Montana'), ('NE', 'Nebraska'), ('NV', 'Nevada'), ('NH', 'New Hampshire'), ('NJ', 'New Jersey'), ('NM', 'New Mexico'), ('NY', 'New York'), ('NC', 'North Carolina'), ('ND', 'North Dakota'), ('MP', 'Northern Mariana Islands'), ('OH', 'Ohio'), ('OK', 'Oklahoma'), ('OR', 'Oregon'), ('PA', 'Pennsylvania'), ('PR', 'Puerto Rico'), ('RI', 'Rhode Island'), ('SC', 'South Carolina'), ('SD', 'South Dakota'), ('TN', 'Tennessee'), ('TX', 'Texas'), ('UT', 'Utah'), ('VT', 'Vermont'), ('VI', 'Virgin Islands'), ('VA', 'Virginia'), ('WA', 'Washington'), ('WV', 'West Virginia'), ('WI', 'Wisconsin'), ('WY', 'Wyoming')], max_length=50, verbose_name='State')),
                ('zip_code', models.CharField(max_length=50, verbose_name='Zip Code')),
                ('phone', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator('^(\\d{10}$)', 'Please use numerical format without any spaces or special characters')], verbose_name='Phone')),
                ('country', models.CharField(max_length=2, verbose_name='Country')),
            ],
            options={
                'verbose_name_plural': 'Addresses',
            },
        ),
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='Name')),
                ('position', models.CharField(max_length=50, verbose_name='Position or Role')),
                ('description', models.TextField(verbose_name='Contact Notes')),
                ('phone_1', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator('^(\\d{10}$)', 'Please use numerical format without any spaces or special characters')], verbose_name='Phone 1')),
                ('phone_2', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator('^(\\d{10}$)', 'Please use numerical format without any spaces or special characters')], verbose_name='Phone 2')),
                ('email_1', models.EmailField(max_length=254, verbose_name='')),
                ('email_2', models.EmailField(max_length=254, verbose_name='')),
            ],
        ),
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('role', models.CharField(choices=[('customer', 'Customer'), ('client', 'Client'), ('vendor', 'Vendor'), ('employee', 'Employee')], max_length=50, verbose_name='Role')),
                ('dba', models.CharField(max_length=50, verbose_name='dba')),
                ('name', models.CharField(max_length=50, verbose_name='Legal Business Entity')),
                ('start_date', models.DateField(verbose_name='Start Date')),
                ('end_date', models.DateField(blank=True, null=True, verbose_name='End Date')),
                ('active', models.CharField(choices=[(0, 'Active'), (1, 'Inactive'), (2, 'Archived')], max_length=1, verbose_name='Active')),
                ('created_date', models.DateTimeField(auto_now_add=True, verbose_name='Created Date')),
                ('ein', models.CharField(max_length=50, verbose_name='EIN')),
                ('industry', models.CharField(choices=[('agriculture', 'Agriculture'), ('arts entertainment', 'Arts & Entertainment'), ('construction', 'Construction'), ('education', 'Education'), ('energy', 'Energy'), ('food', 'Food & Hospitality'), ('finance', 'Finance and Insurance'), ('healthcare', 'Healthcare'), ('manufacturing', 'Manufacturing'), ('mining', 'Mining'), ('other', 'Other Services'), ('services', 'Professional, Scientific, and Tech Services'), ('real estate', 'Real Estate'), ('retail', 'Retail'), ('transportation', 'Transportation & Logistics'), ('utilities', 'Utilities'), ('wholesale', 'Wholesale')], max_length=100, verbose_name='Industry')),
                ('website', models.URLField(verbose_name='Webiste')),
                ('account_manager', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='customer_Account', to=settings.AUTH_USER_MODEL, verbose_name='Account Manager')),
                ('billing_address', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='customer_billing', to='customer.address', verbose_name='Address')),
                ('contact', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='customer_employee', to='customer.contact', verbose_name='Contact')),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='created_by_customer', to=settings.AUTH_USER_MODEL, verbose_name='Created by')),
                ('shipping_address', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='customer_location', to='customer.address', verbose_name='Address')),
            ],
            options={
                'verbose_name_plural': 'Customers',
            },
        ),
        migrations.AddField(
            model_name='contact',
            name='employer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='contact_employer', to='customer.customer', verbose_name='Employer'),
        ),
    ]
| nilq/baby-python | python |
from user.models import CourseRegistration
# TODO:
# send mail with formatted relevant student test results to course adviser
# generate a list of courses registered buy a student for the current semester and session
def get_registered_courses(student, session, semester):
    """Course registrations for *student* in the given session/semester."""
    return CourseRegistration.objects.filter(
        student=student, session=session, semester=semester)
def get_current_registered_courses(student, semester):
    """Course registrations for *student* in the current session."""
    return CourseRegistration.objects.filter(
        student=student, session__is_current=True, semester=semester)
| nilq/baby-python | python |
from output.models.ms_data.regex.re_l32_xsd.re_l32 import (
Regex,
Doc,
)
__all__ = [
"Regex",
"Doc",
]
| nilq/baby-python | python |
from app import app
'''
set debug=False below when deploying to prod
'''
app.run(host='0.0.0.0', debug=True) | nilq/baby-python | python |
#!/usr/bin/python3
# -*- mode: python -*-
"""
s3_gateway:
bottle/boto3 interface to view an s3 bucket in a web browser.
2021-02-15 slg - updated to use anonymous s3 requests,
per https://stackoverflow.com/questions/34865927/can-i-use-boto3-anonymously
2021-02-20 slg - add support for database queries to augment what's in S3
"""
import json
import logging
import mimetypes
import os
import sys
import urllib.parse
from os.path import dirname
import boto3
import botocore
import botocore.exceptions
from botocore import UNSIGNED
from botocore.client import Config
import bottle
#from botocore.exceptions import ClientError
from bottle import request, response, redirect
import db_lookup
DESCRIPTION="""
This is the testing program for the gateway that
allows S3 files to be accessed from the website.
"""
DEFAULT_BUCKET = 'digitalcorpora'
BYPASS_URL = 'https://digitalcorpora.s3.amazonaws.com/'
USE_BYPASS = True
IGNORE_FILES = ['.DS_Store', 'Icon']
# Specify files in the runtime environment
S3_TEMPLATE_FILENAME = os.path.join(dirname(__file__), "templates/s3_index.tpl")
S3_ERROR_404_FILENAME = os.path.join(dirname(__file__), "templates/error_404.tpl")
# Create the S3_INDEX bottle SimpleTemplate here, outside of the
# s3_list_prefix_v1, so that it gets read when s3_gateway.py is imported.
# This causes bottle to compile it ONCE and repeatedly serve it out
S3_INDEX = bottle.SimpleTemplate( open( S3_TEMPLATE_FILENAME ).read())
ERROR_404 = bottle.SimpleTemplate( open( S3_TEMPLATE_FILENAME ).read())
def s3_get_dirs_files(bucket_name, prefix):
    """
    Returns a tuple of the s3 objects of the 'dirs' and the 'files'
    Makes an unauthenticated call
    :param bucket_name: bucket to read
    :param prefix: prefix to examine
    :return: (prefixes,keys) - a list of prefixes under `prefix`, and keys under `prefix`.
    """
    client = boto3.client('s3', config=Config(signature_version=UNSIGNED))
    pages = client.get_paginator('list_objects_v2').paginate(
        Bucket=bucket_name, Prefix=prefix, Delimiter='/')
    dirs = []
    files = []
    for page in pages:
        dirs.extend(page.get('CommonPrefixes', []))
        files.extend(page.get('Contents', []))
    if not dirs and not files:
        # Nothing at all under this prefix.
        raise FileNotFoundError(prefix)
    return (dirs, files)
def s3_to_link(obj):
    """Given a s3 object, return a link to it"""
    # pylint: disable=R1705
    if 'Prefix' in obj:
        # Sub-"directory": link back into this gateway.
        last_segment = obj['Prefix'].split("/")[-2] + "/"
        return request.url + urllib.parse.quote(last_segment)
    if 'Key' in obj:
        # Plain object: link straight to S3, bypassing the gateway.
        return BYPASS_URL + urllib.parse.quote(obj['Key'])
    raise RuntimeError("obj: "+json.dumps(obj, default=str))
def s3_list_prefix(bucket_name, prefix, auth=None):
    """Render an HTML directory listing for `prefix` with the Bottle
    template system. Builds the breadcrumb `paths`, the sub-prefix `dirs`
    list, and the `files` list of dicts (link, basename, size, checksums).
    :param bucket_name: the bucket to list
    :param prefix: the path within the bucket (no leading /)
    :param auth: optional database authenticator used to annotate checksums
    """
    breadcrumb = '/'
    paths = []
    for component in prefix.split('/')[:-1]:
        component += '/'
        breadcrumb += component
        paths.append((breadcrumb, component))
    (s3_dirs, s3_files) = s3_get_dirs_files(bucket_name, prefix)
    dirs = []
    for obj in s3_dirs:
        dirs.append(obj['Prefix'].split('/')[-2]+'/')
    # Attach sha2/sha3 digests from the database when available.
    if auth is not None and s3_files:
        db_lookup.annotate_s3files(auth, s3_files)
    files = []
    for obj in s3_files:
        files.append({'a': s3_to_link(obj),
                      'basename': os.path.basename(obj['Key']),
                      'size': "{:,}".format(obj['Size']),
                      'ETag': obj['ETag'],
                      'sha2_256': obj.get('sha2_256','n/a'),
                      'sha3_256': obj.get('sha3_256','n/a')})
    return S3_INDEX.render(prefix=prefix, paths=paths, files=files, dirs=dirs, sys_version=sys.version)
def s3_app(*, bucket, quoted_prefix, auth=None):
    """
    Fetching a file. Called from bottle.
    :param bucket: - the bucket that we are serving from
    :param quoted_prefix: - the path to display.
    :param auth: - Database authenticator
    """
    prefix = urllib.parse.unquote(quoted_prefix)
    logging.warning("bucket=%s quoted_prefix=%s prefix=%s", bucket, quoted_prefix, prefix)
    # A trailing slash means the caller wants a directory listing.
    if prefix.endswith("/"):
        try:
            return s3_list_prefix(bucket, prefix, auth=auth)
        except FileNotFoundError as e:
            logging.warning("e:%s", e)
            response.status = 404
            return ERROR_404.render(bucket=bucket,prefix=prefix)
    # If the prefix does not end with a '/' and there is object there, see if it is a prefix
    # NOTE(review): this relies on `botocore` being importable; the explicit
    # ClientError import at the top of the file is commented out — confirm
    # `import botocore.exceptions` exists earlier in the file.
    try:
        obj = boto3.client('s3', config=Config( signature_version=UNSIGNED)).get_object(Bucket=bucket, Key=prefix)
    except botocore.exceptions.ClientError as e:
        # get_object failed; retry as a directory listing before giving up.
        try:
            return s3_list_prefix(bucket, prefix+"/", auth=auth)
        except FileNotFoundError as e:
            # No object and not a prefix
            response.status = 404
            return ERROR_404.render(bucket=bucket,prefix=prefix)
    # If we are using the bypass, redirect
    if USE_BYPASS:
        logging.info("redirect to %s", BYPASS_URL + prefix)
        redirect(BYPASS_URL + prefix)
    # Otherwise download directly
    try:
        # NOTE(review): guess_type() can return (None, None); assigning None to
        # content_type does not raise, so the fallback below only covers other
        # bad inputs — confirm this is the intended behavior.
        response.content_type = mimetypes.guess_type(prefix)[0]
    except (TypeError,ValueError,KeyError) as e:
        response.content_type = 'application/octet-stream'
    return obj['Body']
if __name__ == "__main__":
    import argparse
    # Minimal CLI for exercising the gateway outside of bottle.
    cli = argparse.ArgumentParser(
        description=DESCRIPTION,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli.add_argument("--bucket", default=DEFAULT_BUCKET, help='which bucket to use.')
    cli.add_argument('--prefix', help='specify prefix')
    args = cli.parse_args()
    if args.prefix:
        print(s3_app(bucket=args.bucket, quoted_prefix=args.prefix))
| nilq/baby-python | python |
from torch import Tensor, _VF # noqa: F401
from torch.nn.utils.rnn import PackedSequence
import torch
import warnings
from typing import List, Optional, Tuple
class QuantizedLinear(torch.jit.ScriptModule):
    """Int8 dynamically-quantized replacement for ``torch.nn.Linear``.

    The weight matrix is quantized once at construction time with
    ``torch.fbgemm_linear_quantize_weight``; activations are handled on the
    fly in ``forward``. The FBGEMM packed-weight buffer is opaque and not
    serializable, so ``_pack`` empties it before saving and ``_unpack``
    rebuilds it from the int8 weight after loading.
    """
    # Scalars exposed as TorchScript compile-time constants.
    __constants__ = ['scale', 'zero_point']
    def __init__(self, other):
        # `other` is the float torch.nn.Linear being replaced.
        super(QuantizedLinear, self).__init__()
        self.in_features = other.in_features
        self.out_features = other.out_features
        # Quantize weight and discard the original
        self.weight, self.col_offsets, self.scale, self.zero_point = torch.fbgemm_linear_quantize_weight(
            other.weight.clone(memory_format=torch.contiguous_format).float())
        self.weight = torch.nn.Parameter(self.weight, requires_grad=False)
        self.col_offsets = torch.nn.Parameter(self.col_offsets, requires_grad=False)
        assert other.bias is not None, 'QuantizedLinear requires a bias'
        # Bias stays in fp32; only the weight matrix is quantized.
        self.bias = torch.nn.Parameter(other.bias.clone(memory_format=torch.contiguous_format).float(), requires_grad=False)
        self.register_buffer(
            'packed_tensor_ptr',
            torch.fbgemm_pack_quantized_matrix(self.weight.clone(memory_format=torch.contiguous_format)))
    @torch.jit.script_method
    def _unpack(self):
        # Rebuild the FBGEMM packed matrix from the serializable int8 weight.
        self.packed_tensor_ptr.set_(
            torch.fbgemm_pack_quantized_matrix(self.weight))
    @torch.jit.script_method
    def _pack(self):
        # Replace the opaque packed buffer with an empty tensor before saving.
        self.packed_tensor_ptr.set_(
            torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach())
    @torch.jit.script_method
    def forward(self, input):
        # fp32 activation in, int8-weight GEMM, result cast back to input dtype.
        out = torch.fbgemm_linear_int8_weight_fp32_activation(
            input.float(), self.weight, self.packed_tensor_ptr, self.col_offsets,
            self.scale, self.zero_point, self.bias)
        return out.to(input.dtype)
    def extra_repr(self):
        repr = 'in_features={in_features}, out_features={out_features}, ' \
               'scale={scale}, zero_point={zero_point}'.format(**self.__dict__)
        return repr
# FP16 weights
class QuantizedLinearFP16(torch.jit.ScriptModule):
    """fp16-weight counterpart of :class:`QuantizedLinear`.

    The float weight is packed into an FBGEMM fp16 GEMM matrix at
    construction. The original fp32 weight is retained (``original_weight``)
    solely so the packed form can be rebuilt after deserialization.
    """
    def __init__(self, other):
        super(QuantizedLinearFP16, self).__init__()
        self.in_features = other.in_features
        self.out_features = other.out_features
        self.original_weight = other.weight
        self.weight = torch.fbgemm_pack_gemm_matrix_fp16(
            other.weight.clone(memory_format=torch.contiguous_format).float())
        assert other.bias is not None, 'QuantizedLinearFP16 requires a bias'
        # Bias stays in fp32.
        self.bias = torch.nn.Parameter(other.bias.clone(memory_format=torch.contiguous_format).float(), requires_grad=False)
        self.register_buffer('packed_weight', self.weight)
    @torch.jit.script_method
    def _unpack(self):
        # Re-pack from the retained fp32 weight after loading.
        self.packed_weight.set_(
            torch.fbgemm_pack_gemm_matrix_fp16(
                self.original_weight))
    @torch.jit.script_method
    def _pack(self):
        # Replace the opaque packed buffer with an empty tensor before saving.
        self.packed_weight.set_(
            torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach())
    @torch.jit.script_method
    def forward(self, input):
        # fp32 activations against fp16 packed weights; output is fp32.
        out = torch.fbgemm_linear_fp16_weight_fp32_activation(
            input.float(), self.packed_weight, self.bias)
        return out
    def extra_repr(self):
        repr = 'in_features={in_features}, out_features={out_features}, '.format(**self.__dict__)
        return repr
# Quantized RNN cell implementations
class QuantizedRNNCellBase(torch.jit.ScriptModule):
    """Shared machinery for quantized RNN/LSTM/GRU cells.

    Quantizes the input-hidden and hidden-hidden weight matrices of the float
    cell ``other`` with FBGEMM, keeps the fp32 biases, and pre-packs both
    weight matrices for the fused quantized cell kernels. ``_pack``/``_unpack``
    manage the opaque packed buffers across (de)serialization.
    """
    __constants__ = ['input_size', 'hidden_size', 'bias', 'scale_hh', 'scale_ih',
                     'zero_point_ih', 'zero_point_hh']
    def __init__(self, other):
        super(QuantizedRNNCellBase, self).__init__()
        self.input_size = other.input_size
        self.hidden_size = other.hidden_size
        self.bias = other.bias
        # The quantized kernels have no bias-free variant.
        if not self.bias:
            raise ValueError("Quantized RNN cells require bias terms")
        # Quantize input-hidden weights (scale/zero-point become constants).
        weight_ih, col_offsets_ih, self.scale_ih, self.zero_point_ih = \
            torch.fbgemm_linear_quantize_weight(other.weight_ih.clone(memory_format=torch.contiguous_format).float())
        self.register_buffer('weight_ih', weight_ih)
        self.register_buffer('col_offsets_ih', col_offsets_ih)
        # Quantize hidden-hidden weights.
        weight_hh, col_offsets_hh, self.scale_hh, self.zero_point_hh = \
            torch.fbgemm_linear_quantize_weight(other.weight_hh.clone(memory_format=torch.contiguous_format).float())
        self.register_buffer('weight_hh', weight_hh)
        self.register_buffer('col_offsets_hh', col_offsets_hh)
        # Pre-pack both matrices for the FBGEMM GEMM kernels.
        packed_ih = torch.fbgemm_pack_quantized_matrix(self.weight_ih)
        self.register_buffer('packed_ih', packed_ih)
        packed_hh = torch.fbgemm_pack_quantized_matrix(self.weight_hh)
        self.register_buffer('packed_hh', packed_hh)
        # Biases stay in fp32.
        self.bias_ih = torch.nn.Parameter(other.bias_ih.clone(memory_format=torch.contiguous_format).float(), requires_grad=False)
        self.bias_hh = torch.nn.Parameter(other.bias_hh.clone(memory_format=torch.contiguous_format).float(), requires_grad=False)
    def extra_repr(self):
        s = '{input_size}, {hidden_size}'
        if 'bias' in self.__dict__ and self.bias is not True:
            s += ', bias={bias}'
        if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh":
            s += ', nonlinearity={nonlinearity}'
        return s.format(**self.__dict__)
    @torch.jit.script_method
    def check_forward_input(self, input):
        # Validate the feature dimension of a (batch, input_size) input.
        if input.size(1) != self.input_size:
            raise RuntimeError(
                "input has inconsistent input_size: got {}, expected {}".format(
                    input.size(1), self.input_size))
    @torch.jit.script_method
    def check_forward_hidden(self, input: Tensor, hx: Tensor, hidden_label: str = '') -> None:
        # Validate batch and hidden dimensions of a hidden-state tensor.
        if input.size(0) != hx.size(0):
            raise RuntimeError(
                "Input batch size {} doesn't match hidden{} batch size {}".format(
                    input.size(0), hidden_label, hx.size(0)))
        if hx.size(1) != self.hidden_size:
            raise RuntimeError(
                "hidden{} has inconsistent hidden_size: got {}, expected {}".format(
                    hidden_label, hx.size(1), self.hidden_size))
    # TODO: for some reason weak_script_method causes a destruction of the
    # module to occur, which in turn frees the packed_ih object via its DataPtr
    # deleter. This is bizarre and should probably get fixed.
    # @torch._jit_internal.weak_script_method
    @torch.jit.script_method
    def _unpack(self):
        # Rebuild the packed buffers from the serializable int8 weights.
        self.packed_ih.set_(torch.fbgemm_pack_quantized_matrix(self.weight_ih))
        self.packed_hh.set_(torch.fbgemm_pack_quantized_matrix(self.weight_hh))
    # @torch._jit_internal.weak_script_method
    @torch.jit.script_method
    def _pack(self):
        # Replace the opaque packed buffers with empty tensors before saving.
        self.packed_ih.set_(
            torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach())
        self.packed_hh.set_(
            torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach())
class QuantizedRNNCell(QuantizedRNNCellBase):
    """Quantized Elman RNN cell supporting 'tanh' and 'relu' nonlinearities."""
    # `nonlinearity` added to the base constants so TorchScript can branch on it.
    __constants__ = ['input_size', 'hidden_size', 'bias', 'scale_hh', 'scale_ih',
                     'zero_point_ih', 'zero_point_hh', 'nonlinearity']
    def __init__(self, other):
        super(QuantizedRNNCell, self).__init__(other)
        self.nonlinearity = other.nonlinearity
    @torch.jit.script_method
    def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
        self.check_forward_input(input)
        if hx is None:
            # Default hidden state: zeros sized for the current batch.
            hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
        self.check_forward_hidden(input, hx, '')
        if self.nonlinearity == "tanh":
            ret = _VF.quantized_rnn_tanh_cell(
                input, hx, self.weight_ih, self.weight_hh, self.bias_ih,
                self.bias_hh, self.packed_ih, self.packed_hh, self.col_offsets_ih,
                self.col_offsets_hh, self.scale_ih, self.scale_hh, self.zero_point_ih,
                self.zero_point_hh
            )
        elif self.nonlinearity == "relu":
            ret = _VF.quantized_rnn_relu_cell(
                input, hx, self.weight_ih, self.weight_hh, self.bias_ih,
                self.bias_hh, self.packed_ih, self.packed_hh, self.col_offsets_ih,
                self.col_offsets_hh, self.scale_ih, self.scale_hh, self.zero_point_ih,
                self.zero_point_hh
            )
        else:
            ret = input  # TODO: remove when jit supports exception flow
            raise RuntimeError(
                "Unknown nonlinearity: {}".format(self.nonlinearity))
        return ret
class QuantizedLSTMCell(QuantizedRNNCellBase):
    """Quantized LSTM cell; forward takes/returns an (h, c) pair."""
    def __init__(self, other):
        super(QuantizedLSTMCell, self).__init__(other)
    @torch.jit.script_method
    def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
        self.check_forward_input(input)
        if hx is None:
            # Both h and c default to zeros sized for the current batch.
            zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
            hx = (zeros, zeros)
        self.check_forward_hidden(input, hx[0], '[0]')
        self.check_forward_hidden(input, hx[1], '[1]')
        return _VF.quantized_lstm_cell(
            input, hx, self.weight_ih, self.weight_hh, self.bias_ih,
            self.bias_hh, self.packed_ih, self.packed_hh, self.col_offsets_ih,
            self.col_offsets_hh, self.scale_ih, self.scale_hh, self.zero_point_ih,
            self.zero_point_hh
        )
class QuantizedGRUCell(QuantizedRNNCellBase):
    """Quantized GRU cell; forward returns the next hidden state."""
    def __init__(self, other):
        super(QuantizedGRUCell, self).__init__(other)
    @torch.jit.script_method
    def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
        self.check_forward_input(input)
        if hx is None:
            # Default hidden state: zeros sized for the current batch.
            hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
        self.check_forward_hidden(input, hx, '')
        return _VF.quantized_gru_cell(
            input, hx, self.weight_ih, self.weight_hh, self.bias_ih,
            self.bias_hh, self.packed_ih, self.packed_hh, self.col_offsets_ih,
            self.col_offsets_hh, self.scale_ih, self.scale_hh, self.zero_point_ih,
            self.zero_point_hh
        )
def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
    """Reorder ``tensor`` along ``dim`` according to the index ``permutation``."""
    return torch.index_select(tensor, dim, permutation)
class QuantizedRNNBase(torch.jit.ScriptModule):
    """Common machinery for :class:`QuantizedLSTM` / :class:`QuantizedGRU`.

    Copies the hyperparameters of the float module ``other`` and pre-packs
    per-layer, per-direction cell parameters (int8 or fp16) for the fused
    quantized RNN kernels.
    """
    __constants__ = ['mode', 'input_size', 'hidden_size', 'num_layers', 'bias',
                     'batch_first', 'dropout', 'bidirectional', 'dtype']
    def __init__(self, other, dtype=torch.int8):
        super(QuantizedRNNBase, self).__init__()
        self.mode = other.mode
        self.input_size = other.input_size
        self.hidden_size = other.hidden_size
        self.num_layers = other.num_layers
        self.bias = other.bias
        self.batch_first = other.batch_first
        # Only the GRU path supports batch_first here.
        if self.mode != 'GRU':
            assert not self.batch_first
        self.dropout = other.dropout
        self.bidirectional = other.bidirectional
        num_directions = 2 if self.bidirectional else 1
        self.dtype = dtype
        # The quantized kernels have no bias-free variant.
        assert self.bias
        # TODO: support more than just LSTM
        if self.mode != 'LSTM' and self.mode != 'GRU':
            raise RuntimeError('Only LSTM or GRU is supported for QuantizedRNN')
        if dtype != torch.int8 and dtype != torch.float16:
            raise RuntimeError('Unsupported dtype: {}'.format(dtype))
        self.all_weights = []  # type: ignore
        for layer in range(self.num_layers):
            for direction in range(num_directions):
                # Layers past the first consume the (possibly bidirectional)
                # output of the previous layer.
                layer_input_size = self.input_size if layer == 0 else self.hidden_size * num_directions
                suffix = '_reverse' if direction == 1 else ''
                def get_weight_bias(ihhh):
                    # Fetch a weight/bias pair by the nn.RNN naming convention.
                    weight_name = 'weight_{}_l{}{}'.format(ihhh, layer, suffix)
                    bias_name = 'bias_{}_l{}{}'.format(ihhh, layer, suffix)
                    weight = getattr(other, weight_name)
                    bias = getattr(other, bias_name)
                    return weight, bias
                weight_ih, bias_ih = get_weight_bias('ih')
                weight_hh, bias_hh = get_weight_bias('hh')
                if dtype == torch.int8:
                    cell_params = torch.ops.quantized.make_quantized_cell_params(
                        weight_ih, weight_hh, bias_ih, bias_hh)
                else:
                    # fp16 path: pack weights+biases together.
                    packed_ih = torch.ops.quantized.linear_prepack_fp16(
                        weight_ih.float(), bias_ih)
                    packed_hh = torch.ops.quantized.linear_prepack_fp16(
                        weight_hh.float(), bias_hh)
                    cell_params = torch.ops.quantized.make_quantized_cell_params_fp16(
                        packed_ih, packed_hh)
                setattr(self, 'cell_params_{}_{}'.format(layer, suffix), cell_params)
                self.all_weights.append(cell_params)
    @torch.jit.script_method
    def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None:
        # Packed input is 2-D (flattened steps); padded input is 3-D.
        expected_input_dim = 2 if batch_sizes is not None else 3
        if input.dim() != expected_input_dim:
            raise RuntimeError(
                'input must have {} dimensions, got {}'.format(
                    expected_input_dim, input.dim()))
        if self.input_size != input.size(-1):
            raise RuntimeError(
                'input.size(-1) must be equal to input_size. Expected {}, got {}'.format(
                    self.input_size, input.size(-1)))
    @torch.jit.script_method
    def get_expected_hidden_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]:
        # (layers * directions, batch, hidden) for the given input layout.
        if batch_sizes is not None:
            mini_batch = int(batch_sizes[0])
        else:
            mini_batch = input.size(0) if self.batch_first else input.size(1)
        num_directions = 2 if self.bidirectional else 1
        expected_hidden_size = (self.num_layers * num_directions,
                                mini_batch, self.hidden_size)
        return expected_hidden_size
    @torch.jit.script_method
    def check_hidden_size(self, hx: Tensor, expected_hidden_size: Tuple[int, int, int],
                          msg: str = 'Expected hidden size {}, got {}') -> None:
        if hx.size() != expected_hidden_size:
            raise RuntimeError(msg.format(expected_hidden_size, list(hx.size())))
    @torch.jit.script_method
    def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]) -> None:
        self.check_input(input, batch_sizes)
        expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
        self.check_hidden_size(hidden, expected_hidden_size, msg='Expected hidden size {}, got {}')
    @torch.jit.script_method
    def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]) -> Tensor:
        # Reorder the hidden state to match a PackedSequence's sort order.
        if permutation is None:
            return hx
        return apply_permutation(hx, permutation)
class QuantizedLSTM(QuantizedRNNBase):
    """Quantized LSTM; dispatches between Tensor and PackedSequence inputs."""
    __overloads__ = {'forward': ['forward_packed', 'forward_tensor']}
    def __init__(self, other, dtype):
        super(QuantizedLSTM, self).__init__(other, dtype)
    @torch.jit.script_method
    def forward_impl(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]], batch_sizes: Optional[Tensor],
                     max_batch_size: int, sorted_indices: Optional[Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
        # noqa
        if hx is None:
            # Zero-initialize (h, c) when no hidden state is supplied.
            num_directions = 2 if self.bidirectional else 1
            zeros = torch.zeros(self.num_layers * num_directions,
                                max_batch_size, self.hidden_size,
                                dtype=input.dtype, device=input.device)
            hx = (zeros, zeros)
        else:
            # Each batch of the hidden state should match the input sequence that
            # the user believes he/she is passing in.
            hx = self.permute_hidden(hx, sorted_indices)
        self.check_forward_args(input, hx, batch_sizes)
        # Packed input is not supported by the quantized LSTM kernel.
        assert batch_sizes is None
        result = torch.quantized_lstm(input, hx, self.all_weights, self.bias, self.num_layers,
                                      float(self.dropout), self.training, self.bidirectional,
                                      self.batch_first, dtype=self.dtype, use_dynamic=False)
        output = result[0]
        hidden = result[1:]
        return output, hidden
    @torch.jit.script_method
    def forward_tensor(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
        # Padded-tensor entry point: no sorting/packing metadata.
        batch_sizes = None
        max_batch_size = input.size(0) if self.batch_first else input.size(1)
        sorted_indices = None
        unsorted_indices = None
        output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
        return output, self.permute_hidden(hidden, unsorted_indices)
    @torch.jit.script_method
    def forward_packed(self, input: PackedSequence, hx: Optional[Tuple[Tensor, Tensor]] = None
                       ) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]]:
        # PackedSequence entry point: unpack metadata, run, re-pack output.
        input, batch_sizes, sorted_indices, unsorted_indices = input
        max_batch_size = batch_sizes[0]
        max_batch_size = int(max_batch_size)
        output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
        output = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
        return output, self.permute_hidden(hidden, unsorted_indices)
    @torch.jit.script_method
    def permute_hidden(self, hx: Tuple[Tensor, Tensor], permutation: Optional[Tensor]) -> Tuple[Tensor, Tensor]:
        # LSTM override: permute both h and c.
        if permutation is None:
            return hx
        return apply_permutation(hx[0], permutation), apply_permutation(hx[1], permutation)
    @torch.jit.script_method
    def check_forward_args(self, input: Tensor, hidden: Tuple[Tensor, Tensor], batch_sizes: Optional[Tensor]) -> None:
        # LSTM override: validate both halves of the (h, c) pair.
        self.check_input(input, batch_sizes)
        expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
        self.check_hidden_size(hidden[0], expected_hidden_size,
                               'Expected hidden[0] size {}, got {}')
        self.check_hidden_size(hidden[1], expected_hidden_size,
                               'Expected hidden[1] size {}, got {}')
    def forward(self, input, hx=None):
        # Eager-mode dispatch; TorchScript uses __overloads__ above.
        if isinstance(input, PackedSequence):
            return self.forward_packed(input, hx)
        else:
            return self.forward_tensor(input, hx)
class QuantizedGRU(QuantizedRNNBase):
    """Quantized GRU; dispatches between Tensor and PackedSequence inputs."""
    __overloads__ = {'forward': ['forward_packed', 'forward_tensor']}
    @torch.jit.script_method
    def forward_impl(self, input: Tensor, hx: Optional[Tensor], batch_sizes: Optional[Tensor], max_batch_size: int,
                     sorted_indices: Optional[Tensor]) -> Tuple[Tensor, Tensor]:
        # noqa
        if hx is None:
            # Zero-initialize the hidden state when none is supplied.
            num_directions = 2 if self.bidirectional else 1
            hx = torch.zeros(self.num_layers * num_directions,
                             max_batch_size, self.hidden_size,
                             dtype=input.dtype, device=input.device)
        else:
            # Each batch of the hidden state should match the input sequence that
            # the user believes he/she is passing in.
            hx = self.permute_hidden(hx, sorted_indices)
        self.check_forward_args(input, hx, batch_sizes)
        # Distinct kernel entry points for padded vs packed input.
        if batch_sizes is None:
            result = torch.quantized_gru(input, hx, self.all_weights, self.bias, self.num_layers,
                                         float(self.dropout), self.training, self.bidirectional,
                                         self.batch_first)
        else:
            result = torch.quantized_gru(input, batch_sizes, hx, self.all_weights, self.bias, self.num_layers,
                                         float(self.dropout), self.training, self.bidirectional)
        output = result[0]
        hidden = result[1]
        return output, hidden
    @torch.jit.script_method
    def forward_tensor(self, input: Tensor, hx: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
        # Padded-tensor entry point: no sorting/packing metadata.
        batch_sizes = None
        max_batch_size = input.size(0) if self.batch_first else input.size(1)
        sorted_indices = None
        unsorted_indices = None
        output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
        return output, self.permute_hidden(hidden, unsorted_indices)
    @torch.jit.script_method
    def forward_packed(self, input: PackedSequence, hx: Optional[Tensor] = None) -> Tuple[PackedSequence, Tensor]:
        # PackedSequence entry point: unpack metadata, run, re-pack output.
        input, batch_sizes, sorted_indices, unsorted_indices = input
        max_batch_size = batch_sizes[0]
        max_batch_size = int(max_batch_size)
        output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)
        output = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
        return output, self.permute_hidden(hidden, unsorted_indices)
    def forward(self, input, hx=None):
        # Eager-mode dispatch; TorchScript uses __overloads__ above.
        if isinstance(input, PackedSequence):
            return self.forward_packed(input, hx)
        else:
            return self.forward_tensor(input, hx)
def quantize_rnn_cell_modules(module):
    """Deprecated: recursively replace RNN/LSTM/GRU cells in ``module`` with
    their quantized counterparts, returning the (possibly replaced) module."""
    warnings.warn("quantize_rnn_cell_modules function has been deprecated. "
                  "Please use torch.quantization.quantize_dynamic API instead.")
    replacements = {}
    for child_name, child in module.named_modules():
        if child is module:
            continue
        converted = quantize_rnn_cell_modules(child)
        if converted is not child:
            replacements[child_name] = converted
    # Re-attach converted children under their original names.
    for child_name, converted in replacements.items():
        setattr(module, child_name, converted)
    if isinstance(module, torch.nn.LSTMCell):
        return QuantizedLSTMCell(module)
    if isinstance(module, torch.nn.GRUCell):
        return QuantizedGRUCell(module)
    if isinstance(module, torch.nn.RNNCell):
        return QuantizedRNNCell(module)
    return module
def quantize_linear_modules(module, dtype=torch.int8):
    """Deprecated: recursively replace ``nn.Linear`` modules in ``module`` with
    int8 or fp16 quantized versions, returning the (possibly replaced) module."""
    warnings.warn("quantize_linear_modules function has been deprecated. "
                  "Please use torch.quantization.quantize_dynamic API instead.")
    replacements = {}
    for child_name, child in module.named_modules():
        if child is module:
            continue
        converted = quantize_linear_modules(child, dtype)
        if converted is not child:
            replacements[child_name] = converted
    # Re-attach converted children under their original names.
    for child_name, converted in replacements.items():
        setattr(module, child_name, converted)
    if isinstance(module, torch.nn.Linear):
        if dtype == torch.int8:
            return QuantizedLinear(module)
        elif dtype == torch.float16:
            return QuantizedLinearFP16(module)
        else:
            raise RuntimeError(
                "Unsupported dtype: {}".format(dtype))
    return module
def quantize_rnn_modules(module, dtype=torch.int8):
    """Deprecated: recursively replace ``nn.LSTM``/``nn.GRU`` modules with
    their dynamically quantized TorchScript counterparts.

    :param module: root module; converted children are reassigned in place.
    :param dtype: quantized weight dtype, ``torch.int8`` or ``torch.float16``.
    :return: the (possibly replaced) module.
    """
    warnings.warn("quantize_rnn_modules function has been deprecated. "
                  "Please use torch.quantization.quantize_dynamic API instead.")
    reassign = {}
    for name, mod in module.named_modules():
        if mod is module:
            continue
        new_mod = quantize_rnn_modules(mod, dtype)
        if new_mod is not mod:
            reassign[name] = new_mod
    # Re-attach converted children under their original names.
    for name, mod in reassign.items():
        setattr(module, name, mod)
    if isinstance(module, torch.nn.LSTM):
        if dtype != torch.int8 and dtype != torch.float16:
            raise RuntimeError("Unsupported dtype: {}".format(dtype))
        return QuantizedLSTM(module, dtype)
    if isinstance(module, torch.nn.GRU):
        # BUG FIX: dtype was previously dropped here (QuantizedGRU(module)),
        # so a requested float16 quantization silently fell back to int8.
        return QuantizedGRU(module, dtype)
    return module
| nilq/baby-python | python |
import argparse
import os
import json
import xml.etree.cElementTree as ET
import logging
import numpy as np
import sys
sys.path.insert(0,'common')
from transforms3dbatch import *
from utils.quaternion import *
def parse_motions(path):
    """Parse an MMM motion XML file.

    :param path: path to the XML file.
    :return: one-element list containing the (joint_names, frames) tuple of
        the file's first <Motion> element.
    """
    xml_tree = ET.parse(path)
    xml_root = xml_tree.getroot()
    xml_motions = xml_root.findall('Motion')
    motions = []
    if len(xml_motions) > 1:
        # FIX: logging.warn is a deprecated alias; logging.warning is the
        # documented spelling.
        logging.warning('more than one <Motion> tag in file "%s", only parsing the first one', path)
    motions.append(_parse_motion(xml_motions[0], path))
    return motions
def _parse_motion(xml_motion, path):
xml_joint_order = xml_motion.find('JointOrder')
if xml_joint_order is None:
raise RuntimeError('<JointOrder> not found')
joint_names = []
joint_indexes = []
for idx, xml_joint in enumerate(xml_joint_order.findall('Joint')):
name = xml_joint.get('name')
if name is None:
raise RuntimeError('<Joint> has no name')
joint_indexes.append(idx)
joint_names.append(name)
frames = {'root_pos':[], 'root_rot':[], 'joint_pos':[]}
xml_frames = xml_motion.find('MotionFrames')
if xml_frames is None:
raise RuntimeError('<MotionFrames> not found')
for xml_frame in xml_frames.findall('MotionFrame'):
root_pos, root_rot, joint_pos = _parse_frame(xml_frame, joint_indexes)
frames['root_pos'].append(root_pos)
frames['root_rot'].append(root_rot)
frames['joint_pos'].append(joint_pos)
return joint_names, frames
def _parse_frame(xml_frame, joint_indexes):
    """Pull root position/rotation and joint positions out of one <MotionFrame>."""
    root_pos_elem = xml_frame.find('RootPosition')
    root_rot_elem = xml_frame.find('RootRotation')
    n_joints = len(joint_indexes)
    joint_pos_elem = xml_frame.find('JointPosition')
    if joint_pos_elem is None:
        raise RuntimeError('<JointPosition> not found')
    return (_parse_list(root_pos_elem, 3),
            _parse_list(root_rot_elem, 3),
            _parse_list(joint_pos_elem, n_joints, joint_indexes))
def _parse_list(xml_elem, length, indexes=None):
if indexes is None:
indexes = range(length)
elems = [float(x) for idx, x in enumerate(xml_elem.text.rstrip().split(' ')) if idx in indexes]
if len(elems) != length:
raise RuntimeError('invalid number of elements')
return elems
def mmm2csv(src):
    """Convert a parsed MMM motion file into numpy arrays.

    :param src: pathlib.Path to the MMM XML file.
    :return: (joints, root_pos, root_rot, values, joint_dict) where
        joints is the list of joint base names, root_pos/root_rot are (T, 3)
        arrays, values is a (J, T, 3) array of per-joint x/y/z values with
        missing axes zero-filled, and joint_dict maps joint -> axis -> (T,)
        array of raw values.
    """
    joint_names, mmm_dict = parse_motions(src.as_posix())[0]
    # FIX: np.float was removed in NumPy 1.24; the builtin float is the
    # documented, equivalent replacement.
    root_pos = np.array(mmm_dict['root_pos'], dtype=float) * 0.001 / 0.056444
    root_rot = np.array(mmm_dict['root_rot'], dtype=float)
    joint_pos = np.array(mmm_dict['joint_pos'], dtype=float)
    joint_dict = {}
    for idx, name in enumerate(joint_names):
        # Names appear to encode "<joint><axis>_..."; a trailing 't' means no
        # axis letter, defaulting to 'y'. (Inferred from the parsing below —
        # confirm against the MMM joint naming convention.)
        if name.split('_')[0][-1] != 't':
            xyz = name.split('_')[0][-1]
            joint = name.split('_')[0][:-1]
        else:
            xyz = 'y'
            joint = name.split('_')[0]
        if joint not in joint_dict:
            joint_dict[joint] = dict()
        joint_dict[joint][xyz] = joint_pos[:, idx]
    joints = []
    values = []
    for cnt, joint in enumerate(joint_dict):
        joint_vals = []
        joints.append(joint)
        for axes in ['x', 'y', 'z']:
            if axes in joint_dict[joint]:
                joint_vals.append(joint_dict[joint][axes])
            else:
                # Zero-fill axes the joint does not have.
                joint_vals.append(np.zeros_like(root_pos[:, 0]))
        values.append(np.stack(joint_vals, axis=1))
    values = np.stack(values, axis=0)
    return joints, root_pos, root_rot, values, joint_dict
def mmm2amc(src, dest):
    """Convert an MMM XML motion file to an AMC motion file.

    :param src: pathlib.Path of the source MMM file.
    :param dest: pathlib.Path of the AMC file to write (parents are created).
    """
    joints, root_pos, root_rot, values, joint_dict = mmm2csv(src)
    # Identity map kept to make any future axis remapping explicit.
    axesMap = {'x':'x', 'y':'y', 'z':'z'}
    # Swap the Y and Z axes for the AMC coordinate convention.
    root_pos = root_pos[..., [0,2,1]]
    ## convert to quaternion and back by changing the axes order
    root_rot = quat2eulerbatch(qinv_np(euler2quatbatch(root_rot, 'sxyz')[...,[0, 1, 3, 2]]), 'sxyz') * 180/np.pi
    values = quat2eulerbatch(qinv_np(euler2quatbatch(values, 'sxyz')[..., [0, 1, 3, 2]]), 'sxyz') * 180/np.pi
    # (removed an unused `joint_pos = []` accumulator that was never read)
    for cnt, joint in enumerate(joints):
        for axes_num, axes in enumerate(['x', 'y', 'z']):
            if axesMap[axes] in joint_dict[joint]:
                joint_dict[joint][axesMap[axes]] = values[cnt, :, axes_num]
    lines = ["#!OML:ASF H:",
             ":FULLY-SPECIFIED",
             ":DEGREES"]
    for idx in range(root_pos.shape[0]):
        # One block per frame: frame number, root line, then one line per joint.
        lines.append('{}'.format(idx+1))
        lines.append('root' + (' {}'*6).format(root_pos[idx, 0], root_pos[idx, 1], root_pos[idx, 2],
                                               root_rot[idx, 0], root_rot[idx, 1], root_rot[idx, 2]))
        for cnt, joint in enumerate(joint_dict):
            format_str = '{} ' * (len(joint_dict[joint])+1)
            format_str = format_str[:-1]
            joint_vals = []
            for axes in ['x', 'y', 'z']:
                if axes in joint_dict[joint]:
                    joint_vals.append(joint_dict[joint][axes][idx])
            lines.append(format_str.format(*([joint] + joint_vals)))
    lines = '\n'.join(lines) + '\n'
    os.makedirs(dest.parent, exist_ok=True)
    with open(dest, 'w') as fp:
        # FIX: writelines() on a str writes it character by character;
        # write() emits the assembled document in one call.
        fp.write(lines)
| nilq/baby-python | python |
from django.apps import apps as django_apps

# Dotted path to this app's AppConfig; Django (<3.2) reads it at startup.
# NOTE(review): `django_apps` is unused in this snippet — presumably kept as a
# re-export; confirm before removing.
# FIX: stripped extraction residue ("| nilq/baby-python | python |") that had
# been fused onto the end of this line, making it a syntax error.
default_app_config = 'scrapyd_dash.apps.ScrapydDashConfig'
__all__ = ['auth', 'constants', 'controllers', 'forms'] | nilq/baby-python | python |
#!/bin/python
import sys

# Read one line from stdin and echo it as an integer, or report a bad string.
# FIX: ported from Python 2 (`raw_input`, print statements) to Python 3 —
# the Python 2 syntax does not parse under the interpreter the rest of this
# code targets. Output is unchanged.
S = input().strip()
try:
    r = int(S)
    print(r)
except ValueError:
    print("Bad String")
#!/usr/bin/env python3
# coding: utf-8
"""Automatic EcoFlex sequences annotation pipeline.
Edits:
- Recolor all AmpR with the same color as YTK parts
- Add AmpR terminator feature with standard color
"""
import copy
import io
import itertools
import json
import re
import os
import warnings
import sys
import bs4 as bs
import fs.path
import six
import tqdm
import requests
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq, translate
from Bio.SeqFeature import (
SeqFeature,
FeatureLocation,
CompoundLocation,
Reference,
)
from Bio.SeqIO import read, write
from Bio.SeqRecord import SeqRecord
from Bio.Restriction import BsaI
from fs.zipfs import ReadZipFS
from moclo.record import CircularRecord
from moclo.regex import DNARegex
# AddGene locations of the EcoFlex kit: the GenBank archive and the kit page.
ZIP_URL = "https://media.addgene.org/cms/filer_public/1a/00/1a00a9f1-608f-453a-937a-7f46cf872dfc/ecoflex-kit-genbank-files.zip"
URL = "https://www.addgene.org/cloning/moclo/freemont-ecoflex/"
# Browser user-agent string sent with HTTP requests.
UA = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"
# Part sequence for automatic annotation / annotation relocation
AMPR_TERM = DNARegex("gattatcaaaaaggatctt")  # Reverse 3' of AmpR terminator
BB_PREFIX = DNARegex("gaattcgcggccgcttctag")
# Promoter/terminator sequences used to locate resistance-cassette features.
CMR_PROMOTER = DNARegex(
    'tttagcttccttagctcctgaaaatctcgataactcaaaaaatacgcccggtagtgatcttatttcattatggtgaaagttggaacctcttacgtgcccgatcaa')
CMR_TERMINATOR = DNARegex(
    'accaataaaaaacgcccggcggcaaccgagcgttctgaacaaatccagatggagttctgaggtcattactggatctatcaacaggagtccaagcgagctcgatatcaaa')
AMPR_PROMOTER = DNARegex(
    'actcttcctttttcaatattattgaagcatttatcagggttattgtctcatgagcggatacatatttgaatgtatttagaaaaataaacaaataggggttccgcgcacatttccccgaaaagtgccacctg')
AMPR_TERMINATOR = DNARegex(
    'gattatcaaaaaggatcttcacctagatccttttaaattaaaaatgaagttttaaatcaatctaaagtatatatgagtaaacttggtctgacag')
# Plasmid name pattern: "name (alias)_XX"; color note pattern: "color: #rrggbb".
NAME_REGEX = re.compile(r"([^ ]*) \(([^\)]*)\)(_[A-Z]{2})")
COLOR_REGEX = re.compile(r"color: (#[0-9a-fA-F]{6})")
# Plasmids whose complete sequence must be fetched from AddGene directly.
FULL_SEQUENCES = {
    "pBP-BBa_B0034": "https://www.addgene.org/72980/sequences/",
    "pBP-SJM901": "https://www.addgene.org/72966/sequences/",
}
# Partial sequences from the reference EcoFlex paper
PROMOTERS = {
    "pBP-SJM901": "CTATTTTACAGCTAGCTCAGTCCTAGGTATAATGCTAGCGTAC",
    "pBP-SJM902": "CTATTTTACAGCTAGCTCAGTCCTAGGGATTATGCTAGCGTAC",
    "pBP-SJM903": "CTATCTTATAGCTAGCTCAGTCCTTGGGATTATGCTAGCGTAC",
    "pBP-SJM905": "CTATTTTATAGCTAGCTCAGTCCTTGGGATTATGCTAGCGTAC",
    "pBP-SJM906": "CTATTTGATGGCTAGCTCAGTCCTAGGGATTGTGCTAGCGTAC",
    "pBP-SJM908": "CTATTTTATAGCTAGCTCAGCCCTTGGTATTATGCTAGCGTAC",
    "pBP-SJM910": "CTATTTGATGGCTAGCTCAGTCCTTGGTATTATGCTAGCGTAC",
    "pBP-SJM911": "CTATTTGACAGCTAGCTCAGTCCTTGGTACTGTGCTAGCGTAC",
    "pBP-SJM912": "CTATTTGATAGCTAGCTCAGTCCTAGGTACTATGCTAGCGTAC",
    "pBP-SJM914": "CTATTTGATGGCTAGCTCAGTCCTAGGGATTGTGCTAGCGTAC",
    "pBP-SJM915": "CTATTTTATGGCTAGCTCAGTCCTTGGTATTATGCTAGCGTAC",
}
def translate_color(feature):
    """Convert a "color: #rrggbb" note on `feature` into ApEinfo color
    qualifiers, removing the note. No-op when no such note is present."""
    notes = feature.qualifiers.get("note", [])
    color_note = None
    for note in notes:
        if note.startswith("color: #"):
            color_note = note
            break
    if color_note is None:
        return
    hex_color = COLOR_REGEX.match(color_note).group(1).lower()
    feature.qualifiers["note"].remove(color_note)
    # Same color for both strands, plus the standard arrow graphic format.
    feature.qualifiers["ApEinfo_fwdcolor"] = [hex_color]
    feature.qualifiers["ApEinfo_revcolor"] = [hex_color]
    feature.qualifiers["ApEinfo_graphicformat"] = [
        "arrow_data {{0 1 2 0 0 -1} {} 0} width 5 offset 0"
    ]
if __name__ == "__main__":
warnings.simplefilter("ignore")
session = requests.Session()
# load the kit inventory page
with session.get(URL) as res:
soup = bs.BeautifulSoup(res.text, "html.parser")
# load the zip archive
with session.get(ZIP_URL) as res:
archive = ReadZipFS(six.BytesIO(res.content)).opendir(
"/EcoFlex - GenBank/"
)
# load inventory
inventory = soup.find("table", class_="kit-inventory-table")
it = tqdm.tqdm(inventory.find_all("tr")[1:])
for row in it:
# extract each row
row_text = row.find("a").text
# get antibiotics resistances
resistance = row.find("span", class_="resistance-spacing").text.strip()
name = id_ = row_text.strip()
# Update the progress bar
it.set_description(id_)
# TODO: entry vector not supported
if id_ in ('pBP', 'pBP-ORF', 'pBP-lacZ'):
continue
elif id_ == "pBP-T7_RBS-His6-Thrombin":
name = id_ = "pBP-T7-RBS-His6"
elif id_.startswith("pBP-T7_"):
name = id_ = id_.replace("_", "-")
elif id_.startswith("pBP-ORF-"):
name = id_ = id_.replace("pBP-ORF-", "pBP-")
elif id_ == "pBP-HexHis":
name = id_ = "pBP-His6"
elif id_.startswith("pBP_BBa"):
name = id_ = id_.replace("pBP_BBa", "pBP-BBa")
# extract info
info = {
"resistance": resistance,
# "name": id_,
"id": id_,
# "type": type_,
"location": row.find("b").text.strip().replace(" / ", ""),
"addgene_id": row.find("a").get("href").strip("/"),
}
# get the online full sequence
if id_ in FULL_SEQUENCES:
# Load the AddGene sequences page and get the full sequence
with requests.get(FULL_SEQUENCES[id_]) as res:
soup = bs.BeautifulSoup(res.text, "html.parser")
section = soup.find("section", id="depositor-full")
gb_url = soup.find("a", class_="genbank-file-download").get('href')
# Get the Genbank file
with requests.get(gb_url) as res:
gb = CircularRecord(read(io.StringIO(res.text), "gb"))
# get the pBP-SJM901 sequence and patch it
elif id_.startswith("pBP-SJM"):
# get pBP-SJM
# Load the AddGene sequences page and get the full sequence
with requests.get(FULL_SEQUENCES["pBP-SJM901"]) as res:
soup = bs.BeautifulSoup(res.text, "html.parser")
section = soup.find("section", id="depositor-full")
gb_url = soup.find("a", class_="genbank-file-download").get('href')
# Get the Genbank file
with requests.get(gb_url) as res:
gb = CircularRecord(read(io.StringIO(res.text), "gb"))
# replace the target sequence
gb.seq = Seq(
str(gb.seq.upper()).replace(PROMOTERS["pBP-SJM901"], PROMOTERS[id_])
)
gb.description = gb.description.replace("SJM901", id_[4:])
gb.keywords = [id_[4:]]
# get the ZIP sequence
else:
path = next(
(
f
for f in archive.walk.files('/')
if fs.path.basename(f).lower() == '{}.gb'.format(id_).lower()
),
None,
)
if id_ == "pBP-His6":
path = "/Level 0/Tags/pBP-His6_tag.gb"
elif id_ == "pBP-T7-RBS-His6":
path = "/Level 0/T7 parts/pBP-T7_RBS_His6.gb"
elif id_ == "pBP-T7-RBS":
path = "/Level 0/T7 parts/pBP-T7_RBS.gb"
elif id_ == "pBP-Strep(II)":
path = "/Level 0/Tags/pBP-StrepII_tag.gb"
elif id_ == "pBP-pET-RBS":
path = "/Level 0/RBS/pBP-PET_RBS.gb"
elif id_ == "pBP-BBa_B0034":
path = "/Level 0/Promoters/pBP_BBa_B0034.gb"
if path is None:
print("COULD NOT FIND", id_)
continue
with archive.open(path) as f:
gb = CircularRecord(read(f, "gb"))
# Copy well documented information from one record to the other
gb.seq = gb.seq.upper()
gb.seq.alphabet = IUPAC.unambiguous_dna
gb.id = id_
gb.name = name
gb.annotations['references'].clear() # FIXME ?
# quick feature accessor
def get_features_from_label(label):
return (
f for f in gb.features if label in f.qualifiers.get("label", [])
)
def get_features_from_note(note):
return (
f for f in gb.features if note in f.qualifiers.get("note", [])
)
def get_features(name):
return itertools.chain(
get_features_from_label(name),
get_features_from_note(name),
)
# Correct overlapping features by setting the origin just before the
# biobrick prefix
pref = next(itertools.chain(
get_features("BioBrick prefix"),
get_features_from_note("BioBrick prefix")
))
if pref.location is None:
match = BB_PREFIX.search(gb)
pref.location = FeatureLocation(
start=match.start(),
end=match.end(),
strand=1,
)
gb <<= pref.location.start - 1
# AmpR recolor and annotations
ampr = next(get_features("AmpR"), None)
if ampr is not None:
ampr.qualifiers = {
"label": "AmpR",
"codon_start": 1,
"gene": "bla",
"product": "beta-lactamase",
"function": "ampicilin and caribenicillin resistance",
"translation": ampr.extract(gb.seq).translate(),
"note": ["color: #9F4240"],
"db_xref": [
"GO:0005515",
"GO:0008800",
"GO:0016787",
"GO:0030655",
"GO:0046677",
"InterPro:IPR000871",
"InterPro:IPR023650",
"InterPro:IPR012338",
"PDB:1ZG4",
"UniProtKB/Swiss-Prot:P62593",
],
"EC_number": "3.5.2.6",
}
old_prom = next(get_features_from_note('AmpR promoter'), None)
if old_prom is not None:
gb.features.remove(old_prom)
ampr_prom = next(get_features_from_label("AmpR promoter"), None)
if ampr_prom is None:
start, end = AMPR_PROMOTER.search(gb.seq).span()
ampr_prom = SeqFeature(FeatureLocation(start, end, -1))
gb.features.append(ampr_prom)
ampr_prom.type = "promoter"
ampr_prom.qualifiers["label"] = ["AmpR Promoter"]
ampr_prom.qualifiers["note"] = ["color: #ff6666"]
ampr_term = next(get_features_from_label("AmpR terminator"), None)
if ampr_term is None:
start, end = AMPR_TERMINATOR.search(gb.seq).span()
ampr_term = SeqFeature(FeatureLocation(start, end, -1))
gb.features.append(ampr_term)
ampr_term.type = 'terminator'
ampr_term.qualifiers['label'] = 'AmpR Terminator'
ampr_term.qualifiers['note'] = ['color: #ff6666']
# CmR recolor and annotations
cmr = next(get_features('CmR'), None)
if cmr is not None:
cmr.qualifiers.update(
{
"codon_start": [1],
"gene": ["cat"],
"product": ["chloramphenicol acetyltransferase"],
"label": ["CmR"],
"function": ["chloramphenicol resistance"],
"note": ["color: #0000ff; direction: LEFT"],
"EC_number": ["2.3.1.28"],
"db_xref": [
"UniProtKB/Swiss-Prot:P62577",
"GO:0008811",
"GO:0016740",
"GO:0016746",
"GO:0046677",
"PFAM:PF00302",
],
}
)
cmr_prom = next(get_features("CamR Promoter"), None)
if cmr_prom is None:
start, end = CMR_PROMOTER.search(gb.seq).span()
cmr_prom = SeqFeature(location=FeatureLocation(start, end, -1))
gb.features.append(cmr_prom)
cmr_prom.type = "promoter"
cmr_prom.qualifiers.update(
{
"label": ["CmR Promoter"],
"note": ["color: #66ccff; direction: LEFT"],
}
)
cmr_term = next(get_features_from_label("CamR Terminator"), None)
if cmr_term is None:
start, end = CMR_TERMINATOR.search(gb.seq).span()
cmr_term = SeqFeature(location=FeatureLocation(start, end, -1))
gb.features.append(cmr_term)
cmr_term.type = "terminator"
cmr_term.qualifiers.update(
{
"label": ["CmR Terminator"],
"note": ["color: #66ccff; direction: LEFT"],
}
)
old_term = next(get_features_from_note('lambda t0 terminator'), None)
if old_term is not None:
gb.features.remove(old_term)
# GFP recolor and annotations
gfp = next(get_features_from_label("GFP"), None)
if gfp is not None:
gfp.qualifiers.update(
{
"label": "GFP",
"note": ["color: #34ff03"],
"product": ["green fluorescent protein"],
"gene": ["GFP"],
"db_xref": [
"PDB:1H6R",
"InterPro:IPR009017",
"InterPro:IPR011584",
"InterPro:IPR000786",
"PFAM:PF01353",
"GO:0008218",
"GO:0006091",
"GO:0018298",
"UniProtKB/Swiss-Prot:P42212",
],
"inference": [
"DESCRIPTION:alignment:blastx:UniProtKB/Swiss-Prot:P42212"
],
}
)
# mRFP1 recolor and annotations
rfp = next(get_features_from_label("mRFP1"), None)
if rfp is not None:
rfp.qualifiers.update(
{
"label": "mRFP",
"product": "mRFP1",
"note": [
"monomeric derivative of DsRed (Campbell et al., 2002)",
"iGEM Part: BBa_E1010",
"color: #c16969",
],
"db_xref": [
"UniProtKB/Swiss-Prot:Q9U6Y8",
"GO:0008218",
"GO:0006091",
"GO:0018298",
"PDB:2H5R",
],
}
)
# patch pBP-SJM promoters
if id_.startswith("pBP-SJM"):
promoter = next(get_features_from_label("J23119 promoter"))
promoter.type = "promoter"
promoter.qualifiers.update({
"function": ["strong constitutive promoter"],
"note": ["color: #00a1ee; direction: RIGHT"],
})
if id_ == "pBP-SJM901":
promoter.qualifiers['label'] = "J23119 Promoter"
promoter.qualifiers['note'].insert(0, "Anderson series consensus promoter")
else:
promoter.qualifiers['label'] = "{} Promoter".format(id_[4:])
promoter.qualifiers['note'].insert(0, "derived from pBP-SJM901 (BBa_J23119)")
# if any(f.location is None for f in gb.features):
# continue
for f in gb.features:
if f.location is None:
print(gb, f)
# sort features by start location, source always first
gb.features.sort(
key=lambda f: (-len(gb.seq)) * (f.type == "source")
+ f.location.start
)
# translate color from notes to ApEinfo
for feature in gb.features:
translate_color(feature)
# Add an EcoFlex article reference
ref = Reference()
ref.authors = 'Moore SJ, Lai HE, Kelwick RJ, Chee SM, Bell DJ, Polizzi KM, Freemont PS.'
ref.title = 'EcoFlex: A Multifunctional MoClo Kit for E. coli Synthetic Biology.'
ref.journal = 'ACS Synth Biol 2016;5:1059-1069.'
ref.pubmed_id = '27096716'
gb.annotations['references'].append(ref)
# Fix the direct submission reference
ref = Reference()
# ref = gb.annotations["references"][-1]
ref.authors = "Larralde M"
ref.title = "Direct Submission"
ref.journal = "Distributed with the MoClo Python library\nhttps://github.com/althonos/moclo"
gb.annotations['references'].append(ref)
# write the final record
dst_dir = os.path.abspath(
os.path.join(
__file__, "..", "..", "moclo-ecoflex", "registry", "ecoflex"
)
)
dst_file = os.path.join(dst_dir, "{}.gb").format(info["id"])
write(gb, dst_file, "gb")
| nilq/baby-python | python |
# hsrp parameters
# Supported SRP group sizes in bits; index-aligned with _ng_const below.
ng_order = (3072,)
# (N, g) pairs per group size: N as a hex string, g as a decimal/hex string.
_ng_const = (
    # 3072
    (
        """\
FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08\
8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B\
302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9\
A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6\
49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8\
FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D\
670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C\
180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718\
3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D\
04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D\
B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226\
1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C\
BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC\
E0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
        "5",
    ),
)


def get_srp_context(ng_group_len, hashfunc, salt_len=16, secret_len=32):
    """Build an SRP parameter context for the requested group size.

    Args:
        ng_group_len: group modulus size in bits; must be one of ``ng_order``.
        hashfunc: hash constructor to use for the SRP computations.
        salt_len: salt length in bytes.
        secret_len: ephemeral secret length in bytes.

    Returns:
        dict with keys ``hashfunc``, ``N``, ``g``, ``N_len``, ``salt_len``
        and ``secret_len``.

    Raises:
        ValueError: if ``ng_group_len`` is not a supported group size
            (previously surfaced as an unhelpful ``list.index`` error).
    """
    try:
        idx = ng_order.index(ng_group_len)
    except ValueError:
        raise ValueError(
            "unsupported SRP group length: {} (supported: {})".format(
                ng_group_len, ng_order
            )
        ) from None
    n_hex, g_str = _ng_const[idx]
    ctx = {
        "hashfunc": hashfunc,
        "N": int(n_hex, 16),
        "g": int(g_str, 16),
        "N_len": ng_group_len,
        "salt_len": salt_len,
        "secret_len": secret_len,
    }
    return ctx
| nilq/baby-python | python |
import random
import kivy
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
kivy.require('1.9.0')
class MyRoot(BoxLayout):
    """Root widget of the affirmations app.

    Expects the kv layout to provide a widget with id ``rand_aff`` whose
    ``text`` property displays the current affirmation.
    """

    # Built once at class level; the original rebuilt this list on every
    # button press inside generate_affirmation().
    AFFIRMATIONS = [
        "I am the architect of my life; \nI build its foundation and choose its contents.",
        "I am brimming with energy and \noverflowing with joy.",
        "My body is healthy; my mind is brilliant; \nmy soul is tranquil.",
        "I forgive those who have harmed me in my past and \npeacefully detach from them.",
        "A river of compassion washes away my anger \nand replaces it with love.",
        "Creative energy surges through me and leads \nme to new and brilliant ideas.",
        "The only thing to fear is fear itself.",
        "My ability to exceed my goals is limitless; \nmy potential to succeed is infinite.",
        "I acknowledge my own self-worth; \nmy confidence is soaring.",
        "Everything that is happening now is \nhappening for my ultimate good.",
        "I woke up today with strength in my \nheart and clarity in my mind.",
    ]

    def __init__(self):
        super(MyRoot, self).__init__()

    def generate_affirmation(self):
        """Show a randomly chosen affirmation in the ``rand_aff`` widget."""
        # random.choice replaces manual randint(0, len-1) indexing.
        self.rand_aff.text = random.choice(self.AFFIRMATIONS)
class RandAffirmations(App):
    """Kivy application entry point for the affirmation generator."""

    def build(self):
        """Create and return the root widget."""
        root = MyRoot()
        return root
if __name__ == '__main__':
    # Guard so importing this module does not launch the GUI.
    randAffirmations = RandAffirmations()
    randAffirmations.run()
# Copyright 2020 Regents of the University of Minnesota.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from mtap import GenericLabel, Location, Document
from mtap.data._label_indices import presorted_label_index
# Shared document that every test label attaches to.
document = Document('plaintext', text='blah')


@pytest.fixture
def tested():
    """A descending label index over eight labels.

    The literal list is in ascending order, as ``presorted_label_index``
    requires; ``i`` records each label's position in the *descending* view
    (0 = first of the descending index), which the tests assert against.
    """
    return presorted_label_index([
        GenericLabel(0, 5, document=document, i=7),
        GenericLabel(0, 7, document=document, i=6),
        GenericLabel(2, 6, document=document, i=5),
        GenericLabel(6, 7, document=document, i=4),
        GenericLabel(6, 8, document=document, i=3),
        GenericLabel(9, 10, document=document, i=2),
        GenericLabel(9, 13, document=document, i=1),
        GenericLabel(9, 13, document=document, i=0),
    ]).descending()
def test_getitem(tested):
    """Positional indexing returns the label at that descending position."""
    label = tested[3]
    assert label == GenericLabel(6, 8, document=document, i=3)
def test_getitem_first(tested):
    """Index 0 is the first label of the descending view."""
    first = tested[0]
    assert first == GenericLabel(9, 13, document=document, i=0)
def test_getitem_last(tested):
    """The highest valid index is the last label of the descending view."""
    last = tested[7]
    assert last == GenericLabel(0, 5, document=document, i=7)
def test_getitem_negative(tested):
    """Negative indices count back from the end."""
    label = tested[-4]
    assert label == GenericLabel(6, 7, document=document, i=4)
def test_getitem_last_negative(tested):
    """Index -1 is the final label."""
    label = tested[-1]
    assert label == GenericLabel(0, 5, document=document, i=7)
def test_getitem_slice(tested):
    """A simple slice returns the labels at those positions."""
    expected = [
        GenericLabel(9, 10, document=document, i=2),
        GenericLabel(6, 8, document=document, i=3),
    ]
    assert tested[2:4] == expected
def test_getitem_slice_end(tested):
    """A slice reaching the last index includes the final label."""
    expected = [
        GenericLabel(6, 7, document=document, i=4),
        GenericLabel(2, 6, document=document, i=5),
        GenericLabel(0, 7, document=document, i=6),
        GenericLabel(0, 5, document=document, i=7),
    ]
    assert tested[4:8] == expected
def test_getitem_slice_open_left(tested):
    """An open-left slice starts from the first label."""
    expected = [
        GenericLabel(9, 13, document=document, i=0),
        GenericLabel(9, 13, document=document, i=1),
        GenericLabel(9, 10, document=document, i=2),
        GenericLabel(6, 8, document=document, i=3),
    ]
    assert tested[:4] == expected
def test_getitem_slice_open_right(tested):
    """An open-right slice runs through the last label."""
    expected = [
        GenericLabel(6, 7, document=document, i=4),
        GenericLabel(2, 6, document=document, i=5),
        GenericLabel(0, 7, document=document, i=6),
        GenericLabel(0, 5, document=document, i=7),
    ]
    assert tested[4:] == expected
def test_getitem_slice_neg_right(tested):
    """A negative stop index excludes the final label."""
    expected = [
        GenericLabel(6, 7, document=document, i=4),
        GenericLabel(2, 6, document=document, i=5),
        GenericLabel(0, 7, document=document, i=6),
    ]
    assert tested[4:-1] == expected
def test_getitem_slice_neg_left(tested):
    """Negative start and stop indices slice from the end."""
    expected = [
        GenericLabel(6, 7, document=document, i=4),
        GenericLabel(2, 6, document=document, i=5),
        GenericLabel(0, 7, document=document, i=6),
    ]
    assert tested[-4:-1] == expected
def test_getitem_not_idx_slice(tested):
    # Keys that are neither an int nor a slice must raise TypeError.
    with pytest.raises(TypeError):
        tested['foo']
def test_getitem_slice_step_not_one(tested):
    """Slicing with a step other than one honours the step.

    NOTE: the original name ``tested_getitem_slice_step_not_one`` did not
    match pytest's ``test_`` collection prefix, so this test never ran.
    Also renamed the local that shadowed the builtin ``slice``.
    """
    sliced = tested[1:4:2]
    assert sliced == [
        GenericLabel(9, 13, document=document, i=1),
        GenericLabel(6, 8, document=document, i=3),
    ]
def test_at(tested):
    """at() with a label argument matches by location."""
    matches = tested.at(GenericLabel(2, 6, document=document))
    assert matches[0] == GenericLabel(2, 6, document=document, i=5)
def test_at_location(tested):
    """at() also accepts a bare Location."""
    matches = tested.at(Location(2, 6))
    assert matches[0] == GenericLabel(2, 6, document=document, i=5)
def test_at_location_multiple(tested):
    """at() returns every label at the exact location."""
    expected = [
        GenericLabel(9, 13, document=document, i=0),
        GenericLabel(9, 13, document=document, i=1),
    ]
    assert tested.at(Location(9, 13)) == expected
def test_at_location_not_found(tested):
    """at() on a location with no labels returns an empty result."""
    result = tested.at(Location(10, 10))
    assert result == []
def test_len(tested):
    """len() reports the number of labels in the index."""
    size = len(tested)
    assert size == 8
def test_covering(tested):
    """covering() yields labels whose span contains [2, 4)."""
    expected = [
        GenericLabel(2, 6, document=document, i=5),
        GenericLabel(0, 7, document=document, i=6),
        GenericLabel(0, 5, document=document, i=7),
    ]
    assert list(tested.covering(2, 4)) == expected
def test_covering_empty(tested):
    """No label covers the whole span [4, 10)."""
    result = tested.covering(4, 10)
    assert result == []
def test_empty_covering(tested):
    """An empty covering() result iterates to an empty list."""
    # NOTE(review): this duplicates test_covering_empty, via list().
    covering = tested.covering(4, 10)
    assert list(covering) == []
def test_inside(tested):
    """inside() yields labels fully contained in [1, 8)."""
    expected = [
        GenericLabel(6, 8, document=document, i=3),
        GenericLabel(6, 7, document=document, i=4),
        GenericLabel(2, 6, document=document, i=5),
    ]
    assert list(tested.inside(1, 8)) == expected
def test_inside_before(tested):
    """No label fits entirely inside [0, 3)."""
    result = tested.inside(0, 3)
    assert list(result) == []
def test_inside_after(tested):
    """No label fits inside a span past every label."""
    result = tested.inside(15, 20)
    assert list(result) == []
def test_inside_many(tested):
    """inside() narrows correctly on an index with many duplicate spans,
    and the narrowed view can itself be narrowed again.
    """
    # (begin, end, count) triples reproduce the original literal list.
    spans = [(0, 3, 7), (2, 5, 7), (3, 5, 15), (5, 6, 11), (6, 6, 7), (6, 10, 20)]
    labels = [
        GenericLabel(begin, end, document=document)
        for begin, end, count in spans
        for _ in range(count)
    ]
    tested = presorted_label_index(labels)

    inside = tested.inside(3, 6)
    expected = [
        GenericLabel(3, 5, document=document) for _ in range(15)
    ] + [
        GenericLabel(5, 6, document=document) for _ in range(11)
    ]
    assert inside == expected

    inside = inside.inside(5, 6)
    assert inside == [GenericLabel(5, 6, document=document) for _ in range(11)]
def test_begins_inside(tested):
    """beginning_inside() yields labels whose start falls in [1, 9)."""
    expected = [
        GenericLabel(6, 8, document=document, i=3),
        GenericLabel(6, 7, document=document, i=4),
        GenericLabel(2, 6, document=document, i=5),
    ]
    assert list(tested.beginning_inside(1, 9)) == expected
def test_begins_inside_empty(tested):
    """No label begins inside [3, 5)."""
    result = tested.beginning_inside(3, 5)
    assert result == []
def test_ascending(tested):
    """ascending() reverses the descending view."""
    spans = [(0, 5), (0, 7), (2, 6), (6, 7), (6, 8), (9, 10), (9, 13), (9, 13)]
    expected = [
        GenericLabel(begin, end, document=document, i=7 - n)
        for n, (begin, end) in enumerate(spans)
    ]
    assert tested.ascending() == expected
def test_descending(tested):
    """descending() on an already-descending index is a no-op."""
    result = tested.descending()
    assert result == tested
def test_before(tested):
    """before() yields labels ending at or before index 8."""
    spans = [(6, 8), (6, 7), (2, 6), (0, 7), (0, 5)]
    expected = [
        GenericLabel(begin, end, document=document, i=n + 3)
        for n, (begin, end) in enumerate(spans)
    ]
    assert tested.before(8) == expected
def test_before_start(tested):
    """Nothing ends before index 3."""
    result = tested.before(3)
    assert result == []
def test_after(tested):
    """after() yields labels starting at or after index 2."""
    spans = [(9, 13), (9, 13), (9, 10), (6, 8), (6, 7), (2, 6)]
    expected = [
        GenericLabel(begin, end, document=document, i=n)
        for n, (begin, end) in enumerate(spans)
    ]
    assert tested.after(2) == expected
def test_contains_true(tested):
    """Membership holds for a label equal in location and fields."""
    label = GenericLabel(9, 13, document=document, i=0)
    assert label in tested
def test_contains_false_location_in(tested):
    """Matching location alone is not membership; fields must match too."""
    label = GenericLabel(9, 13, document=document)
    assert label not in tested
def test_contains_false_location_not_in(tested):
    """A label at an absent location is not contained."""
    label = GenericLabel(0, 4, document=document)
    assert label not in tested
def test_contains_false_not_label(tested):
    """Non-label objects are never contained."""
    assert not ("blub" in tested)
def test_reversed(tested):
    """reversed() of a descending index iterates in ascending order."""
    spans = [(0, 5), (0, 7), (2, 6), (6, 7), (6, 8), (9, 10), (9, 13), (9, 13)]
    expected = [
        GenericLabel(begin, end, document=document, i=7 - n)
        for n, (begin, end) in enumerate(spans)
    ]
    assert list(reversed(tested)) == expected
def test_count_in(tested):
    """count() finds a single occurrence of a unique label."""
    target = GenericLabel(2, 6, document=document, i=5)
    assert tested.count(target) == 1
def test_count_multiple(tested):
    """count() tallies every equal label, not just the first match."""
    index = presorted_label_index([
        GenericLabel(2, 6, document=document, i=2),
        GenericLabel(6, 7, document=document, i=3),
        GenericLabel(6, 8, document=document, i=4),
        GenericLabel(9, 10, document=document, i=5),
        GenericLabel(9, 13, document=document, i=6),
        GenericLabel(9, 13, document=document, i=7),
        GenericLabel(9, 13, document=document, i=6)
    ]).descending()
    target = GenericLabel(9, 13, document=document, i=6)
    assert index.count(target) == 2
def test_count_different_label(tested):
    """count() is zero when the location matches but the fields differ."""
    target = GenericLabel(9, 13, document=document, x=2)
    assert tested.count(target) == 0
def test_count_not_label(tested):
    """count() of a non-label object is zero."""
    assert tested.count("blub") == 0
def test_count_location_not_in(tested):
    """count() is zero for a location absent from the index."""
    target = GenericLabel(4, 5, document=document)
    assert tested.count(target) == 0
def test_filter(tested):
    """filter() keeps only labels matching the predicate, in index order."""
    even = tested.filter(lambda label: label.i % 2 == 0)
    expected = [
        GenericLabel(9, 13, document=document, i=0),
        GenericLabel(9, 10, document=document, i=2),
        GenericLabel(6, 7, document=document, i=4),
        GenericLabel(0, 7, document=document, i=6),
    ]
    assert even == expected
| nilq/baby-python | python |
from rpcb.message_dispatch import MessageDispatch
from rpcb.service import Service
import threading
import time
import random
import pika
import queue
import logging
import pika.adapters.blocking_connection
"""
消息调度器实现,用于批处理数据,多个数据来了会打包为一个batch输入到模型中,从而提高整体的吞吐量
"""
class BatchMessageDispatcher(MessageDispatch):
    """Message dispatcher that batches incoming messages.

    Incoming messages are buffered in a thread-safe queue; a GuardThread
    periodically hands them to ``service`` as a batch, improving overall
    throughput compared to per-message calls.
    """

    def __init__(self, callback, max_queue_size: int = 32, max_waiting_time: float = 0.1,
                 service: Service = None, max_batch_size=10):
        self.safe_queue = queue.Queue(maxsize=max_queue_size)  # thread-safe buffer for incoming messages
        self.message_send_and_ack = callback  # callback used to reply and ack a message
        self.service = service
        # BUGFIX: remember the batch cap; compute() previously hard-coded 10,
        # silently ignoring the max_batch_size argument.
        self.max_batch_size = max_batch_size
        guard = GuardThread(max_waiting_time=max_waiting_time, safe_queue=self.safe_queue,
                            compute=self.compute, max_batch_size=max_batch_size)
        guard.start()

    def compute(self, status):
        """Drain up to ``max_batch_size`` messages, run the service on the
        batch, then send each result back and ack it.

        ``status`` is kept for interface compatibility; the original only
        assigned it locally, which had no effect and was removed.
        """
        batch = list()
        batch_size = min(self.max_batch_size, self.safe_queue.qsize())
        for i in range(batch_size):
            batch.append(self.safe_queue.get())
        # Extract the raw bodies for the service call.
        service_needed_batch = [item['body'] for item in batch]
        pre_time = time.time()
        result = self.service(service_needed_batch)  # [body1, body2, ...]
        logging.info("service 用时" + str(time.time() - pre_time))
        # Send each result to its reply queue and ack the original delivery.
        for i in range(len(batch)):
            correlation_id = batch[i]['correlation_id']
            reply_to = batch[i]['reply_to']
            delivery_tag = batch[i]['delivery_tag']
            self.message_send_and_ack(result[i], correlation_id, reply_to, delivery_tag)
        logging.info("batch size大小:" + str(len(result)))
        return result

    def deliver_message(self, body: bytes, properties: pika.BasicProperties, delivery_tag: str) -> None:
        """Hand a message over to the dispatcher.

        :param body: incoming payload
        :param properties: message properties, used to route the reply
        :param delivery_tag: tag used to ack the delivery
        :returns: None
        """
        message = {'body': body, 'correlation_id': properties.correlation_id,
                   'reply_to': properties.reply_to, 'delivery_tag': delivery_tag}
        self.safe_append_queue(message=message)
        logging.debug("添加一个消息入队")

    def safe_append_queue(self, message):
        """Enqueue a message, raising if the queue was never created."""
        if self.safe_queue is None:
            raise Exception("消息队列为空")
        if self.safe_queue.full():
            logging.warning(self.safe_queue.qsize())
            # TODO wait until queue is not full
            logging.error("队列长度超出范围")
        # Note: put() blocks until space is available when the queue is full.
        self.safe_queue.put(message)

    def check_channel(self):
        """Return True when a channel has been attached and is usable.

        BUGFIX: ``self.channel`` is never assigned in ``__init__``, so the
        original attribute access raised AttributeError; a missing channel
        is now reported as unavailable instead.
        """
        return getattr(self, 'channel', None) is not None
class GuardThread(threading.Thread):
    """Daemon thread that watches the message queue and triggers batching.

    A batch is dispatched either when queued messages have waited longer
    than ``max_waiting_time`` or when at least ``max_batch_size`` messages
    are pending.
    """

    def __init__(self, max_waiting_time: float = 0.1, safe_queue=None, compute=None, max_batch_size=10):
        threading.Thread.__init__(self, daemon=True)
        self.safe_queue = safe_queue
        self.activate = False          # a wait timer is running for pending messages
        self.is_running = False        # a batch computation is in progress
        self.max_waiting_time = max_waiting_time  # maximum wait, in seconds
        self.max_batch_size = max_batch_size      # batch size cap for the service
        self.start_time = time.time()  # when the current wait period began
        self.compute = compute

    def run(self):
        """Poll the queue every 10 ms and dispatch batches when due."""
        while True:
            # With no pending messages there is nothing to time.
            if self.safe_queue.qsize() == 0:
                self.activate = False
            if self.safe_queue.qsize() > 0 and not self.is_running:
                # The first message after an idle (or just-finished) period
                # starts the wait timer.
                if not self.activate:
                    self.start_time = time.time()
                    self.activate = True
            # Dispatch when the oldest pending message has waited too long.
            if self.activate and time.time() - self.start_time > self.max_waiting_time:
                self.activate = False
                logging.info(" 等待时间:" + str(time.time() - self.start_time))
                # TODO self.is_running is unused today because compute() is
                # blocking; kept for a future multi-threaded extension.
                self.compute(self.is_running)
            # Dispatch immediately once a full batch is queued.
            if self.safe_queue.qsize() >= self.max_batch_size and not self.is_running:
                self.activate = False
                logging.info(" 等待时间:" + str(time.time() - self.start_time))
                self.compute(self.is_running)
            time.sleep(0.01)  # poll every 10 ms
if __name__ == '__main__':
    # Demo harness: feed 200 synthetic messages through the dispatcher
    # at a jittered rate and log the total elapsed time.
    from base.service import AsrService
    logging.basicConfig(level=logging.DEBUG, format='%(levelname) -10s %(asctime)s %(name) -20s %(funcName) -25s %(lineno) -5d: %(message)s')
    # Renamed from the original misspelled "message_disaptcher".
    message_dispatcher = BatchMessageDispatcher(None, max_queue_size=32, max_waiting_time=0.1, service=AsrService())

    class Prop:
        # Minimal stand-in for pika.BasicProperties.
        correlation_id = "correlation_id"
        reply_to = "reply_to"

    pre_time = time.time()
    for i in range(200):
        message_dispatcher.deliver_message(bytes("hello" + str(i), encoding='utf-8'), properties=Prop(), delivery_tag="delivery_tag")
        # Simulate jittered arrival: sleep 2-25 ms between messages.
        sleep_time = random.randint(2, 25) / 1000
        time.sleep(sleep_time)
    logging.info("计算总时长为: " + str(time.time() - pre_time))
| nilq/baby-python | python |
# Demo: plot wind vector data from the VCS sample dataset clt.nc.
import vcs
import cdms2
import os

x = vcs.init()  # create a VCS canvas
f = cdms2.open(os.path.join(vcs.sample_data, 'clt.nc'))
u = f("u")  # read the "u" variable (presumably zonal wind -- confirm)
v = f("v")  # read the "v" variable (presumably meridional wind -- confirm)
# Configure a vector graphic method for the plot.
V = x.createvector()
V.linecolor = 242
V.scale = 5.
V.type = "arrows"
V.reference = 6.
V.list()  # print the vector graphic method's settings
# Subsample every other point so the arrows stay readable.
x.plot(u[::2], v[::2], V)
x.png("vectors")  # save the canvas as vectors.png
x.interact()  # open the interactive viewer window
| nilq/baby-python | python |
# OpenWeatherMap API Key
# NOTE: keep real keys out of version control; prefer an environment
# variable or an untracked local config file.
api_key = "Goes here if needed"
# Note that only the currently used fields are shown unless show_all is set to True.
import os
import pandas as pd
import anytree
from anytree.search import find
from anytree.exporter import DotExporter
import collections
# Lightweight records used while assembling the FM tree.
PolicyTuple = collections.namedtuple('PolicyTuple', ['layer_id', 'agg_id', 'calc_rules'])
CalcRuleTuple = collections.namedtuple('CalcRuleTuple', ['policytc_id', 'calcrule_id', 'is_step', 'trig_start', 'trig_end'])
def load_df(path, required_file=None):
    """Read a CSV file into a DataFrame.

    Returns None when *path* is falsy and the file is optional; raises
    FileNotFoundError when *required_file* names a file that must exist.
    """
    if not path:
        if required_file:
            raise FileNotFoundError(f"Required File does not exist: {required_file}")
        return None
    return pd.read_csv(path)
def create_fm_tree(fm_programme_df, fm_policytc_df, fm_profile_df, fm_summary_df):
    """Build an anytree representation of the FM (financial module) structure.

    The tree has an 'Insured Loss' root, one node per programme
    level/agg_id pair annotated with its policy layers and calc rules,
    and item-level leaf nodes taken from the summary mapping.

    Args:
        fm_programme_df: fm_programme file contents as a DataFrame.
        fm_policytc_df: fm_policytc file contents as a DataFrame.
        fm_profile_df: fm_profile file contents as a DataFrame.
        fm_summary_df: fm_summary_map file contents as a DataFrame.

    Returns:
        Tuple ``(root, missing_node_link)`` where ``missing_node_link`` is
        True when at least one programme row could not be linked to a parent.
    """
    missing_node_link = False

    def get_policy_tc(agg_id, level_id):
        # Collect the policy layers (and their calc rules) attached to a node.
        policytc = fm_policytc_df.loc[
            (fm_policytc_df['agg_id'] == agg_id) & (fm_policytc_df['level_id'] == level_id)
        ]
        policy_list = []
        for _, policy in policytc.iterrows():
            # Find calc_rule
            profile = fm_profile_df.loc[fm_profile_df.policytc_id == policy.policytc_id]
            calc_rules = []
            for _, step in profile.iterrows():
                # trigger_start/trigger_end columns only exist for step policies.
                trig_start = step.trigger_start if hasattr(step, 'trigger_start') else 0
                trig_end = step.trigger_end if hasattr(step, 'trigger_end') else 0
                is_step_rule = (trig_end > 0 or trig_start > 0)
                calc_rules.append(CalcRuleTuple(
                    policytc_id=int(policy.policytc_id),
                    calcrule_id=int(step.calcrule_id),
                    is_step=is_step_rule,
                    trig_start=trig_start,
                    trig_end=trig_end,
                ))
            policy_list.append(
                PolicyTuple(
                    layer_id=int(policy.layer_id),
                    agg_id=int(policy.agg_id),
                    calc_rules=calc_rules,
                )
            )
        return len(policytc), policy_list

    # Walk the programme levels top-down (highest level first).
    level_ids = sorted(list(fm_programme_df.level_id.unique()), reverse=True)
    root = anytree.Node('Insured Loss', agg_id=1, level_id=max(level_ids)+1, policy_tc=None)
    for level in level_ids:
        agg_id_idxs = list(fm_programme_df[fm_programme_df.level_id == level].drop_duplicates(subset=['level_id', 'to_agg_id'], keep="first").index)
        for node_idx in agg_id_idxs:
            node_info = fm_programme_df.iloc[node_idx]
            layer_max, policy_list = get_policy_tc(node_info.to_agg_id, node_info.level_id)
            # Set parent node as root or find based on level/agg ids
            if level == max(level_ids):
                parent_node = root
            else:
                try:
                    matched_id = fm_programme_df.loc[(fm_programme_df.level_id == level+1) & (fm_programme_df.from_agg_id == node_info.to_agg_id)].to_agg_id.item()
                    parent_node = find(root, filter_=lambda node: node.level_id == level+1 and node.agg_id == matched_id)
                except ValueError:
                    missing_node_link = True
                    print('Missing node link: agg_id={}, level_id={}'.format(node_info.to_agg_id, level+1))
            # Set node names based on attrs in FM files
            if level >= 3:
                node_name = "policy term {} \nlevel: {}".format(
                    node_info.to_agg_id,
                    node_info.level_id
                )
            elif level == 2:
                node_name = "loc term {} ".format(node_info.to_agg_id)
            else:
                node_name = "cov term {}".format(node_info.to_agg_id)
            # Append each layer's calc-rule summary to the node label.
            for policy in policy_list:
                node_name += "\n\nlayer_id: {}".format(policy.layer_id)
                for rule in policy.calc_rules:
                    if rule.is_step:
                        node_name += "\n policytc_id {}: step_rule:{}, start:{} end:{}".format(
                            rule.policytc_id,
                            rule.calcrule_id,
                            rule.trig_start,
                            rule.trig_end
                        )
                    else:
                        node_name += "\npolicytc_id: {} \ncalc_rule: {}".format(
                            rule.policytc_id,
                            rule.calcrule_id,
                        )
            # Create Node in FM tree
            node = anytree.Node(
                node_name,
                agg_id=node_info.to_agg_id,
                level_id=level,
                parent=parent_node,
                layer_max=layer_max,
                policy_tc=policy_list,
            )
    # Add item level data
    item_agg_idx = list(fm_summary_df[['agg_id']].drop_duplicates().index)
    for item in item_agg_idx:
        item_info = fm_summary_df.iloc[item]
        matched_id = fm_programme_df.loc[(fm_programme_df.level_id == 1) & (fm_programme_df.from_agg_id == item_info.agg_id)].to_agg_id.item()
        parent_node = find(root, filter_=lambda node: node.level_id == 1 and node.agg_id == matched_id)
        node_name = "\n".join([
            "item {}\n".format(int(item_info.agg_id)),
            "locnumber: {}".format(item_info.locnumber),
            "accnumber: {}".format(item_info.accnumber),
            "polnumber: {}".format(item_info.polnumber),
            "portnumber: {}".format(item_info.portnumber),
            "cov_type: {}".format(item_info.coverage_type_id),
            "peril_id: {}".format(item_info.peril_id),
            "tiv: {}".format(item_info.tiv),
        ])
        node = anytree.Node(
            node_name,
            agg_id=item_info.agg_id,
            level_id=0,
            parent=parent_node,
            locnumber=item_info.locnumber,
            accnumber=item_info.accnumber,
            polnumber=item_info.polnumber,
            # BUGFIX: was portnumber=item_info.polnumber (copy/paste error;
            # the node label above correctly uses item_info.portnumber).
            portnumber=item_info.portnumber,
            tiv=item_info.tiv,
            coverage_id=item_info.coverage_id,
            coverage_type=item_info.coverage_type_id,
            peril_id=item_info.peril_id,
        )
    return root, missing_node_link
def render_fm_tree(root_node, filename='tree.png'):
    """Render the FM tree to a picture via anytree's Graphviz exporter.

    Args:
        root_node: root ``anytree.Node`` produced by ``create_fm_tree``.
        filename: output image path; format is inferred from the extension.
    """
    # Function to format nodes in FM tree
    def format_box(node):
        # https://graphviz.org/doc/info/shapes.html
        if node.level_id == 0:
            # Item Level Node
            return "fixedsize=false, shape=rect, fillcolor=lightgrey, style=filled"
        else:
            if not node.policy_tc:
                # Error? missing policy_tc entry for this Node
                return "fixedsize=false, shape=ellipse, fillcolor=pink, style=filled"
            elif len(node.policy_tc) > 1:
                # Node with multiple layers
                return "fixedsize=false, shape=rect, fillcolor=orange, style=filled"
            else:
                # Cov or loc nodes
                return "fixedsize=false, shape=ellipse, fillcolor=lightblue, style=filled"

    # Function to add weighted 'by layer number' edges
    def layered_edge(node, child):
        # https://anytree.readthedocs.io/en/latest/tricks/weightededges.html
        if hasattr(child, 'layer_max'):
            if child.layer_max > 1:
                return 'dir=back, style=bold, label=" {} Layers"'.format(child.layer_max)
        return "dir=back"

    # Render tree to png
    dot_data = DotExporter(
        root_node,
        edgeattrfunc=layered_edge,
        nodeattrfunc=format_box)
    dot_data.to_picture(filename)
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
import os
from Crypto import Random
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from common import xJiPZbUzlGCIdemowYnQNONypdeudgmd, ckAjUaLEXnferbefRGpQeOZRysoqlffQ
FFVGFOvcuiKjdGKFcTRNoKJcuBaGjGEf = 'b14ce95fa4c33ac2803782d18341869f'
class LVPFsEGShJELnCwtpptaZvXDbVmShyns(Exception):
    """Raised when PKCS#7 padding validation fails (obfuscated name)."""
def NmtIKYiMrjhKpKqWnTKDAJlAKWDTPVIy(OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp, FRIUnJhVUpQceKKKwrGdGufEFeSRdAAs=AES.block_size):
    """PKCS#7-pad a string up to a multiple of the block size.

    A full block of padding is appended when the input length is already
    a multiple of the block size.
    """
    pad_len = FRIUnJhVUpQceKKKwrGdGufEFeSRdAAs - len(OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp) % FRIUnJhVUpQceKKKwrGdGufEFeSRdAAs
    return OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp + chr(pad_len) * pad_len
def GwAonSsUlHwhDnYTFlqQhOKBVLcSheYV(OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp):
    """Validate and strip PKCS#7 padding from a string.

    Raises the module's padding exception when the trailing bytes do not
    form a valid PKCS#7 pad.
    """
    last_char = OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp[-1]
    pad_len = ord(last_char)
    if not OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp.endswith(last_char * pad_len):
        raise LVPFsEGShJELnCwtpptaZvXDbVmShyns("PKCS7 improper padding {}".format(repr(OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp[-32:])))
    return OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp[:-pad_len]
def ZHmaXjmaptcjOuQWzIYmNcRFyCaggAdR(sock, server=True, bits=2048):
    """Anonymous Diffie-Hellman key agreement over *sock*.

    Returns SHA256(shared secret).digest(), a 32-byte session key.
    NOTE(review): the exchange is unauthenticated (MITM-able), and *bits*
    is accepted but never used — the group is fixed at 2048 bits.
    """
    # p: the 2048-bit MODP prime (appears to be RFC 3526 group 14 — verify); g = 2.
    gFsSukpmrcgWJfrmLhgayqqAVmsbyWUi = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF;
    adZcabjFPtBsWTrudsVIVsDshBtgiUsQ = 2
    # Private exponent from 32 bytes of OS entropy (helper converts bytes -> int).
    adrCPWbaRPWXrGfZTxfXAtVEFHttGCMF = ckAjUaLEXnferbefRGpQeOZRysoqlffQ(os.urandom(32))
    # Own public value: g^a mod p.
    DPSXmPpMUYVABlOJsKMoPltwubSXAvTt = pow(adZcabjFPtBsWTrudsVIVsDshBtgiUsQ, adrCPWbaRPWXrGfZTxfXAtVEFHttGCMF, gFsSukpmrcgWJfrmLhgayqqAVmsbyWUi)
    if server:
        # Server sends its public value first, then reads the peer's.
        sock.send(xJiPZbUzlGCIdemowYnQNONypdeudgmd(DPSXmPpMUYVABlOJsKMoPltwubSXAvTt))
        b = ckAjUaLEXnferbefRGpQeOZRysoqlffQ(sock.recv(4096))
    else:
        # Client reads first, then sends.
        b = ckAjUaLEXnferbefRGpQeOZRysoqlffQ(sock.recv(4096))
        sock.send(xJiPZbUzlGCIdemowYnQNONypdeudgmd(DPSXmPpMUYVABlOJsKMoPltwubSXAvTt))
    # Shared secret: B^a mod p, hashed down to a fixed-size key.
    OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp = pow(b, adrCPWbaRPWXrGfZTxfXAtVEFHttGCMF, gFsSukpmrcgWJfrmLhgayqqAVmsbyWUi)
    return SHA256.new(xJiPZbUzlGCIdemowYnQNONypdeudgmd(OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp)).digest()
def gcbCoqAgZztElhuzHlCRVsaXiDmrxjeQ(vKpdSqEzIxjyGuerXAIFZMYvfWbEQyYx, KEY):
    """AES-CBC encrypt: returns IV || ciphertext of the PKCS#7-padded plaintext."""
    # Pad plaintext to the AES block size.
    vKpdSqEzIxjyGuerXAIFZMYvfWbEQyYx = NmtIKYiMrjhKpKqWnTKDAJlAKWDTPVIy(vKpdSqEzIxjyGuerXAIFZMYvfWbEQyYx)
    # Fresh random IV per message.
    VMJrviwEuDZCQAKsDTVcZeWibgUxuOuU = Random.new().read(AES.block_size)
    omtoDzoHpDngJrwwtkYkmrGaWwwGAHVK = AES.new(KEY, AES.MODE_CBC, VMJrviwEuDZCQAKsDTVcZeWibgUxuOuU)
    # Prepend the IV so the receiver can initialise decryption.
    return VMJrviwEuDZCQAKsDTVcZeWibgUxuOuU + omtoDzoHpDngJrwwtkYkmrGaWwwGAHVK.encrypt(vKpdSqEzIxjyGuerXAIFZMYvfWbEQyYx)
def zcqgzrNhhMMrepGKrXzOYcYeaRymVspf(ciphertext, KEY):
    """AES-CBC decrypt the IV || ciphertext layout produced by the encryptor above."""
    # First block is the IV.
    VMJrviwEuDZCQAKsDTVcZeWibgUxuOuU = ciphertext[:AES.block_size]
    omtoDzoHpDngJrwwtkYkmrGaWwwGAHVK = AES.new(KEY, AES.MODE_CBC, VMJrviwEuDZCQAKsDTVcZeWibgUxuOuU)
    vKpdSqEzIxjyGuerXAIFZMYvfWbEQyYx = omtoDzoHpDngJrwwtkYkmrGaWwwGAHVK.decrypt(ciphertext[AES.block_size:])
    # Strip PKCS#7 padding; raises on corruption/wrong key.
    return GwAonSsUlHwhDnYTFlqQhOKBVLcSheYV(vKpdSqEzIxjyGuerXAIFZMYvfWbEQyYx)
| nilq/baby-python | python |
#!/usr/bin/env python3
import os
import re
import sys
# Raw GLSL precision qualifiers that are forbidden in source shaders
# (authors must use the LOW_P/MEDIUM_P/HIGH_P macros instead).
LOWP_SEARCH = "lowp"
MEDIUMP_SEARCH = "mediump"
HIGHP_SEARCH = "highp"
# File-name suffixes identifying vertex and fragment shaders.
VERTEX_SHADER_EXT = ".vsh.glsl"
FRAG_SHADER_EXT = ".fsh.glsl"
# Prefixes for generated GLES3 variants (C identifier / file name).
GLES3_PREFIX = "GLES3_"
GLES3_SHADER_PREFIX = "gles3_"
# Section markers inside the shared shaders-library file, and the indices
# of the corresponding sections in the parsed [common, vs, fs] list.
SHADERS_LIB_COMMON_PATTERN = "// Common"
SHADERS_LIB_VS_PATTERN = "// VS"
SHADERS_LIB_FS_PATTERN = "// FS"
SHADERS_LIB_COMMON_INDEX = 0
SHADERS_LIB_VS_INDEX = 1
SHADERS_LIB_FS_INDEX = 2
def format_shader_source_name(shader_file_name):
    """Turn a shader file name into a C identifier: drop '.glsl', dots -> underscores, uppercase."""
    return shader_file_name.replace(".glsl", "").replace(".", "_").upper()
def read_index_file(file_path, programs_order):
    """Parse the GPU-program index file.

    Each line is "<program> <x>.vsh.glsl <y>.fsh.glsl"; programs must appear
    in the same order as *programs_order*. Returns a dict mapping line index
    to (vertex_shader, fragment_shader, program_name). Exits the process on
    any malformed input.
    """
    gpu_programs = dict()
    with open(file_path, 'r') as f:
        index = 0
        for line in f:
            line_parts = line.strip().split()
            if len(line_parts) != 3:
                print("Incorrect GPU program definition : " + line)
                exit(10)
            if line_parts[0] != programs_order[index]:
                print("Incorrect GPU program order or name : " + line)
                exit(11)
            # Use next(..., None) so a missing shader reaches the error branches
            # below; the original bare next() raised StopIteration first, making
            # the "not found" checks unreachable.
            vertex_shader = next((p for p in line_parts if p.endswith(VERTEX_SHADER_EXT)), None)
            fragment_shader = next((p for p in line_parts if p.endswith(FRAG_SHADER_EXT)), None)
            if not vertex_shader:
                print("Vertex shader not found in GPU program definition : " + line)
                exit(12)
            if not fragment_shader:
                print("Fragment shader not found in GPU program definition : " + line)
                exit(13)
            if line_parts[0] in gpu_programs.keys():
                print("More than one definition of %s gpu program" % line_parts[0])
                exit(14)
            gpu_programs[index] = (vertex_shader, fragment_shader, line_parts[0])
            index += 1
    return gpu_programs
def read_programs_file(file_path):
    """Extract program names from the C++ 'enum class Program' block in *file_path*.

    Skips the opening brace line and the 'ProgramsCount' sentinel; stops at '}'.
    """
    gpu_programs = []
    with open(file_path, 'r') as f:
        inside_enum = False
        for line in f:
            if not inside_enum:
                if line.find('enum class Program') >= 0:
                    inside_enum = True
                continue
            if line.find('}') >= 0:
                break
            if line.find('{') >= 0:
                continue
            name = re.split(',|=', line)[0].strip()
            if name and name != 'ProgramsCount':
                gpu_programs.append(name)
    return gpu_programs
def read_shaders_lib_file(file_path):
    """Split the shaders library file into its [common, vertex, fragment] sections.

    Sections are delimited by the '// Common', '// VS' and '// FS' markers and
    must appear in that order. Returns ['', '', ''] for an empty file; exits
    the process on a malformed one.
    """
    shaders_library = ['', '', '']
    with open(file_path, 'r') as f:
        shaders_lib_content = f.read()
    if len(shaders_lib_content) == 0:
        return shaders_library
    common_index = shaders_lib_content.find(SHADERS_LIB_COMMON_PATTERN)
    if common_index < 0:
        print("Common functions block is not found in " + file_path)
        exit(14)
    vs_index = shaders_lib_content.find(SHADERS_LIB_VS_PATTERN)
    if vs_index < 0:
        print("Vertex shaders functions block is not found in " + file_path)
        exit(15)
    fs_index = shaders_lib_content.find(SHADERS_LIB_FS_PATTERN)
    if fs_index < 0:
        # Fixed: this message previously said "Vertex" for the fragment block.
        print("Fragment shaders functions block is not found in " + file_path)
        exit(16)
    if not (common_index < vs_index < fs_index):
        print("Order of functions block is incorrect in " + file_path)
        exit(17)
    # Slice up to (not including) the newline before the next marker.
    shaders_library[SHADERS_LIB_COMMON_INDEX] = shaders_lib_content[common_index:vs_index - 1]
    shaders_library[SHADERS_LIB_VS_INDEX] = shaders_lib_content[vs_index:fs_index - 1]
    shaders_library[SHADERS_LIB_FS_INDEX] = shaders_lib_content[fs_index:]
    return shaders_library
def generate_shader_indexes(shaders):
    """Map each shader file name to its position in the *shaders* sequence."""
    return {name: position for position, name in enumerate(shaders)}
def write_definition_file(defines_file, generation_dir):
    """Write the generated C++ header (declarations only) into *generation_dir*."""
    content = (
        "#pragma once\n\n"
        "#include \"shaders/programs.hpp\"\n"
        "#include \"shaders/gl_program_info.hpp\"\n\n"
        "#include \"drape/drape_global.hpp\"\n\n"
        "namespace gpu\n"
        "{\n"
        "extern char const * GL3_SHADER_VERSION;\n"
        "extern char const * GLES3_SHADER_VERSION;\n\n"
        "extern GLProgramInfo GetProgramInfo(dp::ApiVersion apiVersion, Program program);\n"
        "} // namespace gpu\n"
    )
    with open(os.path.join(generation_dir, defines_file), 'w') as output_file:
        output_file.write(content)
def write_shader_gles_header(output_file):
    """Emit the GLES precision-macro preamble, one C string-literal line per write."""
    header_lines = (
        "  #ifdef GL_ES \\n\\\n",
        "  #ifdef GL_FRAGMENT_PRECISION_HIGH \\n\\\n",
        "  #define MAXPREC highp \\n\\\n",
        "  #else \\n\\\n",
        "  #define MAXPREC mediump \\n\\\n",
        "  #endif \\n\\\n",
        "  precision MAXPREC float; \\n\\\n",
        "  #define LOW_P lowp \\n\\\n",
        "  #define MEDIUM_P mediump \\n\\\n",
        "  #define HIGH_P highp \\n\\\n",
        "  #else \\n\\\n",
        "  #define LOW_P \\n\\\n",
        "  #define MEDIUM_P \\n\\\n",
        "  #define HIGH_P \\n\\\n",
        "  #endif \\n\\\n",
    )
    for header_line in header_lines:
        output_file.write(header_line)
def get_shaders_lib_content(shader_file, shaders_library):
    """Return the common helpers plus the VS- or FS-specific helpers for *shader_file*."""
    lib_content = shaders_library[SHADERS_LIB_COMMON_INDEX]
    if VERTEX_SHADER_EXT in shader_file:
        lib_content += shaders_library[SHADERS_LIB_VS_INDEX]
    elif FRAG_SHADER_EXT in shader_file:
        lib_content += shaders_library[SHADERS_LIB_FS_INDEX]
    return lib_content
def write_shader_line(output_file, line, convert_to_gles3, is_fragment_shader):
    """Emit one shader source line as a C string-literal continuation line.

    Skips comments/blank lines, aborts on raw precision qualifiers, and
    optionally rewrites GLSL 1.x keywords to their GLES3 equivalents.
    """
    if line.lstrip().startswith("//") or line == '\n' or len(line) == 0:
        return
    # Raw qualifiers are banned: shaders must use the portable macros.
    for qualifier, macro in ((LOWP_SEARCH, "LOW_P"), (MEDIUMP_SEARCH, "MEDIUM_P"), (HIGHP_SEARCH, "HIGH_P")):
        if line.find(qualifier) >= 0:
            print("Incorrect shader. Do not use %s in shader, use %s instead." % (qualifier, macro))
            exit(2)
    output_line = line.rstrip()
    if convert_to_gles3:
        output_line = output_line.replace("attribute", "in")
        varying_target = "in" if is_fragment_shader else "out"
        output_line = output_line.replace("varying", varying_target)
        output_line = output_line.replace("texture2D", "texture")
        output_line = output_line.replace("gl_FragColor", "v_FragColor")
    output_file.write("  %s \\n\\\n" % output_line)
def write_shader_body(output_file, shader_file, shader_dir, shaders_library, convert_to_gles3):
    """Copy a shader's source into *output_file* as a C string literal body.

    The shared library helpers (and, for GLES3 fragment shaders, the
    v_FragColor output declaration) are injected just before 'void main'.
    Closes the string literal with '";' at the end.
    """
    is_fragment_shader = shader_file.find(FRAG_SHADER_EXT) >= 0
    lib_content = get_shaders_lib_content(shader_file, shaders_library)
    for line in open(os.path.join(shader_dir, shader_file)):
        if line.lstrip().startswith("void main"):
            # Inject library helpers right before the entry point.
            for lib_line in lib_content.splitlines():
                write_shader_line(output_file, lib_line, convert_to_gles3, is_fragment_shader)
            if convert_to_gles3 and is_fragment_shader:
                # GLES3 has no gl_FragColor; declare the replacement output.
                output_file.write("  out vec4 v_FragColor; \\n\\\n")
        write_shader_line(output_file, line, convert_to_gles3, is_fragment_shader)
    output_file.write("\";\n\n")
def write_shader(output_file, shader_file, shader_dir, shaders_library):
    """Emit the GLES2 variant of *shader_file* as a named C string constant."""
    output_file.write("char const %s[] = \" \\\n" % (format_shader_source_name(shader_file)))
    write_shader_gles_header(output_file)
    write_shader_body(output_file, shader_file, shader_dir, shaders_library, False)
def write_gles3_shader(output_file, shader_file, shader_dir, shaders_library):
    """Emit the GLES3 variant as a GLES3_-prefixed C string constant.

    A hand-written 'gles3_' source file takes precedence; otherwise the GLES2
    source is converted automatically (convert_to_gles3=True).
    """
    output_file.write("char const %s[] = \" \\\n" % (GLES3_PREFIX + format_shader_source_name(shader_file)))
    write_shader_gles_header(output_file)
    if os.path.exists(os.path.join(shader_dir, GLES3_SHADER_PREFIX + shader_file)):
        write_shader_body(output_file, GLES3_SHADER_PREFIX + shader_file, shader_dir, shaders_library, False)
    else:
        write_shader_body(output_file, shader_file, shader_dir, shaders_library, True)
def write_gpu_programs_map(file, programs_def, source_prefix):
    """Write one GLProgramInfo(...) initializer row per GPU program.

    *source_prefix* selects the shader-constant naming scheme ('' or GLES3_).
    """
    for program in programs_def:
        vertex_source_name = source_prefix + format_shader_source_name(programs_def[program][0])
        fragment_source_name = source_prefix + format_shader_source_name(programs_def[program][1])
        file.write("    GLProgramInfo(\"%s\", \"%s\", %s, %s),\n" % (
            vertex_source_name, fragment_source_name, vertex_source_name, fragment_source_name))
def write_implementation_file(programs_def, shader_index, shader_dir, impl_file, def_file, generation_dir,
                              shaders_library):
    """Generate the .cpp with all shader sources and the GetProgramInfo dispatch.

    Emits every shader twice (GLES2 + GLES3 constants), then a lookup table
    per API version indexed by the Program enum value.
    """
    with open(os.path.join(generation_dir, impl_file), 'w') as file:
        file.write("#include \"shaders/%s\"\n\n" % (def_file))
        file.write("#include \"base/assert.hpp\"\n\n")
        file.write("#include \"std/target_os.hpp\"\n\n")
        file.write("#include <array>\n\n")
        file.write("namespace gpu\n")
        file.write("{\n")
        file.write("char const * GL3_SHADER_VERSION = \"#version 150 core \\n\";\n")
        file.write("char const * GLES3_SHADER_VERSION = \"#version 300 es \\n\";\n\n")
        # Both variants of every shader, as C string constants.
        for shader in shader_index.keys():
            write_shader(file, shader, shader_dir, shaders_library)
            write_gles3_shader(file, shader, shader_dir, shaders_library)
        file.write("GLProgramInfo GetProgramInfo(dp::ApiVersion apiVersion, Program program)\n")
        file.write("{\n")
        file.write("  if (apiVersion == dp::ApiVersion::OpenGLES2)\n")
        file.write("  {\n")
        file.write("    static std::array<GLProgramInfo, static_cast<size_t>(Program::ProgramsCount)> gpuIndex = {{\n")
        write_gpu_programs_map(file, programs_def, '')
        file.write("    }};\n")
        file.write("    return gpuIndex[static_cast<size_t>(program)];\n")
        file.write("  }\n")
        file.write("  else if (apiVersion == dp::ApiVersion::OpenGLES3)\n")
        file.write("  {\n")
        file.write("    static std::array<GLProgramInfo, static_cast<size_t>(Program::ProgramsCount)> gpuIndex = {{\n")
        # Same table, but referencing the GLES3_-prefixed shader constants.
        write_gpu_programs_map(file, programs_def, GLES3_PREFIX)
        file.write("    }};\n")
        file.write("    return gpuIndex[static_cast<size_t>(program)];\n")
        file.write("  }\n")
        file.write("  CHECK(false, (\"Unsupported API version.\"));\n")
        file.write("  return {};\n")
        file.write("}\n")
        file.write("} // namespace gpu\n")
if __name__ == '__main__':
    # argv[1..6] are all required, so argv needs at least 7 entries.
    # (The original checked `< 6`, which let a missing 6th argument crash
    # on the sys.argv[6] access below instead of printing usage.)
    if len(sys.argv) < 7:
        print("Usage : " + sys.argv[0] + " <shader_dir> <index_file> <programs_file> <shaders_lib> <generation_dir> <generated_file>")
        exit(1)
    shader_dir = sys.argv[1]
    index_file_name = sys.argv[2]
    programs_file_name = sys.argv[3]
    shaders_lib_file = sys.argv[4]
    generation_dir = sys.argv[5]
    defines_file = sys.argv[6] + ".hpp"
    impl_file = sys.argv[6] + ".cpp"
    # Every vertex/fragment shader file in the shader directory.
    shaders = [file for file in os.listdir(shader_dir) if
               os.path.isfile(os.path.join(shader_dir, file)) and (
                   file.endswith(VERTEX_SHADER_EXT) or file.endswith(FRAG_SHADER_EXT))]
    shaderIndex = generate_shader_indexes(shaders)
    programs_order = read_programs_file(os.path.join(shader_dir, '..', programs_file_name))
    programDefinition = read_index_file(os.path.join(shader_dir, index_file_name), programs_order)
    shaders_library = read_shaders_lib_file(os.path.join(shader_dir, shaders_lib_file))
    write_definition_file(defines_file, generation_dir)
    write_implementation_file(programDefinition, shaderIndex, shader_dir, impl_file, defines_file, generation_dir,
                              shaders_library)
| nilq/baby-python | python |
#!/usr/bin/env python3.8
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import platform
import sys
def main():
    """Compare the SDK manifest's atom ids against the golden API file.

    Writes the current id list to --updated, diffs it against --reference,
    and prints any additions/removals. Returns 1 on changes (unless --warn),
    otherwise 0.
    """
    parser = argparse.ArgumentParser()
    # TODO(fxbug.dev/5535): make this argument required.
    parser.add_argument(
        '--reference', help='Path to the golden API file', required=False)
    parser.add_argument(
        '--manifest', help='Path to the SDK manifest', required=True)
    parser.add_argument(
        '--updated', help='Path to the API file to compute', required=True)
    parser.add_argument(
        '--warn',
        help='Whether API changes should only cause warnings',
        action='store_true')
    args = parser.parse_args()
    if not args.reference:
        # Nothing to do.
        with open(args.updated, 'w') as updated_file:
            updated_file.write('No API verification for this SDK :/')
        return 0
    with open(args.manifest, 'r') as manifest_file:
        manifest = json.load(manifest_file)
    ids = [a['id'] for a in manifest['atoms']]
    # Ignore images which are very architecture-dependent.
    # TODO(fxbug.dev/5824): remove this exception when obsolete.
    ids = [i for i in ids if not i.startswith('sdk://images')]
    # The updated file records the ids before the fidl/zx exclusion below.
    with open(args.updated, 'w') as updated_file:
        updated_file.write('\n'.join(ids))
    with open(args.reference, 'r') as reference_file:
        old_ids = [l.strip() for l in reference_file.readlines()]
    # tools/arm64 should not exist on mac hosts
    # TODO(fxbug.dev/42999): remove when SDK transition is complete.
    if platform.mac_ver()[0]:
        old_ids = [i for i in old_ids if not i.startswith('sdk://tools/arm64')]
    # Zircon FIDL libraries are verified separately; drop them from both sides.
    # (Fixed: the original applied this filter twice in a row.)
    new_id_set = {i for i in ids if not i.startswith('sdk://fidl/zx')}
    old_id_set = {i for i in old_ids if not i.startswith('sdk://fidl/zx')}
    added_ids = new_id_set - old_id_set
    removed_ids = old_id_set - new_id_set
    if added_ids:
        print('Elements added to SDK:')
        for atom_id in sorted(added_ids):
            print(' - %s' % atom_id)
    if removed_ids:
        print('Elements removed from SDK:')
        for atom_id in sorted(removed_ids):
            print(' - %s' % atom_id)
    if removed_ids or added_ids:
        # 'kind' instead of 'type' to avoid shadowing the builtin.
        kind = 'Warning' if args.warn else 'Error'
        print('%s: SDK contents have changed!' % kind)
        print('Please acknowledge this change by running:')
        print(
            ' cp ' + os.path.abspath(args.updated) + ' ' +
            os.path.abspath(args.reference))
        if not args.warn:
            return 1
    return 0
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| nilq/baby-python | python |
import filecmp
import os
def exists(path):
    """Return True if *path* names an existing regular file (not a directory)."""
    return os.path.isfile(path)
class FileApprover(object):
    """Compares a freshly written "received" file against the "approved" baseline."""

    def verify(self, namer, writer, reporter):
        """Write the received file and compare it against the approved one.

        Returns None on a match, or the error string "Approval Mismatch".
        """
        base = namer.get_basename()
        approved_path = namer.get_approved_filename(base)
        received_path = namer.get_received_filename(base)
        writer.write_received_file(received_path)
        if self.verify_files(approved_path, received_path, reporter):
            return None
        return "Approval Mismatch"

    def verify_files(self, approved_file, received_file, reporter):
        """True if the files match (received file is removed); else report and keep it."""
        if not self.are_files_the_same(approved_file, received_file):
            reporter.report(received_file, approved_file)
            return False
        os.remove(received_file)
        return True

    @staticmethod
    def are_files_the_same(approved_file, received_file):
        """Byte-for-byte equality; missing files or differing sizes short-circuit to False."""
        if not (exists(approved_file) and exists(received_file)):
            return False
        if os.stat(approved_file).st_size != os.stat(received_file).st_size:
            return False
        return filecmp.cmp(approved_file, received_file)
| nilq/baby-python | python |
import socket
import serial
from config import DEFAULT_VELOCITY
from config import TIME_INTERVAL
import time
import math
import os
class Robot:
    """Drives a two-wheel robot over Bluetooth (POSIX) or a serial COM port."""

    def __init__(self, mac: str, color: str, com: str):
        # POSIX hosts talk RFCOMM Bluetooth directly; other OSes use pyserial.
        if os.name == 'posix':
            self.socket = socket.socket(
                socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
            print("Input 1234 code in system bluetooth window")
            self.socket.connect((mac, 1))
        else:
            self.serial = serial.Serial(com)
        self.last_update = time.time()
        self.color = color
        # Session start time and default rider profile.
        self.user_time = time.time()
        self.user_age = 15
        self.user_id = 0

    def send_speed_command(self, left, right):
        """Transmit a '[=left,right]' wheel-speed frame and stamp the send time."""
        frame = '[={},{}]'.format(left, right)
        payload = bytes(frame, 'UTF-8')
        if os.name == 'posix':
            self.socket.send(payload)
        else:
            self.serial.write(payload)
        self.last_update = time.time()
        print(frame)

    def forward(self):
        speed = self.velocity()
        self.send_speed_command(speed, speed)

    def reverse(self):
        speed = self.velocity()
        self.send_speed_command(-speed, -speed)

    def left(self):
        # Stop the left wheel; drive the right one at half speed.
        self.send_speed_command(0, math.ceil(self.velocity() / 2))

    def right(self):
        self.send_speed_command(math.ceil(self.velocity() / 2), 0)

    def stop(self):
        self.send_speed_command(0, 0)

    def is_time_exceeded(self):
        """True when the last command is older than TIME_INTERVAL seconds."""
        return (time.time() - self.last_update) > TIME_INTERVAL

    def age(self):
        """Seconds elapsed since the user session started."""
        return time.time() - self.user_time

    def velocity(self):
        """Speed limit scaled by rider age: half below 10, full above 20, linear between."""
        if self.user_age < 10:
            return math.ceil(DEFAULT_VELOCITY / 2)
        if self.user_age > 20:
            return DEFAULT_VELOCITY
        return math.ceil(self.user_age / 20 * DEFAULT_VELOCITY)
| nilq/baby-python | python |
# THIS FILE IS GENERATED FROM KIVY SETUP.PY
# Build-time version metadata; do not edit by hand.
__version__ = '1.11.0.dev0'
# 40-hex revision identifier — presumably the git commit of this snapshot.
__hash__ = '9b90467ec9efea3891e07be92c9bb4ba638a7ca0'
# Build date as YYYYMMDD.
__date__ = '20190329'
| nilq/baby-python | python |
# Tara O'Kelly - G00322214
# Emerging Technologies, Year 4, Software Development, GMIT.
# Problem set: Python fundamentals
# 6. Write a function that returns the largest and smallest elements in a list.

# Ask how many numbers to read, then collect exactly that many integers.
n = int(input('How many numbers: '))
user_list = [int(input('Enter number: \n')) for _ in range(n)]

# use min and max functions
# https://docs.python.org/3/library/functions.html#max
# https://docs.python.org/3/library/functions.html#min
print("Largest element in the list is :", max(user_list), "\nSmallest element in the list is :", min(user_list))
#%%
import numpy as np
import pandas as pd

# Load the raw plate-reader export (one column per cycle reading).
data = pd.read_csv('./input/2021-02-11_REL606_NCM3722_diauxie.csv')

# Long-format: one row per (well-or-meta row, cycle) pair.
melted = data.melt('Cycle Nr.')

# Pull out the per-cycle timestamps, ordered by cycle column.
# NOTE(review): sort_values(inplace=True) on a filtered slice triggers
# pandas SettingWithCopy warnings — confirm the values land as intended.
time = melted[melted['Cycle Nr.']=='Time [s]']
time.sort_values(by='variable', inplace=True)
time = time['value'].values

# Same for the per-cycle temperatures.
temp = melted[melted['Cycle Nr.']=='Temp. [°C]']
temp.sort_values(by='variable', inplace=True)
temp = temp['value'].values

# Everything that is not a Time/Temp row is an OD reading for a well.
dfs = []
_melted = melted[(melted['Cycle Nr.'] != 'Time [s]') &
                 (melted['Cycle Nr.'] != 'Temp. [°C]')]
for g, d in _melted.groupby(['Cycle Nr.']):
    # Align each well's readings with the time/temp vectors by cycle order.
    d.sort_values(by='variable', inplace=True)
    d['time_s'] = time
    d['temp_C'] = temp
    d.rename(columns={'Cycle Nr.': 'well',
                      'value':'od_600nm'}, inplace=True)
    d.drop(columns=['variable'], inplace=True)
    dfs.append(d)
tidy = pd.concat(dfs, sort=False)

# Annotate each well with strain and growth medium; unused wells stay 'blank'.
tidy['strain'] = 'blank'
tidy['medium'] = 'blank'
for n in range(4, 10):
    # Columns 4-6 vs 7-9 received different glucose concentrations.
    if n <= 6:
        medium = '10 mM glucose + 30 mM acetate'
    else:
        medium = '0.61 mM glucose + 30 mM acetate'
    # Row D = NCM3722, row E = REL606.
    for letter, strain in zip(['D', 'E'], ['NCM3722', 'REL606']):
        tidy.loc[tidy['well'] == f'{letter}{n}', 'strain'] = strain
        tidy.loc[tidy['well'] == f'{letter}{n}', 'medium'] = medium

# Number the replicates (1..k) within each strain/medium combination.
for g, d in tidy.groupby(['strain', 'medium']):
    mapper = {w:r + 1 for r, w in enumerate(d['well'].unique())}
    for k, v in mapper.items():
        tidy.loc[tidy['well']==k, 'replicate'] = v
tidy['replicate'] = tidy['replicate'].astype(int)

# Save the tidy dataframe to disk for further processing
tidy.to_csv('./output/2021-02-11_NCM_REL_diauxie_tidy.csv', index=False)
# %%
# %%
| nilq/baby-python | python |
"""Library for CIM sparql queries"""
__version__ = "1.9.0"
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
Script to make test
"""
from indeed import params
def test_indeed_params():
    # params() appears to echo its (username, password) pair back unchanged —
    # TODO confirm against the indeed module's implementation.
    assert params('my_username', 'my_password') == ('my_username', 'my_password')
    assert params('your_username', 'your_password') == ('your_username', 'your_password')
| nilq/baby-python | python |
#
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#
import mount_efs
import os
import pytest
from datetime import datetime
from mock import MagicMock
try:
import ConfigParser
except ImportError:
from configparser import ConfigParser
# Fixed fake identifiers and credentials shared by every test below.
FS_ID = 'fs-deadbeef'
AP_ID = 'fsap-fedcba9876543210'
REGION = 'us-east-1'
COMMON_NAME = 'fs-deadbeef.efs.us-east-1.amazonaws.com'
MOUNT_NAME = 'fs-deadbeef.mount.dir.12345'
ACCESS_KEY_ID_VAL = 'FAKE_AWS_ACCESS_KEY_ID'
SECRET_ACCESS_KEY_VAL = 'FAKE_AWS_SECRET_ACCESS_KEY'
SESSION_TOKEN_VAL = 'FAKE_SESSION_TOKEN'
CREDENTIALS = {
    'AccessKeyId': ACCESS_KEY_ID_VAL,
    'SecretAccessKey': SECRET_ACCESS_KEY_VAL,
    'Token': SESSION_TOKEN_VAL
}
# Frozen timestamp so SigV4 signing output is deterministic.
FIXED_DT = datetime(2000, 1, 1, 12, 0, 0)


@pytest.fixture(autouse=True)
def setup_method(mocker):
    """Auto-applied fixture: pin region, AWS credentials, clock and DNS lookups."""
    mocker.patch('mount_efs.get_region', return_value=REGION)
    mocker.patch('mount_efs.get_region_helper', return_value=REGION)
    mocker.patch('mount_efs.get_aws_security_credentials', return_value=CREDENTIALS)
    mocker.patch('mount_efs.get_utc_now', return_value=FIXED_DT)
    mocker.patch('socket.gethostbyname')
def _get_config():
    """Build a real ConfigParser seeded with the mount_efs section (py2/py3)."""
    try:
        # Python 2 path: the imported name is the ConfigParser module.
        config = ConfigParser.SafeConfigParser()
    except AttributeError:
        # Python 3 path: ConfigParser is the class itself.
        config = ConfigParser()
    config.add_section(mount_efs.CONFIG_SECTION)
    config.set(mount_efs.CONFIG_SECTION, 'state_file_dir_mode', '750')
    config.set(mount_efs.CONFIG_SECTION, 'dns_name_format', '{fs_id}.efs.{region}.amazonaws.com')
    return config


def _get_ca_conf_body(config_path, common_name, directory, private_key, date, region, fs_id, iam, ap_id):
    """Expected openssl CA config body, generated by the code under test itself."""
    ca_conf_str = mount_efs.create_ca_conf(config_path, common_name, directory, private_key, date, region, fs_id, iam, ap_id)
    return ca_conf_str


def _get_mock_config(dns_name_format='{fs_id}.efs.{region}.amazonaws.com'):
    """MagicMock standing in for a ConfigParser; only .get() is wired up."""
    def config_get_side_effect(section, field):
        if section == mount_efs.CONFIG_SECTION and field == 'state_file_dir_mode':
            return '0755'
        elif section == mount_efs.CONFIG_SECTION and field == 'dns_name_format':
            return dns_name_format
        else:
            raise ValueError('Unexpected arguments')

    mock_config = MagicMock()
    mock_config.get.side_effect = config_get_side_effect
    return mock_config


def _get_mock_private_key_path(mocker, tmpdir):
    """Patch get_private_key_path to a per-test temp location and return it."""
    pk_path = os.path.join(str(tmpdir), 'privateKey.pem')
    mocker.patch('mount_efs.get_private_key_path', return_value=pk_path)
    return pk_path
def test_certificate_without_iam_with_ap_id(mocker, tmpdir):
    """No IAM: CSR/cert are created but no public key is written."""
    config = _get_mock_config()
    pk_path = _get_mock_private_key_path(mocker, tmpdir)
    tls_dict = mount_efs.tls_paths_dictionary(MOUNT_NAME, str(tmpdir))
    tmp_config_path = os.path.join(str(tmpdir), MOUNT_NAME, 'tmpConfig')
    mount_efs.create_certificate(config, MOUNT_NAME, COMMON_NAME, REGION, FS_ID, False, ap_id=AP_ID, base_path=str(tmpdir))
    with open(os.path.join(tls_dict['mount_dir'], 'config.conf')) as f:
        conf_body = f.read()
    # CA config on disk must match what create_ca_conf produces for the same inputs.
    assert conf_body == _get_ca_conf_body(tmp_config_path, COMMON_NAME, tls_dict['mount_dir'], pk_path, FIXED_DT, REGION,
                                          FS_ID, False, AP_ID)
    assert os.path.exists(pk_path)
    assert not os.path.exists(os.path.join(tls_dict['mount_dir'], 'publicKey.pem'))
    assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'request.csr'))
    assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'certificate.pem'))


def test_certificate_with_iam_with_ap_id(mocker, tmpdir):
    """IAM + access point: public key, CSR and cert are all written."""
    config = _get_mock_config()
    pk_path = _get_mock_private_key_path(mocker, tmpdir)
    tls_dict = mount_efs.tls_paths_dictionary(MOUNT_NAME, str(tmpdir))
    tmp_config_path = os.path.join(str(tmpdir), MOUNT_NAME, 'tmpConfig')
    mount_efs.create_certificate(config, MOUNT_NAME, COMMON_NAME, REGION, FS_ID, True, ap_id=AP_ID, base_path=str(tmpdir))
    with open(os.path.join(tls_dict['mount_dir'], 'config.conf')) as f:
        conf_body = f.read()
    assert conf_body == _get_ca_conf_body(tmp_config_path, COMMON_NAME, tls_dict['mount_dir'], pk_path, FIXED_DT, REGION,
                                          FS_ID, True, AP_ID)
    assert os.path.exists(pk_path)
    assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'publicKey.pem'))
    assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'request.csr'))
    assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'certificate.pem'))


def test_certificate_with_iam_without_ap_id(mocker, tmpdir):
    """IAM without an access point id: same artifacts, ap_id omitted from the config."""
    config = _get_mock_config()
    pk_path = _get_mock_private_key_path(mocker, tmpdir)
    tls_dict = mount_efs.tls_paths_dictionary(MOUNT_NAME, str(tmpdir))
    tmp_config_path = os.path.join(str(tmpdir), MOUNT_NAME, 'tmpConfig')
    mount_efs.create_certificate(config, MOUNT_NAME, COMMON_NAME, REGION, FS_ID, True, ap_id=None, base_path=str(tmpdir))
    with open(os.path.join(tls_dict['mount_dir'], 'config.conf')) as f:
        conf_body = f.read()
    assert conf_body == _get_ca_conf_body(tmp_config_path, COMMON_NAME, tls_dict['mount_dir'], pk_path, FIXED_DT, REGION,
                                          FS_ID, True, None)
    assert os.path.exists(pk_path)
    assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'publicKey.pem'))
    assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'request.csr'))
    assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'certificate.pem'))
def test_create_ca_supporting_dirs(tmpdir):
    """ca_dirs_check must create the CA database and certs directories."""
    config = _get_config()
    tls_dict = mount_efs.tls_paths_dictionary(MOUNT_NAME, str(tmpdir))
    mount_efs.ca_dirs_check(config, tls_dict['database_dir'], tls_dict['certs_dir'])
    assert os.path.exists(tls_dict['database_dir'])
    assert os.path.exists(tls_dict['certs_dir'])


def test_create_ca_supporting_files(tmpdir):
    """ca_supporting_files_check must create the openssl CA bookkeeping files with expected contents."""
    config = _get_config()
    tls_dict = mount_efs.tls_paths_dictionary(MOUNT_NAME, str(tmpdir))
    index = tls_dict['index']
    index_attr = tls_dict['index_attr']
    serial = tls_dict['serial']
    rand = tls_dict['rand']
    mount_efs.ca_dirs_check(config, tls_dict['database_dir'], tls_dict['certs_dir'])
    mount_efs.ca_supporting_files_check(index, index_attr, serial, rand)
    with open(index_attr, 'r') as index_attr_file:
        index_attr_content = index_attr_file.read()
    with open(serial, 'r') as serial_file:
        serial_content = serial_file.read()
    assert os.path.exists(index)
    assert os.path.exists(index_attr)
    assert os.path.exists(serial)
    assert os.path.exists(rand)
    # openssl CA conventions: duplicate subjects allowed, serial starts at 00.
    assert 'unique_subject = no' == index_attr_content
    assert '00' == serial_content
def test_create_canonical_request_without_token():
    """SigV4 canonical request without a session token (no X-Amz-Security-Token)."""
    public_key_hash = 'fake_public_key_hash'
    canonical_request_out = mount_efs.create_canonical_request(public_key_hash, FIXED_DT, ACCESS_KEY_ID_VAL, REGION, FS_ID)
    assert 'GET\n/\nAction=Connect&PublicKeyHash=fake_public_key_hash&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=' \
           'FAKE_AWS_ACCESS_KEY_ID%2F20000101%2Fus-east-1%2Felasticfilesystem%2Faws4_request&X-Amz-Date=20000101T120000Z&' \
           'X-Amz-Expires=86400&X-Amz-SignedHeaders=host\nhost:fs-deadbeef\nhost\n' \
           'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' == canonical_request_out


def test_create_canonical_request_with_token(mocker):
    """Same request, but with the session token included in the query string."""
    mocker.patch('mount_efs.get_utc_now', return_value=FIXED_DT)
    public_key_hash = 'fake_public_key_hash'
    canonical_request_out = mount_efs.create_canonical_request(public_key_hash, FIXED_DT, ACCESS_KEY_ID_VAL, REGION, FS_ID,
                                                              SESSION_TOKEN_VAL)
    assert 'GET\n/\nAction=Connect&PublicKeyHash=fake_public_key_hash&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=' \
           'FAKE_AWS_ACCESS_KEY_ID%2F20000101%2Fus-east-1%2Felasticfilesystem%2Faws4_request&X-Amz-Date=20000101T120000Z&' \
           'X-Amz-Expires=86400&X-Amz-Security-Token=FAKE_SESSION_TOKEN&X-Amz-SignedHeaders=host\nhost:fs-deadbeef\nhost' \
           '\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' == canonical_request_out


def test_get_public_key_sha1(tmpdir):
    """SHA-1 fingerprinting of a PEM public key yields a stable known digest."""
    fake_public_key_filename = 'fake_public_key.pem'
    fake_public_key_path = os.path.join(str(tmpdir), fake_public_key_filename)
    public_key_body = '-----BEGIN PUBLIC KEY-----\nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEArGJgJTTwefL+jHV8A9EM\npX56n3Z' \
                      'JczM+4iPPSnledJzBcUO1VF+j6TOzy39BWBtvRjSs0nqd5wqw+1xHawhh\ndJF5KsqMNGcP/y9fLi9Bm1vInHfQVan4NhXWh8S' \
                      'NbRZM1tNZV5/k+VnFur6ACHwq\neWppGXkGBASL0zG0MiCbOVMkwfv/E69APVC6ljnPXBWaDuggAClYheTv5RIU4wD1\nc1nohR' \
                      'b0ZHyfZjELjnqLfY0eOqY+msQXzP0eUmZXCMvUkGxi5DJnNVKhw5y96QbB\nRFO5ImQXpNsQmp8F9Ih1RIxNsl4csaEuK+/Zo' \
                      'J68vR47oQNtPp1PjdIwcnQ3cOvO\nHMxulMX21Fd/e9TsnqISOTOyebmYFgaHczg4JVu5lV699+7QWJm1a7M4ab0WgVVR\nz27J0' \
                      'Lx/691MZB4TbGoEIFza30/sk6uTPxAzebzCaroXzT7uA6TIRtRpxt4X9a+4\n6GhfgR5RJfFMb8rPGmaKWqA2YkTsZzRGHhbAzs' \
                      'J/nEstAgMBAAE=\n-----END PUBLIC KEY-----'
    tmpdir.join(fake_public_key_filename).write(public_key_body)
    sha1_result = mount_efs.get_public_key_sha1(fake_public_key_path)
    assert sha1_result == 'd9c2a68f2c4de49982e310d95e539a89abd6bc13'


def test_create_string_to_sign():
    """SigV4 string-to-sign derived from a canonical request at the fixed timestamp."""
    canonical_request = 'canonical_request'
    string_to_sign_output = mount_efs.create_string_to_sign(canonical_request, FIXED_DT, REGION)
    assert 'AWS4-HMAC-SHA256\n20000101T120000Z\n20000101/us-east-1/elasticfilesystem/aws4_request\n' \
           '572b1e335109068b81e4def81524c5fe5d0e385143b5656cbf2f7c88e5c1a51e' == string_to_sign_output


def test_calculate_signature():
    """Final HMAC signature for a fixed string-to-sign and fake secret key."""
    string_to_sign = 'string_to_sign'
    signature_output = mount_efs.calculate_signature(string_to_sign, FIXED_DT, SECRET_ACCESS_KEY_VAL, REGION)
    assert '6aa643803d4a1b07c5ac87bff96347ef28dab1cb5a5c5d63969c90ca11454c4a' == signature_output
| nilq/baby-python | python |
from chibi.units.base import Unit
from unittest import TestCase
class Test_unit( TestCase ):
    """Arithmetic-protocol tests for Unit(10): every operator is exercised
    with the Unit on both the left and the right side, and the result is
    expected to be another Unit carrying the computed value."""

    def setUp( self ):
        # Fresh Unit wrapping 10 for each test.
        self.unit = Unit( 10 )

    def test_should_print_the_value_when_is_str( self ):
        self.assertIn( '10', str( self.unit ) )

    def test_when_add_a_int_should_work( self ):
        r = 10 + self.unit
        self.assertEqual( r.value, 20 )
        r = self.unit + 10
        self.assertEqual( r.value, 20 )

    def test_when_add_a_float_should_work( self ):
        r = 10.10 + self.unit
        self.assertEqual( r.value, 20.1 )
        r = self.unit + 10.1
        self.assertEqual( r.value, 20.1 )

    def test_when_sub_a_int_should_work( self ):
        # NOTE(review): both orders are expected to yield 0 here, which implies
        # __rsub__ computes value - other rather than other - value — confirm
        # this is the intended Unit semantics.
        r = 10 - self.unit
        self.assertEqual( r.value, 0 )
        r = self.unit - 10
        self.assertEqual( r.value, 0 )

    def test_when_sub_a_float_should_work( self ):
        r = 10.10 - self.unit
        self.assertAlmostEqual( r.value, -0.1, delta=0.01 )
        r = self.unit - 10.10
        self.assertAlmostEqual( r.value, -0.1, delta=0.01 )

    def test_when_mul_a_int_should_work( self ):
        r = 10 * self.unit
        self.assertEqual( r.value, 100 )
        r = self.unit * 10
        self.assertEqual( r.value, 100 )

    def test_when_mul_a_float_should_work( self ):
        r = 10.1 * self.unit
        self.assertEqual( r.value, 101.0 )
        r = self.unit * 10.1
        self.assertEqual( r.value, 101.0 )

    def test_when_div_a_int_should_work( self ):
        r = 10 / self.unit
        self.assertEqual( r.value, 1 )
        r = self.unit / 10
        self.assertEqual( r.value, 1 )

    def test_when_div_a_float_should_work( self ):
        r = 10.10 / self.unit
        self.assertAlmostEqual( r.value, 0.99, delta=0.001 )
        r = self.unit / 10.10
        self.assertAlmostEqual( r.value, 0.99, delta=0.001 )

    def test_when_div_int_a_int_should_work( self ):
        # Floor division.
        r = 10 // self.unit
        self.assertEqual( r.value, 1 )
        r = self.unit // 10
        self.assertEqual( r.value, 1 )

    def test_when_div_int_a_float_should_work( self ):
        r = 10.10 // self.unit
        self.assertEqual( r.value, 0 )
        r = self.unit // 10.10
        self.assertEqual( r.value, 0 )

    def test_when_pow_a_int_should_work( self ):
        r = 10 ** self.unit
        self.assertEqual( r.value, 10000000000 )
        r = self.unit ** 10
        self.assertEqual( r.value, 10000000000 )

    def test_when_pow_float_a_float_should_work( self ):
        r = 10.10 ** self.unit
        self.assertEqual( r.value, 12589254117.941662 )
        r = self.unit ** 10.10
        self.assertEqual( r.value, 12589254117.941662 )
| nilq/baby-python | python |
###################################################################################################################
# Uses a trained network to predict the class for an input image
# Notes - Run train.py first before this script
# Basic usage: python predict.py /path/to/image checkpoint
# Options:
# Return top KK most likely classes: python predict.py input checkpoint --top_k 3
# Use a mapping of categories to real names: python predict.py input checkpoint --category_names cat_to_name.json
# Use GPU for inference: python predict.py input checkpoint --gpu
# Typical run: python predict.py --gpu --category_names cat_to_name.json --top_k 3 check_point.pt
#####################################################################################################################
###########################################
# Get the arguments from the command line
###########################################
import argparse

parser = argparse.ArgumentParser()
# Positional argument: file name of the checkpoint saved by train.py.
parser.add_argument('image_checkpoint', metavar='image_checkpoint',
                    help='/path/to/image_checkpoint')
# JSON file mapping category ids to human-readable flower names.
parser.add_argument('--category_names', action="store",
                    dest="category_names", default='cat_to_name.json',
                    help='a mapping of categories to real names ')
# How many of the most likely classes to report.
parser.add_argument('--top_k', metavar='top_k',
                    default=3, type=int,
                    help='top KK most likely classes (default: 3)')
# NOTE(review): the help text says "(default: True)" but the actual
# default below is False — confirm which one is intended.
parser.add_argument('--gpu', dest='use_gpu', action="store_true",
                    default=False,
                    help='Use GPU for training (default: True)')
parser.add_argument('--version', action='version', version='%(prog)s 1.0 There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.') # Decided to pull some wording from GCC
# Directory that contains the saved checkpoint file.
parser.add_argument('--load_dir', action="store",
                    dest="load_dir", default='./',
                    help='directory_to_saved_checkpoints')
# Directory holding the image used to smoke-test the prediction.
parser.add_argument('--test_image_dir', action="store",
                    dest="test_image_dir", default='./flowers/test/10',
                    help='directory location to image used to test prediction')
parser.add_argument('--test_image', action="store",
                    dest="test_image", default='image_07104.jpg',
                    help='Image file used to test prediction')
args = parser.parse_args()
### DEBUG ###
print(vars(args))
print(args.use_gpu)
#########################
# Various Python imports
#########################
import os
import sys
import numpy as np
import torch
import time
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
#%matplotlib inline
from torch import nn
from torch import optim
import torch.nn.functional as F
import torchvision.models as models
from PIL import Image
import json
from matplotlib.ticker import FormatStrFormatter
from collections import OrderedDict
# Unpack the parsed CLI arguments into module-level names.
image_checkpoint = args.image_checkpoint
category_names = args.category_names
top_k = args.top_k
use_gpu = args.use_gpu
load_dir = args.load_dir
test_image_dir = args.test_image_dir
test_image = args.test_image

# Run on CUDA only when it was both requested (--gpu) and is available.
device = torch.device('cuda' if torch.cuda.is_available() and use_gpu else 'cpu')

if use_gpu:
    #############################
    # Check if CUDA is available
    #############################
    train_on_gpu = torch.cuda.is_available()
    if not train_on_gpu:
        # --gpu was requested but no CUDA device exists: abort rather
        # than silently falling back to the CPU.
        print('CUDA is not available. Exiting ...')
        sys.exit()
    else:
        print('CUDA is available! Training on GPU ...')
### DEBUG ###
#print('Passed GPU Check')

##########################
# Possible models to use
##########################
# Supported backbones mapped to the input width of their classifier head.
structures = {"densenet121" : 1024,
              "alexnet" : 9216}
def model_setup(structure='densenet121', dropout=0.5, hidden_layer1=120, lr=0.001):
    """Build a pretrained backbone with a fresh flower-classifier head.

    Args:
        structure: Backbone name; one of the keys of ``structures``
            ('densenet121' or 'alexnet').
        dropout: Dropout probability applied before the head layers.
        hidden_layer1: Width of the first hidden layer of the head.
        lr: Learning rate for the Adam optimizer.

    Returns:
        Tuple of (model, optimizer, criterion, structure).
    """
    # Select the pretrained feature extractor; abort on unknown names.
    if structure == 'densenet121':
        model = models.densenet121(pretrained=True)
    elif structure == 'alexnet':
        model = models.alexnet(pretrained=True)
    else:
        print("Im sorry but {} is not a valid model. Did you mean densenet121 or alexnet?".format(structure))
        sys.exit()

    # Freeze every backbone weight; only the new head will be trained.
    for weight in model.parameters():
        weight.requires_grad = False

    # Fresh feed-forward head:
    # structures[structure] -> hidden_layer1 -> 90 -> 80 -> 102 classes.
    head = nn.Sequential(OrderedDict([
        ('dropout', nn.Dropout(dropout)),
        ('inputs', nn.Linear(structures[structure], hidden_layer1)),
        ('relu1', nn.ReLU()),
        ('hidden_layer1', nn.Linear(hidden_layer1, 90)),
        ('relu2', nn.ReLU()),
        ('hidden_layer2', nn.Linear(90, 80)),
        ('relu3', nn.ReLU()),
        ('hidden_layer3', nn.Linear(80, 102)),
        ('output', nn.LogSoftmax(dim=1)),
    ]))
    model.classifier = head

    criterion = nn.NLLLoss()  # pairs with the LogSoftmax output above
    # Only the (unfrozen) head parameters are optimized.
    optimizer = optim.Adam(model.classifier.parameters(), lr)

    if use_gpu:  # module-level flag parsed from the CLI
        model.cuda()
    return model, optimizer, criterion, structure
####################################################################
# Loads a checkpoint and rebuilds the model
####################################################################
def load_model(path='./', file_name='check_point.pt'):
    """Rebuild a trained model from a checkpoint saved by train.py.

    Args:
        path: Directory containing the checkpoint.
        file_name: Checkpoint file name.

    Returns:
        The reconstructed model with its trained weights and its
        ``class_to_idx`` mapping restored.
    """
    # os.path.join inserts the separator when needed; the previous
    # string concatenation (path + file_name) silently built a wrong
    # path whenever `path` lacked a trailing '/'.
    checkpoint = torch.load(os.path.join(path, file_name))
    structure = checkpoint['structure']
    hidden_layer1 = checkpoint['hidden_layer1']
    # Recreate the architecture, then overwrite it with trained weights.
    model, _, _, _ = model_setup(structure, 0.5, hidden_layer1)
    model.class_to_idx = checkpoint['class_to_idx']
    model.load_state_dict(checkpoint['state_dict'])
    return model
#############
# Load Model
#############
# Rebuild the trained network from the checkpoint named on the CLI.
model2 = load_model(path=load_dir, file_name=image_checkpoint)
### DEBUG ###
#print(model2)
#print(model2.state_dict())
###########################
# Label mapping for DEBUG
###########################
# cat_to_name: category id (string) -> human-readable flower name.
with open(category_names, 'r') as f:
    cat_to_name = json.load(f)
#######################
# Image Preprocessing
#######################
def process_image(image):
    """Load an image file and convert it to a normalized tensor.

    Resizes the short side to 256, center-crops to 224x224, converts to
    a tensor and applies the ImageNet normalization expected by the
    pretrained backbones.

    Args:
        image: Path to the image file.

    Returns:
        A 3x224x224 ``torch.Tensor``.
    """
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    return pipeline(Image.open(image))
####################
# Class Prediction
####################
#model.class_to_idx =train_data.class_to_idx
### DEBUG ###
#print('Pre Class Prediction')
# NOTE(review): ctx is assigned but never read afterwards; predict()
# uses model.class_to_idx directly. Confirm before deleting.
ctx = model2.class_to_idx
#use_gpu = True
def predict(image_path, model, topk=5):
    """Predict the top-k classes of an image with a trained model.

    Args:
        image_path: Path to the input image.
        model: Trained classifier exposing a ``class_to_idx`` attribute.
        topk: Number of most likely classes to return.

    Returns:
        The ``topk`` result pair: (probabilities, class indices).
    """
    model.to(device)  # module-level device chosen from the --gpu flag
    # Shape the image as a single-image float batch.
    img_torch = process_image(image_path).unsqueeze_(0).float()

    with torch.no_grad():
        if use_gpu:
            output = model(img_torch.cuda())
        else:
            output = model(img_torch)

    # The network emits log-probabilities (LogSoftmax); softmax of the
    # raw output recovers ordinary probabilities.
    probability = F.softmax(output.data, dim=1)
    # Compute the top-k once (previously done twice: here and at return).
    probabilities = probability.topk(topk)
    # Map internal class indices back to dataset labels, then to names.
    idx_to_class = {val: key for key, val in model.class_to_idx.items()}
    b = [cat_to_name[idx_to_class[index]] for index in np.array(probabilities[1][0])]
    print(b)
    return probabilities
# Implement the code to predict the class from an image file
####################
# Get an test image
####################
#data_dir = 'flowers'
#img = (data_dir + '/test' + '/10/' + 'image_07104.jpg')
img = os.path.join(test_image_dir, test_image)
# val1: top-k probabilities, val2: corresponding top-k class indices.
val1, val2 = predict(img, model2, top_k)
print(val1)
print(val2)
| nilq/baby-python | python |
from datetime import datetime
import hashlib
import uuid
from google.cloud import firestore
SONG_TEMPLATE = '{verse}\n\n{pre_chorus}\n\n{chorus}\n\n{pre_chorus}\n\n{chorus}\n\n{bridge}'
class Song:
    """A song assembled from chorus/pre-chorus/verse/bridge parts,
    persisted as a document in the Firestore ``songs`` collection."""

    collection_name = 'songs'

    def __init__(self, id, chorus_id=None, pre_chorus_id=None, verse_id=None,
                 bridge_id=None, created=None, modified=None):
        timestamp = datetime.utcnow()
        self.db = firestore.Client()
        self.id = id
        self.chorus_id = chorus_id
        self.pre_chorus_id = pre_chorus_id
        self.verse_id = verse_id
        self.bridge_id = bridge_id
        # Fall back to "now" for records that were never stored before.
        self.created = created or timestamp
        self.modified = modified or timestamp

    @classmethod
    def get_id(cls, chorus, pre_chorus, verse, bridge):
        """Derive a short deterministic id from the four song parts."""
        digest = hashlib.sha1()
        digest.update(f'{chorus}|{pre_chorus}|{verse}|{bridge}'.encode('utf8'))
        # Seven hex chars of the SHA-1 are enough to keep ids short.
        return digest.hexdigest()[:7]

    @classmethod
    def get(cls, song_id):
        """Fetch one song by id, or None if it does not exist."""
        db = firestore.Client()
        query = db.collection(cls.collection_name).where('id', '==', song_id)
        matches = list(query.stream())
        if not matches:
            return None
        return cls(song_id).populate(**(matches[0].to_dict()))

    @classmethod
    def get_all(cls):
        """Fetch every song stored in the collection."""
        db = firestore.Client()
        return [
            cls(document.id).populate(**document.to_dict())
            for document in db.collection(cls.collection_name).stream()
        ]

    def populate(self, **kwargs):
        """Copy known attributes from kwargs onto self; returns self."""
        for name, value in kwargs.items():
            if hasattr(self, name):
                setattr(self, name, value)
        return self

    def save(self):
        """Upsert this song's document, refreshing its modified stamp."""
        doc_ref = self.db.collection(self.collection_name).document(self.id)
        doc_ref.set({
            'id': self.id,
            'chorus_id': self.chorus_id,
            'pre_chorus_id': self.pre_chorus_id,
            'verse_id': self.verse_id,
            'bridge_id': self.bridge_id,
            'created': self.created,
            'modified': datetime.utcnow(),
        })
| nilq/baby-python | python |
"""This module contains various decorators.
There are two kinds of decorators defined in this module which consists of either two or
three nested functions. The former are decorators without and the latter with arguments.
For more information on decorators, see this `guide`_ on https://realpython.com which
provides a comprehensive overview.
.. _guide:
https://realpython.com/primer-on-python-decorators/
"""
import functools
import warnings
from typing import NamedTuple
import numpy as np
import pandas as pd
from estimagic.exceptions import get_traceback
from estimagic.parameters.process_constraints import process_constraints
from estimagic.parameters.reparametrize import reparametrize_from_internal
def numpy_interface(func=None, *, params=None, constraints=None, numpy_output=False):
    """Convert x to params.

    The decorated function receives a NumPy array of internal parameters
    and converts it to a :class:`pandas.DataFrame` which can be handled
    by the user's criterion function. Calling the decorated function
    directly with a params DataFrame bypasses the conversion.

    Args:
        func (callable): The function to which the decorator is applied.
        params (pandas.DataFrame): See :ref:`params`.
        constraints (list of dict): Contains constraints.
        numpy_output (bool): Whether pandas objects in the output should
            also be converted to numpy arrays.

    Returns:
        callable
    """
    constraints = [] if constraints is None else constraints
    processed_constraints, processed_params = process_constraints(constraints, params)

    # Extract the arrays needed to map an internal parameter vector back
    # onto the full params DataFrame once, so the per-call wrapper stays cheap.
    fixed_values = processed_params["_internal_fixed_value"].to_numpy()
    pre_replacements = processed_params["_pre_replacements"].to_numpy().astype(int)
    post_replacements = processed_params["_post_replacements"].to_numpy().astype(int)

    def decorator_numpy_interface(func):
        @functools.wraps(func)
        def wrapper_numpy_interface(x, *args, **kwargs):
            if isinstance(x, pd.DataFrame):
                # Already a params DataFrame: pass through unchanged.
                params_arg = x
            elif isinstance(x, np.ndarray):
                params_arg = reparametrize_from_internal(
                    internal=x,
                    fixed_values=fixed_values,
                    pre_replacements=pre_replacements,
                    processed_constraints=processed_constraints,
                    post_replacements=post_replacements,
                    params=params,
                    return_numpy=False,
                )
            else:
                raise ValueError(
                    "x must be a numpy array or DataFrame with 'value' column."
                )

            out = func(params_arg, *args, **kwargs)
            if numpy_output and isinstance(out, (pd.DataFrame, pd.Series)):
                out = out.to_numpy()
            return out

        return wrapper_numpy_interface

    # Support use both with and without decorator arguments.
    return decorator_numpy_interface(func) if callable(func) else decorator_numpy_interface
def catch(
    func=None,
    *,
    exception=Exception,
    exclude=(KeyboardInterrupt, SystemExit),
    onerror=None,
    default=None,
    warn=True,
    reraise=False,
):
    """Catch and handle exceptions raised by the decorated function.

    Usable both with and without decorator arguments.

    Args:
        exception (Exception or tuple): Exception type(s) that are
            caught and handled; all Exceptions by default.
        exclude (Exception or tuple): Exception type(s) that are never
            caught; KeyboardInterrupt and SystemExit by default.
        onerror (None or Callable): Called with the exception as only
            argument whenever one occurs.
        default: Replacement return value on error. Either a constant,
            the string "__traceback__" (returns a traceback string), or
            a callable with the same signature as the wrapped function.
        warn (bool): If True, emit the caught exception as a warning.
        reraise (bool): If True, re-raise the exception after calling
            ``onerror``.
    """

    def decorator_catch(func):
        @functools.wraps(func)
        def wrapper_catch(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except exclude:
                # Deliberately never handled (e.g. Ctrl-C).
                raise
            except exception as e:
                if onerror is not None:
                    onerror(e)
                if reraise:
                    raise e
                tb = get_traceback()
                if warn:
                    warnings.warn(f"The following exception was caught:\n\n{tb}")
                if default == "__traceback__":
                    return tb
                if callable(default):
                    return default(*args, **kwargs)
                return default

        return wrapper_catch

    return decorator_catch(func) if callable(func) else decorator_catch
def unpack(func=None, symbol=None):
    """Decorate a function so it receives its single argument unpacked.

    Usable both with and without decorator arguments.

    Args:
        func (callable): The function to which the decorator is applied.
        symbol (str or None): How the argument is forwarded: ``None``
            passes it through unchanged, ``"*"`` unpacks it as
            positional arguments and ``"**"`` as keyword arguments.

    Returns:
        callable

    Raises:
        ValueError: If symbol is not one of None, "*" or "**".
    """
    if symbol not in (None, "*", "**"):
        # Previously an invalid symbol crashed with an obscure NameError
        # on wrapper_unpack when the decorator was applied; fail loudly.
        raise ValueError(f"symbol must be None, '*' or '**', not {symbol!r}.")

    def decorator_unpack(func):
        if symbol is None:

            @functools.wraps(func)
            def wrapper_unpack(arg):
                return func(arg)

        elif symbol == "*":

            @functools.wraps(func)
            def wrapper_unpack(arg):
                return func(*arg)

        else:  # symbol == "**"

            @functools.wraps(func)
            def wrapper_unpack(arg):
                return func(**arg)

        return wrapper_unpack

    return decorator_unpack(func) if callable(func) else decorator_unpack
def switch_sign(func):
    """Negate every numeric output of ``func``.

    Dict outputs have each value negated; tuple/list outputs are negated
    element-wise (dict elements again per value) with the container type
    preserved; any other output is negated directly.
    """

    def _negate_mapping(mapping):
        # Flip the sign of every value in a dict.
        return {key: -val for key, val in mapping.items()}

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        raw = func(*args, **kwargs)
        if isinstance(raw, dict):
            return _negate_mapping(raw)
        if isinstance(raw, (tuple, list)):
            flipped = [
                _negate_mapping(item) if isinstance(item, dict) else -item
                for item in raw
            ]
            return tuple(flipped) if isinstance(raw, tuple) else flipped
        return -raw

    return wrapper
class AlgoInfo(NamedTuple):
    """Static metadata attached to an internal minimizer by ``mark_minimizer``."""

    # Which part of the criterion output the optimizer consumes
    # ("value", "dict", "contributions" or "root_contributions").
    primary_criterion_entry: str
    # Name of the internal algorithm.
    name: str
    # True if the algorithm evaluates criterion/derivative in parallel.
    parallelizes: bool
    # True if criterion caching must be disabled for this algorithm.
    disable_cache: bool
    # True if the algorithm is sensitive to the scaling of the parameters.
    needs_scaling: bool
    # False when required optional dependencies are missing.
    is_available: bool
def mark_minimizer(
    func=None,
    *,
    primary_criterion_entry="value",
    name=None,
    parallelizes=False,
    disable_cache=False,
    needs_scaling=False,
    is_available=True,
):
    """Mark a function as an internal estimagic minimizer and attach metadata.

    Usable both with and without decorator arguments.

    Args:
        func (callable): The function to be decorated.
        primary_criterion_entry (str): One of "value", "contributions",
            "root_contributions" or "dict". Default "value". Decides
            which part of the user criterion output the internal
            optimizer needs.
        name (str): The name of the internal algorithm (required).
        parallelizes (bool): Must be True if an algorithm evaluates the
            criterion, derivative or criterion_and_derivative in parallel.
        disable_cache (bool): If True, no caching is used for the
            criterion function or its derivatives.
        needs_scaling (bool): Must be True if the algorithm is not
            reasonably independent of the scaling of the parameters.
        is_available (bool): Whether the algorithm is available; needed
            for algorithms with optional dependencies.
    """
    # Validate the metadata eagerly so misuse fails at decoration time.
    if name is None:
        raise TypeError(
            "mark_minimizer() missing 1 required keyword-only argument: 'name'"
        )
    if not isinstance(name, str):
        raise TypeError("name must be a string.")

    valid_entries = ["value", "dict", "contributions", "root_contributions"]
    if primary_criterion_entry not in valid_entries:
        raise ValueError(
            f"primary_criterion_entry must be one of {valid_entries} not "
            f"{primary_criterion_entry}."
        )

    for flag_name, flag in [
        ("parallelizes", parallelizes),
        ("disable_cache", disable_cache),
        ("needs_scaling", needs_scaling),
        ("is_available", is_available),
    ]:
        if not isinstance(flag, bool):
            raise TypeError(f"{flag_name} must be a bool.")

    algo_info = AlgoInfo(
        primary_criterion_entry=primary_criterion_entry,
        name=name,
        parallelizes=parallelizes,
        disable_cache=disable_cache,
        needs_scaling=needs_scaling,
        is_available=is_available,
    )

    def decorator_mark_minimizer(func):
        @functools.wraps(func)
        def wrapper_mark_minimizer(*args, **kwargs):
            return func(*args, **kwargs)

        # Consumers read the metadata from this attribute.
        wrapper_mark_minimizer._algorithm_info = algo_info
        return wrapper_mark_minimizer

    return decorator_mark_minimizer(func) if callable(func) else decorator_mark_minimizer
| nilq/baby-python | python |
from typing import Tuple, List
import pytest
from predicates.state import State
from predicates import guards, actions
from predicates.guards import AlwaysTrue, AlwaysFalse
from model.model import the_model, Model
from model.operation import Operation, Transition
from planner.plan import plan
# ---------------------------------------------------------------------------
# Helpers shared by the planner tests below.
# ---------------------------------------------------------------------------
# Short aliases: build guard/action predicates from their string DSL.
g = guards.from_str
a = actions.from_str
def test_simple_planner_1():
    """
    Check the planner on a hand-built five-operation model.

    Reaching v2 == 3 must take the o3 + o4 route (+2 then +1), while
    v2 == 1 is reached directly via o2. (Pointless f-string prefixes on
    constant strings were removed.)
    """
    initial_state = State(
        v1 = False,
        v2 = 0
    )
    o1 = Operation(
        name="o1",
        # enabled when v1 is false
        precondition=Transition("pre", g("!v1"), ()),
        # the guard of the postcondition is only used when running the operation, not when planning
        postcondition=Transition("post", AlwaysTrue, a("v1")),
        # the effects are only used when planning to simulate changes of sensors
        effects=(),
    )
    o2 = Operation(
        name="o2",
        precondition=Transition("pre", g("v1 && v2 == 0"), ()),
        postcondition=Transition("post", AlwaysTrue, a("v2 += 1")),
        effects=(),
    )
    o3 = Operation(
        name="o3",
        precondition=Transition("pre", g("v1 && v2 == 0"), ()),
        postcondition=Transition("post", AlwaysTrue, a("v2 += 2")),
        effects=(),
    )
    o4 = Operation(
        name="o4",
        precondition=Transition("pre", g("v1 && v2 == 2"), ()),
        postcondition=Transition("post", AlwaysTrue, a("v2 += 1")),
        effects=(),
    )
    o5 = Operation(
        name="o5",
        precondition=Transition("pre", g("v1"), ()),
        postcondition=Transition("post", AlwaysTrue, a("v2 <- 0")),
        effects=(),
    )
    simple_model = Model(initial_state, {
        op.name: op for op in (o1, o2, o3, o4, o5)
    })

    goal = g("v2 == 3")
    p = plan(initial_state, goal, simple_model)
    assert p != None
    assert len(p) != 0
    assert p == [o1.name, o3.name, o4.name]

    goal = g("v2 == 1")
    p = plan(initial_state, goal, simple_model)
    assert p == [o1.name, o2.name]
def test_simple_planner_2():
    """
    Check that the planner copes with many interchangeable operations.

    100 identical operations all set v1; only "final" then advances v2,
    so the plan must have exactly two steps ending in "final".
    (Pointless f-string prefixes on constant strings were removed.)
    """
    initial_state = State(
        v1 = False,
        v2 = 0
    )
    ops = {}
    for i in range(100):
        ops[f"o{i}"] = Operation(
            name=f"o{i}",
            # enabled when v1 is false
            precondition=Transition("pre", g("!v1"), ()),
            # the guard of the postcondition is only used when running the operation, not when planning
            postcondition=Transition("post", AlwaysTrue, a("v1")),
            # the effects are only used when planning to simulate changes of sensors
            effects=(),
        )
    ops["final"] = Operation(
        name="final",
        precondition=Transition("pre", g("v1 && v2 == 0"), ()),
        postcondition=Transition("post", AlwaysTrue, a("v2 += 1")),
        effects=(),
    )
    model = Model(initial_state, ops)

    goal = g("v2 == 1")
    p = plan(initial_state, goal, model)
    print(p)
    assert p != None
    assert len(p) == 2
    assert p[1] == "final"
def test_simple_planner_3():
    """
    Check the planner on a long forced chain of 100 operations.

    Operation o{i} is enabled only when v2 == i and increments v2, so
    the only plan to v2 == 100 is o0..o99 in order. (Pointless f-string
    prefix on the constant action string and a copied-and-wrong comment
    were fixed.)
    """
    initial_state = State(
        v1 = False,
        v2 = 0
    )
    ops = {}
    for i in range(100):
        ops[f"o{i}"] = Operation(
            name=f"o{i}",
            # enabled only when v2 has reached exactly i
            precondition=Transition("pre", g(f"v2 == {i}"), ()),
            # the guard of the postcondition is only used when running the operation, not when planning
            postcondition=Transition("post", AlwaysTrue, a("v2 +=1")),
            # the effects are only used when planning to simulate changes of sensors
            effects=(),
        )
    model = Model(initial_state, ops)

    goal = g("v2 == 100")
    p = plan(initial_state, goal, model, 120)
    print(p)
    assert p != None
    assert len(p) == 100
# Use this test when you are working with the model
def test_planner_real_model_1():
    """Run the planner against the real model for a few simple goals."""
    m = the_model()

    # Position 1 starts empty, so the "empty" goal needs no actions.
    goal = g("in_pos1 == empty")
    assert plan(m.initial_state, goal, m) == []

    # Filling position 1 requires the full pick-and-place sequence.
    goal = g("in_pos1 != empty")
    steps = plan(m.initial_state, goal, m)
    print(f"plan: {steps}")
    assert steps == ['add_cube', 'to_input', 'pick_at_input', 'to_pos1', 'place_at_pos1']

    # Filling all three positions triples the amount of work.
    goal = g("in_pos1 != empty && in_pos2 != empty && in_pos3 != empty")
    steps = plan(m.initial_state, goal, m)
    print(f"plan long: {steps}")
    assert steps is not None
    assert len(steps) == 15
# here you should create more tests to check your model ...
| nilq/baby-python | python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.