code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
'''
<NAME>
2021
'''
import numpy as np
import cv2
from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift
from numpy import conj, real
from utils import gaussian2d_rolled_labels, cos_window
from hog_cpp.fhog.get_hog import get_hog
vgg_path = 'model/imagenet-vgg-verydeep-19.mat'
def create_model():
    """Build a VGG19 feature extractor exposing intermediate layer outputs.

    Loads the pre-trained weights from ``vgg_path`` via ``scipy.io.loadmat``
    and wraps VGG19 in a Keras ``Model`` whose outputs are the activations of
    a fixed set of intermediate layers, for use as deep tracking features.

    :return: Keras ``Model`` with multiple feature-map outputs.
    """
    from scipy import io
    from keras.applications.vgg19 import VGG19
    from keras.models import Model
    weights = io.loadmat(vgg_path)
    base = VGG19(weights)
    # Indices of the layers whose activations we tap as feature maps.
    layer_ids = [2, 5, 10, 15, 20]
    taps = [base.layers[idx].output for idx in layer_ids]
    # base.summary() can be used here for debugging the architecture.
    return Model(inputs=base.inputs, outputs=taps)
# Build the shared VGG19 feature extractor once at import time.
# NOTE: heavy side effect (loads weights from disk) whenever this module is imported.
vgg_model = create_model()
class KernelizedCorrelationFilter:
    """Kernelized Correlation Filter (KCF) visual tracker.

    Learns a correlation filter over the target patch in the Fourier domain
    and localises the target in later frames at the peak of the filter
    response.  Supports raw grayscale, HOG, and deep (VGG19) features.
    """
    def __init__(self, correlation_type='gaussian', feature='hog'):
        """Configure tracker hyper-parameters.

        :param correlation_type: Kernel type; only 'gaussian' is used below.
        :param feature: One of 'gray', 'hog', 'deep'.
        """
        self.padding = 1.5 # extra search area surrounding the target
        self.lambda_ = 1e-4 # regularization
        self.output_sigma_factor = 0.1 # spatial bandwidth (proportional to target)
        self.correlation_type = correlation_type
        self.feature = feature
        self.resize = False
        # GRAY: per-pixel intensity features
        if feature == 'gray':
            self.interp_factor = 0.075 # linear interpolation factor for adaptation
            self.sigma = 0.2 # gaussian kernel bandwidth
            self.poly_a = 1 # polynomial kernel additive term
            self.poly_b = 7 # polynomial kernel exponent
            self.gray = True
            self.cell_size = 1
        # HOG: histogram-of-gradients cells
        elif feature == 'hog':
            self.interp_factor = 0.02 # linear interpolation factor for adaptation
            self.sigma = 0.5 # gaussian kernel bandwidth
            self.poly_a = 1 # polynomial kernel additive term
            self.poly_b = 9 # polynomial kernel exponent
            self.hog = True
            self.hog_orientations = 9
            self.cell_size = 4
        # DEEP: VGG19 intermediate feature maps
        elif feature == 'deep':
            self.interp_factor = 0.02 # linear interpolation factor for adaptation
            self.sigma = 0.5 # gaussian kernel bandwidth
            self.poly_a = 1 # polynomial kernel additive term
            self.poly_b = 9 # polynomial kernel exponent
            self.deep = True
            self.cell_size = 4 # 8
    def start(self, init_gt, show, frame_list):
        """Run the tracker over a whole image sequence.

        :param init_gt: Initial bounding box (x, y, w, h) in the first frame.
        :param show: If True, draw and display the tracked box per frame.
        :param frame_list: Ordered list of image file paths.
        :return: ``np.array`` of per-frame bounding boxes [x, y, w, h].
        """
        poses = []
        poses.append(init_gt)
        init_frame = cv2.imread(frame_list[0])
        x1, y1, w, h = init_gt
        init_gt = tuple(init_gt)
        self.init(init_frame, init_gt)
        for idx in range(len(frame_list)):
            if idx != 0:
                current_frame = cv2.imread(frame_list[idx])
                bbox = self.update(current_frame)
                if bbox is not None:
                    x1, y1, w, h = bbox
                    if show is True:
                        if len(current_frame.shape) == 2:
                            current_frame = cv2.cvtColor(current_frame, cv2.COLOR_GRAY2BGR)
                        show_frame = cv2.rectangle(current_frame, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)),
                                                   (255, 0, 0), 1)
                        cv2.imshow('demo', show_frame)
                        cv2.waitKey(1)
                else:
                    print('bbox is None')
                # If update failed, the previous (x1, y1, w, h) is re-appended.
                poses.append(np.array([int(x1), int(y1), int(w), int(h)]))
        return np.array(poses)
    def init(self, image, roi):
        """Initialise the filter model from the first frame.

        :param image: First frame in BGR order (as read by cv2.imread).
        :param roi: Target bounding box (x, y, w, h).
        """
        # Get image size and search window size
        x, y, w, h = roi
        self.image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        self.target_sz = np.array([h, w])
        self.target_sz_real = np.array([h, w])
        # Position is stored as (row, col) of the target centre.
        self.pos = np.array([y + np.floor(h/2), x + np.floor(w/2)])
        if np.sqrt(h * w) >= 100: # diagonal size >= threshold
            # Track large targets at half resolution to keep FFTs cheap.
            self.resize = True
            self.pos = np.floor(self.pos / 2)
            self.target_sz = np.floor(self.target_sz / 2)
        if self.resize:
            self.image = cv2.resize(self.image, (self.image.shape[1] // 2, self.image.shape[0] // 2))
        # window size, taking padding into account
        self.window_sz = np.floor(np.multiply(self.target_sz, (1 + self.padding)))
        self.output_sigma = round(round(np.sqrt(self.target_sz[0]*self.target_sz[1]), 4) * self.output_sigma_factor / self.cell_size, 4)
        # Regression-target size in feature-cell units; the two axes are
        # swapped into the order expected by gaussian2d_rolled_labels.
        yf_sz = np.floor(self.window_sz / self.cell_size)
        yf_sz[0] = np.floor(self.window_sz / self.cell_size)[1]
        yf_sz[1] = np.floor(self.window_sz / self.cell_size)[0]
        gauss = gaussian2d_rolled_labels(yf_sz, self.output_sigma)
        self.yf = fft2(gauss)
        # store pre-computed cosine window
        self.cos_window = cos_window([self.yf.shape[1], self.yf.shape[0]])
        # obtain a subwindow for training at newly estimated target position
        patch = self.get_subwindow(self.image, self.pos, self.window_sz)
        feat = self.get_features(patch)
        xf = fftn(feat, axes=(0, 1))
        kf = []
        if self.correlation_type == 'gaussian':
            kf = self.gaussian_correlation(xf, xf)
        # Ridge-regression solution computed entirely in the Fourier domain.
        alphaf = np.divide(self.yf, (kf + self.lambda_))
        self.model_alphaf = alphaf
        self.model_xf = xf
    def update(self, image):
        """Locate the target in a new frame and refresh the model.

        :param image: New frame in BGR order (as read by cv2.imread).
        :return: Bounding box (x, y, w, h) in original image coordinates.
        """
        self.image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.resize:
            self.image = cv2.resize(self.image, (self.image.shape[1] // 2, self.image.shape[0] // 2))
        patch = self.get_subwindow(self.image, self.pos, self.window_sz)
        zf = fftn(self.get_features(patch), axes=(0, 1))
        if self.correlation_type == 'gaussian':
            kzf = self.gaussian_correlation(zf, self.model_xf)
        response = real(ifftn(self.model_alphaf * kzf, axes=(0, 1))) # equation for fast detection
        # Peak of the response gives the displacement of the target.
        delta = np.unravel_index(np.argmax(response, axis=None), response.shape)
        vert_delta, horiz_delta = delta[0], delta[1]
        if vert_delta > np.size(zf, 0) / 2: # wrap around to negative half-space of vertical axis
            vert_delta = vert_delta - np.size(zf, 0)
        if horiz_delta > np.size(zf, 1) / 2: # same for horizontal axis
            horiz_delta = horiz_delta - np.size(zf, 1)
        # Displacement is in feature cells; convert back to pixels.
        self.pos = self.pos + self.cell_size * np.array([vert_delta, horiz_delta])
        # obtain a subwindow for training at newly estimated target position
        patch = self.get_subwindow(self.image, self.pos, self.window_sz)
        feat = self.get_features(patch)
        xf = fftn(feat, axes=(0, 1))
        # Kernel Ridge Regression, calculate alphas (in Fourier domain)
        if self.correlation_type == 'gaussian':
            kf = self.gaussian_correlation(xf, xf)
        alphaf = np.divide(self.yf, (kf + self.lambda_))
        # subsequent frames, interpolate model (exponential moving average)
        self.model_alphaf = (1 - self.interp_factor) * self.model_alphaf + self.interp_factor * alphaf
        self.model_xf = (1 - self.interp_factor) * self.model_xf + self.interp_factor * xf
        if self.resize:
            # Undo the half-resolution scaling applied in init().
            pos_real = np.multiply(self.pos, 2)
        else:
            pos_real = self.pos
        box = [pos_real[1] - self.target_sz_real[1] / 2,
               pos_real[0] - self.target_sz_real[0] / 2,
               self.target_sz_real[1],
               self.target_sz_real[0]]
        return box[0], box[1], box[2], box[3]
    def get_subwindow(self, im, pos, sz):
        """Extract a sub-window of size *sz* centred at *pos*.

        Out-of-image coordinates are clamped, so border pixels are
        replicated.  Assumes *im* has three dimensions (H, W, C).
        """
        _p1 = np.array(range(0, int(sz[0]))).reshape([1, int(sz[0])])
        _p2 = np.array(range(0, int(sz[1]))).reshape([1, int(sz[1])])
        ys = np.floor(pos[0]) + _p1 - np.floor(sz[0]/2)
        xs = np.floor(pos[1]) + _p2 - np.floor(sz[1]/2)
        # Check for out-of-bounds coordinates, and set them to the values at the borders
        xs[xs < 0] = 0
        ys[ys < 0] = 0
        xs[xs > np.size(im, 1) - 1] = np.size(im, 1) - 1
        ys[ys > np.size(im, 0) - 1] = np.size(im, 0) - 1
        xs = xs.astype(int)
        ys = ys.astype(int)
        # extract image rows then columns via fancy indexing
        out1 = im[list(ys[0, :]), :, :]
        out = out1[:, list(xs[0, :]), :]
        return out
    def get_features(self, im):
        """Compute features for a patch and apply the cosine window.

        :param im: Image patch extracted by :meth:`get_subwindow`.
        :return: Windowed feature array (H, W, C), or None if
                 ``self.feature`` matches none of the known modes.
        """
        if self.feature == 'hog':
            # HOG features, from Piotr's Toolbox
            x = np.double(self.get_fhog(im))
            return x * self.cos_window[:, :, None]
        if self.feature == 'gray':
            # Normalise to [0, 1] and subtract the mean intensity.
            x = np.double(im) / 255
            x = x - np.mean(x)
            return x * self.cos_window[:, :, None]
        if self.feature == 'deep':
            # VGG19 feature maps, scaled to a maximum of 1.
            x = self.get_deep_feature(im)
            x = x / np.max(x)
            return x * self.cos_window[:, :, None]
    def get_fhog(self, im_patch):
        """Return Felzenszwalb HOG features of the patch (input scaled to [0, 1])."""
        H = get_hog(im_patch/255)
        return H
    def gaussian_correlation(self, xf, yf):
        """Gaussian-kernel correlation of two feature maps in the Fourier domain.

        :param xf, yf: FFTs of feature arrays with identical shape (H, W, C).
        :return: FFT of the kernel correlation surface.
        """
        N = xf.shape[0] * xf.shape[1]
        xff = xf.reshape([xf.shape[0] * xf.shape[1] * xf.shape[2], 1], order='F')
        xff_T = xff.conj().T
        yff = yf.reshape([yf.shape[0] * yf.shape[1] * yf.shape[2], 1], order='F')
        yff_T = yff.conj().T
        xx = np.dot(xff_T, xff).real / N # squared norm of x
        yy = np.dot(yff_T, yff).real / N # squared norm of y
        # cross-correlation term in Fourier domain
        xyf = xf * conj(yf)
        ixyf = ifftn(xyf, axes=(0, 1))
        rxyf = real(ixyf)
        xy = np.sum(rxyf, 2) # to spatial domain (summed over channels)
        # calculate gaussian response for all positions, then go back to the Fourier domain
        sz = xf.shape[0] * xf.shape[1] * xf.shape[2]
        mltp = (xx + yy - 2 * xy) / sz
        crpm = -1 / (self.sigma * self.sigma)
        expe = crpm * np.maximum(0, mltp)
        expx = np.exp(expe)
        kf = fftn(expx, axes=(0, 1))
        return kf
    def get_deep_feature(self, im):
        """Extract a VGG19 feature map for the patch, resized to the filter size.

        :param im: Image patch; resized to the 224x224 VGG input size.
        :return: Feature map resized to the cosine-window dimensions.
        """
        # Preprocessing
        from numpy import expand_dims
        # img = im.astype('float32')  # note: [0, 255] range
        img = im # note: [0, 255] range
        img = cv2.resize(img, (224, 224))
        img = expand_dims(img, axis=0)
        feature_maps = vgg_model.predict(img)
        # Use the fourth tapped layer's activations for the first batch item.
        f_map = feature_maps[3][0][:][:][:]
        feature_map_n = cv2.resize(f_map, (self.cos_window.shape[1], self.cos_window.shape[0]),
                                   interpolation=cv2.INTER_LINEAR)
        return feature_map_n
| [
"numpy.sqrt",
"scipy.io.loadmat",
"cv2.imshow",
"numpy.array",
"numpy.divide",
"numpy.mean",
"numpy.multiply",
"numpy.fft.fftn",
"numpy.fft.fft2",
"numpy.exp",
"numpy.real",
"numpy.max",
"numpy.dot",
"keras.models.Model",
"keras.applications.vgg19.VGG19",
"numpy.maximum",
"cv2.waitKe... | [((423, 443), 'scipy.io.loadmat', 'io.loadmat', (['vgg_path'], {}), '(vgg_path)\n', (433, 443), False, 'from scipy import io\n'), ((456, 466), 'keras.applications.vgg19.VGG19', 'VGG19', (['mat'], {}), '(mat)\n', (461, 466), False, 'from keras.applications.vgg19 import VGG19\n'), ((560, 603), 'keras.models.Model', 'Model', ([], {'inputs': 'model.inputs', 'outputs': 'outputs'}), '(inputs=model.inputs, outputs=outputs)\n', (565, 603), False, 'from keras.models import Model\n'), ((2396, 2421), 'cv2.imread', 'cv2.imread', (['frame_list[0]'], {}), '(frame_list[0])\n', (2406, 2421), False, 'import cv2\n'), ((3398, 3413), 'numpy.array', 'np.array', (['poses'], {}), '(poses)\n', (3406, 3413), True, 'import numpy as np\n'), ((3541, 3579), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (3553, 3579), False, 'import cv2\n'), ((3605, 3621), 'numpy.array', 'np.array', (['[h, w]'], {}), '([h, w])\n', (3613, 3621), True, 'import numpy as np\n'), ((3652, 3668), 'numpy.array', 'np.array', (['[h, w]'], {}), '([h, w])\n', (3660, 3668), True, 'import numpy as np\n'), ((4349, 4390), 'numpy.floor', 'np.floor', (['(self.window_sz / self.cell_size)'], {}), '(self.window_sz / self.cell_size)\n', (4357, 4390), True, 'import numpy as np\n'), ((4535, 4585), 'utils.gaussian2d_rolled_labels', 'gaussian2d_rolled_labels', (['yf_sz', 'self.output_sigma'], {}), '(yf_sz, self.output_sigma)\n', (4559, 4585), False, 'from utils import gaussian2d_rolled_labels, cos_window\n'), ((4604, 4615), 'numpy.fft.fft2', 'fft2', (['gauss'], {}), '(gauss)\n', (4608, 4615), False, 'from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift\n'), ((4684, 4732), 'utils.cos_window', 'cos_window', (['[self.yf.shape[1], self.yf.shape[0]]'], {}), '([self.yf.shape[1], self.yf.shape[0]])\n', (4694, 4732), False, 'from utils import gaussian2d_rolled_labels, cos_window\n'), ((4936, 4959), 'numpy.fft.fftn', 'fftn', (['feat'], {'axes': '(0, 1)'}), '(feat, 
axes=(0, 1))\n', (4940, 4959), False, 'from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift\n'), ((5092, 5129), 'numpy.divide', 'np.divide', (['self.yf', '(kf + self.lambda_)'], {}), '(self.yf, kf + self.lambda_)\n', (5101, 5129), True, 'import numpy as np\n'), ((5245, 5283), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (5257, 5283), False, 'import cv2\n'), ((6569, 6592), 'numpy.fft.fftn', 'fftn', (['feat'], {'axes': '(0, 1)'}), '(feat, axes=(0, 1))\n', (6573, 6592), False, 'from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift\n'), ((6783, 6820), 'numpy.divide', 'np.divide', (['self.yf', '(kf + self.lambda_)'], {}), '(self.yf, kf + self.lambda_)\n', (6792, 6820), True, 'import numpy as np\n'), ((8719, 8742), 'hog_cpp.fhog.get_hog.get_hog', 'get_hog', (['(im_patch / 255)'], {}), '(im_patch / 255)\n', (8726, 8742), False, 'from hog_cpp.fhog.get_hog import get_hog\n'), ((9281, 9304), 'numpy.fft.ifftn', 'ifftn', (['xyf'], {'axes': '(0, 1)'}), '(xyf, axes=(0, 1))\n', (9286, 9304), False, 'from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift\n'), ((9320, 9330), 'numpy.real', 'real', (['ixyf'], {}), '(ixyf)\n', (9324, 9330), False, 'from numpy import conj, real\n'), ((9344, 9359), 'numpy.sum', 'np.sum', (['rxyf', '(2)'], {}), '(rxyf, 2)\n', (9350, 9359), True, 'import numpy as np\n'), ((9669, 9681), 'numpy.exp', 'np.exp', (['expe'], {}), '(expe)\n', (9675, 9681), True, 'import numpy as np\n'), ((9695, 9718), 'numpy.fft.fftn', 'fftn', (['expx'], {'axes': '(0, 1)'}), '(expx, axes=(0, 1))\n', (9699, 9718), False, 'from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift\n'), ((9952, 9979), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (9962, 9979), False, 'import cv2\n'), ((9995, 10019), 'numpy.expand_dims', 'expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (10006, 10019), False, 'from numpy import expand_dims\n'), ((10134, 10241), 'cv2.resize', 'cv2.resize', 
(['f_map', '(self.cos_window.shape[1], self.cos_window.shape[0])'], {'interpolation': 'cv2.INTER_LINEAR'}), '(f_map, (self.cos_window.shape[1], self.cos_window.shape[0]),\n interpolation=cv2.INTER_LINEAR)\n', (10144, 10241), False, 'import cv2\n'), ((3748, 3762), 'numpy.sqrt', 'np.sqrt', (['(h * w)'], {}), '(h * w)\n', (3755, 3762), True, 'import numpy as np\n'), ((3855, 3877), 'numpy.floor', 'np.floor', (['(self.pos / 2)'], {}), '(self.pos / 2)\n', (3863, 3877), True, 'import numpy as np\n'), ((3907, 3935), 'numpy.floor', 'np.floor', (['(self.target_sz / 2)'], {}), '(self.target_sz / 2)\n', (3915, 3935), True, 'import numpy as np\n'), ((3985, 4061), 'cv2.resize', 'cv2.resize', (['self.image', '(self.image.shape[1] // 2, self.image.shape[0] // 2)'], {}), '(self.image, (self.image.shape[1] // 2, self.image.shape[0] // 2))\n', (3995, 4061), False, 'import cv2\n'), ((4147, 4192), 'numpy.multiply', 'np.multiply', (['self.target_sz', '(1 + self.padding)'], {}), '(self.target_sz, 1 + self.padding)\n', (4158, 4192), True, 'import numpy as np\n'), ((4410, 4451), 'numpy.floor', 'np.floor', (['(self.window_sz / self.cell_size)'], {}), '(self.window_sz / self.cell_size)\n', (4418, 4451), True, 'import numpy as np\n'), ((4474, 4515), 'numpy.floor', 'np.floor', (['(self.window_sz / self.cell_size)'], {}), '(self.window_sz / self.cell_size)\n', (4482, 4515), True, 'import numpy as np\n'), ((5333, 5409), 'cv2.resize', 'cv2.resize', (['self.image', '(self.image.shape[1] // 2, self.image.shape[0] // 2)'], {}), '(self.image, (self.image.shape[1] // 2, self.image.shape[0] // 2))\n', (5343, 5409), False, 'import cv2\n'), ((5678, 5721), 'numpy.fft.ifftn', 'ifftn', (['(self.model_alphaf * kzf)'], {'axes': '(0, 1)'}), '(self.model_alphaf * kzf, axes=(0, 1))\n', (5683, 5721), False, 'from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift\n'), ((5901, 5931), 'numpy.argmax', 'np.argmax', (['response'], {'axis': 'None'}), '(response, axis=None)\n', (5910, 5931), True, 'import numpy as 
np\n'), ((7112, 7136), 'numpy.multiply', 'np.multiply', (['self.pos', '(2)'], {}), '(self.pos, 2)\n', (7123, 7136), True, 'import numpy as np\n'), ((7644, 7663), 'numpy.floor', 'np.floor', (['(sz[0] / 2)'], {}), '(sz[0] / 2)\n', (7652, 7663), True, 'import numpy as np\n'), ((7700, 7719), 'numpy.floor', 'np.floor', (['(sz[1] / 2)'], {}), '(sz[1] / 2)\n', (7708, 7719), True, 'import numpy as np\n'), ((7892, 7906), 'numpy.size', 'np.size', (['im', '(1)'], {}), '(im, 1)\n', (7899, 7906), True, 'import numpy as np\n'), ((7949, 7963), 'numpy.size', 'np.size', (['im', '(0)'], {}), '(im, 0)\n', (7956, 7963), True, 'import numpy as np\n'), ((9257, 9265), 'numpy.conj', 'conj', (['yf'], {}), '(yf)\n', (9261, 9265), False, 'from numpy import conj, real\n'), ((9634, 9653), 'numpy.maximum', 'np.maximum', (['(0)', 'mltp'], {}), '(0, mltp)\n', (9644, 9653), True, 'import numpy as np\n'), ((2626, 2653), 'cv2.imread', 'cv2.imread', (['frame_list[idx]'], {}), '(frame_list[idx])\n', (2636, 2653), False, 'import cv2\n'), ((6026, 6040), 'numpy.size', 'np.size', (['zf', '(0)'], {}), '(zf, 0)\n', (6033, 6040), True, 'import numpy as np\n'), ((6139, 6153), 'numpy.size', 'np.size', (['zf', '(0)'], {}), '(zf, 0)\n', (6146, 6153), True, 'import numpy as np\n'), ((6179, 6193), 'numpy.size', 'np.size', (['zf', '(1)'], {}), '(zf, 1)\n', (6186, 6193), True, 'import numpy as np\n'), ((6267, 6281), 'numpy.size', 'np.size', (['zf', '(1)'], {}), '(zf, 1)\n', (6274, 6281), True, 'import numpy as np\n'), ((6329, 6364), 'numpy.array', 'np.array', (['[vert_delta, horiz_delta]'], {}), '([vert_delta, horiz_delta])\n', (6337, 6364), True, 'import numpy as np\n'), ((7619, 7635), 'numpy.floor', 'np.floor', (['pos[0]'], {}), '(pos[0])\n', (7627, 7635), True, 'import numpy as np\n'), ((7675, 7691), 'numpy.floor', 'np.floor', (['pos[1]'], {}), '(pos[1])\n', (7683, 7691), True, 'import numpy as np\n'), ((8412, 8425), 'numpy.double', 'np.double', (['im'], {}), '(im)\n', (8421, 8425), True, 'import numpy as np\n'), 
((8452, 8462), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (8459, 8462), True, 'import numpy as np\n'), ((8611, 8620), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (8617, 8620), True, 'import numpy as np\n'), ((9076, 9094), 'numpy.dot', 'np.dot', (['xff_T', 'xff'], {}), '(xff_T, xff)\n', (9082, 9094), True, 'import numpy as np\n'), ((9138, 9156), 'numpy.dot', 'np.dot', (['yff_T', 'yff'], {}), '(yff_T, yff)\n', (9144, 9156), True, 'import numpy as np\n'), ((3702, 3717), 'numpy.floor', 'np.floor', (['(h / 2)'], {}), '(h / 2)\n', (3710, 3717), True, 'import numpy as np\n'), ((3721, 3736), 'numpy.floor', 'np.floor', (['(w / 2)'], {}), '(w / 2)\n', (3729, 3736), True, 'import numpy as np\n'), ((7870, 7884), 'numpy.size', 'np.size', (['im', '(1)'], {}), '(im, 1)\n', (7877, 7884), True, 'import numpy as np\n'), ((7927, 7941), 'numpy.size', 'np.size', (['im', '(0)'], {}), '(im, 0)\n', (7934, 7941), True, 'import numpy as np\n'), ((3173, 3203), 'cv2.imshow', 'cv2.imshow', (['"""demo"""', 'show_frame'], {}), "('demo', show_frame)\n", (3183, 3203), False, 'import cv2\n'), ((3228, 3242), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3239, 3242), False, 'import cv2\n'), ((4236, 4282), 'numpy.sqrt', 'np.sqrt', (['(self.target_sz[0] * self.target_sz[1])'], {}), '(self.target_sz[0] * self.target_sz[1])\n', (4243, 4282), True, 'import numpy as np\n'), ((2920, 2967), 'cv2.cvtColor', 'cv2.cvtColor', (['current_frame', 'cv2.COLOR_GRAY2BGR'], {}), '(current_frame, cv2.COLOR_GRAY2BGR)\n', (2932, 2967), False, 'import cv2\n')] |
import html
from tqdm import tqdm
from .color_picker import color_for, html_color_for, mean_color_for
def export_report(filename, chars, dict_colors, include_key=False):
    """
    Export a HTML report showing all the extraction usages for the file.
    :param filename: Output filename
    :param chars: Characters array (should be HighlightedFile.chars)
    :param dict_colors: Dictionary specifying colors to use (should be HighlightedFile.dict_colors)
    :param include_key: Whether to include a key at the bottom defining the usages of the colors
    This basically loops through all of the characters in the characters array, and then creates
    the relevant <span> tags for each character based on the usages stored for that character.
    """
    output_strings = []
    html_header = """<html>
    <head>
    </head>
    <body style="font-family: Courier">
    """
    output_strings.append(html_header)
    last_hash = ""
    for char in tqdm(chars):
        letter = char.letter
        this_hash = ""
        this_message = ""
        colors = []
        multi_usages = len(char.usages) > 1
        for usage in char.usages:
            this_hash += usage.tool_field
            needs_new_line = this_message != ""
            colors.append(color_for(usage.tool_field, dict_colors))
            if needs_new_line:
                # Separate each usage onto its own line in the tooltip text.
                # (Restored: the newline literal was broken in the original.)
                this_message += "\n"
            if multi_usages:
                this_message += "-"
            # NOTE(review): this_message is injected into a title='...'
            # attribute unescaped; verify messages cannot contain quotes.
            this_message += usage.tool_field + ", " + usage.message
        # do we have anything to shade?
        if this_hash != "":
            # generate/retrieve a color for this hash
            new_color = mean_color_for(colors)
            hex_color = html_color_for(new_color)
            # are we already in hash?
            if last_hash != "":
                # is it different to this one?
                if last_hash != this_hash:
                    # ok, close the span
                    output_strings.append("</span>")
                    # start a new span
                    output_strings.append(
                        f"<span title='{this_message}' style=\"background-color:{hex_color}\">"
                    )
            else:
                output_strings.append(
                    f"<span title='{this_message}' style=\"background-color:{hex_color}\">"
                )
        elif last_hash != "":
            output_strings.append("</span>")
        # just check if it's newline
        if letter == "\n":
            output_strings.append("<br>")
        else:
            # Escape the letter as otherwise the XML from XML files gets
            # interpreted by browsers as (invalid) HTML
            output_strings.append(html.escape(letter))
        last_hash = this_hash
    if last_hash != "":
        output_strings.append("</span>")
    # also provide a key
    if include_key:
        output_strings.append("<hr/><h3>Color Key</h3><ul>")
        for key in dict_colors:
            color = dict_colors[key]
            hex_color = html_color_for(color)
            output_strings.append(
                f'<li><span style="background-color:{hex_color}">{key}</span></li>'
            )
        output_strings.append("</ul>")
    html_footer = """</body>
    </html>"""
    output_strings.append(html_footer)
    with open(filename, "w") as f:
        f.write("".join(output_strings))
| [
"tqdm.tqdm",
"html.escape"
] | [((962, 973), 'tqdm.tqdm', 'tqdm', (['chars'], {}), '(chars)\n', (966, 973), False, 'from tqdm import tqdm\n'), ((2720, 2739), 'html.escape', 'html.escape', (['letter'], {}), '(letter)\n', (2731, 2739), False, 'import html\n')] |
"""Utility functions used in Activity 7."""
import random
import numpy as np
from matplotlib import pyplot as plt
from keras.callbacks import TensorBoard
def create_groups(data, group_size=7):
    """Create distinct groups from a continuous series.

    Parameters
    ----------
    data: np.array
        Series of continuous observations.
    group_size: int, default 7
        Determines how large the groups are. That is,
        how many observations each group contains.

    Returns
    -------
    A Numpy array of shape (1, n_complete_groups, group_size).
    Any trailing partial group is discarded.
    """
    # Only complete groups are kept; the tail remainder is dropped.
    n_complete = len(data) // group_size
    samples = [list(data[i * group_size:(i + 1) * group_size])
               for i in range(n_complete)]
    # Single leading axis matches the batch dimension expected downstream.
    return np.array(samples).reshape(1, n_complete, group_size)
def split_lstm_input(groups):
    """Split groups into LSTM training input and target.

    Parameters
    ----------
    groups: np.array
        Array of shape (1, a, b) holding the organized sequences.

    Returns
    -------
    X, Y: np.array
        X has shape (1, a - 1, b) — all groups but the last —
        and Y has shape (1, b) — the final group, used as the
        prediction target.
    """
    n_groups = groups.shape[1]
    obs_per_group = groups.shape[2]
    # Everything except the last group is the input sequence.
    X = groups[0:, :-1].reshape(1, n_groups - 1, obs_per_group)
    # The last group is the prediction target.
    Y = groups[0, -1:, :]
    return X, Y
def mape(A, B):
    """Calculate the mean absolute percentage error from two series."""
    relative_errors = np.abs((A - B) / A)
    return 100 * np.mean(relative_errors)
def rmse(A, B):
    """Calculate the root mean square error from two series."""
    squared_diff = np.square(np.subtract(A, B))
    return np.sqrt(squared_diff.mean())
def train_model(model, X, Y, epochs=100, version=0, run_number=0):
    """Shorthand function for training a new model.

    This function names each run of the model
    using the TensorBoard naming conventions.

    Parameters
    ----------
    model: Keras model instance
        Compiled Keras model.
    X, Y: np.array
        Series of observations to be used in
        the training process.
    epochs: int, default 100
        Number of training epochs.
    version: int
        Version of the model to run.
    run_number: int
        The number of the run. Used in case
        the same model version is run again.

    Returns
    -------
    The Keras ``History`` object returned by ``model.fit``.
    """
    # Random 128-bit id keeps repeated runs distinguishable in TensorBoard.
    # (Renamed from `hash`, which shadowed the builtin.)
    run_id = random.getrandbits(128)
    hex_code = f'{run_id:032x}'
    model_name = f'bitcoin_lstm_v{version}_run_{run_number}_{hex_code[:6]}'
    tensorboard = TensorBoard(log_dir=f'./logs/{model_name}')
    model_history = model.fit(
        x=X, y=Y,
        batch_size=1, epochs=epochs,
        callbacks=[tensorboard],
        shuffle=False)
    return model_history
def plot_two_series(A, B, variable, title):
    """Plot two series using the same `date` index.

    Parameters
    ----------
    A, B: pd.DataFrame
        Dataframe with a `date` key and a variable
        passed in the `variable` parameter. Parameter A
        represents the "Observed" series and B the "Predicted"
        series. These will be labelled respectively.
    variable: str
        Variable to use in plot.
    title: str
        Plot title.
    """
    plt.figure(figsize=(14, 4))
    plt.xlabel('Observed and predicted')
    ax1 = A.set_index('date')[variable].plot(
        color='#d35400', grid=True, label='Observed', title=title)
    # Second series draws onto the same axes; the return value was an
    # unused local in the original and has been dropped.
    B.set_index('date')[variable].plot(
        color='grey', grid=True, label='Predicted')
    ax1.set_xlabel("Predicted Week")
    ax1.set_ylabel("Predicted Values")
    plt.legend()
    plt.show()
def denormalize(reference, series,
                normalized_variable='close_point_relative_normalization',
                denormalized_variable='close'):
    """Denormalize the values for a given series.

    Looks up, in *reference*, the row whose ``iso_week`` matches the
    (single) week of *series*, takes its ``denormalized_variable`` value
    as the anchor price, and rescales the normalized series around it.

    Parameters
    ----------
    reference: pd.DataFrame
        DataFrame holding an `iso_week` column and the USD price
        reference we are interested in.
    series: pd.DataFrame
        DataFrame with the predicted series; must share the columns
        of the `reference` dataset.
    normalized_variable: str, default 'close_point_relative_normalization'
        Column holding the normalized values.
    denormalized_variable: str, default `close`
        Column to write the de-normalized values into.

    Returns
    -------
    The *series* DataFrame, modified in place with the de-normalized
    column filled in.
    """
    week = series['iso_week'].values[0]
    matching_rows = reference[reference['iso_week'] == week]
    anchor = matching_rows[denormalized_variable].values[0]
    series[denormalized_variable] = anchor * (series[normalized_variable] + 1)
    return series
| [
"numpy.abs",
"matplotlib.pyplot.xlabel",
"numpy.subtract",
"keras.callbacks.TensorBoard",
"numpy.array",
"matplotlib.pyplot.figure",
"random.getrandbits",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((779, 796), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (787, 796), True, 'import numpy as np\n'), ((2298, 2321), 'random.getrandbits', 'random.getrandbits', (['(128)'], {}), '(128)\n', (2316, 2321), False, 'import random\n'), ((2447, 2490), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'f"""./logs/{model_name}"""'}), "(log_dir=f'./logs/{model_name}')\n", (2458, 2490), False, 'from keras.callbacks import TensorBoard\n'), ((3136, 3163), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 4)'}), '(figsize=(14, 4))\n', (3146, 3163), True, 'from matplotlib import pyplot as plt\n'), ((3168, 3204), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Observed and predicted"""'], {}), "('Observed and predicted')\n", (3178, 3204), True, 'from matplotlib import pyplot as plt\n'), ((3500, 3512), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3510, 3512), True, 'from matplotlib import pyplot as plt\n'), ((3517, 3527), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3525, 3527), True, 'from matplotlib import pyplot as plt\n'), ((1547, 1566), 'numpy.abs', 'np.abs', (['((A - B) / A)'], {}), '((A - B) / A)\n', (1553, 1566), True, 'import numpy as np\n'), ((1685, 1702), 'numpy.subtract', 'np.subtract', (['A', 'B'], {}), '(A, B)\n', (1696, 1702), True, 'import numpy as np\n'), ((720, 736), 'numpy.array', 'np.array', (['sample'], {}), '(sample)\n', (728, 736), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import itertools
__metaclass__ = type
def prob_incr(species, proj_compressed_data, min_occurences = 10):
    """Estimate the per-state probability that *species* increases.

    Divides the increase count by the total count for every projected
    state.  States observed fewer than *min_occurences* times are marked
    with the sentinel value -1 (insufficient data).

    Note: *species* is accepted for interface symmetry but not used here.
    """
    totals = proj_compressed_data['count']
    probability = proj_compressed_data['count_incr'] / totals
    # Replace under-sampled states with the -1 sentinel.
    return probability.mask(totals < min_occurences, -1)
def score(species,IV, G, exp_digitized, bins, thresholds): # G: control species set
    """Score an influence vector *IV* for *species* against control set *G*.

    Counts "votes" for (v_f), against (v_a), and neutral (v_n) over the
    digitized experiment data: for each control species, the probability of
    increase is compared between a baseline parent configuration and every
    other state, and the ratio is tested against the Ta/Tr thresholds.
    Returns (v_f - v_a) / (v_f + v_a + v_n), i.e. a value in [-1, 1];
    an all-zero IV returns the prior thresholds['Ti'], and zero total votes
    return 0.  Note: mutates *IV* in place (nulls are set to 0).
    """
    (v_f, v_a, v_n) = (0, 0, 0)
    IV[IV.isnull()] = 0
    if (IV == 0).all(): return thresholds['Ti']
    n_repressors = IV[IV == -1].count()
    n_activators = IV[IV == 1].count()
    # G.extend(parents(IV) )
    GG = G[:]
    GG.extend(parents(IV))
    GG = np.unique(GG)
    pcd = exp_digitized.project(species,GG)
    pcd['prob_incr'] = prob_incr(species, pcd, min_occurences = 1)
    # if (GG == ['CI','LacI']).all(): print pcd
    # Build the baseline query: parents pinned to the levels at which the
    # majority influence type is least active.
    query_parents= ""
    if n_repressors > n_activators:
        query_parents = " & ".join(['%s == %s' %(sp, bins[sp][0] ) for sp in IV[IV == -1].index ] ) # lowest level for repressors
        query_act = " & ".join(['%s == %s' %(sp, bins[sp][-2]) for sp in IV[IV == 1].index ] ) # highest level for activators
        if query_act != "": query_parents += (" & " + query_act )
    else:
        query_parents = " & ".join(['%s == %s' %(sp, bins[sp][0] ) for sp in IV[IV == 1].index ] ) # lowest level for activators
        query_rep = " & ".join(['%s == %s' %(sp, bins[sp][-1]) for sp in IV[IV == -1].index ] ) # highest level for repressors
        if query_rep != "": query_parents += (" & " + query_rep)
    for g in G:
        # NOTE(review): `len(parents(IV) == 1)` counts parents (element-wise
        # compare then len); likely intended `len(parents(IV)) == 1` — confirm.
        if (len(parents(IV) == 1) and g == parents(IV)[0]): # single-influence and self-regulating
            idx_base = pcd.query(query_parents).index
            p_base = pcd.at[idx_base[0], 'prob_incr']
            idx_test = np.setdiff1d(pcd.index, idx_base)
            if p_base != -1:  # -1 marks under-sampled states (see prob_incr)
                for i in idx_test:
                    p_a = pcd.loc[i,'prob_incr']
                    # print "p_a / p_base = %s / %s" % (p_a, p_base)
                    if p_a != -1 :
                        if n_repressors < n_activators:
                            if (p_a / p_base) > thresholds['Ta']: v_f += 1; # print "Voted for"
                            elif (p_a / p_base) < thresholds['Tr']: v_a +=1; # print "Voted against"
                            else: v_n += 1; # print "Voted neutral"
                        else:
                            if (p_a / p_base) < thresholds['Tr']: v_f += 1; # print "Voted for"
                            elif (p_a / p_base) > thresholds['Ta']: v_a +=1; # print "Voted against"
                            else: v_n += 1; # print "Voted neutral"
        else:
            # Compare baseline vs. every other state for each level of control g.
            for b in bins[g]:
                query_cntl = '%s == %s' % (g,b)
                if ( g in parents(IV)):
                    query_str = query_cntl
                else:
                    #p_base = float(pcd.query(query_parents+ ' & ' + query_cntl )['prob_incr'])
                    query_str = (query_parents+ ' & ' + query_cntl, query_cntl)[query_parents == ""]
                idx_base = pcd.query(query_str).index
                p_base = pcd.at[idx_base[0], 'prob_incr']
                if p_base != -1:
                    # if p_base == 0: p_base += pseudo_count
                    idx_test = np.setdiff1d(pcd.query(query_cntl).index, idx_base)
                    for i in idx_test:
                        # pcd.loc[i, 'ratio'] = pcd.loc[i,'prob_incr'] / p_base
                        p_a = pcd.loc[i,'prob_incr']
                        # print "p_a / p_base = %s / %s" % (p_a, p_base)
                        if p_a != -1 :
                            # print pcd.loc[idx, 'prob_incr']/ p_base
                            if n_repressors < n_activators:
                                if (p_a / p_base) > thresholds['Ta']: v_f += 1; # print "Voted for"
                                elif (p_a / p_base) < thresholds['Tr']: v_a +=1; # print "Voted against"
                                else: v_n += 1; # print "Voted neutral"
                            else:
                                if (p_a / p_base) < thresholds['Tr']: v_f += 1; # print "Voted for"
                                elif (p_a / p_base) > thresholds['Ta']: v_a +=1; # print "Voted against"
                                else: v_n += 1; # print "Voted neutral"
    # print "IV: %s" % IV
    # print (v_f, v_a, v_n)
    if (v_f + v_a + v_n == 0): return 0.
    score = (v_f - v_a + 0.) / (v_f + v_a + v_n )
    # NOTE(review): `g` here is the leaked value from the last loop iteration —
    # presumably intentional for the single-control case; confirm.
    if (len(parents(IV) == 1) and g == parents(IV)[0]): score *= 0.75 # down weight single-influence and self-regulating
    return score
def parents(infl):
    """Return the index labels of the non-null, nonzero entries of *infl*,
    i.e. the species that actually influence the target."""
    active = infl.notnull() & (infl != 0)
    return infl[active].index
def createIVSet(species, exp_digitized,IV0, bins, thresholds):
    """Generate candidate single-influence vectors for *species*.

    Starting from the partially-known vector *IV0*, each unknown (null)
    entry is tried first as an activator (+1) and, failing that, as a
    repressor (-1).  A candidate is kept only when its score is at least
    the score of the all-zero baseline.

    Returns a pair (candidate vectors, matching scores).
    """
    candidates = []
    candidate_scores = []
    unknown = IV0[IV0.isnull()].index  # species names still undecided
    base_iv = IV0.copy()
    base_iv[unknown] = 0
    control = [species]
    baseline = score(species, base_iv, control, exp_digitized, bins, thresholds)
    for gene in unknown:
        trial = base_iv.copy()
        # First hypothesis: gene activates the target.
        trial.loc[gene] = 1
        activator_score = score(species, trial, control, exp_digitized, bins, thresholds)
        if activator_score >= baseline:
            candidates.append(trial)
            candidate_scores.append(activator_score)
            continue
        # Second hypothesis: gene represses the target.
        trial.loc[gene] = -1
        repressor_score = score(species, trial, control, exp_digitized, bins, thresholds)
        if repressor_score >= baseline:
            candidates.append(trial)
            candidate_scores.append(repressor_score)
    return (candidates, candidate_scores)
# IV[IV.isnull()] = 0
def combineIVs(species, IVs, IVscores, IV0,exp_digitized, bins, thresholds):
    '''Score every possible combination of influence vectors in *IVs*.

    Tries all combinations of 2 .. min(Tj, unknown-count) single-influence
    candidates, summing their contributions over the unknown positions of
    *IV0*.  A combined vector is accepted when the member scores are within
    Tm of each other and the combined score beats every member score and
    the all-zero background score.

    Returns (combined vectors, their scores, indices of members to drop).
    '''
    I = []
    scores = []
    to_remove = []
    tj = len(IV0[IV0.notnull()])  # number of already-decided entries
    bg_score = 0.
    bg_iv = IV0.copy()
    bg_iv[IV0.isnull()] = 0
    bg_score = score(species, bg_iv, [species], exp_digitized, bins, thresholds)
    for i in range(2, min(thresholds['Tj'], len(IV0)- tj+1)):
        K = itertools.combinations(range(len(IVs)), i)
        for k in K:
            old_scores = np.zeros((len(k),))
            # Boolean indexing returns a copy, so zeroing is safe here.
            added = IVs[0][IV0.isnull()]; added[:] = 0 # combined vector
            for j in range(len(k)):
                added += IVs[k[j]][IV0.isnull()]
                old_scores[j] = IVscores[k[j]]
            new_iv = pd.concat((added , IV0[IV0.notnull()]))
            if (max(old_scores) - min(old_scores)) <= thresholds['Tm']:
                new_score = score(species, new_iv, [species] ,exp_digitized, bins, thresholds)
                if ((new_score >= old_scores).all() and (new_score > bg_score)):
                    I.append(new_iv)
                    scores.append(new_score)
                    to_remove.extend(k)
    return (I, scores, set(to_remove))
def competeIVs(species, iv1, iv2, exp_digitized, bins, thresholds):
    """Score *iv1* and *iv2* head to head.

    Each vector is scored over the target species plus the parents unique to
    its rival.  Returns (0, s1) if iv1 wins, (1, s2) if iv2 wins, and
    ([0, 1], [s1, s2]) on a tie.
    """
    genes1 = [species] + list(np.setdiff1d(parents(iv2), parents(iv1)))
    s1 = score(species, iv1, genes1, exp_digitized, bins, thresholds)
    genes2 = [species] + list(np.setdiff1d(parents(iv1), parents(iv2)))
    s2 = score(species, iv2, genes2, exp_digitized, bins, thresholds)
    if s1 > s2:
        return (0, s1)
    if s1 < s2:
        return (1, s2)
    return ([0, 1], [s1, s2])
def learn(experiments, initialNetwork, thresholds=None, nbins=4, bin_assignment=1):
    '''Learning of causal network from a set of time series data, each resulted from an independent experiment
    The algorithm learn influence vectors for one gene at a time.
    For each gene, there are 3 main stages of learning:
    (1) Adding single influence to create set of new influence vectors
    (2) Combining influence vectors from stage (1) to create new influence vectors (with more than 1 parents)
    (3) competing between influence vectors to determine the best one
    '''
    # Avoid a shared mutable default argument; these are the original defaults.
    if thresholds is None:
        thresholds = {'Tr': 0.75, 'Ta': 1.15, 'Tj': 2, 'Ti': 0.5, 'Tm': 0.}
    cnet = initialNetwork.copy()
    # Bug fix: bin_assignment was hard-coded to 1, silently ignoring the
    # caller-supplied value.
    binned = experiments.digitize(nbins=nbins, bin_assignment=bin_assignment)
    bins = {sp: np.unique(binned[sp]) for sp in initialNetwork}
    for sp in initialNetwork:
        initial = initialNetwork.influences(sp)
        # Stage (1): candidate single-influence vectors.
        (IVs, scores) = createIVSet(sp, binned, initial, bins, thresholds)
        # Stage (2): candidate multi-parent combinations.
        (cIVs, cScores, to_remove) = combineIVs(sp, IVs, scores, initial, binned, bins, thresholds)
        # Keep the single-influence candidates not absorbed into a combination.
        for i in np.setdiff1d(range(len(IVs)), to_remove):
            cIVs.append(IVs[i])
            cScores.append(scores[i])
        # Stage (3): tournament -- pit the head candidate against the tail,
        # keep the winner in slot 0, drop the tail, until one remains.
        # (An unused np.argsort ranking computed here was removed.)
        while len(cIVs) > 1:
            winnerId, wScore = competeIVs(sp, cIVs[0], cIVs[-1], binned, bins, thresholds)
            if winnerId == 1:  # a tie returns [0, 1], which keeps the head
                cIVs[0] = cIVs[-1]
                cScores[0] = cScores[-1]
            cIVs = cIVs[:-1]
            cScores = cScores[:-1]
        if len(cIVs) > 0:
            cnet.loc[sp] = cIVs[0]
        else:
            # No candidate beat the background: keep the known entries and
            # zero out the unknowns.
            cnet.loc[sp] = initial.copy()
            cnet.loc[sp][initial.isnull()] = 0
    return cnet
class CausalNetwork(pd.DataFrame):
    def __init__(self, species):
        '''store influence vectors of each gene in a row, with value indicating relationship of gene in the column --> gene in the row. Example
        n = CausalNetwork(...)
             A      B     C
        A    0     -1     0
        B    0      1     1
        C   -1    None    0
        0: no relation ship
        1: activate
        -1: repress
        None: unknown
        '''
        # Start with every relationship unknown (NaN).  The original built the
        # NaN matrix as np.zeros(...)/0., which emits RuntimeWarnings; np.full
        # states the intent directly and yields the same float NaN matrix.
        data = np.full((len(species), len(species)), np.nan)
        super(CausalNetwork, self).__init__(data, columns=species, index=species)
    def activators(self, i):
        ''' return the activators of i'''
        pass  # not yet implemented
    def repressors(self, i):
        ''' return the repressors of i'''
        pass  # not yet implemented
    def influences(self, i):
        '''return the influence vector of i (the row labelled i)'''
        return self.loc[i]
    def __getitem__(self, i):
        # Row-wise access by gene name (note: shadows column access).
        return self.loc[i]
| [
"numpy.array",
"numpy.setdiff1d",
"numpy.unique"
] | [((640, 653), 'numpy.unique', 'np.unique', (['GG'], {}), '(GG)\n', (649, 653), True, 'import numpy as np\n'), ((8443, 8464), 'numpy.unique', 'np.unique', (['binned[sp]'], {}), '(binned[sp])\n', (8452, 8464), True, 'import numpy as np\n'), ((1796, 1829), 'numpy.setdiff1d', 'np.setdiff1d', (['pcd.index', 'idx_base'], {}), '(pcd.index, idx_base)\n', (1808, 1829), True, 'import numpy as np\n'), ((9252, 9269), 'numpy.array', 'np.array', (['cScores'], {}), '(cScores)\n', (9260, 9269), True, 'import numpy as np\n')] |
# Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from solum.objects import execution as abstract
from solum.objects.sqlalchemy import models as sql
class Execution(sql.Base, abstract.Execution):
    """Represent an execution in sqlalchemy.

    Maps one row of the ``execution`` table, exposed over the API as the
    ``executions`` resource collection.
    """
    __tablename__ = 'execution'
    __resource__ = 'executions'
    __table_args__ = sql.table_args()
    # Surrogate primary key.
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    # Externally visible identifier; 36 chars (presumably a UUID -- confirm).
    uuid = sa.Column(sa.String(36))
    # Owning pipeline (foreign key to pipeline.id).
    pipeline_id = sa.Column(sa.Integer, sa.ForeignKey('pipeline.id'))
class ExecutionList(abstract.ExecutionList):
    """Represent a list of executions in sqlalchemy."""
    @classmethod
    def get_all(cls, context):
        """Return an ExecutionList of all executions visible in *context*."""
        return ExecutionList(sql.model_query(context, Execution))
| [
"solum.objects.sqlalchemy.models.table_args",
"sqlalchemy.ForeignKey",
"sqlalchemy.String",
"solum.objects.sqlalchemy.models.model_query",
"sqlalchemy.Column"
] | [((892, 908), 'solum.objects.sqlalchemy.models.table_args', 'sql.table_args', ([], {}), '()\n', (906, 908), True, 'from solum.objects.sqlalchemy import models as sql\n'), ((919, 978), 'sqlalchemy.Column', 'sa.Column', (['sa.Integer'], {'primary_key': '(True)', 'autoincrement': '(True)'}), '(sa.Integer, primary_key=True, autoincrement=True)\n', (928, 978), True, 'import sqlalchemy as sa\n'), ((1000, 1013), 'sqlalchemy.String', 'sa.String', (['(36)'], {}), '(36)\n', (1009, 1013), True, 'import sqlalchemy as sa\n'), ((1055, 1083), 'sqlalchemy.ForeignKey', 'sa.ForeignKey', (['"""pipeline.id"""'], {}), "('pipeline.id')\n", (1068, 1083), True, 'import sqlalchemy as sa\n'), ((1266, 1301), 'solum.objects.sqlalchemy.models.model_query', 'sql.model_query', (['context', 'Execution'], {}), '(context, Execution)\n', (1281, 1301), True, 'from solum.objects.sqlalchemy import models as sql\n')] |
"""Tools for working with Cryptopunk NFTs; this includes utilities for data analysis and image preparation for training machine learning models using Cryptopunks as training data.
Functions:
get_punk(id)
pixel_to_img(pixel_str, dim)
flatten(img)
unflatten(img)
sort_dict_by_function_of_value(d, f)
add_index_to_colors(colors)
"""
import os
import time
import requests
from collections import OrderedDict
from bs4 import BeautifulSoup
from re import sub
import numpy as np
import pandas as pd
from matplotlib.colors import rgb2hex
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
__ROOT_DIR__ = os.path.dirname(os.path.abspath(__file__))
__PUNK_DIR__ = f"{__ROOT_DIR__}/images/training";
def camel_case(string):
    '''
    Convert string to camelCase: separators (_ and -) are collapsed to word
    boundaries, words are capitalised, and the first letter lower-cased.
    '''
    cleaned = string.strip("\n")
    spaced = sub(r"(_|-)+", " ", cleaned)
    joined = spaced.title().replace(" ", "")
    return joined[0].lower() + joined[1:]
def color_str_to_hex(s):
    '''
    Convert the string representation of a numpy pixel array
    (e.g. "[0.1 0.2 0.3 1.0]") to a hex colour string.
    '''
    components = [float(token) for token in s[1:-1].split(' ') if token != '']
    return rgb2hex(components)
def get_punk(id):
    '''
    Load training image for punk *id* and return it as an ndarray.
    Filenames are zero-padded to four digits (punk0001.png, ...).
    '''
    filename = f"{__PUNK_DIR__}/punk{id:04d}.png"
    return mpimg.imread(filename)
def pixel_to_img(pixel_str, dim = (24,24)):
    '''
    Take a pixel of format "[r g b a]" and return an image of size `dim`
    filled entirely with that pixel's colour.
    '''
    (x, y) = dim
    # np.fromstring(..., sep=' ') is deprecated (removed in NumPy 2.0);
    # parse the whitespace-separated tokens explicitly instead.
    c = np.array(pixel_str[1:-1].split(), dtype=float)
    return np.full((x, y, 4), c)
def pixel_to_ximg(pixel_strs, dim = (24,24), n=3 ):
    '''
    Take n*n pixels of format "[r g b a]" and return an image of size `dim`
    tiled as an n-by-n matrix of solid colour cells (row-major order).
    '''
    (x, y) = (dim[0] // n, dim[1] // n)
    rows = []
    for i in range(0, n):
        row = []
        for j in range(0, n):
            # np.fromstring(..., sep=' ') is deprecated (removed in NumPy
            # 2.0); parse the tokens explicitly instead.
            c = np.array(pixel_strs[i * n + j][1:-1].split(), dtype=float)
            row.append(np.full((x, y, 4), c))
        rows.append(np.concatenate(row, axis=1))
    return np.concatenate(rows, axis=0)
def flatten(img):
    '''
    Convert an (x, y, z) array holding a pixel in the z-dimension into an
    (x, y) array of string values per (i, j) -- a form that is easier to
    work with in ML training.
    '''
    rows = []
    for row in img:
        rows.append([str(pixel) for pixel in row])
    return np.array(rows)
def unflatten(img):
    '''
    Return a flattened image (strings of the form "[r g b a]") to a numeric
    array valid for .png display.
    '''
    # np.fromstring(..., sep=' ') is deprecated (removed in NumPy 2.0);
    # parse the whitespace-separated tokens explicitly instead.
    return np.array([[np.array(c[1:-1].split(), dtype=float)
                      for c in row] for row in img])
def sort_dict_by_function_of_value(d, f = len):
    '''
    Return a new dict with d's items ordered by ascending f(value).

    Bug fix: the sort key previously hard-coded len(item[1]) and silently
    ignored the caller-supplied *f*.
    '''
    sorted_tuples = sorted(d.items(),
                           key=lambda item: f(item[1]))
    return {k: v for k, v in sorted_tuples}
def add_index_to_colors(colors):
    '''
    Add a unique, sequential index to the entry for each color.  The
    returned dictionary has the form
    {`color_string`: {"id": `int`, "punkIds": `list[int]`}}.
    '''
    return {color: {'id': position, 'punkIds': punk_ids}
            for position, (color, punk_ids) in enumerate(colors.items())}
def get_attr_dict():
    '''
    Read the attribute CSV header and return an OrderedDict mapping every
    attribute name to -1 (the "absent" default).
    '''
    attrs = OrderedDict()
    with open(f"{__ROOT_DIR__}/data/list_attr_punx.csv") as f:
        header = f.read().strip('\n')
    for name in header.split(','):
        attrs[name] = -1
    return attrs
def get_punk_attrs(id):
    '''
    Retrieve `id` cryptopunk from larvalabs.com, parse the detail-page HTML
    and return a list of camelCased attribute names: the punk type first,
    followed by its accessories.

    NOTE(review): on a non-200 response this returns an empty dict, not a
    list -- iteration by callers still works, but the return type is
    inconsistent; confirm whether [] was intended.
    '''
    # CSS class of the block holding the punk-type link on the detail page.
    typeClass="col-md-10 col-md-offset-1 col-xs-12"
    punk_page=requests.get(f"https://www.larvalabs.com/cryptopunks/details/{id}")
    if(punk_page.status_code != 200):
        print(punk_page.status_code)
        return {}
    punk_html=punk_page.text
    soup = BeautifulSoup(punk_html, 'html.parser')
    details = soup.find(id="punkDetails")
    # The first anchor inside the type block names the punk type.
    punkType = camel_case(details.find(class_=typeClass).find('a').contents[0])
    attrs=[punkType]
    # Each anchor in the detail row names one accessory/attribute.
    attrTags = details.find(class_ = "row detail-row")
    for attrTag in attrTags.find_all('a'):
        attrs.append(camel_case(attrTag.contents[0]))
    return attrs
def get_punk_dict(id):
    '''
    Fetch punk *id*'s type and attributes and return a dictionary mapping
    every known attribute to (-1, 1), where 1 marks the attribute as
    present.  Starts from the module-level __ATTR_DICT__ template.
    '''
    attr_map = {key: __ATTR_DICT__[key] for key in __ATTR_DICT__}
    for attr in get_punk_attrs(id):
        attr_map[attr] = 1
    return attr_map
def get_punks(start, end):
    '''
    Retrieve attribute dictionaries for punks with ids in [start, end),
    sleeping between requests to throttle traffic to larvalabs.com.
    '''
    punks = {}
    for punk_id in range(start, end):
        print(punk_id)
        time.sleep(3.3)  # be polite to the remote server
        punks[punk_id] = get_punk_dict(punk_id)
    return punks
def plot_in_grid(n, images, predictions, labels):
    '''
    Plot `images` in an n*n grid with
    prediction and labels as header.

    NOTE(review): the loop reads images[1]..images[n*n], skipping index 0
    and requiring len(images) >= n*n + 1 -- confirm whether the 1-based
    indexing is intentional or an off-by-one.
    '''
    (x,y) = (n,n)
    fig = plt.figure(figsize=(9,14))
    i=0
    # The i=0 above and i=i+1 below are dead: the for statement rebinds i.
    for i in range(1,(x*y)+1):
        fig.add_subplot(x, y, i)
        plt.imshow(images[i])
        plt.title(f"{predictions[i][0]},{labels[i][0]}")
        plt.axis('off')
        i=i+1
    return fig
| [
"matplotlib.pyplot.imshow",
"collections.OrderedDict",
"matplotlib.pyplot.title",
"matplotlib.image.imread",
"requests.get",
"time.sleep",
"bs4.BeautifulSoup",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"re.sub",
"numpy.concatenate",
"os.path.abspath",
"numpy.full",
"numpy.froms... | [((655, 680), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (670, 680), False, 'import os\n'), ((1216, 1269), 'matplotlib.image.imread', 'mpimg.imread', (['f"""{__PUNK_DIR__}/punk{\'%04d\' % id}.png"""'], {}), '(f"{__PUNK_DIR__}/punk{\'%04d\' % id}.png")\n', (1228, 1269), True, 'import matplotlib.image as mpimg\n'), ((1484, 1530), 'numpy.fromstring', 'np.fromstring', (['pixel_str[1:-1]', 'float'], {'sep': '""" """'}), "(pixel_str[1:-1], float, sep=' ')\n", (1497, 1530), True, 'import numpy as np\n'), ((1542, 1563), 'numpy.full', 'np.full', (['(x, y, 4)', 'c'], {}), '((x, y, 4), c)\n', (1549, 1563), True, 'import numpy as np\n'), ((2069, 2094), 'numpy.concatenate', 'np.concatenate', (['m'], {'axis': '(0)'}), '(m, axis=0)\n', (2083, 2094), True, 'import numpy as np\n'), ((3209, 3222), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3220, 3222), False, 'from collections import OrderedDict\n'), ((3623, 3690), 'requests.get', 'requests.get', (['f"""https://www.larvalabs.com/cryptopunks/details/{id}"""'], {}), "(f'https://www.larvalabs.com/cryptopunks/details/{id}')\n", (3635, 3690), False, 'import requests\n'), ((3825, 3864), 'bs4.BeautifulSoup', 'BeautifulSoup', (['punk_html', '"""html.parser"""'], {}), "(punk_html, 'html.parser')\n", (3838, 3864), False, 'from bs4 import BeautifulSoup\n'), ((4930, 4957), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 14)'}), '(figsize=(9, 14))\n', (4940, 4957), True, 'import matplotlib.pyplot as plt\n'), ((4698, 4713), 'time.sleep', 'time.sleep', (['(3.3)'], {}), '(3.3)\n', (4708, 4713), False, 'import time\n'), ((5025, 5046), 'matplotlib.pyplot.imshow', 'plt.imshow', (['images[i]'], {}), '(images[i])\n', (5035, 5046), True, 'import matplotlib.pyplot as plt\n'), ((5051, 5099), 'matplotlib.pyplot.title', 'plt.title', (['f"""{predictions[i][0]},{labels[i][0]}"""'], {}), "(f'{predictions[i][0]},{labels[i][0]}')\n", (5060, 5099), True, 'import matplotlib.pyplot as 
plt\n'), ((5104, 5119), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5112, 5119), True, 'import matplotlib.pyplot as plt\n'), ((2031, 2056), 'numpy.concatenate', 'np.concatenate', (['l'], {'axis': '(1)'}), '(l, axis=1)\n', (2045, 2056), True, 'import numpy as np\n'), ((1930, 1988), 'numpy.fromstring', 'np.fromstring', (['pixel_strs[i * n + j][1:-1]', 'float'], {'sep': '""" """'}), "(pixel_strs[i * n + j][1:-1], float, sep=' ')\n", (1943, 1988), True, 'import numpy as np\n'), ((2514, 2552), 'numpy.fromstring', 'np.fromstring', (['c[1:-1]', 'float'], {'sep': '""" """'}), "(c[1:-1], float, sep=' ')\n", (2527, 2552), True, 'import numpy as np\n'), ((843, 869), 're.sub', 'sub', (['"""(_|-)+"""', '""" """', 'string'], {}), "('(_|-)+', ' ', string)\n", (846, 869), False, 'from re import sub\n')] |
#
# PyGUI - Tasks - Generic
#
from GUI.Properties import Properties, overridable_property
class Task(Properties):
    """A Task represents an action to be performed after a specified
    time interval, either once or repeatedly.

    Constructor:
        Task(proc, interval, repeat = False, start = True)
            Creates a task to call the given proc (a callable of no
            arguments) after the specified interval in seconds from the
            time the task is scheduled.  If repeat is true, the task is
            automatically re-scheduled each time proc is called.  If start
            is true, the task is scheduled upon creation; otherwise the
            start() method must be called to schedule it.
    """
    interval = overridable_property('interval', "Time in seconds between firings")
    repeat = overridable_property('repeat', "Whether to fire repeatedly or once only")
    def __del__(self):
        # Ensure a garbage-collected task cannot fire later; delegates to
        # the subclass implementation of stop().
        self.stop()
    scheduled = overridable_property('scheduled',
        "True if the task is currently scheduled. Read-only.")
    def start(self):
        """Schedule the task if it is not already scheduled."""
        # Abstract: implemented by the platform-specific subclass.
        raise NotImplementedError("GUI.Task.start")
    def stop(self):
        """Unschedules the task if it is currently scheduled."""
        # Abstract: implemented by the platform-specific subclass.
        raise NotImplementedError("GUI.Task.stop")
| [
"GUI.Properties.overridable_property"
] | [((737, 804), 'GUI.Properties.overridable_property', 'overridable_property', (['"""interval"""', '"""Time in seconds between firings"""'], {}), "('interval', 'Time in seconds between firings')\n", (757, 804), False, 'from GUI.Properties import Properties, overridable_property\n'), ((815, 888), 'GUI.Properties.overridable_property', 'overridable_property', (['"""repeat"""', '"""Whether to fire repeatedly or once only"""'], {}), "('repeat', 'Whether to fire repeatedly or once only')\n", (835, 888), False, 'from GUI.Properties import Properties, overridable_property\n'), ((939, 1031), 'GUI.Properties.overridable_property', 'overridable_property', (['"""scheduled"""', '"""True if the task is currently scheduled. Read-only."""'], {}), "('scheduled',\n 'True if the task is currently scheduled. Read-only.')\n", (959, 1031), False, 'from GUI.Properties import Properties, overridable_property\n')] |
# Generated by Django 3.0.11 on 2021-01-01 16:51
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters two existing fields (help_text and
    # field options only).  The user-facing help texts are in Polish and are
    # runtime strings -- they must not be edited here.
    dependencies = [
        ("bpp", "0231_ukryj_status_korekty"),
    ]
    operations = [
        # Autor.pseudonim: optional label used to disambiguate authors with
        # similar names and academic titles.
        migrations.AlterField(
            model_name="autor",
            name="pseudonim",
            field=models.CharField(
                blank=True,
                help_text="\n                    Jeżeli w bazie danych znajdują się autorzy o zbliżonych imionach, nazwiskach i tytułach naukowych,\n                    skorzystaj z tego pola aby ułatwić ich rozróżnienie. Pseudonim pokaże się w polach wyszukiwania\n                    oraz na podstronie autora, po nazwisku i tytule naukowym.",
                max_length=300,
                null=True,
            ),
        ),
        # Uczelnia.sortuj_jednostki_alfabetycznie: toggle between alphabetical
        # and manually ordered unit sorting.
        migrations.AlterField(
            model_name="uczelnia",
            name="sortuj_jednostki_alfabetycznie",
            field=models.BooleanField(
                default=True,
                help_text="Jeżeli ustawione na 'FAŁSZ', sortowanie jednostek będzie odbywało się ręcznie\n                tzn za pomocą ustalonej przez administratora systemu kolejności. ",
            ),
        ),
    ]
| [
"django.db.models.CharField",
"django.db.models.BooleanField"
] | [((337, 685), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""\n Jeżeli w bazie danych znajdują się autorzy o zbliżonych imionach, nazwiskach i tytułach naukowych,\n skorzystaj z tego pola aby ułatwić ich rozróżnienie. Pseudonim pokaże się w polach wyszukiwania\n oraz na podstronie autora, po nazwisku i tytule naukowym."""', 'max_length': '(300)', 'null': '(True)'}), '(blank=True, help_text=\n """\n Jeżeli w bazie danych znajdują się autorzy o zbliżonych imionach, nazwiskach i tytułach naukowych,\n skorzystaj z tego pola aby ułatwić ich rozróżnienie. Pseudonim pokaże się w polach wyszukiwania\n oraz na podstronie autora, po nazwisku i tytule naukowym."""\n , max_length=300, null=True)\n', (353, 685), False, 'from django.db import migrations, models\n'), ((901, 1113), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'help_text': '"""Jeżeli ustawione na \'FAŁSZ\', sortowanie jednostek będzie odbywało się ręcznie\n tzn za pomocą ustalonej przez administratora systemu kolejności. """'}), '(default=True, help_text=\n """Jeżeli ustawione na \'FAŁSZ\', sortowanie jednostek będzie odbywało się ręcznie\n tzn za pomocą ustalonej przez administratora systemu kolejności. """\n )\n', (920, 1113), False, 'from django.db import migrations, models\n')] |
"""This module provides tools for assessing flood risk
"""
from datetime import timedelta
from floodsystem.datafetcher import fetch_measure_levels
import numpy as np
from floodsystem.analysis import polyfit
from matplotlib import dates as date
def stations_level_over_threshold(stations, tol):
    """Given a list of MonitoringStation objects and a tolerance *tol*,
    return a list of (station, relative_level) tuples for every station
    whose relative water level is known and exceeds *tol*, sorted by
    relative level in descending order.

    Note: "update_water_levels" needs to have been called at least once for
    the levels to be populated."""
    flagged = []
    for station in stations:
        # None when the typical range is inconsistent or the level is unknown.
        level = station.relative_water_level()
        if level is not None and level > tol:
            flagged.append((station, level))
    return sorted(flagged, key=lambda pair: pair[1], reverse=True)
def stations_highest_rel_level(stations, N):
    """Return the N MonitoringStation objects with the highest water level
    relative to their typical range; stations with an unknown relative
    level are excluded."""
    known = [station for station in stations
             if station.relative_water_level() is not None]
    known.sort(key=lambda station: station.relative_water_level(), reverse=True)
    return known[:N]
def get_station_flood_risk(station):
    """Rate a MonitoringStation's flood risk as an integer 0-4.

    The relative water level sets a base risk (3 above the threshold, 1
    below); the recent rate of level rise then adjusts it (-1 when
    falling, +1 when rising faster than the rise threshold).  Returns
    None when either quantity is unavailable."""
    REL_LEVEL_THRESHOLD = 2
    RISE_THRESHOLD = 0.1
    relative_level = station.relative_water_level()
    if relative_level is None:
        return None  # cannot score without a relative level
    risk = 3 if relative_level > REL_LEVEL_THRESHOLD else 1
    rise = get_level_rise(station)
    if rise is None:
        return None  # cannot score without rise data
    if rise < 0:
        risk -= 1  # falling level lowers the risk
    if rise > RISE_THRESHOLD:
        risk += 1  # rapidly rising level raises the risk
    return risk
def get_level_rise(station):
    """Return the average rate of water-level rise for *station* over the
    last 2 days, or None when no usable data is available.

    The rate is computed by fitting a degree-4 polynomial to the fetched
    level series and averaging its derivative at the sampled times."""
    #Fetch up to 2 days of level data (times/values may be empty on failure)
    times, values = fetch_measure_levels(station.measure_id, timedelta(days=2))
    #Only continue if both series are non-empty and contain no None entries
    if times and values and (None in times or None in values) == False:
        #Fit a degree-4 polynomial to the level data (d0 is the time offset
        #used by the fit)
        poly, d0 = polyfit(times, values, p=4)
        #Derivative polynomial gives the instantaneous rate of change
        level_der = np.polyder(poly)
        #Evaluate the gradient at every sampled time (shifted by d0)
        grads = []
        for t in times:
            grads.append(level_der(date.date2num(t) - d0))
        #Return average of gradient values
        return np.average(grads)
    else:
        return None
def get_town_flood_risk(town, stations_by_town):
    """Return the flood risk for *town*: the highest risk rating among the
    town's stations (same 0-4 scale as get_station_flood_risk).

    NOTE: scanning stops at the first station whose risk is None
    (original behaviour preserved)."""
    town_stations = stations_by_town[town]
    worst = get_station_flood_risk(town_stations[0])
    for station in town_stations[1:]:
        risk = get_station_flood_risk(station)
        if risk is None:
            break  # stop once a station lacks data
        if worst is None or risk > worst:
            worst = risk
    return worst
def get_flood_risk_rating(num):
    """Map an integer flood-risk score to its label: Low (0 or 1),
    Moderate (2), High (3), Severe (4).  Any other value (including None)
    maps to None."""
    ratings = {0: "Low", 1: "Low", 2: "Moderate", 3: "High", 4: "Severe"}
    return ratings.get(num)
| [
"matplotlib.dates.date2num",
"numpy.average",
"numpy.polyder",
"floodsystem.analysis.polyfit",
"datetime.timedelta"
] | [((3640, 3657), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (3649, 3657), False, 'from datetime import timedelta\n'), ((3860, 3887), 'floodsystem.analysis.polyfit', 'polyfit', (['times', 'values'], {'p': '(4)'}), '(times, values, p=4)\n', (3867, 3887), False, 'from floodsystem.analysis import polyfit\n'), ((3945, 3961), 'numpy.polyder', 'np.polyder', (['poly'], {}), '(poly)\n', (3955, 3961), True, 'import numpy as np\n'), ((4215, 4232), 'numpy.average', 'np.average', (['grads'], {}), '(grads)\n', (4225, 4232), True, 'import numpy as np\n'), ((4132, 4148), 'matplotlib.dates.date2num', 'date.date2num', (['t'], {}), '(t)\n', (4145, 4148), True, 'from matplotlib import dates as date\n')] |
# A python svg graph plotting library and creating interactive charts !
# PyPi: https://pypi.org/project/pygal/
# Docs: http://www.pygal.org/en/stable/index.html
# Chart types: http://www.pygal.org/en/stable/documentation/types/index.html
# Maps: http://www.pygal.org/en/stable/documentation/types/maps/pygal_maps_world.html
# pip install pygal
# pip install pygal_maps_world
import pygal
import seaborn as sns # just for datasets
from pygal.style import Style
# Loading Dataset
# Load the example "tips" dataset (columns include total_bill and tip).
df = sns.load_dataset('tips')

# Simple Bar Chart
bar_chart = pygal.Bar()
bar_chart.add('Tip', df['tip'])
bar_chart.title = "Bla bla"
bar_chart.render_to_file('bar_chart.svg')
# bar_chart.render_in_browser()

# Customizing the graph and using a Style
custom_style = Style(colors=('#E80080', '#404040', '#9BC850'))
bar_chart = pygal.Bar(style=custom_style)
bar_chart.title = "Some text"
bar_chart.add("A", [0.95])
bar_chart.add("B", [1.25])
bar_chart.add("C", [1])
bar_chart.render_in_browser()

# Double Bar Chart
bar_chart.add('Tip', df['tip'][:10])
# Bug fix: the tips dataset has no 'total' column -- it is 'total_bill'.
bar_chart.add('Total Bill', df['total_bill'][:10])
bar_chart.render_to_file('bar_chart_2.svg')

# Horizontal bar diagram
line_chart = pygal.HorizontalBar()
line_chart.title = 'Browser usage in February 2012 (in %)'
line_chart.add('IE', 19.5)
line_chart.add('Firefox', 36.6)
line_chart.add('Chrome', 36.3)
line_chart.add('Safari', 4.5)
line_chart.add('Opera', 2.3)
line_chart.render()

# Line Chart
line_chart = pygal.Line()
# Bug fix: 'total' -> 'total_bill' (KeyError otherwise).
line_chart.add('Total', df['total_bill'][:15])
line_chart.render_to_file('line.svg')

# Double Line Chart
line_chart.add('Total', df['total_bill'][:15])
line_chart.add('Tip', df['tip'][:15])
line_chart.render_to_file('line_2.svg')

# Box Plot
box_plot = pygal.Box()
box_plot.title = 'Tips'
box_plot.add('Tip', df['tip'])
box_plot.render_to_file('box1.svg')

# Funnel Chart
funnel_chart = pygal.Funnel()
funnel_chart.title = 'Total'
funnel_chart.add('Total', df['total_bill'][:15])
funnel_chart.add('Tip', df['tip'][:15])
funnel_chart.render_to_file('funnel.svg')

# Working with maps: highlight groups of countries by ISO code
worldmap_chart = pygal.maps.world.World()
worldmap_chart.title = 'Some countries'
worldmap_chart.add('F countries', ['fr', 'fi'])
worldmap_chart.add('M countries', ['ma', 'mc', 'md', 'me', 'mg',
                                   'mk', 'ml', 'mm', 'mn', 'mo',
                                   'mr', 'mt', 'mu', 'mv', 'mw',
                                   'mx', 'my', 'mz'])
worldmap_chart.add('U countries', ['ua', 'ug', 'us', 'uy', 'uz'])
worldmap_chart.render()

# Specify a numeric value per country
worldmap_chart = pygal.maps.world.World()
worldmap_chart.title = 'Minimum deaths by capital punishement (source: Amnesty International)'
worldmap_chart.add('In 2012', {
    'af': 14, 'bd': 1, 'by': 3, 'cn': 1000,
    'gm': 9, 'in': 1, 'ir': 314, 'iq': 129,
    'jp': 7, 'kp': 6, 'pk': 1, 'ps': 6,
    'sa': 79, 'so': 6, 'sd': 5, 'tw': 6,
    'ae': 1, 'us': 43, 'ye': 28
})
worldmap_chart.render()

# Access to whole continents
supra = pygal.maps.world.SupranationalWorld()
supra.add('Asia', [('asia', 1)])
supra.add('Europe', [('europe', 1)])
supra.add('Africa', [('africa', 1)])
supra.add('North america', [('north_america', 1)])
supra.add('South america', [('south_america', 1)])
supra.add('Oceania', [('oceania', 1)])
supra.add('Antartica', [('antartica', 1)])
supra.render()
| [
"pygal.Line",
"pygal.HorizontalBar",
"pygal.maps.world.SupranationalWorld",
"pygal.Bar",
"seaborn.load_dataset",
"pygal.Box",
"pygal.maps.world.World",
"pygal.Funnel",
"pygal.style.Style"
] | [((492, 516), 'seaborn.load_dataset', 'sns.load_dataset', (['"""tips"""'], {}), "('tips')\n", (508, 516), True, 'import seaborn as sns\n'), ((549, 560), 'pygal.Bar', 'pygal.Bar', ([], {}), '()\n', (558, 560), False, 'import pygal\n'), ((758, 805), 'pygal.style.Style', 'Style', ([], {'colors': "('#E80080', '#404040', '#9BC850')"}), "(colors=('#E80080', '#404040', '#9BC850'))\n", (763, 805), False, 'from pygal.style import Style\n'), ((818, 847), 'pygal.Bar', 'pygal.Bar', ([], {'style': 'custom_style'}), '(style=custom_style)\n', (827, 847), False, 'import pygal\n'), ((1174, 1195), 'pygal.HorizontalBar', 'pygal.HorizontalBar', ([], {}), '()\n', (1193, 1195), False, 'import pygal\n'), ((1452, 1464), 'pygal.Line', 'pygal.Line', ([], {}), '()\n', (1462, 1464), False, 'import pygal\n'), ((1716, 1727), 'pygal.Box', 'pygal.Box', ([], {}), '()\n', (1725, 1727), False, 'import pygal\n'), ((1852, 1866), 'pygal.Funnel', 'pygal.Funnel', ([], {}), '()\n', (1864, 1866), False, 'import pygal\n'), ((2066, 2090), 'pygal.maps.world.World', 'pygal.maps.world.World', ([], {}), '()\n', (2088, 2090), False, 'import pygal\n'), ((2570, 2594), 'pygal.maps.world.World', 'pygal.maps.world.World', ([], {}), '()\n', (2592, 2594), False, 'import pygal\n'), ((3002, 3039), 'pygal.maps.world.SupranationalWorld', 'pygal.maps.world.SupranationalWorld', ([], {}), '()\n', (3037, 3039), False, 'import pygal\n')] |
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_qc
from telestream_cloud_qc.models.video_config import VideoConfig # noqa: E501
from telestream_cloud_qc.rest import ApiException
class TestVideoConfig(unittest.TestCase):
    """VideoConfig unit test stubs"""
    # No external fixtures are required: make_instance builds models in memory.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def make_instance(self, include_optional):
        """Test VideoConfig
            include_optional is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # model = telestream_cloud_qc.models.video_config.VideoConfig()  # noqa: E501
        # The sample values below (56, 1.337, True, ...) are placeholder data
        # emitted by openapi-generator; they only exercise model construction.
        if include_optional :
            return VideoConfig(
                track_select_test = telestream_cloud_qc.models.track_select_test.track_select_test(
                    selector = 56,
                    selector_type = 'TrackIndex',
                    checked = True, ),
                track_id_test = telestream_cloud_qc.models.track_id_test.track_id_test(
                    track_id = 56,
                    reject_on_error = True,
                    checked = True, ),
                ignore_vbi_test = telestream_cloud_qc.models.ignore_vbi_test.ignore_vbi_test(
                    reject_on_error = True,
                    checked = True, ),
                force_color_space_test = telestream_cloud_qc.models.force_color_space_test.force_color_space_test(
                    color_space = 'CSUnknown',
                    checked = True, ),
                video_segment_detection_test = telestream_cloud_qc.models.video_segment_detection_test.video_segment_detection_test(
                    black_level_default_or_custom = 'Default',
                    black_level = 56,
                    percentage_of_frame = 56,
                    min_duration_required = 1.337,
                    min_duration_required_secs_or_frames = 'Seconds',
                    require_digital_silence = True,
                    reject_on_error = True,
                    checked = True, ),
                video_layout_test = telestream_cloud_qc.models.layout_test.layout_test(
                    layout_type = 'LayoutTypeFixedIgnoreStartAndEnd',
                    start_duration = 1.337,
                    start_duration_secs_or_frames = 'Seconds',
                    end_duration = 1.337,
                    end_duration_secs_or_frames = 'Seconds',
                    start_enabled = True,
                    start_hours = 56,
                    start_minutes = 56,
                    start_seconds = 56,
                    start_frames = 56,
                    end_enabled = True,
                    end_hours = 56,
                    end_minutes = 56,
                    end_seconds = 56,
                    end_frames = 56,
                    checked = True, ),
                letterboxing_test = telestream_cloud_qc.models.letterboxing_test.letterboxing_test(
                    ratio_or_lines = 'Ratio',
                    ratio_horizontal = 56,
                    ratio_vertical = 56,
                    lines_top_and_bottom = 56,
                    lines_left_and_right = 56,
                    tolerance = 56,
                    black_level_default_or_custom = 'Default',
                    black_level = 56,
                    reject_on_error = True,
                    checked = True, ),
                blanking_test = telestream_cloud_qc.models.blanking_test.blanking_test(
                    black_level_default_or_custom = 'Default',
                    black_level = 56,
                    checked = True, ),
                loss_of_chroma_test = telestream_cloud_qc.models.loss_of_chroma_test.loss_of_chroma_test(
                    level_default_or_custom = 'Default',
                    level = 56,
                    tolerance = 56,
                    reject_on_error = True,
                    checked = True, ),
                chroma_level_test = telestream_cloud_qc.models.chroma_level_test.chroma_level_test(
                    y_level_default_or_custom = 'Default',
                    y_level_lower = 56,
                    y_level_upper = 56,
                    y_level_max_outside_range = 1.337,
                    y_level_tolerance_low = 1.337,
                    y_level_tolerance_high = 1.337,
                    u_vlevel_default_or_custom = 'Default',
                    u_vlevel_lower = 56,
                    u_vlevel_upper = 56,
                    u_vlevel_max_outside_range = 1.337,
                    low_pass_filter = 'NoFilter',
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                black_level_test = telestream_cloud_qc.models.black_level_test.black_level_test(
                    level_default_or_custom = 'Default',
                    level = 56,
                    level_max_outside_range = 1.337,
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                rgb_gamut_test = telestream_cloud_qc.models.rgb_gamut_test.rgb_gamut_test(
                    level_default_or_custom = 'Default',
                    level_lower = 56,
                    level_upper = 56,
                    level_max_outside_range = 1.337,
                    level_tolerance = 1.337,
                    low_pass_filter = 'NoFilter',
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                hdr_test = telestream_cloud_qc.models.hdr_test.hdr_test(
                    hdr_standard = 'GenericHdr',
                    max_fall_max_enabled = True,
                    max_fall_max = 56,
                    max_fall_error_enabled = True,
                    max_fall_error = 56,
                    max_cll_max_enabled = True,
                    max_cll_max = 56,
                    max_cll_error_enabled = True,
                    max_cll_error = 56,
                    always_calculate = True,
                    always_report = True,
                    reject_on_error = True,
                    checked = True, ),
                colour_bars_test = telestream_cloud_qc.models.colour_bars_test.colour_bars_test(
                    color_bar_standard = 'AnyColorBars',
                    tolerance = 56,
                    time_range_enabled = True,
                    start_time = 1.337,
                    end_time = 1.337,
                    range_tolerance = 1.337,
                    time_secs_or_frames = 'Seconds',
                    not_at_any_other_time = True,
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                black_frame_test = telestream_cloud_qc.models.black_frame_test.black_frame_test(
                    level_default_or_custom = 'Default',
                    level = 56,
                    percentage_of_frame = 56,
                    start_range_enabled = True,
                    start_time = 1.337,
                    end_time = 1.337,
                    start_range_tolerance = 1.337,
                    time_secs_or_frames = 'Seconds',
                    end_range_enabled = True,
                    end_range = 1.337,
                    end_range_tolerance = 1.337,
                    end_secs_or_frames = 'Seconds',
                    not_at_any_other_time = True,
                    max_time_allowed = 1.337,
                    max_time_allowed_secs_or_frames = 'Seconds',
                    max_time_at_start = True,
                    max_time_allowed_at_start = 1.337,
                    max_time_allowed_at_start_secs_or_frames = 'Seconds',
                    max_time_at_end = True,
                    max_time_allowed_at_end = 1.337,
                    max_time_allowed_at_end_secs_or_frames = 'Seconds',
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                single_color_test = telestream_cloud_qc.models.single_color_test.single_color_test(
                    max_time_allowed = 1.337,
                    time_secs_or_frames = 'Seconds',
                    percentage_of_frame = 1.337,
                    ignore_below = 56,
                    reject_on_error = True,
                    checked = True, ),
                freeze_frame_test = telestream_cloud_qc.models.freeze_frame_test.freeze_frame_test(
                    sensitivity = 'Low',
                    time_range_enabled = True,
                    start_time = 1.337,
                    end_time = 1.337,
                    start_range_tolerance = 1.337,
                    time_secs_or_frames = 'Seconds',
                    end_range_enabled = True,
                    end_range = 1.337,
                    end_range_duration = 1.337,
                    end_range_tolerance = 1.337,
                    end_secs_or_frames = 'Seconds',
                    not_at_any_other_time = True,
                    max_time_allowed = 1.337,
                    max_time_allowed_secs_or_frames = 'Seconds',
                    reject_on_error = True,
                    checked = True, ),
                blockiness_test = telestream_cloud_qc.models.blockiness_test.blockiness_test(
                    quality_level = 56,
                    max_time_below_quality = 1.337,
                    max_time_below_quality_secs_or_frames = 'Seconds',
                    reject_on_error = True,
                    checked = True, ),
                field_order_test = telestream_cloud_qc.models.field_order_test.field_order_test(
                    flagged_field_order = 'UnknownFieldOrder',
                    baseband_enabled = True,
                    simple = True,
                    baseband_field_order = 'UnknownFieldOrder',
                    reject_on_error = True,
                    checked = True, ),
                cadence_test = telestream_cloud_qc.models.cadence_test.cadence_test(
                    check_cadence = True,
                    cadence_required = 'CadenceUnknown',
                    check_cadence_breaks = True,
                    report_cadence = True,
                    check_for_poor_cadence = True,
                    reject_on_error = True,
                    checked = True, ),
                dropout_test = telestream_cloud_qc.models.dropout_test.dropout_test(
                    sensitivity = 'Low',
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                digital_dropout_test = telestream_cloud_qc.models.digital_dropout_test.digital_dropout_test(
                    sensitivity = 'Low',
                    reject_on_error = True,
                    checked = True, ),
                stripe_test = telestream_cloud_qc.models.stripe_test.stripe_test(
                    sensitivity = 'Low',
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                corrupt_frame_test = telestream_cloud_qc.models.corrupt_frame_test.corrupt_frame_test(
                    sensitivity = 'Low',
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                flash_test = telestream_cloud_qc.models.flash_test.flash_test(
                    check_type = 'PSEStandard',
                    check_for_extended = True,
                    check_for_red = True,
                    check_for_patterns = True,
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                media_offline_test = telestream_cloud_qc.models.media_offline_test.media_offline_test(
                    reject_on_error = True,
                    checked = True, )
            )
        else :
            return VideoConfig(
        )
    def testVideoConfig(self):
        """Test VideoConfig"""
        # Constructing the model with and without optional params must not raise.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
# Allow the test module to be executed directly (python this_file.py).
if __name__ == '__main__':
    unittest.main()
| [
"telestream_cloud_qc.models.blanking_test.blanking_test",
"telestream_cloud_qc.models.field_order_test.field_order_test",
"telestream_cloud_qc.models.digital_dropout_test.digital_dropout_test",
"telestream_cloud_qc.models.layout_test.layout_test",
"telestream_cloud_qc.models.media_offline_test.media_offline... | [((12759, 12774), 'unittest.main', 'unittest.main', ([], {}), '()\n', (12772, 12774), False, 'import unittest\n'), ((12499, 12512), 'telestream_cloud_qc.models.video_config.VideoConfig', 'VideoConfig', ([], {}), '()\n', (12510, 12512), False, 'from telestream_cloud_qc.models.video_config import VideoConfig\n'), ((996, 1117), 'telestream_cloud_qc.models.track_select_test.track_select_test', 'telestream_cloud_qc.models.track_select_test.track_select_test', ([], {'selector': '(56)', 'selector_type': '"""TrackIndex"""', 'checked': '(True)'}), "(selector=56,\n selector_type='TrackIndex', checked=True)\n", (1058, 1117), False, 'import telestream_cloud_qc\n'), ((1219, 1326), 'telestream_cloud_qc.models.track_id_test.track_id_test', 'telestream_cloud_qc.models.track_id_test.track_id_test', ([], {'track_id': '(56)', 'reject_on_error': '(True)', 'checked': '(True)'}), '(track_id=56,\n reject_on_error=True, checked=True)\n', (1273, 1326), False, 'import telestream_cloud_qc\n'), ((1430, 1529), 'telestream_cloud_qc.models.ignore_vbi_test.ignore_vbi_test', 'telestream_cloud_qc.models.ignore_vbi_test.ignore_vbi_test', ([], {'reject_on_error': '(True)', 'checked': '(True)'}), '(reject_on_error=\n True, checked=True)\n', (1488, 1529), False, 'import telestream_cloud_qc\n'), ((1616, 1732), 'telestream_cloud_qc.models.force_color_space_test.force_color_space_test', 'telestream_cloud_qc.models.force_color_space_test.force_color_space_test', ([], {'color_space': '"""CSUnknown"""', 'checked': '(True)'}), "(\n color_space='CSUnknown', checked=True)\n", (1688, 1732), False, 'import telestream_cloud_qc\n'), ((1825, 2151), 'telestream_cloud_qc.models.video_segment_detection_test.video_segment_detection_test', 'telestream_cloud_qc.models.video_segment_detection_test.video_segment_detection_test', ([], {'black_level_default_or_custom': '"""Default"""', 'black_level': '(56)', 'percentage_of_frame': '(56)', 
'min_duration_required': '(1.337)', 'min_duration_required_secs_or_frames': '"""Seconds"""', 'require_digital_silence': '(True)', 'reject_on_error': '(True)', 'checked': '(True)'}), "(\n black_level_default_or_custom='Default', black_level=56,\n percentage_of_frame=56, min_duration_required=1.337,\n min_duration_required_secs_or_frames='Seconds', require_digital_silence\n =True, reject_on_error=True, checked=True)\n", (1909, 2151), False, 'import telestream_cloud_qc\n'), ((2358, 2787), 'telestream_cloud_qc.models.layout_test.layout_test', 'telestream_cloud_qc.models.layout_test.layout_test', ([], {'layout_type': '"""LayoutTypeFixedIgnoreStartAndEnd"""', 'start_duration': '(1.337)', 'start_duration_secs_or_frames': '"""Seconds"""', 'end_duration': '(1.337)', 'end_duration_secs_or_frames': '"""Seconds"""', 'start_enabled': '(True)', 'start_hours': '(56)', 'start_minutes': '(56)', 'start_seconds': '(56)', 'start_frames': '(56)', 'end_enabled': '(True)', 'end_hours': '(56)', 'end_minutes': '(56)', 'end_seconds': '(56)', 'end_frames': '(56)', 'checked': '(True)'}), "(layout_type=\n 'LayoutTypeFixedIgnoreStartAndEnd', start_duration=1.337,\n start_duration_secs_or_frames='Seconds', end_duration=1.337,\n end_duration_secs_or_frames='Seconds', start_enabled=True, start_hours=\n 56, start_minutes=56, start_seconds=56, start_frames=56, end_enabled=\n True, end_hours=56, end_minutes=56, end_seconds=56, end_frames=56,\n checked=True)\n", (2408, 2787), False, 'import telestream_cloud_qc\n'), ((3169, 3470), 'telestream_cloud_qc.models.letterboxing_test.letterboxing_test', 'telestream_cloud_qc.models.letterboxing_test.letterboxing_test', ([], {'ratio_or_lines': '"""Ratio"""', 'ratio_horizontal': '(56)', 'ratio_vertical': '(56)', 'lines_top_and_bottom': '(56)', 'lines_left_and_right': '(56)', 'tolerance': '(56)', 'black_level_default_or_custom': '"""Default"""', 'black_level': '(56)', 'reject_on_error': '(True)', 'checked': '(True)'}), "(ratio_or_lines\n ='Ratio', 
ratio_horizontal=56, ratio_vertical=56, lines_top_and_bottom=\n 56, lines_left_and_right=56, tolerance=56,\n black_level_default_or_custom='Default', black_level=56,\n reject_on_error=True, checked=True)\n", (3231, 3470), False, 'import telestream_cloud_qc\n'), ((3719, 3849), 'telestream_cloud_qc.models.blanking_test.blanking_test', 'telestream_cloud_qc.models.blanking_test.blanking_test', ([], {'black_level_default_or_custom': '"""Default"""', 'black_level': '(56)', 'checked': '(True)'}), "(\n black_level_default_or_custom='Default', black_level=56, checked=True)\n", (3773, 3849), False, 'import telestream_cloud_qc\n'), ((3956, 4126), 'telestream_cloud_qc.models.loss_of_chroma_test.loss_of_chroma_test', 'telestream_cloud_qc.models.loss_of_chroma_test.loss_of_chroma_test', ([], {'level_default_or_custom': '"""Default"""', 'level': '(56)', 'tolerance': '(56)', 'reject_on_error': '(True)', 'checked': '(True)'}), "(\n level_default_or_custom='Default', level=56, tolerance=56,\n reject_on_error=True, checked=True)\n", (4022, 4126), False, 'import telestream_cloud_qc\n'), ((4273, 4719), 'telestream_cloud_qc.models.chroma_level_test.chroma_level_test', 'telestream_cloud_qc.models.chroma_level_test.chroma_level_test', ([], {'y_level_default_or_custom': '"""Default"""', 'y_level_lower': '(56)', 'y_level_upper': '(56)', 'y_level_max_outside_range': '(1.337)', 'y_level_tolerance_low': '(1.337)', 'y_level_tolerance_high': '(1.337)', 'u_vlevel_default_or_custom': '"""Default"""', 'u_vlevel_lower': '(56)', 'u_vlevel_upper': '(56)', 'u_vlevel_max_outside_range': '(1.337)', 'low_pass_filter': '"""NoFilter"""', 'reject_on_error': '(True)', 'do_correction': '(True)', 'checked': '(True)'}), "(\n y_level_default_or_custom='Default', y_level_lower=56, y_level_upper=56,\n y_level_max_outside_range=1.337, y_level_tolerance_low=1.337,\n y_level_tolerance_high=1.337, u_vlevel_default_or_custom='Default',\n u_vlevel_lower=56, u_vlevel_upper=56, u_vlevel_max_outside_range=1.337,\n 
low_pass_filter='NoFilter', reject_on_error=True, do_correction=True,\n checked=True)\n", (4335, 4719), False, 'import telestream_cloud_qc\n'), ((5056, 5258), 'telestream_cloud_qc.models.black_level_test.black_level_test', 'telestream_cloud_qc.models.black_level_test.black_level_test', ([], {'level_default_or_custom': '"""Default"""', 'level': '(56)', 'level_max_outside_range': '(1.337)', 'reject_on_error': '(True)', 'do_correction': '(True)', 'checked': '(True)'}), "(\n level_default_or_custom='Default', level=56, level_max_outside_range=\n 1.337, reject_on_error=True, do_correction=True, checked=True)\n", (5116, 5258), False, 'import telestream_cloud_qc\n'), ((5424, 5699), 'telestream_cloud_qc.models.rgb_gamut_test.rgb_gamut_test', 'telestream_cloud_qc.models.rgb_gamut_test.rgb_gamut_test', ([], {'level_default_or_custom': '"""Default"""', 'level_lower': '(56)', 'level_upper': '(56)', 'level_max_outside_range': '(1.337)', 'level_tolerance': '(1.337)', 'low_pass_filter': '"""NoFilter"""', 'reject_on_error': '(True)', 'do_correction': '(True)', 'checked': '(True)'}), "(\n level_default_or_custom='Default', level_lower=56, level_upper=56,\n level_max_outside_range=1.337, level_tolerance=1.337, low_pass_filter=\n 'NoFilter', reject_on_error=True, do_correction=True, checked=True)\n", (5480, 5699), False, 'import telestream_cloud_qc\n'), ((5924, 6270), 'telestream_cloud_qc.models.hdr_test.hdr_test', 'telestream_cloud_qc.models.hdr_test.hdr_test', ([], {'hdr_standard': '"""GenericHdr"""', 'max_fall_max_enabled': '(True)', 'max_fall_max': '(56)', 'max_fall_error_enabled': '(True)', 'max_fall_error': '(56)', 'max_cll_max_enabled': '(True)', 'max_cll_max': '(56)', 'max_cll_error_enabled': '(True)', 'max_cll_error': '(56)', 'always_calculate': '(True)', 'always_report': '(True)', 'reject_on_error': '(True)', 'checked': '(True)'}), "(hdr_standard='GenericHdr',\n max_fall_max_enabled=True, max_fall_max=56, max_fall_error_enabled=True,\n max_fall_error=56, 
max_cll_max_enabled=True, max_cll_max=56,\n max_cll_error_enabled=True, max_cll_error=56, always_calculate=True,\n always_report=True, reject_on_error=True, checked=True)\n", (5968, 6270), False, 'import telestream_cloud_qc\n'), ((6593, 6918), 'telestream_cloud_qc.models.colour_bars_test.colour_bars_test', 'telestream_cloud_qc.models.colour_bars_test.colour_bars_test', ([], {'color_bar_standard': '"""AnyColorBars"""', 'tolerance': '(56)', 'time_range_enabled': '(True)', 'start_time': '(1.337)', 'end_time': '(1.337)', 'range_tolerance': '(1.337)', 'time_secs_or_frames': '"""Seconds"""', 'not_at_any_other_time': '(True)', 'reject_on_error': '(True)', 'do_correction': '(True)', 'checked': '(True)'}), "(color_bar_standard\n ='AnyColorBars', tolerance=56, time_range_enabled=True, start_time=\n 1.337, end_time=1.337, range_tolerance=1.337, time_secs_or_frames=\n 'Seconds', not_at_any_other_time=True, reject_on_error=True,\n do_correction=True, checked=True)\n", (6653, 6918), False, 'import telestream_cloud_qc\n'), ((7192, 7948), 'telestream_cloud_qc.models.black_frame_test.black_frame_test', 'telestream_cloud_qc.models.black_frame_test.black_frame_test', ([], {'level_default_or_custom': '"""Default"""', 'level': '(56)', 'percentage_of_frame': '(56)', 'start_range_enabled': '(True)', 'start_time': '(1.337)', 'end_time': '(1.337)', 'start_range_tolerance': '(1.337)', 'time_secs_or_frames': '"""Seconds"""', 'end_range_enabled': '(True)', 'end_range': '(1.337)', 'end_range_tolerance': '(1.337)', 'end_secs_or_frames': '"""Seconds"""', 'not_at_any_other_time': '(True)', 'max_time_allowed': '(1.337)', 'max_time_allowed_secs_or_frames': '"""Seconds"""', 'max_time_at_start': '(True)', 'max_time_allowed_at_start': '(1.337)', 'max_time_allowed_at_start_secs_or_frames': '"""Seconds"""', 'max_time_at_end': '(True)', 'max_time_allowed_at_end': '(1.337)', 'max_time_allowed_at_end_secs_or_frames': '"""Seconds"""', 'reject_on_error': '(True)', 'do_correction': '(True)', 'checked': 
'(True)'}), "(\n level_default_or_custom='Default', level=56, percentage_of_frame=56,\n start_range_enabled=True, start_time=1.337, end_time=1.337,\n start_range_tolerance=1.337, time_secs_or_frames='Seconds',\n end_range_enabled=True, end_range=1.337, end_range_tolerance=1.337,\n end_secs_or_frames='Seconds', not_at_any_other_time=True,\n max_time_allowed=1.337, max_time_allowed_secs_or_frames='Seconds',\n max_time_at_start=True, max_time_allowed_at_start=1.337,\n max_time_allowed_at_start_secs_or_frames='Seconds', max_time_at_end=\n True, max_time_allowed_at_end=1.337,\n max_time_allowed_at_end_secs_or_frames='Seconds', reject_on_error=True,\n do_correction=True, checked=True)\n", (7252, 7948), False, 'import telestream_cloud_qc\n'), ((8495, 8701), 'telestream_cloud_qc.models.single_color_test.single_color_test', 'telestream_cloud_qc.models.single_color_test.single_color_test', ([], {'max_time_allowed': '(1.337)', 'time_secs_or_frames': '"""Seconds"""', 'percentage_of_frame': '(1.337)', 'ignore_below': '(56)', 'reject_on_error': '(True)', 'checked': '(True)'}), "(max_time_allowed\n =1.337, time_secs_or_frames='Seconds', percentage_of_frame=1.337,\n ignore_below=56, reject_on_error=True, checked=True)\n", (8557, 8701), False, 'import telestream_cloud_qc\n'), ((8871, 9355), 'telestream_cloud_qc.models.freeze_frame_test.freeze_frame_test', 'telestream_cloud_qc.models.freeze_frame_test.freeze_frame_test', ([], {'sensitivity': '"""Low"""', 'time_range_enabled': '(True)', 'start_time': '(1.337)', 'end_time': '(1.337)', 'start_range_tolerance': '(1.337)', 'time_secs_or_frames': '"""Seconds"""', 'end_range_enabled': '(True)', 'end_range': '(1.337)', 'end_range_duration': '(1.337)', 'end_range_tolerance': '(1.337)', 'end_secs_or_frames': '"""Seconds"""', 'not_at_any_other_time': '(True)', 'max_time_allowed': '(1.337)', 'max_time_allowed_secs_or_frames': '"""Seconds"""', 'reject_on_error': '(True)', 'checked': '(True)'}), "(sensitivity=\n 'Low', time_range_enabled=True, 
start_time=1.337, end_time=1.337,\n start_range_tolerance=1.337, time_secs_or_frames='Seconds',\n end_range_enabled=True, end_range=1.337, end_range_duration=1.337,\n end_range_tolerance=1.337, end_secs_or_frames='Seconds',\n not_at_any_other_time=True, max_time_allowed=1.337,\n max_time_allowed_secs_or_frames='Seconds', reject_on_error=True,\n checked=True)\n", (8933, 9355), False, 'import telestream_cloud_qc\n'), ((9733, 9933), 'telestream_cloud_qc.models.blockiness_test.blockiness_test', 'telestream_cloud_qc.models.blockiness_test.blockiness_test', ([], {'quality_level': '(56)', 'max_time_below_quality': '(1.337)', 'max_time_below_quality_secs_or_frames': '"""Seconds"""', 'reject_on_error': '(True)', 'checked': '(True)'}), "(quality_level=56,\n max_time_below_quality=1.337, max_time_below_quality_secs_or_frames=\n 'Seconds', reject_on_error=True, checked=True)\n", (9791, 9933), False, 'import telestream_cloud_qc\n'), ((10079, 10308), 'telestream_cloud_qc.models.field_order_test.field_order_test', 'telestream_cloud_qc.models.field_order_test.field_order_test', ([], {'flagged_field_order': '"""UnknownFieldOrder"""', 'baseband_enabled': '(True)', 'simple': '(True)', 'baseband_field_order': '"""UnknownFieldOrder"""', 'reject_on_error': '(True)', 'checked': '(True)'}), "(\n flagged_field_order='UnknownFieldOrder', baseband_enabled=True, simple=\n True, baseband_field_order='UnknownFieldOrder', reject_on_error=True,\n checked=True)\n", (10139, 10308), False, 'import telestream_cloud_qc\n'), ((10468, 10700), 'telestream_cloud_qc.models.cadence_test.cadence_test', 'telestream_cloud_qc.models.cadence_test.cadence_test', ([], {'check_cadence': '(True)', 'cadence_required': '"""CadenceUnknown"""', 'check_cadence_breaks': '(True)', 'report_cadence': '(True)', 'check_for_poor_cadence': '(True)', 'reject_on_error': '(True)', 'checked': '(True)'}), "(check_cadence=True,\n cadence_required='CadenceUnknown', check_cadence_breaks=True,\n report_cadence=True, 
check_for_poor_cadence=True, reject_on_error=True,\n checked=True)\n", (10520, 10700), False, 'import telestream_cloud_qc\n'), ((10885, 11016), 'telestream_cloud_qc.models.dropout_test.dropout_test', 'telestream_cloud_qc.models.dropout_test.dropout_test', ([], {'sensitivity': '"""Low"""', 'reject_on_error': '(True)', 'do_correction': '(True)', 'checked': '(True)'}), "(sensitivity='Low',\n reject_on_error=True, do_correction=True, checked=True)\n", (10937, 11016), False, 'import telestream_cloud_qc\n'), ((11148, 11276), 'telestream_cloud_qc.models.digital_dropout_test.digital_dropout_test', 'telestream_cloud_qc.models.digital_dropout_test.digital_dropout_test', ([], {'sensitivity': '"""Low"""', 'reject_on_error': '(True)', 'checked': '(True)'}), "(\n sensitivity='Low', reject_on_error=True, checked=True)\n", (11216, 11276), False, 'import telestream_cloud_qc\n'), ((11375, 11504), 'telestream_cloud_qc.models.stripe_test.stripe_test', 'telestream_cloud_qc.models.stripe_test.stripe_test', ([], {'sensitivity': '"""Low"""', 'reject_on_error': '(True)', 'do_correction': '(True)', 'checked': '(True)'}), "(sensitivity='Low',\n reject_on_error=True, do_correction=True, checked=True)\n", (11425, 11504), False, 'import telestream_cloud_qc\n'), ((11634, 11778), 'telestream_cloud_qc.models.corrupt_frame_test.corrupt_frame_test', 'telestream_cloud_qc.models.corrupt_frame_test.corrupt_frame_test', ([], {'sensitivity': '"""Low"""', 'reject_on_error': '(True)', 'do_correction': '(True)', 'checked': '(True)'}), "(sensitivity\n ='Low', reject_on_error=True, do_correction=True, checked=True)\n", (11698, 11778), False, 'import telestream_cloud_qc\n'), ((11899, 12107), 'telestream_cloud_qc.models.flash_test.flash_test', 'telestream_cloud_qc.models.flash_test.flash_test', ([], {'check_type': '"""PSEStandard"""', 'check_for_extended': '(True)', 'check_for_red': '(True)', 'check_for_patterns': '(True)', 'reject_on_error': '(True)', 'do_correction': '(True)', 'checked': '(True)'}), 
"(check_type='PSEStandard',\n check_for_extended=True, check_for_red=True, check_for_patterns=True,\n reject_on_error=True, do_correction=True, checked=True)\n", (11947, 12107), False, 'import telestream_cloud_qc\n'), ((12302, 12407), 'telestream_cloud_qc.models.media_offline_test.media_offline_test', 'telestream_cloud_qc.models.media_offline_test.media_offline_test', ([], {'reject_on_error': '(True)', 'checked': '(True)'}), '(\n reject_on_error=True, checked=True)\n', (12366, 12407), False, 'import telestream_cloud_qc\n')] |
from datetime import timezone
from functools import partial, update_wrapper
from django.utils.cache import get_conditional_response
from django.utils.http import http_date, quote_etag
from rest_framework import status
from rest_framework.metadata import BaseMetadata
from rest_framework.response import Response
from rest_framework.views import APIView
from scrud_django import __version__
from scrud_django.utils import get_string_or_evaluate, link_content
class ScrudfulViewFunc:
    """Callable wrapper that layers conditional-HTTP (SCRUDful) semantics
    over a view method.

    Per request it:
    - rejects PUT/DELETE requests that omit the ``If-Match`` /
      ``If-Unmodified-Since`` headers made mandatory by the configured
      ``etag_func`` / ``last_modified_func``;
    - short-circuits through Django's ``get_conditional_response`` when the
      client's validators are current;
    - otherwise invokes the wrapped view and decorates the response with
      ``ETag``, ``Last-Modified``, ``Link`` (schema + context) and CORS
      expose headers.

    Each ``*_or_func`` argument may be a literal string or a callable that
    is resolved lazily via ``get_string_or_evaluate``.
    """
    def __init__(
        self,
        view_method,
        view_is_class_method=True,
        etag_func=None,
        last_modified_func=None,
        schema_link_or_func=None,
        schema_rel_or_func=None,
        schema_type_or_func=None,
        context_link_or_func=None,
        context_rel_or_func=None,
        context_type_or_func=None,
        http_schema_or_func=None,
    ):
        self.view_is_class_method = view_is_class_method
        self.view_method = view_method
        self.etag_func = etag_func
        self.last_modified_func = last_modified_func
        self.schema_link_or_func = schema_link_or_func
        self.schema_rel_or_func = schema_rel_or_func
        self.schema_type_or_func = schema_type_or_func
        self.context_link_or_func = context_link_or_func
        self.context_rel_or_func = context_rel_or_func
        self.context_type_or_func = context_type_or_func
        # Stored for external introspection; not referenced within this class.
        self.http_schema_or_func = http_schema_or_func
        # Copy __name__/__doc__/etc. from the wrapped view onto this wrapper.
        update_wrapper(self, self.view_method)
    def __get__(self, obj, objtype):
        # Descriptor protocol: accessing the wrapper through an instance
        # yields a partial bound to that instance (mimics a bound method).
        return partial(self.__call__, obj)
    def __call__(self, *args, **kwargs):
        # For class methods the call is (self, request, ...); for plain
        # functions it is (request, ...).
        if self.view_is_class_method:
            request = args[1]
        else:
            request = args[0]
        # Unsafe methods must carry the conditional headers corresponding to
        # whichever validator functions are configured (lost-update guard).
        if request.method in ("PUT", "DELETE"):
            missing_required_headers = []
            if self.etag_func and not request.META.get("HTTP_IF_MATCH"):
                missing_required_headers.append("If-Match")
            if self.last_modified_func and not request.META.get(
                "HTTP_IF_UNMODIFIED_SINCE"
            ):
                missing_required_headers.append("If-Unmodified-Since")
            if missing_required_headers:
                # TODO Define standard error json
                response = Response(
                    {"missing-required-headers": missing_required_headers}, status=400,
                )
                return response
        # Compute values (if any) for the requested resource.
        def get_last_modified():
            # Format the validator's datetime as an HTTP-date; the value is
            # treated as UTC (naive datetimes get tzinfo=timezone.utc).
            if self.last_modified_func:
                last_modified = self.last_modified_func(*args, **kwargs)
                if last_modified:
                    return http_date(
                        last_modified.replace(tzinfo=timezone.utc).timestamp()
                    )
            return None
        etag = None
        last_modified = None
        if request.method not in ("POST", "OPTIONS"):
            if self.etag_func:
                etag = self.etag_func(*args, **kwargs)
                # Package version is appended so deployments invalidate caches.
                etag = etag + __version__ if etag else None
                etag = quote_etag(etag) if etag else None
            last_modified = get_last_modified()
        else:
            etag = None
            last_modified = None
        # Returns a 304/412-style response when validators match, else None.
        response = get_conditional_response(
            request, etag=etag, last_modified=last_modified
        )
        if response is None:
            response = self.view_method(*args, **kwargs)
            schema_link = self.schema_link_header(*args, **kwargs) or ""
            context_link = self.context_link_header(*args, **kwargs) or ""
            join_links = ", " if schema_link and context_link else ""
            # NOTE: this local deliberately shadows the imported link_content
            # helper within this scope; it holds the combined Link header text.
            link_content = schema_link + join_links + context_link
            if etag:
                response["ETag"] = etag
            if last_modified:
                response["Last-Modified"] = last_modified
            if link_content:
                response["Link"] = link_content
        self.add_expose_headers(response)
        return response
    def add_expose_headers(self, response):
        """If the Link and/or Location header are provided on the response add the
        'Access-Control-Expose-Headers` header to expose them over CORS requests.
        """
        expose_headers = ""
        if "Link" in response:
            expose_headers = "Link"
        if "Location" in response:
            if expose_headers:
                expose_headers = expose_headers + ", "
            expose_headers = expose_headers + "Location"
        if expose_headers:
            response["Access-Control-Expose-Headers"] = expose_headers
    def schema_link(self, *args, **kwargs):
        # Resolve the schema link (string or callable) for this request.
        return get_string_or_evaluate(self.schema_link_or_func, *args, **kwargs)
    def schema_link_header(self, *args, **kwargs):
        """Build the schema portion of the Link header, or None if no link."""
        link = self.schema_link(*args, **kwargs)
        if link:
            link_rel = (
                get_string_or_evaluate(self.schema_rel_or_func, *args, **kwargs,)
                or "describedBy"
            )
            link_type = (
                get_string_or_evaluate(self.schema_type_or_func, *args, **kwargs,)
                or "application/json"
            )
            return link_content(link, link_rel, link_type)
        return None
    def context_link(self, *args, **kwargs):
        # Resolve the JSON-LD context link (string or callable) for this request.
        return get_string_or_evaluate(self.context_link_or_func, *args, **kwargs)
    def context_link_header(self, *args, **kwargs):
        """Build the JSON-LD context portion of the Link header, or None."""
        link = self.context_link(*args, **kwargs)
        if link:
            link_rel = (
                get_string_or_evaluate(self.context_rel_or_func, *args, **kwargs,)
                or "http://www.w3.org/ns/json-ld#context"
            )
            link_type = (
                get_string_or_evaluate(self.context_type_or_func, *args, **kwargs,)
                or "application/ld+json"
            )
            return link_content(link, link_rel, link_type)
        return None
def scrudful(
    etag_func=None,
    last_modified_func=None,
    schema_link_or_func=None,
    schema_rel_or_func=None,
    schema_type_or_func=None,
    context_link_or_func=None,
    context_rel_or_func=None,
    context_type_or_func=None,
    http_schema_or_func=None,
):
    """Decorator to make a view method SCRUDful.

    Each ``*_or_func`` argument may be a plain string or a callable that is
    evaluated lazily per request (see :class:`ScrudfulViewFunc`).  Returns a
    decorator that wraps the view method in a ``ScrudfulViewFunc`` with the
    given configuration.
    """
    # TODO what about 400 Bad Request context and schema?
    def decorator(view_method):
        return ScrudfulViewFunc(
            view_method,
            etag_func=etag_func,
            last_modified_func=last_modified_func,
            schema_link_or_func=schema_link_or_func,
            schema_rel_or_func=schema_rel_or_func,
            schema_type_or_func=schema_type_or_func,
            context_link_or_func=context_link_or_func,
            # Fixed copy-paste bug: these two previously forwarded the
            # schema_* values, silently discarding the caller's context_rel
            # and context_type (scrudful_api_view already passed them
            # through correctly).
            context_rel_or_func=context_rel_or_func,
            context_type_or_func=context_type_or_func,
            http_schema_or_func=http_schema_or_func,
        )
    return decorator
def scrudful_api_view(
    etag_func=None,
    last_modified_func=None,
    schema_link_or_func=None,
    schema_rel_or_func=None,
    schema_type_or_func=None,
    context_link_or_func=None,
    context_rel_or_func=None,
    context_type_or_func=None,
    http_schema_or_func=['GET'],
):
    # Decorator that turns a plain function-based view into a dynamically
    # built DRF APIView subclass whose listed HTTP methods are all served by
    # one SCRUDful handler.  NOTE(review): despite its name,
    # http_schema_or_func is used here as the list of allowed HTTP methods;
    # the mutable list default is only iterated, never mutated, so it is
    # harmless in practice.
    def decorator(view_method, *args, **kwargs):
        http_method_names = http_schema_or_func
        allowed_methods = set(http_method_names) | {'options'}
        cls_attr = {
            '__doc__': view_method.__doc__,
            # NOTE(review): ScrudfulAPIViewMetadata is presumably defined
            # elsewhere in this module — confirm it exists.
            'metadata_class': ScrudfulAPIViewMetadata,
        }
        # A single ScrudfulViewFunc instance handles every verb; its lambda
        # drops the implicit `self` injected by the class-based dispatch.
        handler = ScrudfulViewFunc(
            lambda self, *args, **kwargs: view_method(*args, **kwargs),
            etag_func=etag_func,
            last_modified_func=last_modified_func,
            schema_link_or_func=schema_link_or_func,
            schema_rel_or_func=schema_rel_or_func,
            schema_type_or_func=schema_type_or_func,
            context_link_or_func=context_link_or_func,
            context_rel_or_func=context_rel_or_func,
            context_type_or_func=context_type_or_func,
        )
        for method in http_method_names:
            cls_attr[method.lower()] = handler
        # Build the APIView subclass at runtime and copy identity metadata
        # from the wrapped function so introspection/debugging stay sane.
        ScrudAPIView = type('ScrudAPIView', (APIView,), cls_attr)
        ScrudAPIView.http_method_names = [method.lower() for method in allowed_methods]
        ScrudAPIView.__name__ = view_method.__name__
        ScrudAPIView.__module__ = view_method.__module__
        ScrudAPIView.permission_classes = getattr(
            view_method, 'permission_classes', APIView.permission_classes
        )
        ScrudAPIView.schema = getattr(view_method, 'schema', APIView.schema)
        ScrudAPIView.schema_link_or_func = schema_link_or_func
        ScrudAPIView.context_link_or_func = context_link_or_func
        # ScrudAPIView.options = options
        new_view_method = ScrudAPIView.as_view()
        return new_view_method
    return decorator
class ScrudfulMetadata(BaseMetadata):
    """OPTIONS/OpenAPI-style metadata builder for SCRUD-ful viewsets.

    Describes, per HTTP verb, the schema/context links exposed by the view's
    scrudful-wrapped actions plus the conditional request headers
    (``If-Match`` / ``If-Unmodified-Since``) that write operations require.
    """

    def determine_metadata(self, request, view, *args, **kwargs):
        # Extra URL args mean a single resource was addressed (detail route).
        if len(args) > 0 or len(kwargs) > 0:  # this is a detail request
            return self.determine_metadata_for_detail(request, view)
        return self.determine_metadata_for_list(request, view)

    def determine_metadata_for_detail(self, request, view):
        """Metadata for a detail route: GET/PUT/DELETE where supported."""
        metadata = dict()
        metadata.update(
            {
                key: value
                for key, value in {
                    "get": self.determine_metadata_for_get(request, view, "retrieve"),
                    "put": self.determine_metadata_for_put(request, view),
                    "delete": self.determine_metadata_for_delete(request, view),
                }.items()
                if value is not None
            }
        )
        return metadata

    def determine_metadata_for_list(self, request, view):
        """Metadata for a list route: POST/GET where supported."""
        metadata = dict()
        metadata.update(
            {
                key: value
                for key, value in {
                    "post": self.determine_metadata_for_post(request, view),
                    "get": self.determine_metadata_for_get(request, view, "list"),
                }.items()
                if value is not None
            }
        )
        return metadata

    def get_method(self, view, name):
        """Return the scrudful handler behind action `name`, or None.

        Each action on the view is a functools.partial whose ``func`` is the
        bound ``__call__`` of a ScrudfulViewFunc; ``func.__self__`` recovers
        that wrapper so its link/etag callables can be queried.
        """
        method_partial = getattr(view, name, None)
        if method_partial:
            return method_partial.func.__self__
        return None

    def determine_metadata_for_post(self, request, view, name="create"):
        """Describe the POST (create) operation, or None if unsupported."""
        create_method = self.get_method(view, name)
        if create_method is None:
            return None
        schema_link = create_method.schema_link(view, request)
        context_link = create_method.context_link(view, request)
        request_body = {
            "description": "The content for the resource to be created.",
            "required": True,
        }
        if schema_link or context_link:
            json_content = {}
            if schema_link:
                json_content["schema"] = schema_link
            if context_link:
                json_content["context"] = context_link
            request_body["content"] = {
                "application/json": json_content,
            }
        metadata = {
            "requestBody": request_body,
            "responses": {"201": {"description": "CREATED"}},
        }
        return metadata

    def determine_metadata_for_get(self, request, view, name):
        """Describe the GET operation (retrieve or list), or None."""
        list_method = self.get_method(view, name)
        if list_method is None:
            return None
        schema_link = list_method.schema_link(view, request)
        context_link = list_method.context_link(view, request)
        json_content = None
        if schema_link or context_link:
            json_content = {}
            if schema_link:
                json_content["schema"] = schema_link
            if context_link:
                json_content["context"] = context_link
        responses = {
            "200": {"description": "OK"},
        }
        if json_content:
            responses["200"]["content"] = {
                "application/json": json_content,
            }
        return {
            "responses": responses,
        }

    def required_conditional_headers(self, method):
        """OpenAPI parameter specs for the conditional headers `method` needs.

        An etag function requires If-Match; a last-modified function requires
        If-Unmodified-Since.  Returns None when neither is configured.
        """
        supports_etag = method.etag_func is not None
        supports_last_modified = method.last_modified_func is not None
        parameters = None
        if supports_etag or supports_last_modified:
            parameters = []
        if supports_etag:
            parameters.append(
                {
                    "in": "header",
                    "name": "If-Match",
                    "schema": {"type": "string"},
                    "required": True,
                }
            )
        if supports_last_modified:
            parameters.append(
                {
                    "in": "header",
                    "name": "If-Unmodified-Since",
                    "schema": {"type": "string"},
                    "required": True,
                }
            )
        return parameters

    def determine_metadata_for_put(self, request, view, name="update"):
        """Describe the PUT (update) operation, or None if unsupported."""
        update_method = self.get_method(view, name)
        if update_method is None:
            return None
        schema_link = update_method.schema_link(view, request)
        context_link = update_method.context_link(view, request)
        # Bug fix: this previously said "created" (copy-paste from POST).
        request_body = {
            "description": "The content for the resource to be updated.",
            "required": True,
        }
        if schema_link or context_link:
            json_content = {}
            if schema_link:
                json_content["schema"] = schema_link
            if context_link:
                json_content["context"] = context_link
            request_body["content"] = {
                "application/json": json_content,
            }
        metadata = {
            "requestBody": request_body,
            "responses": {"200": {"description": "OK"}},
        }
        parameters = self.required_conditional_headers(update_method)
        if parameters:
            metadata["parameters"] = parameters
        return metadata

    def determine_metadata_for_delete(self, request, view, name="destroy"):
        """Describe the DELETE (destroy) operation, or None if unsupported."""
        delete_method = self.get_method(view, name)
        if delete_method is None:
            return None
        metadata = {
            "responses": {"200": {"description": "OK"}},
        }
        parameters = self.required_conditional_headers(delete_method)
        if parameters:
            metadata["parameters"] = parameters
        return metadata
class ScrudfulAPIViewMetadata(ScrudfulMetadata):
    """Metadata builder for plain APIView subclasses.

    Unlike the viewset flavour, handlers are looked up by their HTTP-verb
    names (``get``/``post``/``put``/``delete``) rather than by DRF action
    names, and detail vs. list is not distinguished.
    """

    def determine_metadata(self, request, view, *args, **kwargs):
        candidates = {
            "get": self.determine_metadata_for_get(request, view, "get"),
            "post": self.determine_metadata_for_post(request, view, "post"),
            "put": self.determine_metadata_for_put(request, view, "put"),
            "delete": self.determine_metadata_for_delete(request, view, "delete"),
        }
        metadata = dict()
        for verb, verb_metadata in candidates.items():
            if verb_metadata is not None:
                metadata[verb] = verb_metadata
        return metadata
def options(view_instance, request, *args, **kwargs):
    """Handle an OPTIONS request by returning the view's scrudful metadata."""
    metadata_builder = ScrudfulMetadata()
    payload = metadata_builder.determine_metadata(
        request, view_instance, *args, **kwargs
    )
    return Response(payload, status=status.HTTP_200_OK)
def scrudful_viewset(cls):
    """Class decorator wrapping a viewset's actions with SCRUD behavior.

    Reads the scrudful configuration (etag/last-modified callables and
    schema/context link settings) from the class's ``Meta`` and wraps the
    standard actions (create/retrieve/update/destroy, plus any
    ``Meta.extra_view_methods``).  The ``list`` action uses its own
    ``list_*``-prefixed Meta settings.  An ``options`` handler is installed
    that serves the generated metadata.
    """
    setattr(cls, "options", options)
    meta = getattr(cls, "Meta", None)
    etag_func = getattr(meta, "etag_func", None)
    last_modified_func = getattr(meta, "last_modified_func", None)
    schema_link_or_func = getattr(meta, "schema_link_or_func", None)
    schema_rel_or_func = getattr(meta, "schema_rel_or_func", None)
    schema_type_or_func = getattr(meta, "schema_type_or_func", None)
    context_link_or_func = getattr(meta, "context_link_or_func", None)
    context_rel_or_func = getattr(meta, "context_rel_or_func", None)
    context_type_or_func = getattr(meta, "context_type_or_func", None)
    extra_view_methods = getattr(meta, "extra_view_methods", [])
    scrudful_item = scrudful(
        etag_func=etag_func,
        last_modified_func=last_modified_func,
        schema_link_or_func=schema_link_or_func,
        schema_rel_or_func=schema_rel_or_func,
        schema_type_or_func=schema_type_or_func,
        context_link_or_func=context_link_or_func,
        context_rel_or_func=context_rel_or_func,
        context_type_or_func=context_type_or_func,
    )
    view_methods = ["create", "retrieve", "update", "destroy"]
    view_methods.extend(extra_view_methods)
    for method_name in view_methods:
        method = getattr(cls, method_name, None)
        if method is None:
            # Bug fix: a missing action used to be wrapped anyway, which
            # installed a broken handler (scrudful wrapping of None).
            continue
        setattr(cls, method_name, scrudful_item(method))
    if hasattr(cls, "list"):
        scrudful_list = scrudful(
            etag_func=getattr(meta, "list_etag_func", None),
            last_modified_func=getattr(meta, "list_last_modified_func", None),
            schema_link_or_func=getattr(meta, "list_schema_link_or_func", None),
            schema_rel_or_func=getattr(meta, "list_schema_rel_or_func", None),
            schema_type_or_func=getattr(meta, "list_schema_type_or_func", None),
            context_link_or_func=getattr(meta, "list_context_link_or_func", None),
            context_rel_or_func=getattr(meta, "list_context_rel_or_func", None),
            context_type_or_func=getattr(meta, "list_context_type_or_func", None),
        )
        list_method = getattr(cls, "list")
        setattr(cls, "list", scrudful_list(list_method))
    return cls
| [
"django.utils.http.quote_etag",
"django.utils.cache.get_conditional_response",
"rest_framework.response.Response",
"functools.partial",
"scrud_django.utils.get_string_or_evaluate",
"scrud_django.utils.link_content",
"functools.update_wrapper"
] | [((15482, 15523), 'rest_framework.response.Response', 'Response', (['data'], {'status': 'status.HTTP_200_OK'}), '(data, status=status.HTTP_200_OK)\n', (15490, 15523), False, 'from rest_framework.response import Response\n'), ((1456, 1494), 'functools.update_wrapper', 'update_wrapper', (['self', 'self.view_method'], {}), '(self, self.view_method)\n', (1470, 1494), False, 'from functools import partial, update_wrapper\n'), ((1548, 1575), 'functools.partial', 'partial', (['self.__call__', 'obj'], {}), '(self.__call__, obj)\n', (1555, 1575), False, 'from functools import partial, update_wrapper\n'), ((3261, 3334), 'django.utils.cache.get_conditional_response', 'get_conditional_response', (['request'], {'etag': 'etag', 'last_modified': 'last_modified'}), '(request, etag=etag, last_modified=last_modified)\n', (3285, 3334), False, 'from django.utils.cache import get_conditional_response\n'), ((4677, 4742), 'scrud_django.utils.get_string_or_evaluate', 'get_string_or_evaluate', (['self.schema_link_or_func', '*args'], {}), '(self.schema_link_or_func, *args, **kwargs)\n', (4699, 4742), False, 'from scrud_django.utils import get_string_or_evaluate, link_content\n'), ((5316, 5382), 'scrud_django.utils.get_string_or_evaluate', 'get_string_or_evaluate', (['self.context_link_or_func', '*args'], {}), '(self.context_link_or_func, *args, **kwargs)\n', (5338, 5382), False, 'from scrud_django.utils import get_string_or_evaluate, link_content\n'), ((5195, 5234), 'scrud_django.utils.link_content', 'link_content', (['link', 'link_rel', 'link_type'], {}), '(link, link_rel, link_type)\n', (5207, 5234), False, 'from scrud_django.utils import get_string_or_evaluate, link_content\n'), ((5867, 5906), 'scrud_django.utils.link_content', 'link_content', (['link', 'link_rel', 'link_type'], {}), '(link, link_rel, link_type)\n', (5879, 5906), False, 'from scrud_django.utils import get_string_or_evaluate, link_content\n'), ((2265, 2341), 'rest_framework.response.Response', 'Response', 
(["{'missing-required-headers': missing_required_headers}"], {'status': '(400)'}), "({'missing-required-headers': missing_required_headers}, status=400)\n", (2273, 2341), False, 'from rest_framework.response import Response\n'), ((3088, 3104), 'django.utils.http.quote_etag', 'quote_etag', (['etag'], {}), '(etag)\n', (3098, 3104), False, 'from django.utils.http import http_date, quote_etag\n'), ((4902, 4966), 'scrud_django.utils.get_string_or_evaluate', 'get_string_or_evaluate', (['self.schema_rel_or_func', '*args'], {}), '(self.schema_rel_or_func, *args, **kwargs)\n', (4924, 4966), False, 'from scrud_django.utils import get_string_or_evaluate, link_content\n'), ((5057, 5122), 'scrud_django.utils.get_string_or_evaluate', 'get_string_or_evaluate', (['self.schema_type_or_func', '*args'], {}), '(self.schema_type_or_func, *args, **kwargs)\n', (5079, 5122), False, 'from scrud_django.utils import get_string_or_evaluate, link_content\n'), ((5544, 5609), 'scrud_django.utils.get_string_or_evaluate', 'get_string_or_evaluate', (['self.context_rel_or_func', '*args'], {}), '(self.context_rel_or_func, *args, **kwargs)\n', (5566, 5609), False, 'from scrud_django.utils import get_string_or_evaluate, link_content\n'), ((5725, 5791), 'scrud_django.utils.get_string_or_evaluate', 'get_string_or_evaluate', (['self.context_type_or_func', '*args'], {}), '(self.context_type_or_func, *args, **kwargs)\n', (5747, 5791), False, 'from scrud_django.utils import get_string_or_evaluate, link_content\n')] |
#coding=utf-8
import os
from bs4 import BeautifulSoup

# Walk the scraped HTML pages and group FII tickers by their manager
# ("gestor"), whose name is taken from the first <div> of each page.
files = []
gestores = {
    'EDY11': {'fiis': [], 'vacancia': 0}
}
for r, d, f in os.walk('../gestores_ifix'):
    for file in f:
        # Context manager closes the handle even if reading/parsing fails
        # (the original leaked the handle on error).
        with open(os.path.join(r, file), 'r') as handle:
            html = handle.read()
        soup = BeautifulSoup(html, 'html.parser')
        gestor = u'%s' % (soup.find('div').text)
        print(gestor)
        if not gestor in gestores:
            gestores[gestor] = {
                'fiis': [],
                'vacancia': 0
            }
        # The file name (minus extension) is the fund ticker.
        gestores[gestor]['fiis'].append(file.replace('.html', ''))
        files.append(file)
print(gestores)
"bs4.BeautifulSoup",
"os.path.join",
"os.walk"
] | [((137, 164), 'os.walk', 'os.walk', (['"""../gestores_ifix"""'], {}), "('../gestores_ifix')\n", (144, 164), False, 'import os\n'), ((279, 313), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (292, 313), False, 'from bs4 import BeautifulSoup\n'), ((207, 228), 'os.path.join', 'os.path.join', (['r', 'file'], {}), '(r, file)\n', (219, 228), False, 'import os\n')] |
import statsmodels.api as sm
import statsmodels.formula.api as smf
import numpy as np
import pandas as pd
from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
from sklearn.linear_model import LinearRegression
import sys; import re
def AIC(data, model, model_type, k=2):
    """Information criterion for a fitted model.

    For a linear model: n * log(SSR / n) + k * (p + 1), where p is the
    model's degrees of freedom; k=2 gives AIC, k=log(n) gives BIC.
    For a logistic model the statsmodels-reported AIC is returned.
    Returns None for any other model_type (unchanged behavior).
    """
    n = len(data)
    if model_type == 'linear':
        penalty = k * (model.df_model + 1)
        return n * np.log(model.ssr / n) + penalty
    elif model_type == 'logistic':
        return model.aic
def Cp(data, model, sigma2):
    """Mallows' Cp: SSR / sigma2 - (n - 2p - 1).

    sigma2 is the error-variance estimate, conventionally taken from the
    full model.
    """
    n = len(data)
    scaled_ssr = model.ssr / sigma2
    return scaled_ssr - (n - 2.0 * model.df_model - 1)
def BIC(data, model, model_type='linear'):
    """Bayesian information criterion for a fitted model.

    Linear: n * log(SSR / TSS) + (p + 1) * log(n).
    Logistic: the model's reported BIC.
    Returns None for any other model_type (unchanged behavior).
    """
    if model_type == 'linear':
        n = len(data)
        fit_term = np.log(model.ssr / model.centered_tss) * n
        return fit_term + (model.df_model + 1) * np.log(n)
    elif model_type == 'logistic':
        # NOTE(review): statsmodels GLMResults exposes `bic_llf` (with an
        # underscore); confirm `bicllf` exists on the result objects in use.
        return model.bicllf
def regressor(y, X, model_type):
    """Fit and return a model of `y` on `X`.

    'linear' fits ordinary least squares; 'logistic' fits a binomial GLM.
    X must already contain an intercept column if one is wanted.
    """
    if model_type == "linear":
        fitted = sm.OLS(y, X).fit()
    elif model_type == 'logistic':
        glm = sm.GLM(y, X, family=sm.families.Binomial())
        fitted = glm.fit()
    return fitted
def criterion_f(X, model, model_type, elimination_criterion):
    """Evaluate the selection criterion ('aic' or 'bic') for a fitted model."""
    if elimination_criterion == 'aic':
        return AIC(X, model, model_type)
    if elimination_criterion == 'bic':
        # BIC is AIC with the penalty multiplier k = log(n).
        return AIC(X, model, model_type, k=np.log(len(X)))
def detect_dummies(X, variable):
    """Return the group of columns that must move together with `variable`.

    If `variable` is a 0/1 indicator whose name carries a prefix (e.g.
    'sex_male' or 'sex[male]'), every other column containing that prefix is
    grouped with it, so a whole dummy-encoded factor is added or removed as
    one unit.  Otherwise the variable alone is returned.
    """
    remaining = X.columns.tolist()
    grouped = []
    values = X[variable]
    is_indicator = (
        len(values.value_counts()) == 2
        and values.min() == 0
        and values.max() == 1
    )
    if is_indicator:
        remaining.remove(variable)
        grouped.append(variable)
        prefix_match = re.search(r'^([a-zA-Z0-9]+)[\[_]', variable)
        if prefix_match:
            prefix = prefix_match.group(1)
            for name in remaining:
                if prefix in name:
                    grouped.append(name)
    else:
        grouped.append(variable)
    return grouped
def forwardSelection(X, y, model_type="linear", elimination_criterion="aic", verbose=False):
    """Forward stepwise model selection.

    Grows the model from intercept-only toward the full set of columns of
    X, adding one variable (or dummy group) at a time while the criterion
    ('aic' or 'bic') improves.

    X : DataFrame (n x p) of predictors, including an 'Intercept' column.
    y : output series/array of length n.
    Returns the fitted model on the selected variables.
    """
    return __forwardSelectionRaw__(
        X, y,
        model_type=model_type,
        elimination_criterion=elimination_criterion,
        verbose=verbose,
    )
def backwardSelection(X, y, model_type="linear", elimination_criterion="aic", verbose=False):
    """Backward stepwise model selection.

    Shrinks the model from all columns of X toward intercept-only,
    eliminating one variable (or dummy group) at a time while the criterion
    ('aic' or 'bic') improves.

    X : DataFrame (n x p) of predictors, including an 'Intercept' column.
    y : output series/array of length n.
    Returns the fitted model on the surviving variables.
    """
    return __backwardSelectionRaw__(
        X, y,
        model_type=model_type,
        elimination_criterion=elimination_criterion,
        verbose=verbose,
    )
def bothSelection(X, y, model_type="linear", elimination_criterion="aic", start='full', verbose=False):
    """Bidirectional stepwise selection; `start` is 'full' or empty-model."""
    return __bothSelectionRaw__(
        X, y,
        model_type=model_type,
        elimination_criterion=elimination_criterion,
        start=start,
        verbose=verbose,
    )
def __forwardSelectionRaw__(X, y, model_type="linear", elimination_criterion="aic", verbose=False):
    """Forward stepwise selection driven by AIC/BIC.

    Starts from the intercept-only model and greedily adds the candidate
    (dummy groups are added as a block, see detect_dummies) that most
    improves the criterion, stopping when nothing improves it.

    Fix: candidate scores are now collected in a list and built into one
    DataFrame — ``DataFrame.append`` was removed in pandas 2.0.
    """
    cols = X.columns.tolist()
    # Begin from a simple model with only the intercept.
    selected_cols = ["Intercept"]
    other_cols = cols.copy()
    other_cols.remove("Intercept")
    model = regressor(y, X[selected_cols], model_type)
    criterion = criterion_f(X, model, model_type, elimination_criterion)
    for _ in range(X.shape[1]):
        rows = []
        for candidate in other_cols:
            cols_to_add = detect_dummies(X, candidate)
            model = regressor(y, X[selected_cols + cols_to_add], model_type)
            rows.append(
                [candidate, criterion_f(X, model, model_type, elimination_criterion)]
            )
        aicvals = pd.DataFrame(rows, columns=["Cols", "aic"])
        aicvals = aicvals.sort_values(by=["aic"]).reset_index(drop=True)
        if verbose:
            print(aicvals)
        if aicvals.shape[0] > 0:
            new_criterion = aicvals["aic"][0]
            if new_criterion < criterion:
                cols_to_add = detect_dummies(X, aicvals["Cols"][0])
                print("Entered :", aicvals["Cols"][0], "\tCriterion :", aicvals["aic"][0])
                for col in cols_to_add:
                    selected_cols.append(col)
                    other_cols.remove(col)
                criterion = new_criterion
            else:
                print("break : criterion")
                break
    model = regressor(y, X[selected_cols], model_type)
    print(model.summary())
    print("Criterion: " + str(criterion_f(X, model, model_type, elimination_criterion)))
    print("Final Variables:", selected_cols)
    return model
def __backwardSelectionRaw__(X, y, model_type="linear", elimination_criterion="aic", verbose=False):
    """Backward stepwise selection driven by AIC/BIC.

    Starts from the full model and greedily drops the candidate (dummy
    groups leave as a block, see detect_dummies) whose removal most
    improves the criterion, stopping when no removal improves it.

    Fixes: candidate scores are collected in a list and built into one
    DataFrame (``DataFrame.append`` was removed in pandas 2.0), and the
    inner loop no longer shadows the outer loop variable.
    """
    selected_cols = X.columns.tolist()
    selected_cols.remove('Intercept')
    model = regressor(y, X, model_type)
    criterion = criterion_f(X, model, model_type, elimination_criterion)
    for _ in range(X.shape[1]):
        if len(selected_cols) == 0:
            print("break : Only Intercept left")
            break
        rows = []
        for candidate in selected_cols:
            temp_cols = selected_cols.copy()
            # A dummy-encoded factor is removed as one unit.
            for col in detect_dummies(X, candidate):
                temp_cols.remove(col)
            model = regressor(y, X[['Intercept'] + temp_cols], model_type)
            rows.append(
                [candidate, criterion_f(X, model, model_type, elimination_criterion)]
            )
        aicvals = pd.DataFrame(rows, columns=["Cols", "aic"])
        aicvals = aicvals.sort_values(by=["aic"]).reset_index(drop=True)
        if verbose:
            print(aicvals)
        new_criterion = aicvals["aic"][0]
        if new_criterion < criterion:
            print("Eliminated :", aicvals["Cols"][0], "\tCriterion :", aicvals["aic"][0])
            cols_removed = detect_dummies(X, aicvals["Cols"][0])
            for col in cols_removed:
                selected_cols.remove(col)
            criterion = new_criterion
        else:
            print("break : criterion")
            break
    model = regressor(y, X[['Intercept'] + selected_cols], model_type)
    print(str(model.summary()) + "\nCriterion: " + str(criterion_f(X, model, model_type, elimination_criterion)))
    print("Final Variables:", selected_cols)
    return model
def __bothSelectionRaw__(X, y, model_type="linear", elimination_criterion="aic", start='full', verbose=False):
    """Bidirectional (stepwise) selection driven by AIC/BIC.

    At each step considers deleting any selected variable and adding back
    any removed one (dummy groups move as a block), applies the single move
    that most improves the criterion, and stops when nothing improves it.

    X : DataFrame (n x p) of predictors, including an 'Intercept' column.
    y : output series/array of length n.
    start : 'full' starts from the complete model, anything else from the
            intercept-only model.
    Returns the fitted model on the selected variables.

    Fixes vs. the original:
    - ``DataFrame.append`` (removed in pandas 2.0) replaced by building the
      candidate table from a list.
    - With ``start != 'full'`` the old code ``continue``-d past the add
      phase whenever no variable was selected, looping forever; additions
      are now always attempted.
    """
    cols = X.columns.tolist()
    if start == 'full':
        removed_cols = []
        selected_cols = cols.copy()
        selected_cols.remove("Intercept")
    else:
        selected_cols = []
        removed_cols = cols.copy()
        removed_cols.remove("Intercept")
    model = regressor(y, X[['Intercept'] + selected_cols], model_type)
    criterion = criterion_f(X, model, model_type, elimination_criterion)
    while True:
        rows = []
        # Candidate deletions of variables currently in the model.
        for candidate in selected_cols:
            temp_cols = selected_cols.copy()
            for col in detect_dummies(X, candidate):
                temp_cols.remove(col)
            model = regressor(y, X[['Intercept'] + temp_cols], model_type)
            rows.append(
                [candidate,
                 criterion_f(X, model, model_type, elimination_criterion),
                 'delete']
            )
        # Candidate additions of previously removed variables.
        for candidate in removed_cols:
            cols_to_add = detect_dummies(X, candidate)
            model = regressor(y, X[['Intercept'] + selected_cols + cols_to_add], model_type)
            rows.append(
                [candidate,
                 criterion_f(X, model, model_type, elimination_criterion),
                 'add']
            )
        aicvals = pd.DataFrame(rows, columns=["Cols", "aic", "way"])
        aicvals = aicvals.sort_values(by=["aic"]).reset_index(drop=True)
        if verbose:
            print(aicvals)
        if aicvals.shape[0] > 0:
            new_criterion = aicvals["aic"][0]
            if new_criterion < criterion:
                cols_concerned = detect_dummies(X, aicvals["Cols"][0])
                if aicvals["way"][0] == 'delete':
                    print("Eliminated :", aicvals["Cols"][0], "\tCriterion :", aicvals["aic"][0])
                    criterion = new_criterion
                    for col in cols_concerned:
                        selected_cols.remove(col)
                        removed_cols.append(col)
                elif aicvals["way"][0] == 'add':
                    print("Entered :", aicvals["Cols"][0], "\tCriterion :", aicvals["aic"][0])
                    for col in cols_concerned:
                        selected_cols.append(col)
                        removed_cols.remove(col)
                    criterion = new_criterion
            else:
                print("break : criterion")
                break
        else:
            # No candidate moves at all (degenerate input); avoid looping.
            print("break : criterion")
            break
    model = regressor(y, X[['Intercept'] + selected_cols], model_type)
    print(str(model.summary()) + "\nCriterion: " + str(criterion_f(X, model, model_type, elimination_criterion)))
    print("Final Variables:", selected_cols)
    return model
def exhaustivesearch_selectionmodel(X, y, vmin=1, vmax=10):
    """Exhaustive best-subset search for LINEAR regression y ~ X.

    Fits every subset of between `vmin` and `vmax` predictors (via
    mlxtend's ExhaustiveFeatureSelector), keeps the best model of each size
    by MSE, then reports R2, adjusted R2, Mallows' Cp and BIC for those
    models.

    X : DataFrame of predictors WITHOUT an intercept column, n x p.
    y : DataFrame/Series of the output variable.
    Returns a DataFrame of the criteria, one row per model size.

    Fixes vs. the original: `vmin` is now honoured (it was accepted but
    ignored), `np.infty` (removed in NumPy 2.0) replaced by `np.inf`, and
    the loop-invariant astype(object) is hoisted out of the loop.
    """
    if ('const' in X.columns.tolist()) or ('Intercept' in X.columns.tolist()):
        raise SystemExit('Delete Intercept column in X before to pass it to this function')
    # Exhaustive search: fits every subset of size vmin..vmax, scored by
    # negative MSE (no cross-validation).
    lm = LinearRegression(fit_intercept=True)
    efs1 = EFS(lm, min_features=vmin, max_features=vmax,
               scoring='neg_mean_squared_error', print_progress=True, cv=False)
    efs1 = efs1.fit(X, y)
    # For each size k keep the subset with the best (least negative) score.
    best_idxs_all = []
    for k in range(vmin, vmax + 1):
        best_score = -np.inf
        best_idx = 0
        for i in efs1.subsets_:
            if len(efs1.subsets_[i]['feature_idx']) == k:
                if efs1.subsets_[i]['avg_score'] > best_score:
                    best_score = efs1.subsets_[i]['avg_score']
                    best_idx = i
        best_idxs_all.append(best_idx)
    df_subsets = pd.DataFrame(
        index=best_idxs_all,
        columns=['Variables', 'R2', 'R2_adj', 'Cp', 'BIC',
                 'Number of variables (except intercept)'])
    # Object dtype lets us store the variable-name lists per row.
    df_subsets['Variables'] = df_subsets['Variables'].astype(object)
    X_copy = sm.add_constant(X.copy())
    # Error variance estimated from the full model, as Mallows' Cp requires.
    full_model = sm.OLS(y, X_copy).fit()
    sigma2 = full_model.ssr / (len(X_copy) - full_model.df_model - 1)
    for index in best_idxs_all:
        variables = np.array(efs1.subsets_[index]['feature_names']).tolist()
        df_subsets.loc[index, 'Number of variables (except intercept)'] = len(variables)
        model = sm.OLS(y, X_copy[['const'] + variables]).fit()
        df_subsets.loc[index, 'R2'] = model.rsquared
        df_subsets.loc[index, 'R2_adj'] = model.rsquared_adj
        df_subsets.loc[index, 'BIC'] = BIC(X_copy, model)
        df_subsets.loc[index, 'Cp'] = Cp(X_copy, model, sigma2)
        df_subsets.loc[index, 'Variables'] = variables
    return df_subsets
| [
"mlxtend.feature_selection.ExhaustiveFeatureSelector",
"numpy.log",
"statsmodels.api.families.Binomial",
"numpy.array",
"statsmodels.api.add_constant",
"pandas.DataFrame",
"statsmodels.api.OLS",
"sklearn.linear_model.LinearRegression",
"re.search"
] | [((10412, 10448), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(True)'}), '(fit_intercept=True)\n', (10428, 10448), False, 'from sklearn.linear_model import LinearRegression\n'), ((10457, 10568), 'mlxtend.feature_selection.ExhaustiveFeatureSelector', 'EFS', (['lm'], {'min_features': '(1)', 'max_features': 'vmax', 'scoring': '"""neg_mean_squared_error"""', 'print_progress': '(True)', 'cv': '(False)'}), "(lm, min_features=1, max_features=vmax, scoring='neg_mean_squared_error',\n print_progress=True, cv=False)\n", (10460, 10568), True, 'from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS\n'), ((10984, 11115), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'best_idxs_all', 'columns': "['Variables', 'R2', 'R2_adj', 'Cp', 'BIC',\n 'Number of variables (except intercept)']"}), "(index=best_idxs_all, columns=['Variables', 'R2', 'R2_adj',\n 'Cp', 'BIC', 'Number of variables (except intercept)'])\n", (10996, 11115), True, 'import pandas as pd\n'), ((11135, 11158), 'statsmodels.api.add_constant', 'sm.add_constant', (['X_copy'], {}), '(X_copy)\n', (11150, 11158), True, 'import statsmodels.api as sm\n'), ((819, 831), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (825, 831), True, 'import statsmodels.api as sm\n'), ((1544, 1588), 're.search', 're.search', (['"""^([a-zA-Z0-9]+)[\\\\[_]"""', 'variable'], {}), "('^([a-zA-Z0-9]+)[\\\\[_]', variable)\n", (1553, 1588), False, 'import re\n'), ((3767, 3804), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Cols', 'aic']"}), "(columns=['Cols', 'aic'])\n", (3779, 3804), True, 'import pandas as pd\n'), ((5110, 5147), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Cols', 'aic']"}), "(columns=['Cols', 'aic'])\n", (5122, 5147), True, 'import pandas as pd\n'), ((7344, 7388), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Cols', 'aic', 'way']"}), "(columns=['Cols', 'aic', 'way'])\n", (7356, 7388), True, 'import pandas as pd\n'), ((11173, 
11190), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X_copy'], {}), '(y, X_copy)\n', (11179, 11190), True, 'import statsmodels.api as sm\n'), ((603, 641), 'numpy.log', 'np.log', (['(model.ssr / model.centered_tss)'], {}), '(model.ssr / model.centered_tss)\n', (609, 641), True, 'import numpy as np\n'), ((11426, 11445), 'numpy.array', 'np.array', (['variables'], {}), '(variables)\n', (11434, 11445), True, 'import numpy as np\n'), ((11547, 11587), 'statsmodels.api.OLS', 'sm.OLS', (['y', "X_copy[['const'] + variables]"], {}), "(y, X_copy[['const'] + variables])\n", (11553, 11587), True, 'import statsmodels.api as sm\n'), ((935, 957), 'statsmodels.api.families.Binomial', 'sm.families.Binomial', ([], {}), '()\n', (955, 957), True, 'import statsmodels.api as sm\n'), ((1601, 1645), 're.search', 're.search', (['"""^([a-zA-Z0-9]+)[\\\\[_]"""', 'variable'], {}), "('^([a-zA-Z0-9]+)[\\\\[_]', variable)\n", (1610, 1645), False, 'import re\n')] |
import os
import tempfile
from django.core.files.storage import FileSystemStorage
import django.core.files.storage
# dummy django.conf.settings
class Settings():
    """Minimal stand-in for django.conf.settings.

    Provides just the attributes FileSystemStorage reads, so storage can be
    used in tests without a configured Django project.
    """
    MEDIA_ROOT = os.path.dirname(os.path.abspath(__file__))  # this file's directory
    MEDIA_URL = 'http://local/'
    FILE_UPLOAD_PERMISSIONS = 0o777
    FILE_UPLOAD_DIRECTORY_PERMISSIONS = 0o777
    USE_TZ = False  # naive datetimes for file timestamps
# switch settings
# Monkey-patch the storage module so FileSystemStorage reads the dummy
# Settings above instead of a configured Django project's settings.
django.core.files.storage.settings = Settings()
def get_test_storage():
    """Create a FileSystemStorage rooted at a fresh temporary directory.

    Returns (directory_path, storage); the caller is responsible for
    removing the directory afterwards.
    """
    location = tempfile.mkdtemp()
    return location, FileSystemStorage(location=location, base_url='/')
def get_storage(folder):
    """Return a FileSystemStorage serving files from `folder` at URL root."""
    return FileSystemStorage(location=folder, base_url='/')
| [
"django.core.files.storage.FileSystemStorage",
"tempfile.mkdtemp",
"os.path.abspath"
] | [((465, 483), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (481, 483), False, 'import tempfile\n'), ((498, 548), 'django.core.files.storage.FileSystemStorage', 'FileSystemStorage', ([], {'location': 'temp_dir', 'base_url': '"""/"""'}), "(location=temp_dir, base_url='/')\n", (515, 548), False, 'from django.core.files.storage import FileSystemStorage\n'), ((619, 667), 'django.core.files.storage.FileSystemStorage', 'FileSystemStorage', ([], {'location': 'folder', 'base_url': '"""/"""'}), "(location=folder, base_url='/')\n", (636, 667), False, 'from django.core.files.storage import FileSystemStorage\n'), ((197, 222), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (212, 222), False, 'import os\n')] |
# data/models.py
import datetime
from sqlalchemy.ext.hybrid import hybrid_property
from app.extensions import db
class CallTableModel(db.Model):
    """ORM model for one phone-call record (table ``c_call``)."""
    __tablename__ = 'c_call'
    # Attributes shown in the model's repr — presumably consumed by a base
    # model mixin; TODO confirm against app.extensions.db.Model.
    __repr_attrs__ = ['call_id', 'calling_party_number', 'dialed_party_number',
                      'start_time', 'end_time', 'caller_id']
    call_id = db.Column(db.Integer, primary_key=True)
    call_direction = db.Column(db.Integer)
    calling_party_number = db.Column(db.String)
    dialed_party_number = db.Column(db.String)
    account_code = db.Column(db.String)
    start_time = db.Column(db.DateTime)
    end_time = db.Column(db.DateTime)
    system_id = db.Column(db.Integer)
    caller_id = db.Column(db.String)
    inbound_route = db.Column(db.String)
    events = db.relationship("EventTableModel", lazy="dynamic")
    @hybrid_property
    def length(self):
        """Call duration as end_time - start_time, truncated to whole seconds."""
        delta = self.end_time - self.start_time
        # Drop the sub-second component for display purposes.
        return delta - datetime.timedelta(microseconds=delta.microseconds)
    @classmethod
    def set_empty(cls, model):
        """Reset `model.data` to an empty dict and return the model.

        NOTE(review): `data` is not a mapped column of this table; confirm
        where callers rely on this attribute.
        """
        model.data = {}
        return model
| [
"datetime.timedelta",
"app.extensions.db.relationship",
"app.extensions.db.Column"
] | [((333, 372), 'app.extensions.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (342, 372), False, 'from app.extensions import db\n'), ((394, 415), 'app.extensions.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (403, 415), False, 'from app.extensions import db\n'), ((443, 463), 'app.extensions.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (452, 463), False, 'from app.extensions import db\n'), ((490, 510), 'app.extensions.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (499, 510), False, 'from app.extensions import db\n'), ((530, 550), 'app.extensions.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (539, 550), False, 'from app.extensions import db\n'), ((568, 590), 'app.extensions.db.Column', 'db.Column', (['db.DateTime'], {}), '(db.DateTime)\n', (577, 590), False, 'from app.extensions import db\n'), ((606, 628), 'app.extensions.db.Column', 'db.Column', (['db.DateTime'], {}), '(db.DateTime)\n', (615, 628), False, 'from app.extensions import db\n'), ((645, 666), 'app.extensions.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (654, 666), False, 'from app.extensions import db\n'), ((683, 703), 'app.extensions.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (692, 703), False, 'from app.extensions import db\n'), ((724, 744), 'app.extensions.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (733, 744), False, 'from app.extensions import db\n'), ((758, 808), 'app.extensions.db.relationship', 'db.relationship', (['"""EventTableModel"""'], {'lazy': '"""dynamic"""'}), "('EventTableModel', lazy='dynamic')\n", (773, 808), False, 'from app.extensions import db\n'), ((924, 975), 'datetime.timedelta', 'datetime.timedelta', ([], {'microseconds': 'delta.microseconds'}), '(microseconds=delta.microseconds)\n', (942, 975), False, 'import datetime\n')] |
from django.db import models
# Create your models here.
class user(models.Model):
    """Contact-form submission: a visitor's details and message."""
    name = models.CharField(max_length=25)
    # Stored as free text; defaults to the Pakistan dialing prefix.
    phone = models.CharField(max_length=25, default='+92')
    email = models.EmailField()
    city = models.CharField(max_length=20)
    # The message body of the submission.
    content = models.TextField()
    def __str__(self):
        return self.name
| [
"django.db.models.EmailField",
"django.db.models.TextField",
"django.db.models.CharField"
] | [((95, 126), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(25)'}), '(max_length=25)\n', (111, 126), False, 'from django.db import models\n'), ((139, 185), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(25)', 'default': '"""+92"""'}), "(max_length=25, default='+92')\n", (155, 185), False, 'from django.db import models\n'), ((197, 216), 'django.db.models.EmailField', 'models.EmailField', ([], {}), '()\n', (214, 216), False, 'from django.db import models\n'), ((228, 259), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (244, 259), False, 'from django.db import models\n'), ((274, 292), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (290, 292), False, 'from django.db import models\n')] |
from collections import OrderedDict
import numpy as np
from pandas import DataFrame
from sv import SVData, CorTiming
def loadSVDataFromBUGSDataset(filepath, logreturnforward, logreturnscale, dtfilepath=None):
    """Load log-returns (and optionally time deltas) from BUGS-format files.

    The first and last lines of each file are treated as delimiters and
    skipped; every remaining line is parsed as one float.  Returns an
    SVData with a DataFrame indexed by observation number.
    """
    def _read_values(path):
        # Drop the header line and the trailing END marker.
        with open(path) as handle:
            body = handle.readlines()[1:-1]
        return np.array([float(entry) for entry in body])

    dts = _read_values(dtfilepath) if dtfilepath is not None else None
    logreturns = _read_values(filepath)
    times = range(len(logreturns))
    columns = OrderedDict()
    columns['logreturn'] = logreturns
    if dts is not None:
        columns['dt'] = dts
    svdf = DataFrame(columns, index=times)
    return SVData(
        sourcekind='loader',
        source=loadSVDataFromBUGSDataset,
        svdf=svdf,
        params=None,
        cortiming=CorTiming.unknown,
        logreturnforward=logreturnforward,
        logreturnscale=logreturnscale)
| [
"collections.OrderedDict",
"sv.SVData"
] | [((789, 978), 'sv.SVData', 'SVData', ([], {'sourcekind': '"""loader"""', 'source': 'loadSVDataFromBUGSDataset', 'svdf': 'svdf', 'params': 'None', 'cortiming': 'CorTiming.unknown', 'logreturnforward': 'logreturnforward', 'logreturnscale': 'logreturnscale'}), "(sourcekind='loader', source=loadSVDataFromBUGSDataset, svdf=svdf,\n params=None, cortiming=CorTiming.unknown, logreturnforward=\n logreturnforward, logreturnscale=logreturnscale)\n", (795, 978), False, 'from sv import SVData, CorTiming\n'), ((614, 667), 'collections.OrderedDict', 'OrderedDict', (["(('logreturn', logreturns), ('dt', dts))"], {}), "((('logreturn', logreturns), ('dt', dts)))\n", (625, 667), False, 'from collections import OrderedDict\n'), ((717, 758), 'collections.OrderedDict', 'OrderedDict', (["(('logreturn', logreturns),)"], {}), "((('logreturn', logreturns),))\n", (728, 758), False, 'from collections import OrderedDict\n')] |
from utils import get_matcher
from lookup import ROMAN_TO_INT
from lines import Dialogue, Character, Instruction, Act, Scene
def get_speaking_characters(raw_play_lines, character_matcher):
    """ Return a set of all character names

    Parameters
    ----------
    raw_play_lines : list of str
        lines of the play.
    character_matcher : compiled regex expression
        used to extract character names from raw_play_lines, regex must
        include a group called 'name'.
    """
    names = set()
    for line in raw_play_lines:
        match = character_matcher.search(line)
        if match:
            names.add(match.group('name').upper())
    return names
def parse_raw_text(raw_play_lines, speaking_characters, matcher):
    """ Parse the lines of the play which is in HTML

    Each line is either ignored or put into a class derived from a
    namedtuple (Dialogue, Character, Instruction, Act, Scene).

    Parameters
    ----------
    raw_play_lines : list of str
        lines of the play
    speaking_characters : set of str
        names of characters who speak
    matcher : namedtuple
        matcher must contain the following compiled regex
        matchers, with the following groups.

        MATCHER : GROUP NAMES
        ---------------------
        dialogue : 'act' , 'scene', 'dialogue' ; opt : 'instruction'
        character : 'name'
        stage_direction : 'stage_direction'
        instruction : no name, uses index 0
        act : 'act'
        scene : 'scene'

    Returns
    -------
    list
        parsed line objects, in play order.

    Notes
    -----
    character_chain
        A list of the characters who speak in turn, all capitalized

    Example
    -------
    >>> PLAY_NAME
    alls_well_that_ends_well
    >>> character_chain
    ['COUNTESS', 'BERTRAM', 'LAFEU', 'COUNTESS', 'BERTRAM', ...]
    """
    known_characters_matcher = get_matcher(speaking_characters, "character")
    parsed_lines = []
    character_chain = []
    for line in raw_play_lines:
        d_match = matcher.dialogue.search(line)
        # d has 3-4 groups : act, scene, dialogue, optional instruction
        if d_match:
            # Some dialogue patterns omit the 'instruction' group entirely;
            # default to None instead of letting IndexError propagate.
            # (BUG FIX: the guarded value was previously discarded and
            # d_match.group('instruction') was called again unguarded.)
            try:
                instruction = d_match.group('instruction')
            except IndexError:
                instruction = None
            # NOTE(review): character_chain[-1] assumes a Character line
            # precedes the first Dialogue line -- true for well-formed plays.
            dialogue = Dialogue(
                d_match.group('dialogue'),
                process_instructions(
                    instruction,
                    known_characters_matcher,
                    matcher.instruction,
                    character_chain[-1]),
                character_chain[-1],
                d_match.group('act'),
                d_match.group('scene'))
            parsed_lines.append(dialogue)
            continue
        c_match = matcher.character.search(line)
        if c_match:
            name = c_match.group('name').upper()
            character_chain.append(name)
            parsed_lines.append(Character(name))
            continue
        sd_match = matcher.stage_direction.search(line)
        if sd_match:
            stage_direction = sd_match.group('stage_direction')
            # Stage directions are attributed to the most recent speaker.
            prev_character = character_chain[-1] if character_chain else None
            instruction = process_instructions(
                stage_direction,
                known_characters_matcher,
                matcher.instruction,
                prev_character)
            parsed_lines.append(instruction)
            continue
        act_match = matcher.act.search(line)
        if act_match:
            act = ROMAN_TO_INT[act_match.group('act')]
            parsed_lines.append(Act(act))
            continue
        scene_match = matcher.scene.search(line)
        if scene_match:
            scene = ROMAN_TO_INT[scene_match.group('scene')]
            parsed_lines.append(Scene(scene))
            continue
    return parsed_lines
def process_instructions(instruction, known_characters_matcher,
        instruction_matcher, default_character):
    """
    Turn a raw instruction string into an Instruction object.

    For each sentence only one action (the first) is matched, but a single
    instruction can contain multiple sentences, which is why actions are
    returned as a list. Each action can be applied to multiple characters.
    Note that all character names are shifted to uppercase.
    """
    if instruction is None:
        return None
    sentences = instruction.split(".")
    actions = []
    characters = []
    for sentence in sentences:
        action_match = instruction_matcher.search(sentence)
        actions.append(action_match.group(0) if action_match else None)
        characters.append(
            [name.upper()
             for name in known_characters_matcher.findall(sentence)])
    return Instruction(instruction, actions, characters, default_character)
def preprocess(raw_play_lines, matcher):
    """Run the full preprocessing pipeline on the raw play text.

    Returns the set of speaking characters together with the parsed
    line objects, in that order.
    """
    cast = get_speaking_characters(raw_play_lines, matcher.character)
    return cast, parse_raw_text(raw_play_lines, cast, matcher)
| [
"utils.get_matcher",
"lines.Instruction",
"lines.Scene",
"lines.Character",
"lines.Act"
] | [((1797, 1842), 'utils.get_matcher', 'get_matcher', (['speaking_characters', '"""character"""'], {}), "(speaking_characters, 'character')\n", (1808, 1842), False, 'from utils import get_matcher\n'), ((4922, 4986), 'lines.Instruction', 'Instruction', (['instruction', 'actions', 'characters', 'default_character'], {}), '(instruction, actions, characters, default_character)\n', (4933, 4986), False, 'from lines import Dialogue, Character, Instruction, Act, Scene\n'), ((2918, 2933), 'lines.Character', 'Character', (['name'], {}), '(name)\n', (2927, 2933), False, 'from lines import Dialogue, Character, Instruction, Act, Scene\n'), ((3637, 3645), 'lines.Act', 'Act', (['act'], {}), '(act)\n', (3640, 3645), False, 'from lines import Dialogue, Character, Instruction, Act, Scene\n'), ((3906, 3918), 'lines.Scene', 'Scene', (['scene'], {}), '(scene)\n', (3911, 3918), False, 'from lines import Dialogue, Character, Instruction, Act, Scene\n')] |
from __future__ import print_function
import struct
import copy
#this class handles different protocol versions
class RobotStateRT(object):
    """Entry point that selects the protocol-specific parser by packet size."""

    @staticmethod
    def unpack(buf):
        """Parse *buf* with the parser matching its leading length field.

        Unknown lengths are reported on stdout and an empty RobotStateRT
        instance is returned instead of raising.
        """
        (plen, ptype) = struct.unpack_from("!IB", buf)
        if plen == 756:
            return RobotStateRT_V15.unpack(buf)
        if plen == 812:
            return RobotStateRT_V18.unpack(buf)
        if plen == 1044:
            return RobotStateRT_V30.unpack(buf)
        print("RobotStateRT has wrong length: " + str(plen))
        return RobotStateRT()
#this parses RobotStateRT for versions = v1.5
#http://wiki03.lynero.net/Technical/RealTimeClientInterface?foswiki_redirect_cache=9b4574b30760f720c6f79c5f1f2203dd
class RobotStateRT_V15(object):
    """Real-time state packet for controller software v1.5 (756 bytes).

    All payload fields are big-endian IEEE doubles laid out in the order of
    ``__slots__``; ``unpack`` documents the per-field sizes.
    """

    __slots__ = ['time',
                 'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
                 'q_actual', 'qd_actual', 'i_actual', 'tool_acc_values',
                 'unused',
                 'tcp_force', 'tool_vector', 'tcp_speed',
                 'digital_input_bits', 'motor_temperatures', 'controller_timer',
                 'test_value']

    @staticmethod
    def _scalar(buf, offset):
        """Read one big-endian double; return (value, advanced offset)."""
        return struct.unpack_from("!d", buf, offset)[0], offset + 8

    @staticmethod
    def _vector(buf, offset, count):
        """Read *count* big-endian doubles; return (list, advanced offset).

        A plain list is enough: the elements are floats, so the
        copy.deepcopy calls used previously added no protection, only cost.
        """
        values = list(struct.unpack_from("!%dd" % count, buf, offset))
        return values, offset + 8 * count

    @staticmethod
    def unpack(buf):
        """Parse a complete 756-byte v1.5 packet into a RobotStateRT_V15.

        Raises
        ------
        Exception
            If the leading length field disagrees with ``len(buf)``.
        """
        scalar = RobotStateRT_V15._scalar
        vector = RobotStateRT_V15._vector
        offset = 0
        message_size = struct.unpack_from("!i", buf, offset)[0]
        offset += 4
        if message_size != len(buf):
            print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
            raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")
        rs = RobotStateRT_V15()
        # time: 1x double
        rs.time, offset = scalar(buf, offset)
        # joint-space vectors: 6 doubles each, in wire order
        rs.q_target, offset = vector(buf, offset, 6)
        rs.qd_target, offset = vector(buf, offset, 6)
        rs.qdd_target, offset = vector(buf, offset, 6)
        rs.i_target, offset = vector(buf, offset, 6)
        rs.m_target, offset = vector(buf, offset, 6)
        rs.q_actual, offset = vector(buf, offset, 6)
        rs.qd_actual, offset = vector(buf, offset, 6)
        rs.i_actual, offset = vector(buf, offset, 6)
        # tool accelerometer: 3x double
        rs.tool_acc_values, offset = vector(buf, offset, 3)
        # 15 unused doubles are skipped outright
        offset += 120
        rs.unused = []
        rs.tcp_force, offset = vector(buf, offset, 6)
        rs.tool_vector, offset = vector(buf, offset, 6)
        rs.tcp_speed, offset = vector(buf, offset, 6)
        # digital inputs are transmitted encoded as a single double
        rs.digital_input_bits, offset = scalar(buf, offset)
        rs.motor_temperatures, offset = vector(buf, offset, 6)
        rs.controller_timer, offset = scalar(buf, offset)
        rs.test_value, offset = scalar(buf, offset)
        return rs
#this parses RobotStateRT for versions <= v1.8 (i.e. 1.6, 1.7, 1.8)
class RobotStateRT_V18(object):
    """Real-time state packet for controller software v1.6-v1.8 (812 bytes).

    Identical to the v1.5 layout plus trailing robot_mode and joint_modes
    fields; all payload fields are big-endian IEEE doubles.
    """

    __slots__ = ['time',
                 'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
                 'q_actual', 'qd_actual', 'i_actual', 'tool_acc_values',
                 'unused',
                 'tcp_force', 'tool_vector', 'tcp_speed',
                 'digital_input_bits', 'motor_temperatures', 'controller_timer',
                 'test_value',
                 'robot_mode', 'joint_modes']

    @staticmethod
    def _scalar(buf, offset):
        """Read one big-endian double; return (value, advanced offset)."""
        return struct.unpack_from("!d", buf, offset)[0], offset + 8

    @staticmethod
    def _vector(buf, offset, count):
        """Read *count* big-endian doubles; return (list, advanced offset).

        A plain list is enough: the elements are floats, so the
        copy.deepcopy calls used previously added no protection, only cost.
        """
        values = list(struct.unpack_from("!%dd" % count, buf, offset))
        return values, offset + 8 * count

    @staticmethod
    def unpack(buf):
        """Parse a complete 812-byte v1.6-v1.8 packet into a RobotStateRT_V18.

        Raises
        ------
        Exception
            If the leading length field disagrees with ``len(buf)``.
        """
        scalar = RobotStateRT_V18._scalar
        vector = RobotStateRT_V18._vector
        offset = 0
        message_size = struct.unpack_from("!i", buf, offset)[0]
        offset += 4
        if message_size != len(buf):
            print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
            raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")
        rs = RobotStateRT_V18()
        # time: 1x double
        rs.time, offset = scalar(buf, offset)
        # joint-space vectors: 6 doubles each, in wire order
        rs.q_target, offset = vector(buf, offset, 6)
        rs.qd_target, offset = vector(buf, offset, 6)
        rs.qdd_target, offset = vector(buf, offset, 6)
        rs.i_target, offset = vector(buf, offset, 6)
        rs.m_target, offset = vector(buf, offset, 6)
        rs.q_actual, offset = vector(buf, offset, 6)
        rs.qd_actual, offset = vector(buf, offset, 6)
        rs.i_actual, offset = vector(buf, offset, 6)
        # tool accelerometer: 3x double
        rs.tool_acc_values, offset = vector(buf, offset, 3)
        # 15 unused doubles are skipped outright
        offset += 120
        rs.unused = []
        rs.tcp_force, offset = vector(buf, offset, 6)
        rs.tool_vector, offset = vector(buf, offset, 6)
        rs.tcp_speed, offset = vector(buf, offset, 6)
        # digital inputs are transmitted encoded as a single double
        rs.digital_input_bits, offset = scalar(buf, offset)
        rs.motor_temperatures, offset = vector(buf, offset, 6)
        rs.controller_timer, offset = scalar(buf, offset)
        rs.test_value, offset = scalar(buf, offset)
        rs.robot_mode, offset = scalar(buf, offset)
        rs.joint_modes, offset = vector(buf, offset, 6)
        return rs
#this parses RobotStateRT for versions >=3.0 (i.e. 3.0)
class RobotStateRT_V30(object):
    """Real-time state packet for controller software >= 3.0 (1044 bytes).

    All payload fields are big-endian IEEE doubles; several reserved runs
    are skipped without being stored (see the offset bumps in ``unpack``).
    """

    __slots__ = ['time',
                 'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
                 'q_actual', 'qd_actual', 'i_actual', 'i_control',
                 'tool_vector_actual', 'tcp_speed_actual', 'tcp_force',
                 'tool_vector_target', 'tcp_speed_target',
                 'digital_input_bits', 'motor_temperatures', 'controller_timer',
                 'test_value',
                 'robot_mode', 'joint_modes', 'safety_mode',
                 'tool_acc_values',
                 'speed_scaling', 'linear_momentum_norm',
                 'v_main', 'v_robot', 'i_robot', 'v_actual']

    @staticmethod
    def _scalar(buf, offset):
        """Read one big-endian double; return (value, advanced offset)."""
        return struct.unpack_from("!d", buf, offset)[0], offset + 8

    @staticmethod
    def _vector(buf, offset, count):
        """Read *count* big-endian doubles; return (list, advanced offset).

        A plain list is enough: the elements are floats, so the
        copy.deepcopy calls used previously added no protection, only cost.
        """
        values = list(struct.unpack_from("!%dd" % count, buf, offset))
        return values, offset + 8 * count

    @staticmethod
    def unpack(buf):
        """Parse a complete 1044-byte v3.0 packet into a RobotStateRT_V30.

        Raises
        ------
        Exception
            If the leading length field disagrees with ``len(buf)``.
        """
        scalar = RobotStateRT_V30._scalar
        vector = RobotStateRT_V30._vector
        offset = 0
        message_size = struct.unpack_from("!i", buf, offset)[0]
        offset += 4
        if message_size != len(buf):
            print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
            raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")
        rs = RobotStateRT_V30()
        # time: 1x double
        rs.time, offset = scalar(buf, offset)
        # joint-space vectors: 6 doubles each, in wire order
        rs.q_target, offset = vector(buf, offset, 6)
        rs.qd_target, offset = vector(buf, offset, 6)
        rs.qdd_target, offset = vector(buf, offset, 6)
        rs.i_target, offset = vector(buf, offset, 6)
        rs.m_target, offset = vector(buf, offset, 6)
        rs.q_actual, offset = vector(buf, offset, 6)
        rs.qd_actual, offset = vector(buf, offset, 6)
        rs.i_actual, offset = vector(buf, offset, 6)
        rs.i_control, offset = vector(buf, offset, 6)
        # cartesian state, 6 doubles each
        rs.tool_vector_actual, offset = vector(buf, offset, 6)
        rs.tcp_speed_actual, offset = vector(buf, offset, 6)
        rs.tcp_force, offset = vector(buf, offset, 6)
        rs.tool_vector_target, offset = vector(buf, offset, 6)
        rs.tcp_speed_target, offset = vector(buf, offset, 6)
        # digital inputs are transmitted encoded as a single double
        rs.digital_input_bits, offset = scalar(buf, offset)
        rs.motor_temperatures, offset = vector(buf, offset, 6)
        rs.controller_timer, offset = scalar(buf, offset)
        rs.test_value, offset = scalar(buf, offset)
        rs.robot_mode, offset = scalar(buf, offset)
        rs.joint_modes, offset = vector(buf, offset, 6)
        rs.safety_mode, offset = scalar(buf, offset)
        # 6 unused doubles
        offset += 48
        # tool accelerometer: 3x double
        rs.tool_acc_values, offset = vector(buf, offset, 3)
        # 6 unused doubles
        offset += 48
        rs.speed_scaling, offset = scalar(buf, offset)
        rs.linear_momentum_norm, offset = scalar(buf, offset)
        # 2 unused doubles
        offset += 16
        rs.v_main, offset = scalar(buf, offset)
        rs.v_robot, offset = scalar(buf, offset)
        rs.i_robot, offset = scalar(buf, offset)
        rs.v_actual, offset = vector(buf, offset, 6)
        return rs
| [
"copy.deepcopy",
"struct.unpack_from"
] | [((232, 262), 'struct.unpack_from', 'struct.unpack_from', (['"""!IB"""', 'buf'], {}), "('!IB', buf)\n", (250, 262), False, 'import struct\n'), ((1826, 1851), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (1839, 1851), False, 'import copy\n'), ((2014, 2039), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (2027, 2039), False, 'import copy\n'), ((2204, 2229), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (2217, 2229), False, 'import copy\n'), ((2391, 2416), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (2404, 2416), False, 'import copy\n'), ((2577, 2602), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (2590, 2602), False, 'import copy\n'), ((2763, 2788), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (2776, 2788), False, 'import copy\n'), ((2951, 2976), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (2964, 2976), False, 'import copy\n'), ((3137, 3162), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (3150, 3162), False, 'import copy\n'), ((3347, 3372), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (3360, 3372), False, 'import copy\n'), ((3627, 3652), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (3640, 3652), False, 'import copy\n'), ((3819, 3844), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (3832, 3844), False, 'import copy\n'), ((4007, 4032), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (4020, 4032), False, 'import copy\n'), ((4356, 4381), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (4369, 4381), False, 'import copy\n'), ((5868, 5893), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (5881, 5893), False, 'import copy\n'), ((6056, 6081), 'copy.deepcopy', 'copy.deepcopy', 
(['all_values'], {}), '(all_values)\n', (6069, 6081), False, 'import copy\n'), ((6246, 6271), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (6259, 6271), False, 'import copy\n'), ((6433, 6458), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (6446, 6458), False, 'import copy\n'), ((6619, 6644), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (6632, 6644), False, 'import copy\n'), ((6805, 6830), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (6818, 6830), False, 'import copy\n'), ((6993, 7018), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (7006, 7018), False, 'import copy\n'), ((7179, 7204), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (7192, 7204), False, 'import copy\n'), ((7376, 7401), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (7389, 7401), False, 'import copy\n'), ((7656, 7681), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (7669, 7681), False, 'import copy\n'), ((7848, 7873), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (7861, 7873), False, 'import copy\n'), ((8036, 8061), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (8049, 8061), False, 'import copy\n'), ((8385, 8410), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (8398, 8410), False, 'import copy\n'), ((8987, 9012), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (9000, 9012), False, 'import copy\n'), ((10509, 10534), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (10522, 10534), False, 'import copy\n'), ((10697, 10722), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (10710, 10722), False, 'import copy\n'), ((10887, 10912), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (10900, 10912), False, 
'import copy\n'), ((11074, 11099), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (11087, 11099), False, 'import copy\n'), ((11260, 11285), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (11273, 11285), False, 'import copy\n'), ((11446, 11471), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (11459, 11471), False, 'import copy\n'), ((11634, 11659), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (11647, 11659), False, 'import copy\n'), ((11820, 11845), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (11833, 11845), False, 'import copy\n'), ((12008, 12033), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (12021, 12033), False, 'import copy\n'), ((12214, 12239), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (12227, 12239), False, 'import copy\n'), ((12408, 12433), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (12421, 12433), False, 'import copy\n'), ((12596, 12621), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (12609, 12621), False, 'import copy\n'), ((12802, 12827), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (12815, 12827), False, 'import copy\n'), ((13004, 13029), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (13017, 13029), False, 'import copy\n'), ((13353, 13378), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (13366, 13378), False, 'import copy\n'), ((13956, 13981), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (13969, 13981), False, 'import copy\n'), ((14354, 14379), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (14367, 14379), False, 'import copy\n'), ((15343, 15368), 'copy.deepcopy', 'copy.deepcopy', (['all_values'], {}), '(all_values)\n', (15356, 15368), False, 'import copy\n'), 
((1251, 1288), 'struct.unpack_from', 'struct.unpack_from', (['"""!i"""', 'buf', 'offset'], {}), "('!i', buf, offset)\n", (1269, 1288), False, 'import struct\n'), ((1608, 1645), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (1626, 1645), False, 'import struct\n'), ((1741, 1783), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (1759, 1783), False, 'import struct\n'), ((1928, 1970), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (1946, 1970), False, 'import struct\n'), ((2117, 2159), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (2135, 2159), False, 'import struct\n'), ((2306, 2348), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (2324, 2348), False, 'import struct\n'), ((2492, 2534), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (2510, 2534), False, 'import struct\n'), ((2678, 2720), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (2696, 2720), False, 'import struct\n'), ((2865, 2907), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (2883, 2907), False, 'import struct\n'), ((3052, 3094), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (3070, 3094), False, 'import struct\n'), ((3258, 3297), 'struct.unpack_from', 'struct.unpack_from', (['"""!ddd"""', 'buf', 'offset'], {}), "('!ddd', buf, offset)\n", (3276, 3297), False, 'import struct\n'), ((3541, 3583), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", 
(3559, 3583), False, 'import struct\n'), ((3731, 3773), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (3749, 3773), False, 'import struct\n'), ((3921, 3963), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (3939, 3963), False, 'import struct\n'), ((4126, 4163), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (4144, 4163), False, 'import struct\n'), ((4261, 4303), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (4279, 4303), False, 'import struct\n'), ((4469, 4506), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (4487, 4506), False, 'import struct\n'), ((4602, 4639), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (4620, 4639), False, 'import struct\n'), ((5293, 5330), 'struct.unpack_from', 'struct.unpack_from', (['"""!i"""', 'buf', 'offset'], {}), "('!i', buf, offset)\n", (5311, 5330), False, 'import struct\n'), ((5650, 5687), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (5668, 5687), False, 'import struct\n'), ((5783, 5825), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (5801, 5825), False, 'import struct\n'), ((5970, 6012), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (5988, 6012), False, 'import struct\n'), ((6159, 6201), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (6177, 6201), False, 'import struct\n'), ((6348, 6390), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', 
buf, offset)\n", (6366, 6390), False, 'import struct\n'), ((6534, 6576), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (6552, 6576), False, 'import struct\n'), ((6720, 6762), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (6738, 6762), False, 'import struct\n'), ((6907, 6949), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (6925, 6949), False, 'import struct\n'), ((7094, 7136), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (7112, 7136), False, 'import struct\n'), ((7287, 7326), 'struct.unpack_from', 'struct.unpack_from', (['"""!ddd"""', 'buf', 'offset'], {}), "('!ddd', buf, offset)\n", (7305, 7326), False, 'import struct\n'), ((7570, 7612), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (7588, 7612), False, 'import struct\n'), ((7760, 7802), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (7778, 7802), False, 'import struct\n'), ((7950, 7992), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (7968, 7992), False, 'import struct\n'), ((8155, 8192), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (8173, 8192), False, 'import struct\n'), ((8290, 8332), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (8308, 8332), False, 'import struct\n'), ((8498, 8535), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (8516, 8535), False, 'import struct\n'), ((8631, 8668), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 
'buf', 'offset'], {}), "('!d', buf, offset)\n", (8649, 8668), False, 'import struct\n'), ((8764, 8801), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (8782, 8801), False, 'import struct\n'), ((8899, 8941), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (8917, 8941), False, 'import struct\n'), ((9934, 9971), 'struct.unpack_from', 'struct.unpack_from', (['"""!i"""', 'buf', 'offset'], {}), "('!i', buf, offset)\n", (9952, 9971), False, 'import struct\n'), ((10291, 10328), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (10309, 10328), False, 'import struct\n'), ((10424, 10466), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (10442, 10466), False, 'import struct\n'), ((10611, 10653), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (10629, 10653), False, 'import struct\n'), ((10800, 10842), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (10818, 10842), False, 'import struct\n'), ((10989, 11031), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (11007, 11031), False, 'import struct\n'), ((11175, 11217), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (11193, 11217), False, 'import struct\n'), ((11361, 11403), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (11379, 11403), False, 'import struct\n'), ((11548, 11590), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (11566, 11590), False, 'import struct\n'), ((11735, 11777), 
'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (11753, 11777), False, 'import struct\n'), ((11922, 11964), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (11940, 11964), False, 'import struct\n'), ((12119, 12161), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (12137, 12161), False, 'import struct\n'), ((12315, 12357), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (12333, 12357), False, 'import struct\n'), ((12510, 12552), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (12528, 12552), False, 'import struct\n'), ((12707, 12749), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (12725, 12749), False, 'import struct\n'), ((12911, 12953), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (12929, 12953), False, 'import struct\n'), ((13123, 13160), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (13141, 13160), False, 'import struct\n'), ((13258, 13300), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (13276, 13300), False, 'import struct\n'), ((13466, 13503), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (13484, 13503), False, 'import struct\n'), ((13599, 13636), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (13617, 13636), False, 'import struct\n'), ((13732, 13769), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, 
offset)\n", (13750, 13769), False, 'import struct\n'), ((13868, 13910), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (13886, 13910), False, 'import struct\n'), ((14059, 14096), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (14077, 14096), False, 'import struct\n'), ((14265, 14304), 'struct.unpack_from', 'struct.unpack_from', (['"""!ddd"""', 'buf', 'offset'], {}), "('!ddd', buf, offset)\n", (14283, 14304), False, 'import struct\n'), ((14527, 14564), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (14545, 14564), False, 'import struct\n'), ((14680, 14717), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (14698, 14717), False, 'import struct\n'), ((14871, 14908), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (14889, 14908), False, 'import struct\n'), ((14998, 15035), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (15016, 15035), False, 'import struct\n'), ((15125, 15162), 'struct.unpack_from', 'struct.unpack_from', (['"""!d"""', 'buf', 'offset'], {}), "('!d', buf, offset)\n", (15143, 15162), False, 'import struct\n'), ((15258, 15300), 'struct.unpack_from', 'struct.unpack_from', (['"""!dddddd"""', 'buf', 'offset'], {}), "('!dddddd', buf, offset)\n", (15276, 15300), False, 'import struct\n')] |
# --------------
# Header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# `path` (the location of the data file) is provided by the execution
# environment before this script runs.
data = pd.read_csv(path)

# Code starts here
# Treat the '-' placeholder as its own gender category before counting.
data.Gender.replace('-', 'Agender', inplace=True)
gender_count = data.Gender.value_counts()
gender_count.plot(kind='bar')

# --------------
# Distribution of hero alignment.
alignment = data.Alignment.value_counts()
alignment.plot(kind='pie')

# --------------
# Strength vs Combat: covariance, standard deviations and Pearson correlation.
sc_df = pd.DataFrame(data, columns=['Strength', 'Combat'])
sc_covariance = sc_df.cov()
sc_strength = sc_df.Strength.std()
sc_combat = sc_df.Combat.std()
sc_pearson = sc_df.corr(method='pearson', min_periods=1)
# Pick the off-diagonal (Strength, Combat) entry out of each 2x2 matrix.
sc_pearson = sc_pearson.Combat[0]
sc_covariance = sc_covariance.Combat[0]

# Intelligence vs Combat: the same statistics.
ic_df = pd.DataFrame(data, columns=['Intelligence', 'Combat'])
ic_covariance = ic_df.cov()
ic_intelligence = ic_df.Intelligence.std()
ic_combat = ic_df.Combat.std()
ic_pearson = ic_df.corr(method='pearson', min_periods=1)
ic_pearson = ic_pearson.Combat[0]
ic_covariance = ic_covariance.Combat[0]

# --------------
# Heroes whose Total score lies in the top 1%.
total_high = data.Total.quantile(q=0.99)
super_best = data[data.Total > total_high]
super_best_names = list(super_best.Name[:])
print(super_best_names)

# --------------
# Box plots of Intelligence, Speed and Power side by side.
fig, (ax_1, ax_2, ax_3) = plt.subplots(1, 3, figsize=(20, 8))
data.Intelligence.plot(kind='box', ax=ax_1)
data.Speed.plot(kind='box', ax=ax_2)
data.Power.plot(kind='box', ax=ax_3)
# BUG FIX: the original did `ax_1.set_title=('Intelligence1')`, which
# assigned a string to the attribute (clobbering the method) instead of
# calling it, so no title was ever set.
ax_1.set_title('Intelligence')
| [
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"pandas.read_csv"
] | [((113, 130), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (124, 130), True, 'import pandas as pd\n'), ((456, 506), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['Strength', 'Combat']"}), "(data, columns=['Strength', 'Combat'])\n", (468, 506), True, 'import pandas as pd\n'), ((736, 790), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['Intelligence', 'Combat']"}), "(data, columns=['Intelligence', 'Combat'])\n", (748, 790), True, 'import pandas as pd\n'), ((1268, 1303), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(20, 8)'}), '(1, 3, figsize=(20, 8))\n', (1280, 1303), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import math
import pickle
def get_data(data, frame_nos, dataset, topic, usernum, fps, milisec, width, height, view_width, view_height, offset=0):
	"""
	Read and return the viewport data.

	Loads the pickled viewport trace for the given dataset/topic/user and
	appends one ``(features_dict, next_x, next_y)`` sample per frame to
	``data`` and the matching frame number to ``frame_nos``.

	Parameters
	----------
	data : list
		Accumulator for ``(X, next_x, next_y)`` samples (mutated in place).
	frame_nos : list
		Accumulator for frame numbers (mutated in place).
	dataset : int
		Dataset identifier (1 or 2); the two datasets store timestamps
		differently and are parsed by separate branches below.
	topic, usernum
		Select the viewport trace file to load.
	fps, milisec
		Frame rate and timestamp scale used to convert trace timestamps
		into frame numbers.
	width, height : int
		Target frame dimensions in pixels.
	view_width, view_height : int
		Dimensions of the recorded viewport coordinate space.
	offset : int, optional
		Start time offset (same unit as the trace timestamps).  BUG FIX:
		the original body referenced a global ``offset`` that is never
		defined in this module (a NameError at runtime); it is now an
		explicit parameter defaulting to 0.

	Returns
	-------
	(data, frame_nos, max_frame)
		The (mutated) accumulators plus the last frame number processed.
	"""
	VIEW_PATH = '../../Viewport/'
	view_info = pickle.load(open(VIEW_PATH + 'ds{}/viewport_ds{}_topic{}_user{}'.format(dataset, dataset, topic, usernum), 'rb'), encoding='latin1')

	if dataset == 1:
		max_frame = int(view_info[-1][0]*1.0*fps/milisec)
		for i in range(len(view_info)-1):
			frame = int(view_info[i][0]*1.0*fps/milisec)
			frame += int(offset*1.0*fps/milisec)
			frame_nos.append(frame)
			if(frame > max_frame):
				break
			# Scale the recorded viewport centre into the target frame size.
			X={}
			X['VIEWPORT_x']=int(view_info[i][1][1]*width/view_width)
			X['VIEWPORT_y']=int(view_info[i][1][0]*height/view_height)
			data.append((X, int(view_info[i+1][1][1]*width/view_width),int(view_info[i+1][1][0]*height/view_height)))

	elif dataset == 2:
		# Dataset 2 stores absolute timestamps: find the frame that is 60s
		# past the offset (max_frame) and the first sample at/after offset.
		# NOTE(review): if no sample brackets the offset, max_frame/min_index
		# stay undefined and a NameError follows -- preserved from original.
		for k in range(len(view_info)-1):
			if view_info[k][0]<=offset+60 and view_info[k+1][0]>offset+60:
				max_frame = int(view_info[k][0]*1.0*fps/milisec)
				break

		for k in range(len(view_info)-1):
			if view_info[k][0]<=offset and view_info[k+1][0]>offset:
				min_index = k+1
				break

		prev_frame = 0
		for i in range(min_index,len(view_info)-1):
			frame = int((view_info[i][0])*1.0*fps/milisec)
			if frame == prev_frame:
				# Skip duplicate samples that map to the same frame number.
				continue
			if(frame > max_frame):
				break
			frame_nos.append(frame)
			X={}
			X['VIEWPORT_x']=int(view_info[i][1][1]*width/view_width)
			X['VIEWPORT_y']=int(view_info[i][1][0]*height/view_height)
			data.append((X, int(view_info[i+1][1][1]*width/view_width),int(view_info[i+1][1][0]*height/view_height)))
			prev_frame = frame

	return data, frame_nos, max_frame
def tiling(data, frame_nos, max_frame, width, height, nrow_tiles, ncol_tiles, fps, pred_nframe):
	"""
	Calculate the tiles corresponding to the viewport and segment them into different chunks

	Parameters: ``data`` holds one ``(features, x, y)`` viewport sample per
	frame, ``frame_nos`` the matching frame numbers, ``max_frame`` the last
	frame to consider, ``width``/``height`` the frame size in pixels,
	``nrow_tiles``/``ncol_tiles`` the tiling grid, ``fps`` the frame rate and
	``pred_nframe`` the chunk length in frames.

	Returns ``(act_tiles, chunk_frames)``: the (row, col) tile actually
	viewed for each processed frame, and the list of frame-index chunks.
	"""
	count=0
	i=0
	act_tiles = []
	chunk_frames = []

	# Leaving the first 5 seconds ( to keep consistent with our model)
	while True:
		curr_frame = frame_nos[i]
		if curr_frame<5*fps:
			i=i+1
			# NOTE(review): indexes ``data`` by the frame *number*, not by the
			# sample index ``i`` -- assumes frame_nos[k] == k in this range;
			# confirm against the caller.
			[inp_i,x,y]=data[curr_frame]
		else:
			break

	# Calulate the tiles and store it in chunks
	while True:
		curr_frame = frame_nos[i]
		# Remaining frames in this chunk, capped at the end of the video.
		nframe = min(pred_nframe, max_frame - frame_nos[i])
		if(nframe <= 0):
			break

		# Add the frames that will be in the current chunk
		frames = {i}
		for k in range(i+1, len(frame_nos)):
			if(frame_nos[k] < curr_frame + nframe):
				frames.add(k)
			else:
				i=k
				break

		# Advance past the chunk; stop once the last sample is reached.
		if(i!=k):
			i=k
			if(i==(len(frame_nos)-1)):
				break

		frames = sorted(frames)
		chunk_frames.append(frames)

		# Get the actual tile
		for k in range(len(frames)):
			[inp_k, x_act, y_act] = data[frames[k]]
			# print(x_act, y_act)
			# Map the pixel coordinate onto the tile grid ...
			actual_tile_col = int(x_act * ncol_tiles / width)
			actual_tile_row = int(y_act * nrow_tiles / height)
			# print(actual_tile_col, actual_tile_row)
			# ... then wrap out-of-range indices around (spherical video wraps).
			actual_tile_row = actual_tile_row-nrow_tiles if(actual_tile_row >= nrow_tiles) else actual_tile_row
			actual_tile_col = actual_tile_col-ncol_tiles if(actual_tile_col >= ncol_tiles) else actual_tile_col
			actual_tile_row = actual_tile_row+nrow_tiles if actual_tile_row < 0 else actual_tile_row
			actual_tile_col = actual_tile_col+ncol_tiles if actual_tile_col < 0 else actual_tile_col
			# print(actual_tile_col, actual_tile_row)
			# print()
			act_tiles.append((actual_tile_row, actual_tile_col))

	return act_tiles, chunk_frames
def alloc_bitrate(frame_nos, chunk_frames, pref_bitrate, nrow_tiles, ncol_tiles):
	"""
	Allocates equal bitrate to all the tiles

	Every tile in every chunk gets an identical weight, so each receives
	``pref_bitrate / (nrow_tiles * ncol_tiles)``.  Returns one
	``nrow_tiles x ncol_tiles`` bitrate grid per chunk.
	"""
	vid_bitrate = []
	for _ in chunk_frames:
		# Uniform weights; normalise them so the grid sums to pref_bitrate.
		weights = [[1. for _ in range(ncol_tiles)] for _ in range(nrow_tiles)]
		weight_total = sum(sum(row) for row in weights)
		tile_rates = [
			[cell * pref_bitrate / weight_total for cell in row]
			for row in weights
		]
		vid_bitrate.append(tile_rates)
	return vid_bitrate
def calc_qoe(vid_bitrate, act_tiles, frame_nos, chunk_frames, width, height, nrow_tiles, ncol_tiles, player_width, player_height):
	"""
	Calculate QoE based on the video bitrates

	Per chunk (first 55 chunks only) the score combines:
	qoe_1 (mean bitrate inside the player view), qoe_2 (mean intra-view
	bitrate deviation), qoe_3 (deviation across viewed tiles) and qoe_4
	(change in qoe_1 vs the previous chunk).  Total QoE is
	``sum(qoe_1 - w1*qoe_2 - w2*qoe_3 - w3*qoe_4)``.
	"""
	qoe = 0
	prev_qoe_1 = 0
	# Equal penalty weights for the three quality-degradation terms.
	weight_1 = 1
	weight_2 = 1
	weight_3 = 1

	tile_width = width/ncol_tiles
	tile_height = height/nrow_tiles

	for i in range(len(chunk_frames[:55])):
		qoe_1, qoe_2, qoe_3, qoe_4 = 0, 0, 0, 0
		tile_count = 0
		rows, cols = set(), set()
		rate = []

		chunk = chunk_frames[i]
		chunk_bitrate = vid_bitrate[i]
		# Slice of actual tiles belonging to this chunk (indices are relative
		# to the first frame of the first chunk).
		chunk_act = act_tiles[chunk[0]-chunk_frames[0][0] : chunk[-1]-chunk_frames[0][0]]

		for j in range(len(chunk_act)):
			# Count a tile only the first time its row or column is seen.
			if(chunk_act[j][0] not in rows or chunk_act[j][1] not in cols):
				tile_count += 1
				rows.add(chunk_act[j][0])
				cols.add(chunk_act[j][1])

			row, col = chunk_act[j][0], chunk_act[j][1]

			# Find the number of tiles that can be accomodated from the center of the viewport
			n_tiles_width = math.ceil((player_width/2 - tile_width/2)/tile_width)
			n_tiles_height = math.ceil((player_height/2 - tile_height/2)/tile_height)
			tot_tiles = (2 * n_tiles_width+1) * (2 * n_tiles_height+1)

			local_qoe = 0
			local_rate = [] # a new metric to get the standard deviation of bitrate within the player view (qoe2)
			for x in range(2*n_tiles_height+1):
				for y in range(2*n_tiles_width+1):
					sub_row = row - n_tiles_height + x
					sub_col = col - n_tiles_width + y

					# Wrap tile indices around the grid edges (360-degree video).
					sub_row = nrow_tiles+row+sub_row if sub_row < 0 else sub_row
					sub_col = ncol_tiles+col+sub_col if sub_col < 0 else sub_col
					sub_row = sub_row-nrow_tiles if sub_row >= nrow_tiles else sub_row
					sub_col = sub_col-ncol_tiles if sub_col >= ncol_tiles else sub_col

					local_qoe += chunk_bitrate[sub_row][sub_col]
					local_rate.append(chunk_bitrate[sub_row][sub_col])

			qoe_1 += local_qoe / tot_tiles
			if(len(local_rate)>0):
				qoe_2 += np.std(local_rate)

			rate.append(local_qoe / tot_tiles)

		# Avoid division by zero when no new tiles were counted.
		tile_count = 1 if tile_count==0 else tile_count
		qoe_1 /= tile_count
		qoe_2 /= tile_count

		if(len(rate)>0):
			qoe_3 = np.std(rate)
			qoe_3 /= tile_count

		if(i>0):
			qoe_4 = abs(prev_qoe_1 - qoe_1)

		qoe += qoe_1 - weight_1*qoe_2 - weight_2*qoe_3 - weight_3*qoe_4
		prev_qoe_1 = qoe_1

	return qoe
| [
"math.ceil",
"numpy.std"
] | [((5199, 5258), 'math.ceil', 'math.ceil', (['((player_width / 2 - tile_width / 2) / tile_width)'], {}), '((player_width / 2 - tile_width / 2) / tile_width)\n', (5208, 5258), False, 'import math\n'), ((5274, 5336), 'math.ceil', 'math.ceil', (['((player_height / 2 - tile_height / 2) / tile_height)'], {}), '((player_height / 2 - tile_height / 2) / tile_height)\n', (5283, 5336), False, 'import math\n'), ((6345, 6357), 'numpy.std', 'np.std', (['rate'], {}), '(rate)\n', (6351, 6357), True, 'import numpy as np\n'), ((6152, 6170), 'numpy.std', 'np.std', (['local_rate'], {}), '(local_rate)\n', (6158, 6170), True, 'import numpy as np\n')] |
""".. Line to protect from pydocstyle D205, D400.
Plot distribution RNA-map
-------------------------
Plot distribution of crosslinks relative to landmark of specific type.
"""
import os
import pandas as pd
import iCount
# pylint: disable=wrong-import-order
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
from matplotlib import pyplot as plt # pylint: disable=wrong-import-position
# pylint: enable=wrong-import-order
def normalize_cpm(value, total):
    """Scale *value* to counts-per-million of *total*."""
    return (value / total) * 1_000_000
def parse_results_basic(fname):
    """Parse RNA-maps results file.

    The first line holds the total cDNA count as ``<label>:<count>``; the
    rest is a tab-separated table (second line = header, first column =
    index).  Returns ``(dataframe, total_cdna)``.
    """
    with open(fname, 'rt') as handle:
        first_line = handle.readline()
    total_cdna = int(first_line.strip().split(':')[1])

    df = pd.read_csv(fname, delimiter='\t', header=1, index_col=0)
    return df, total_cdna
def parse_results(fname):
    """Parse RNA-maps results file.

    Returns ``(normalized_data, landmark_count)`` where ``normalized_data``
    maps each position (int) to its CPM-normalized cross-link score summed
    over all landmarks.
    """
    df, total_cdna = parse_results_basic(fname)
    landmark_count = len(df)
    column_sums = df.sum(axis=0)
    # Perform CPM normalization on every positional score.
    normalized_data = {}
    for pos, score in column_sums.to_dict().items():
        normalized_data[int(pos)] = normalize_cpm(score, total_cdna)
    return normalized_data, landmark_count
def smooth(list_, half_window=1):
    """Use box averaging smoothing.

    Each output element is the mean of the input values inside a window of
    ``2 * half_window + 1`` positions, clipped at the ends of the list.
    """
    length = len(list_)
    smoothed = []
    for center in range(length):
        lo = max(0, center - half_window)
        hi = min(length, center + half_window + 1)
        window = list_[lo:hi]
        smoothed.append(sum(window) / len(window))
    return smoothed
def make_outfile_name(fname, imgfmt):
    """Make output filename.

    ``<dir>/<stem>_distro.<imgfmt>``, where ``stem`` is *fname* without its
    ``.tsv`` extension.
    """
    stem = iCount.files.remove_extension(fname, ['.tsv'])
    return os.path.join(os.path.dirname(fname), '{}_distro.{}'.format(stem, imgfmt))
def guess_maptype(fname):
    """Try to get RNA maptype from filename.

    Returns the first known map type whose ``<maptype>.tsv`` suffix matches
    *fname*, or ``None`` if nothing matches.
    """
    # Check the longest names first so "noncoding-gene-start" is not
    # mistaken for "gene-start".
    candidates = sorted(iCount.analysis.rnamaps.MAP_TYPES.keys(), key=len, reverse=True)
    for candidate in candidates:
        if fname.endswith('{}.tsv'.format(candidate)):
            return candidate
def plot_rnamap(fnames,
                outfile=None,
                up_limit=100,
                down_limit=100,
                ax=None,
                ylim=None,
                imgfmt='png',
                smoothing=1,
                ):
    """
    Plot distribution RNA-map.

    Parameters
    ----------
    fnames : list_str
        List of rnamaps result files to plot.
    outfile : str
        Output file.
    up_limit : int
        Upstream plot limit.
    down_limit : int
        Downstream plot limit.
    ax : str
        An ``matplotlib.axes.Axes`` instance onto which this plot can
        be drawn. This is useful if you would like to use this function
        to plot this image as a subplot of a more complex figure.
    ylim : int
        Limit of the y-axis.
    imgfmt : str
        Image format. Note that image format is automatically
        determined from outfile. This parameters only applies if
        outfile is None.
    smoothing : int
        Smoothing half-window. Average smoothing is used.

    Returns
    -------
    None

    """
    # Make sure llimits have correct signs.
    up_limit = -abs(int(up_limit))
    down_limit = abs(int(down_limit))

    if not isinstance(fnames, list):
        fnames = [fnames]

    if not outfile:
        outfile = make_outfile_name(fnames[0], imgfmt)

    # User can provide axes instance (subplot) into which to plot this heatmap.
    # If not given a figure and axes instances are created.
    if ax:
        is_independent = False
    else:
        is_independent = True
        fig = plt.figure()
        ax = plt.subplot(1, 1, 1)

    for fname in fnames:
        data, landmark_count = parse_results(fname)
        # Skip files with no positional data rather than plotting an empty line.
        if not data:
            continue
        positions, scores = zip(*sorted(data.items()))
        label = '{} ({} landmarks)'.format(
            guess_maptype(os.path.basename(fname)),
            landmark_count,
        )
        ax.plot(positions, smooth(scores, smoothing), label=label)

    ax.set_xlim((up_limit, down_limit))
    ax.set_xlabel('Position')
    if ylim:
        ax.set_ylim((0, ylim))
    ax.set_ylabel('Score [CPM]')
    ax.set_title('RNA-map')
    ax.grid(b=True, which='major', axis='both')
    ax.legend()

    # Only save to disk when we created the figure ourselves; when plotting
    # into a caller-supplied axes the caller owns the figure.
    if is_independent:
        fig.savefig(outfile)
| [
"pandas.read_csv",
"matplotlib.use",
"matplotlib.pyplot.subplot",
"os.path.dirname",
"matplotlib.pyplot.figure",
"iCount.files.remove_extension",
"os.path.basename",
"iCount.analysis.rnamaps.MAP_TYPES.keys"
] | [((333, 354), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (347, 354), False, 'import matplotlib\n'), ((828, 885), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'delimiter': '"""\t"""', 'header': '(1)', 'index_col': '(0)'}), "(fname, delimiter='\\t', header=1, index_col=0)\n", (839, 885), True, 'import pandas as pd\n'), ((1665, 1711), 'iCount.files.remove_extension', 'iCount.files.remove_extension', (['fname', "['.tsv']"], {}), "(fname, ['.tsv'])\n", (1694, 1711), False, 'import iCount\n'), ((1726, 1748), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (1741, 1748), False, 'import os\n'), ((2018, 2058), 'iCount.analysis.rnamaps.MAP_TYPES.keys', 'iCount.analysis.rnamaps.MAP_TYPES.keys', ([], {}), '()\n', (2056, 2058), False, 'import iCount\n'), ((3734, 3746), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3744, 3746), True, 'from matplotlib import pyplot as plt\n'), ((3760, 3780), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (3771, 3780), True, 'from matplotlib import pyplot as plt\n'), ((4027, 4050), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (4043, 4050), False, 'import os\n')] |
#
# Copyright (c) 2021 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# span.py
#
# Part of text_extensions_for_pandas
#
# Support for span-centric Jupyter rendering and utilities
#
import textwrap
from typing import *
from enum import Enum
import text_extensions_for_pandas.resources
# TODO: This try/except block is for Python 3.6 support, and should be
# reduced to just importing importlib.resources when 3.6 support is dropped.
try:
import importlib.resources as pkg_resources
except ImportError:
import importlib_resources as pkg_resources
# Limits the max number of displayed documents. Matches Pandas' default display.max_seq_items.
_DOCUMENT_DISPLAY_LIMIT = 100
class SetType(Enum):
    # Relationship between a span and another span occupying the same region:
    # the other span lies entirely inside it (NESTED) or only partially
    # overlaps it (OVERLAP).
    NESTED=1
    OVERLAP=2
class RegionType(Enum):
    # Kind of highlighted text region when rendering: a span with simple
    # depth-1 nesting (NESTED), a tangle of overlapping/deeply nested spans
    # (COMPLEX), or a single unconnected span (SOLO).
    NESTED=1
    COMPLEX=2
    SOLO=3
def pretty_print_html(column: Union["SpanArray", "TokenSpanArray"],
                      show_offsets: bool) -> str:
    """
    HTML pretty-printing of a series of spans for Jupyter notebooks.

    Builds a self-contained HTML fragment: a static fallback rendering plus a
    script block that re-renders the spans interactively when JavaScript is
    available.

    Args:
        column: Span column (either character or token spans).
        show_offsets: True to generate a table of span offsets in addition
         to the marked-up text
    """
    # Local import to prevent circular dependencies
    from text_extensions_for_pandas.array.span import SpanArray
    from text_extensions_for_pandas.array.token_span import TokenSpanArray
    if not isinstance(column, (SpanArray, TokenSpanArray)):
        raise TypeError(f"Expected SpanArray or TokenSpanArray, but received "
                        f"{column} of type {type(column)}")

    # Gets the main script and stylesheet from the 'resources' sub-package
    style_text: str = pkg_resources.read_text(text_extensions_for_pandas.resources, "span_array.css")
    script_text: str = pkg_resources.read_text(text_extensions_for_pandas.resources, "span_array.js")

    # Declare initial variables common to all render calls
    instance_init_script_list: List[str] = []

    # For each document, pass the array of spans and document text into the script's render function
    document_columns = column.split_by_document()
    for column_index in range(min(_DOCUMENT_DISPLAY_LIMIT, len(document_columns))):
        # Get a javascript representation of the column
        span_array = []
        token_span_array = []
        for e in document_columns[column_index]:
            span_array.append(f"""[{e.begin},{e.end}]""")
            # TokenSpan elements additionally carry token offsets.
            if hasattr(e, "tokens"):
                token_span_array.append(f"""[{e.begin_token},{e.end_token}]""")

        document_object_script = f"""
            const doc_spans = [{','.join(span_array)}]
            const doc_text = '{_get_escaped_doctext(document_columns[column_index])}'
        """

        # If the documents are a TokenSpanArray, include the start and end token indices in the document object.
        if len(token_span_array) > 0:
            document_object_script += f"""
                const doc_token_spans = [{','.join(token_span_array)}]
                documents.push({{doc_text: doc_text, doc_spans: doc_spans, doc_token_spans: doc_token_spans}})
            """
        else:
            document_object_script += """
                documents.push({doc_text: doc_text, doc_spans: doc_spans})
            """

        instance_init_script_list.append(f"""
            {{
                {document_object_script}
            }}
        """)

    # Defines a list of DOM strings to be appended to the end of the returned HTML.
    postfix_tags: List[str] = []

    if len(document_columns) > _DOCUMENT_DISPLAY_LIMIT:
        postfix_tags.append(f"""
        <footer>Documents truncated. Showing {_DOCUMENT_DISPLAY_LIMIT} of {len(document_columns)}</footer>
        """)

    # Get the show_offsets parameter as a JavaScript boolean
    show_offset_string = 'true' if show_offsets else 'false'

    return textwrap.dedent(f"""
        <style class="span-array-css">
        {textwrap.indent(style_text, '        ')}
        </style>
        <script>
        {{
        {textwrap.indent(script_text, '        ')}
        }}
        </script>
        <div class="span-array">
        {_get_initial_static_html(column, show_offsets)}
        <span style="font-size: 0.8em;color: #b3b3b3;">Your notebook viewer does not support Javascript execution. The above rendering will not be interactive.</span>
        </div>
        <script>
        {{
            const Span = window.SpanArray.Span
            const script_context = document.currentScript
            const documents = []
            {''.join(instance_init_script_list)}
            const instance = new window.SpanArray.SpanArray(documents, {show_offset_string}, script_context)
            instance.render()
        }}
        </script>
        {''.join(postfix_tags)}
        """)
def _get_escaped_doctext(column: Union["SpanArray", "TokenSpanArray"]) -> List[str]:
# Subroutine of pretty_print_html() above.
# Should only be called for single-document span arrays.
if not column.is_single_document:
raise ValueError("Array contains spans from multiple documents. Can only "
"render one document at a time.")
text = column.document_text
text_pieces = []
for i in range(len(text)):
if text[i] == "'":
text_pieces.append("\\'")
elif text[i] == "\n":
text_pieces.append("\\n")
else:
text_pieces.append(text[i])
return "".join(text_pieces)
def _get_initial_static_html(column: Union["SpanArray", "TokenSpanArray"],
                             show_offsets: bool) -> str:
    # Subroutine of pretty_print_html above.
    # Gets the initial static html representation of the column for notebook viewers without JavaScript support.
    # Iterates over each document and constructs the DOM string with template literals.
    # ! Text inserted into the DOM as raw HTML should always be sanitized to prevent unintended DOM manipulation
    # and XSS attacks.
    documents = column.split_by_document()
    documents_html = []

    for column_index in range(min(_DOCUMENT_DISPLAY_LIMIT, len(documents))):
        document = documents[column_index]

        # Generate a dictionary to store span information, including relationships with spans occupying the same region.
        spans = {}
        is_token_document = False
        sorted_span_ids = []
        for i in range(len(document)):
            span_data = {}
            span_data["id"] = i
            span_data["begin"] = document[i].begin
            span_data["end"] = document[i].end
            # Spans with token offsets mark the whole document as token-based.
            if hasattr(document[i], "tokens"):
                is_token_document = True
                span_data["begin_token"] = document[i].begin_token
                span_data["end_token"] = document[i].end_token
            span_data["sets"] = []
            spans[i] = span_data
            sorted_span_ids.append(i)

        # Sort IDs by begin ascending, then end descending, so that a span
        # precedes anything it contains.
        sorted_span_ids.sort(key=lambda id: (spans[id]["begin"], -spans[id]["end"]))

        # Record, for every span, which later spans it contains (NESTED) or
        # partially overlaps (OVERLAP).
        for i in range(len(sorted_span_ids)):
            span_data = spans[sorted_span_ids[i]]
            for j in range(i+1, len(sorted_span_ids)):
                sub_span_data = spans[sorted_span_ids[j]]
                # If the spans do not overlap, exit the sub-loop
                if(sub_span_data["begin"] >= span_data["end"]):
                    break
                else:
                    if(sub_span_data["end"] <= span_data["end"]):
                        span_data["sets"].append({"type": SetType.NESTED, "id": sub_span_data["id"]})
                    else:
                        span_data["sets"].append({"type": SetType.OVERLAP, "id": sub_span_data["id"]})
            spans[sorted_span_ids[i]] = span_data

        # Generate the table rows DOM string from span data.
        table_rows_html = []
        for i in range(len(spans)):
            span = spans[i]
            table_rows_html.append(f"""
                <tr>
                    <td><b>{span["id"]}</b></td>
                    <td>{span["begin"]}</td>
                    <td>{span["end"]}</td>
            """)
            if is_token_document:
                table_rows_html.append(f"""
                    <td>{span["begin_token"]}</td>
                    <td>{span["end_token"]}</td>
                """)
            table_rows_html.append(f"""
                    <td>{_get_sanitized_text(document.document_text[span["begin"]:span["end"]])}</td>
                </tr>
            """)

        # Generate the regions of the document_text to highlight from span data.
        mark_regions = []
        i = 0
        while i < len(document):
            region = {}
            region["root_id"] = i
            region["begin"] = spans[i]["begin"]
            set_span = _get_set_span(spans, i)
            region["end"] = set_span["end"]

            if len(spans[i]["sets"]) > 0:
                # get set span and type
                if(_is_complex(spans, i)):
                    region["type"] = RegionType.COMPLEX
                else:
                    region["type"] = RegionType.NESTED
            else:
                region["type"] = RegionType.SOLO
            mark_regions.append(region)
            # Skip ahead past every span already covered by this region.
            i = set_span["highest_id"] + 1

        # Generate the document_text DOM string from the regions created above.
        context_html = []

        if len(mark_regions) == 0:
            # There are no marked regions. Just append the sanitized text as a raw string.
            context_html.append(_get_sanitized_text(document.document_text))
        else:
            # Iterate over each marked region and contruct the HTML for preceding text and marked text.
            # Then, append that HTML to the list of DOM strings for the document_text.
            snippet_begin = 0
            for region in mark_regions:
                context_html.append(f"""
                    {_get_sanitized_text(document.document_text[snippet_begin:region["begin"]])}
                """)

                if region["type"] == RegionType.COMPLEX:
                    context_html.append(f"""
                        <span class='mark btn-info complex-set' style='
                            padding:0.4em;
                            border-radius:0.35em;
                            background:linear-gradient(to right, #a0c4ff, #ffadad);
                            color: black;
                        '>{_get_sanitized_text(document.document_text[region["begin"]:region["end"]])}
                            <span class='mark-tag' style='
                                font-weight: bolder;
                                font-size: 0.8em;
                                font-variant: small-caps;
                                font-variant-caps: small-caps;
                                font-variant-caps: all-small-caps;
                                margin-left: 8px;
                                text-transform: uppercase;
                                color: black;
                            '>Set</span>
                        </span>
                    """)
                elif region["type"] == RegionType.NESTED:
                    mark_html = []
                    nested_snippet_begin = region["begin"]
                    # Iterate over each span nested within the root span of the marked region
                    for nested_span in map( \
                        lambda set: spans[set["id"]],
                        spans[region["root_id"]]["sets"]):

                        mark_html.append(f"""
                            {_get_sanitized_text(document.document_text[nested_snippet_begin:nested_span["begin"]])}
                            <span class='mark btn-warning' style='
                                padding:0.2em 0.4em;
                                border-radius:0.35em;
                                background-color: #ffadad;
                                color: black;
                            '>{_get_sanitized_text(document.document_text[nested_span["begin"]:nested_span["end"]])}</span>
                        """)
                        nested_snippet_begin = nested_span["end"]
                    mark_html.append(_get_sanitized_text(document.document_text[nested_snippet_begin:region["end"]]))

                    context_html.append(f"""
                        <span class='mark btn-primary' style='padding:0.4em;border-radius:0.35em;background-color: #a0c4ff;color:black;'>{"".join(mark_html)}</span>
                    """)
                elif region["type"] == RegionType.SOLO:
                    context_html.append(f"""
                        <span class='mark btn-primary' style='padding:0.4em;border-radius:0.35em;background-color: #a0c4ff;color:black;'>{_get_sanitized_text(document.document_text[region["begin"]:region["end"]])}</span>
                    """)

                snippet_begin = region["end"]
            context_html.append(_get_sanitized_text(document.document_text[snippet_begin:]))

        # Generate the document's DOM string
        documents_html.append(f"""
            <div class='document'>
                <table style='
                    table-layout: auto;
                    overflow: hidden;
                    width: 100%;
                    border-collapse: collapse;
                '>
                    <thead style='font-variant-caps: all-petite-caps;'>
                        <th></th>
                        <th>begin</th>
                        <th>end</th>
                        {"<th>begin token</th><th>end token</th>" if is_token_document else ""}
                        <th style='text-align:right;width:100%'>context</th>
                    </tr></thead>
                    <tbody>
                        {"".join(table_rows_html)}
                    </tbody>
                </table>
                <p style='
                    padding: 1em;
                    line-height: calc(var(--jp-content-line-height, 1.6) * 1.6);
                '>
                    {"".join(context_html)}
                </p>
            </div>
        """)

    # Concat all documents and return the final DOM string
    return "".join(documents_html)
def _get_set_span(spans: Dict, id: int) -> Dict:
# Subroutine of _get_initial_static_html() above.
# Recursive algorithm to get the last end and ID values of the set of spans connected to span with the given ID
# Will raise a KeyError exception if an invalid key is given
end = spans[id]["end"]
highest_id = id
# For each span in the set of spans, get the return values and track the greatest endpoint index and ID values.
for set in spans[id]["sets"]:
other = _get_set_span(spans, set["id"])
if other["end"] > end:
end = other["end"]
if other["highest_id"] > highest_id:
highest_id = other["highest_id"]
return {"end": end, "highest_id": highest_id}
def _is_complex(spans: Dict, id: int) -> bool:
    # Subroutine of _get_initial_static_html() above.
    # A span counts as "Complex" when any connected span only partially
    # overlaps it, or when a directly nested span has its own connections
    # (i.e. nesting deeper than one level).
    # Will raise a KeyError exception if an invalid key is given.
    for linked in spans[id]["sets"]:
        if linked["type"] == SetType.OVERLAP:
            return True
        if linked["type"] == SetType.NESTED and spans[linked["id"]]["sets"]:
            return True
    return False
def _get_sanitized_text(text: str) -> str:
# Subroutine of _get_initial_static_html() above.
# Returns a string with HTML reserved character replacements to avoid issues while rendering text as HTML
text_pieces = []
for i in range(len(text)):
if text[i] == "&":
text_pieces.append("&")
elif text[i] == "<":
text_pieces.append("<")
elif text[i] == ">":
text_pieces.append(">")
elif text[i] == "\"":
# Not strictly necessary, but just in case.
text_pieces.append(""")
elif text[i] == "'":
# Not strictly necessary, but just in case.
text_pieces.append("'")
elif text[i] == "$":
# Dollar sign messes up Jupyter's JavaScript UI.
# Place dollar sign in its own sub-span to avoid being misinterpeted as a LaTeX delimiter
text_pieces.append("<span>$</span>")
elif text[i] == "\n" or text[i] == "\r":
# Support for in-document newlines by replacing with line break elements
text_pieces.append("<br>")
else:
text_pieces.append(text[i])
return "".join(text_pieces)
| [
"importlib_resources.read_text",
"textwrap.indent"
] | [((2188, 2267), 'importlib_resources.read_text', 'pkg_resources.read_text', (['text_extensions_for_pandas.resources', '"""span_array.css"""'], {}), "(text_extensions_for_pandas.resources, 'span_array.css')\n", (2211, 2267), True, 'import importlib_resources as pkg_resources\n'), ((2291, 2369), 'importlib_resources.read_text', 'pkg_resources.read_text', (['text_extensions_for_pandas.resources', '"""span_array.js"""'], {}), "(text_extensions_for_pandas.resources, 'span_array.js')\n", (2314, 2369), True, 'import importlib_resources as pkg_resources\n'), ((4494, 4533), 'textwrap.indent', 'textwrap.indent', (['style_text', '""" """'], {}), "(style_text, ' ')\n", (4509, 4533), False, 'import textwrap\n'), ((4593, 4633), 'textwrap.indent', 'textwrap.indent', (['script_text', '""" """'], {}), "(script_text, ' ')\n", (4608, 4633), False, 'import textwrap\n')] |
"""
This file requests a new model from the storage pool.
"""
import os
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import requests
from requests.auth import HTTPBasicAuth
from src.praxxis.sqlite import sqlite_telemetry
def update_model():
    """TODO: implement this"""
    # Placeholder: will eventually request a new model from the storage pool
    # (see module docstring).  Currently a deliberate no-op.
    pass
| [
"urllib3.disable_warnings"
] | [((90, 157), 'urllib3.disable_warnings', 'urllib3.disable_warnings', (['urllib3.exceptions.InsecureRequestWarning'], {}), '(urllib3.exceptions.InsecureRequestWarning)\n', (114, 157), False, 'import urllib3\n')] |
""" Stockgrid View """
__docformat__ = "numpy"
import logging
from typing import List, Tuple
import pandas as pd
import requests
from gamestonk_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_dark_pool_short_positions(sort_field: str, ascending: bool) -> pd.DataFrame:
    """Get dark pool short positions. [Source: Stockgrid]

    Parameters
    ----------
    sort_field : str
        Field for which to sort by, where 'sv': Short Vol. (1M),
        'sv_pct': Short Vol. %%, 'nsv': Net Short Vol. (1M),
        'nsv_dollar': Net Short Vol. ($100M), 'dpp': DP Position (1M),
        'dpp_dollar': DP Position ($1B)
    ascending : bool
        Data in ascending order

    Returns
    ----------
    pd.DataFrame
        Dark pool short position data
    """
    # Map CLI field keys onto the URL-encoded endpoint field names.
    endpoint_by_field = {
        "sv": "Short+Volume",
        "sv_pct": "Short+Volume+%25",
        "nsv": "Net+Short+Volume",
        "nsv_dollar": "Net+Short+Volume+$",
        "dpp": "Dark+Pools+Position",
        "dpp_dollar": "Dark+Pools+Position+$",
    }
    sort_order = "asc" if ascending else "desc"

    url = (
        "https://stockgridapp.herokuapp.com/get_dark_pool_data"
        f"?top={endpoint_by_field[sort_field]}&minmax={sort_order}"
    )
    payload = requests.get(url).json()

    column_order = [
        "Ticker",
        "Date",
        "Short Volume",
        "Short Volume %",
        "Net Short Volume",
        "Net Short Volume $",
        "Dark Pools Position",
        "Dark Pools Position $",
    ]
    return pd.DataFrame(payload["data"])[column_order]
@log_start_end(log=logger)
def get_short_interest_days_to_cover(sort_field: str) -> pd.DataFrame:
    """Get short interest and days to cover. [Source: Stockgrid]

    Parameters
    ----------
    sort_field : str
        Field for which to sort by, where 'float': Float Short %%,
        'dtc': Days to Cover, 'si': Short Interest

    Returns
    ----------
    pd.DataFrame
        Short interest and days to cover data
    """
    url = "https://stockgridapp.herokuapp.com/get_short_interest?top=days"
    payload = requests.get(url).json()
    frame = pd.DataFrame(payload["data"])

    # Translate the CLI key into the DataFrame column to sort by.
    sort_column = {
        "float": "%Float Short",
        "dtc": "Days To Cover",
        "si": "Short Interest",
    }[sort_field]

    selected = frame[["Ticker", "Date", "%Float Short", "Days To Cover", "Short Interest"]]
    # Only days-to-cover is presented smallest-first.
    return selected.sort_values(by=sort_column, ascending=sort_field == "dtc")
@log_start_end(log=logger)
def get_short_interest_volume(ticker: str) -> Tuple[pd.DataFrame, List]:
    """Get price vs short interest volume. [Source: Stockgrid]

    Parameters
    ----------
    ticker : str
        Stock to get data from

    Returns
    ----------
    pd.DataFrame
        Short interest volume data
    List
        Price data
    """
    url = f"https://stockgridapp.herokuapp.com/get_dark_pool_individual_data?ticker={ticker}"
    payload = requests.get(url).json()

    volume_df = pd.DataFrame(payload["individual_short_volume_table"]["data"])
    # Convert the string dates into proper timestamps for plotting.
    volume_df["date"] = pd.to_datetime(volume_df["date"])
    return volume_df, payload["prices"]["prices"]
@log_start_end(log=logger)
def get_net_short_position(ticker: str) -> pd.DataFrame:
    """Get net short position. [Source: Stockgrid]

    Parameters
    ----------
    ticker: str
        Stock to get data from

    Returns
    ----------
    pd.DataFrame
        Net short position
    """
    url = f"https://stockgridapp.herokuapp.com/get_dark_pool_individual_data?ticker={ticker}"
    payload = requests.get(url).json()

    position_df = pd.DataFrame(payload["individual_dark_pool_position_data"])
    # Convert the string dates into proper timestamps for plotting.
    position_df["dates"] = pd.to_datetime(position_df["dates"])
    return position_df
| [
"logging.getLogger",
"pandas.to_datetime",
"gamestonk_terminal.decorators.log_start_end",
"requests.get"
] | [((198, 225), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (215, 225), False, 'import logging\n'), ((229, 254), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (242, 254), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((1688, 1713), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (1701, 1713), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((2596, 2621), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (2609, 2621), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((3262, 3287), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (3275, 3287), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((1326, 1344), 'requests.get', 'requests.get', (['link'], {}), '(link)\n', (1338, 1344), False, 'import requests\n'), ((2203, 2221), 'requests.get', 'requests.get', (['link'], {}), '(link)\n', (2215, 2221), False, 'import requests\n'), ((3063, 3081), 'requests.get', 'requests.get', (['link'], {}), '(link)\n', (3075, 3081), False, 'import requests\n'), ((3180, 3206), 'pandas.to_datetime', 'pd.to_datetime', (["df['date']"], {}), "(df['date'])\n", (3194, 3206), True, 'import pandas as pd\n'), ((3664, 3682), 'requests.get', 'requests.get', (['link'], {}), '(link)\n', (3676, 3682), False, 'import requests\n'), ((3779, 3806), 'pandas.to_datetime', 'pd.to_datetime', (["df['dates']"], {}), "(df['dates'])\n", (3793, 3806), True, 'import pandas as pd\n')] |
# Copyright (C) 2018 <NAME>
#
# SPDX-License-Identifier: MIT
"""This module contains a collection of functions related to
geographical data.
"""
from floodsystem.utils import sorted_by_key # noqa
import math
def hav(theta):
    """Haversine function: hav(theta) = sin**2(theta / 2), theta in radians."""
    half_angle_sine = math.sin(theta * 0.5)
    return half_angle_sine ** 2
def r(theta):
    """Convert the angle *theta* from degrees to radians."""
    angle_in_radians = math.radians(theta)
    return angle_in_radians
def dist(coor1, coor2):
    """Great-circle distance in km between two (latitude, longitude)
    points given in degrees, using the haversine formula with an Earth
    radius of 6371 km.
    """
    # Haversines of the latitude and longitude differences.
    dlat = math.radians(coor1[0] - coor2[0])
    dlon = math.radians(coor1[1] - coor2[1])
    hav_dlat = math.sin(dlat * 0.5) ** 2
    hav_dlon = math.sin(dlon * 0.5) ** 2
    cos_term = math.cos(math.radians(coor1[0])) * math.cos(math.radians(coor2[0]))
    hav_angle = hav_dlat + cos_term * hav_dlon
    return 2 * 6371 * math.asin(math.sqrt(hav_angle))
def stations_by_distance(stations, p):
    """Return (station, distance) tuples for every station, sorted by the
    great-circle distance (km) from coordinate *p*.
    """
    pairs = [(station, dist(station.coord, p)) for station in stations]
    return sorted_by_key(pairs, 1)
def rivers_with_station(stations):
    """Return the set of river names that have at least one monitoring station."""
    rivers = set()
    for station in stations:
        rivers.add(station.river)
    return rivers
def stations_by_river(stations):
    """Group monitoring stations by river.

    Returns a dict mapping each river name to the list of its stations,
    preserving the order in which stations appear in *stations*.

    The previous implementation looped over every river and, for each,
    over every station (O(rivers x stations)); this is a single pass.
    """
    retdict = {}
    for station in stations:
        # setdefault avoids a separate membership test per station.
        retdict.setdefault(station.river, []).append(station)
    return retdict
from haversine import haversine,Unit
def stations_within_radius(stations, centre, r):
    """Return an alphabetically sorted list of the names of all stations
    (type MonitoringStation) within radius *r* km of coordinate *centre*.
    """
    # NOTE: the parameter name ``r`` shadows the module-level r() helper,
    # which is not used inside this function.
    names = [station.name for station in stations
             if haversine(station.coord, centre) < r]
    return sorted(names)
def rivers_by_station_number(stations, N):
    """Return the N rivers with the most monitoring stations.

    Result is a list of (river, station_count) tuples sorted by count in
    descending order.  Rivers whose count ties the N-th entry are also
    included, so the list may be longer than N.  Returns [] when N <= 0
    (the previous version raised IndexError for N == 0).
    """
    if N <= 0:
        return []
    counts = [(river, len(river_stations))
              for river, river_stations in stations_by_river(stations).items()]
    ranked = sorted_by_key(counts, 1, reverse=True)
    top = ranked[:N]
    # Extend with any further rivers tied with the current last entry.
    for entry in ranked[N:]:
        if top and entry[1] == top[-1][1]:
            top.append(entry)
        else:
            break
    return top
| [
"haversine.haversine",
"floodsystem.utils.sorted_by_key",
"math.sqrt",
"math.radians",
"math.sin"
] | [((301, 320), 'math.radians', 'math.radians', (['theta'], {}), '(theta)\n', (313, 320), False, 'import math\n'), ((685, 706), 'floodsystem.utils.sorted_by_key', 'sorted_by_key', (['ret', '(1)'], {}), '(ret, 1)\n', (698, 706), False, 'from floodsystem.utils import sorted_by_key\n'), ((1955, 1999), 'floodsystem.utils.sorted_by_key', 'sorted_by_key', (['river_number', '(1)'], {'reverse': '(True)'}), '(river_number, 1, reverse=True)\n', (1968, 1999), False, 'from floodsystem.utils import sorted_by_key\n'), ((249, 270), 'math.sin', 'math.sin', (['(theta * 0.5)'], {}), '(theta * 0.5)\n', (257, 270), False, 'import math\n'), ((489, 507), 'math.sqrt', 'math.sqrt', (['hav_phi'], {}), '(hav_phi)\n', (498, 507), False, 'import math\n'), ((1363, 1389), 'haversine.haversine', 'haversine', (['i.coord', 'centre'], {}), '(i.coord, centre)\n', (1372, 1389), False, 'from haversine import haversine, Unit\n')] |
"""
Breadth-First Search - Implemented using queues
This can be implemented for both Trees / Graphs
Here, we will use Trees as examples.
"""
from collections import deque
class TreeNode:
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Tree:
    """Binary-tree traversal helpers."""

    def level_order(self, root):
        """Breadth-first (level-order) traversal.

        Returns a list of lists: one inner list of node values per depth
        level, top to bottom, left to right.  An empty tree yields [].
        """
        if not root:
            return []
        result = []
        current_level = [root]
        while current_level:
            result.append([node.val for node in current_level])
            next_level = []
            for node in current_level:
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            current_level = next_level
        return result
| [
"collections.deque"
] | [((539, 552), 'collections.deque', 'deque', (['[root]'], {}), '([root])\n', (544, 552), False, 'from collections import deque\n')] |
#All Django Imports
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
#All local imports (libs, contribs, models)
from users.models import *
#All external imports (libs, packages)
import hashlib
import jsonfield
import logging
import pytz
import uuid
# Init Logger
logger = logging.getLogger(__name__)
PACKAGE_SELECTION = (
('car', 'Car'),
('van', 'Van'),
('minivan', 'Minivan'),
)
STATUS_SELECTION = (
('new', 'New'),
('accepted', 'Accepted'),
('completed', 'Completed')
)
CITY_SELECTION = (
('Toronto', 'Toronto'),
('Brampton', 'Brampton'),
('Markham', 'Markham'),
('Mississauga', 'Mississauga'),
('<NAME>', '<NAME>'),
('Vaughan', 'Vaughan'),
('Oakville', 'Oakville')
)
def validate_pickuptime(pickup_time):
    """Model-field validator: reject pickup times that lie in the past.

    Compares *pickup_time* with the current time localised to the
    project time zone and raises ValidationError when it is earlier.
    """
    # NOTE(review): ``settings`` is presumably pulled in via the star
    # import from users.models -- confirm.
    local_now = timezone.now().astimezone(pytz.timezone(settings.TIME_ZONE))
    if (pickup_time - local_now).total_seconds() < 0:
        raise ValidationError('Pickup time cannot be before current time!')
class Quests(models.Model):
    """A delivery quest posted by a questr and carried out by a shipper.

    NOTE(review): the code style (``__unicode__``, hashing plain ``str``
    arguments) indicates this model targets Python 2 / legacy Django.
    """
    # Calculating delivery code before hand and inserting
    # it as default so that it won't be tampered with.
    # NOTE(review): this hash is computed once at import time; the two
    # calc_* class attributes derived from it are not referenced anywhere
    # in this class -- save() regenerates codes per instance instead.
    hashstring = hashlib.sha256(
        str(timezone.now()) + str(timezone.now()) + str(uuid.uuid4())
    ).hexdigest()
    calc_delivery_code = hashstring[:3]+hashstring[-2:]
    calc_tracking_number = hashstring[10:15]+hashstring[-15:-10]
    # Callable default for DateTimeFields (evaluated when a row is created).
    current_time = timezone.now
    questrs = models.ForeignKey(QuestrUserProfile, related_name='quests')
    description = models.TextField(_('description'), blank=True)
    title = models.CharField(
        _('title'),
        max_length=100,
        blank=False
    )
    reward = models.DecimalField(
        _('reward'),
        decimal_places=2,
        max_digits=1000)
    item_images = models.ImageField(
        _('item_images'),
        max_length=9999,
        upload_to='quest-item-cdn',
        blank=True
    )
    map_image = models.URLField(
        _('map_image'),
        max_length=9999,
        default=''
    )
    status = models.TextField(
        _('status'),
        choices=STATUS_SELECTION,
        default='New'
    )
    creation_date = models.DateTimeField(
        _('creation_date'),
        default=current_time
    )
    size = models.TextField(
        _('size'),
        choices=PACKAGE_SELECTION,
        default="backpack"
    )
    shipper = models.TextField(
        _('shipper'),
        blank=True,
        null=True
    )
    # NOTE(review): mutable defaults ({} and [] below) are shared objects;
    # Django convention is a callable (dict/list) -- confirm jsonfield
    # tolerates this.
    pickup = jsonfield.JSONField(_('pickup'), default={})
    dropoff = jsonfield.JSONField(_('dropoff'), default={})
    isaccepted = models.BooleanField(_('isaccepted'), default=False)
    isnotified = models.BooleanField(_('isnotified'), default=False)
    is_questr_reviewed = models.BooleanField(
        _('is_questr_reviewed'),
        default=False
    )
    is_shipper_reviewed = models.BooleanField(
        _('is_shipper_reviewed'),
        default=False
    )
    is_complete = models.BooleanField(_('is_complete'), default=False)
    ishidden = models.BooleanField(_('ishidden'), default=False)
    distance = models.DecimalField(
        _('distance'),
        decimal_places=2,
        max_digits=1000,
        default=0
    )
    delivery_date = models.DateTimeField(
        _('delivery_date'),
        blank=True,
        null=True
    )
    available_couriers = jsonfield.JSONField(
        _('available_couriers'),
        default={}
    )
    # Auto-generated in save() when empty; see get_delivery_code /
    # get_tracking_number below.
    delivery_code = models.TextField(_('delivery_code'), blank=True)
    tracking_number = models.TextField(_('tracking_number'), blank=True)
    pickup_time = models.DateTimeField(
        _('pickup_time'),
        blank=True,
        validators=[validate_pickuptime]
    )
    considered_couriers = models.TextField(_('considered_couriers'), default=[])
    def __unicode__(self):
        # Text representation is simply the primary key.
        return str(self.id)
    def get_delivery_code(self):
        # 5-char code: first 3 + last 2 hex digits of a fresh time/uuid hash.
        hashstring = hashlib.sha256(
            str(timezone.now()) + str(timezone.now()) + str(uuid.uuid4())
        ).hexdigest()
        return hashstring[:3]+hashstring[-2:]
    def get_tracking_number(self):
        # 10-char tracking number built from two 5-char slices of the hash.
        hashstring = hashlib.sha256(
            str(timezone.now()) + str(timezone.now()) + str(uuid.uuid4())
        ).hexdigest()
        return hashstring[10:15]+hashstring[-15:-10]
    #Overriding Model.save to populate generated fields lazily.
    def save(self, *args, **kwargs):
        # Only generate codes when absent so existing values are preserved.
        if not self.delivery_code:
            self.delivery_code = self.get_delivery_code()
        if not self.tracking_number:
            self.tracking_number = self.get_tracking_number()
        if not self.pickup_time:
            # Fall back to the creation timestamp when no pickup time given.
            logging.warn("no pickup time")
            self.pickup_time = self.creation_date
        super(Quests, self).save(*args, **kwargs)
        # self.create_item_images_normal()
class QuestComments(models.Model):
    """A timestamped comment left by a questr on a quest."""
    quest = models.ForeignKey(Quests)
    questr = models.ForeignKey(QuestrUserProfile)
    time = models.DateTimeField(_('time'))
    comment = models.TextField(_('comment'))
    def __unicode__(self):
        # Must return text, not the raw integer pk (coercing an int to
        # unicode raises); also consistent with Quests.__unicode__.
        return str(self.id)
class QuestTransactional(models.Model):
    """Ties a quest to the shipper handling it for a single transaction,
    identified by a unique hash-based quest_code.
    """
    quest_code = models.CharField(_('quest_code'), max_length=64, unique=True)
    quest = models.ForeignKey(Quests)
    shipper = models.ForeignKey(QuestrUserProfile)
    transaction_type = models.IntegerField(_('transaction_type'), default=1)
    status = models.BooleanField(_('status'), default=False)
    def generate_hash(self):
        # SHA-256 of the current time plus the shipper's email; used as
        # quest_code.  NOTE(review): hashing a str argument only works on
        # Python 2 (Python 3 requires bytes).
        return hashlib.sha256(
            str(timezone.now()) + str(self.shipper.email)
        ).hexdigest()
    def get_truncated_quest_code(self):
        # First 7 characters of the code (short display form).
        return self.quest_code[:7]
    def get_token_id(self):
        # Last 6 characters of the code.
        return self.quest_code[-6:]
    REQUIRED_FIELDS = [
        'quest_code', 'id', 'quest', 'shipper', 'transaction_type']
    def __unicode__(self):
        return "{0}:{1} {2}".format(self.quest_code, self.quest, self.shipper)
    #Overriding Model.save to assign the hash code on first save.
    def save(self, *args, **kwargs):
        #check if the row with this hash already exists.
        if not self.quest_code:
            self.quest_code = self.generate_hash()
        # self.my_stuff = 'something I want to save in that field'
        super(QuestTransactional, self).save(*args, **kwargs)
class QuestToken(models.Model):
    """A short-lived verification token, valid for two hours after creation."""
    token_id = models.CharField(_('id'), max_length=20, primary_key=True)
    timeframe = models.DateTimeField(_('create_date'), default=timezone.now)
    def is_alive(self):
        # Alive while the token is less than 2 hours old.
        timedelta = timezone.now() - self.timeframe
        hours = 2
        allowable_time = float(hours * 60 * 60)
        return timedelta.total_seconds() < allowable_time
    def __unicode__(self):
        return "Token verifying ..."
    # Overriding Model.save to backfill the creation timestamp if missing.
    def save(self, *args, **kwargs):
        if not self.timeframe:
            self.timeframe = timezone.now()
        super(QuestToken, self).save(*args, **kwargs)
class QuestEvents(models.Model):
    """Event-log entry for a quest: an integer event code plus extra info."""
    current_time = timezone.now
    quest = models.ForeignKey(Quests)
    # NOTE(review): max_length is not a documented IntegerField option --
    # confirm against the Django version in use.
    event = models.IntegerField(_('event'), max_length=2, default=1)
    updated_on = models.DateTimeField(
        _('updated_on'),
        default=current_time
    )
    extrainfo = jsonfield.JSONField(
        _('extrainfo'),
        default='{}',
        max_length=9999
    )
    def save(self, *args, **kwargs):
        # Backfill updated_on with the current timestamp when missing.
        # Fixed: the previous code assigned the bare name ``current_time``,
        # which is a class attribute and not reachable from method scope
        # (NameError), and even via the class it would have stored the
        # callable rather than a datetime.
        if not self.updated_on:
            self.updated_on = timezone.now()
        super(QuestEvents, self).save(*args, **kwargs)
# class QuestPricing(models.Model):
# """Pricing model for quests"""
# current_time = timezone.now
# pricing = jsonfield.JSONField(_('pricing'), default={})
# questrs = models.ForeignKey(QuestrUserProfile, unique=True)
# updated_on = models.DateTimeField(
# _('updated_on'),
# default=current_time
# )
# def save(self, *args, **kwargs):
# if not self.updated_on:
# self.updated_on = current_time
# super(QuestPricing, self).save(*args, **kwargs)
| [
"logging.getLogger",
"pytz.timezone",
"django.utils.translation.ugettext_lazy",
"logging.warn",
"django.db.models.ForeignKey",
"django.core.exceptions.ValidationError",
"uuid.uuid4",
"django.utils.timezone.now"
] | [((399, 426), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (416, 426), False, 'import logging\n'), ((1532, 1591), 'django.db.models.ForeignKey', 'models.ForeignKey', (['QuestrUserProfile'], {'related_name': '"""quests"""'}), "(QuestrUserProfile, related_name='quests')\n", (1549, 1591), False, 'from django.db import models\n'), ((4880, 4905), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Quests'], {}), '(Quests)\n', (4897, 4905), False, 'from django.db import models\n'), ((4919, 4955), 'django.db.models.ForeignKey', 'models.ForeignKey', (['QuestrUserProfile'], {}), '(QuestrUserProfile)\n', (4936, 4955), False, 'from django.db import models\n'), ((5228, 5253), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Quests'], {}), '(Quests)\n', (5245, 5253), False, 'from django.db import models\n'), ((5268, 5304), 'django.db.models.ForeignKey', 'models.ForeignKey', (['QuestrUserProfile'], {}), '(QuestrUserProfile)\n', (5285, 5304), False, 'from django.db import models\n'), ((6996, 7021), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Quests'], {}), '(Quests)\n', (7013, 7021), False, 'from django.db import models\n'), ((1037, 1098), 'django.core.exceptions.ValidationError', 'ValidationError', (['"""Pickup time cannot be before current time!"""'], {}), "('Pickup time cannot be before current time!')\n", (1052, 1098), False, 'from django.core.exceptions import ValidationError\n'), ((1627, 1643), 'django.utils.translation.ugettext_lazy', '_', (['"""description"""'], {}), "('description')\n", (1628, 1643), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1695, 1705), 'django.utils.translation.ugettext_lazy', '_', (['"""title"""'], {}), "('title')\n", (1696, 1705), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1799, 1810), 'django.utils.translation.ugettext_lazy', '_', (['"""reward"""'], {}), "('reward')\n", (1800, 1810), True, 'from django.utils.translation import 
ugettext_lazy as _\n'), ((1908, 1924), 'django.utils.translation.ugettext_lazy', '_', (['"""item_images"""'], {}), "('item_images')\n", (1909, 1924), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2053, 2067), 'django.utils.translation.ugettext_lazy', '_', (['"""map_image"""'], {}), "('map_image')\n", (2054, 2067), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2158, 2169), 'django.utils.translation.ugettext_lazy', '_', (['"""status"""'], {}), "('status')\n", (2159, 2169), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2283, 2301), 'django.utils.translation.ugettext_lazy', '_', (['"""creation_date"""'], {}), "('creation_date')\n", (2284, 2301), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2375, 2384), 'django.utils.translation.ugettext_lazy', '_', (['"""size"""'], {}), "('size')\n", (2376, 2384), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2494, 2506), 'django.utils.translation.ugettext_lazy', '_', (['"""shipper"""'], {}), "('shipper')\n", (2495, 2506), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2585, 2596), 'django.utils.translation.ugettext_lazy', '_', (['"""pickup"""'], {}), "('pickup')\n", (2586, 2596), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2644, 2656), 'django.utils.translation.ugettext_lazy', '_', (['"""dropoff"""'], {}), "('dropoff')\n", (2645, 2656), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2707, 2722), 'django.utils.translation.ugettext_lazy', '_', (['"""isaccepted"""'], {}), "('isaccepted')\n", (2708, 2722), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2776, 2791), 'django.utils.translation.ugettext_lazy', '_', (['"""isnotified"""'], {}), "('isnotified')\n", (2777, 2791), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2862, 2885), 'django.utils.translation.ugettext_lazy', '_', 
(['"""is_questr_reviewed"""'], {}), "('is_questr_reviewed')\n", (2863, 2885), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2970, 2994), 'django.utils.translation.ugettext_lazy', '_', (['"""is_shipper_reviewed"""'], {}), "('is_shipper_reviewed')\n", (2971, 2994), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3062, 3078), 'django.utils.translation.ugettext_lazy', '_', (['"""is_complete"""'], {}), "('is_complete')\n", (3063, 3078), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3130, 3143), 'django.utils.translation.ugettext_lazy', '_', (['"""ishidden"""'], {}), "('ishidden')\n", (3131, 3143), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3204, 3217), 'django.utils.translation.ugettext_lazy', '_', (['"""distance"""'], {}), "('distance')\n", (3205, 3217), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3344, 3362), 'django.utils.translation.ugettext_lazy', '_', (['"""delivery_date"""'], {}), "('delivery_date')\n", (3345, 3362), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3462, 3485), 'django.utils.translation.ugettext_lazy', '_', (['"""available_couriers"""'], {}), "('available_couriers')\n", (3463, 3485), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3549, 3567), 'django.utils.translation.ugettext_lazy', '_', (['"""delivery_code"""'], {}), "('delivery_code')\n", (3550, 3567), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3620, 3640), 'django.utils.translation.ugettext_lazy', '_', (['"""tracking_number"""'], {}), "('tracking_number')\n", (3621, 3640), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3702, 3718), 'django.utils.translation.ugettext_lazy', '_', (['"""pickup_time"""'], {}), "('pickup_time')\n", (3703, 3718), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3830, 3854), 'django.utils.translation.ugettext_lazy', '_', 
(['"""considered_couriers"""'], {}), "('considered_couriers')\n", (3831, 3854), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4988, 4997), 'django.utils.translation.ugettext_lazy', '_', (['"""time"""'], {}), "('time')\n", (4989, 4997), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5030, 5042), 'django.utils.translation.ugettext_lazy', '_', (['"""comment"""'], {}), "('comment')\n", (5031, 5042), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5171, 5186), 'django.utils.translation.ugettext_lazy', '_', (['"""quest_code"""'], {}), "('quest_code')\n", (5172, 5186), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5348, 5369), 'django.utils.translation.ugettext_lazy', '_', (['"""transaction_type"""'], {}), "('transaction_type')\n", (5349, 5369), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5415, 5426), 'django.utils.translation.ugettext_lazy', '_', (['"""status"""'], {}), "('status')\n", (5416, 5426), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6314, 6321), 'django.utils.translation.ugettext_lazy', '_', (['"""id"""'], {}), "('id')\n", (6315, 6321), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6393, 6409), 'django.utils.translation.ugettext_lazy', '_', (['"""create_date"""'], {}), "('create_date')\n", (6394, 6409), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7054, 7064), 'django.utils.translation.ugettext_lazy', '_', (['"""event"""'], {}), "('event')\n", (7055, 7064), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7138, 7153), 'django.utils.translation.ugettext_lazy', '_', (['"""updated_on"""'], {}), "('updated_on')\n", (7139, 7153), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7235, 7249), 'django.utils.translation.ugettext_lazy', '_', (['"""extrainfo"""'], {}), "('extrainfo')\n", (7236, 7249), True, 'from django.utils.translation 
import ugettext_lazy as _\n'), ((4656, 4686), 'logging.warn', 'logging.warn', (['"""no pickup time"""'], {}), "('no pickup time')\n", (4668, 4686), False, 'import logging\n'), ((6478, 6492), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (6490, 6492), False, 'from django.utils import timezone\n'), ((6814, 6828), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (6826, 6828), False, 'from django.utils import timezone\n'), ((961, 994), 'pytz.timezone', 'pytz.timezone', (['settings.TIME_ZONE'], {}), '(settings.TIME_ZONE)\n', (974, 994), False, 'import pytz\n'), ((1332, 1344), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1342, 1344), False, 'import uuid\n'), ((922, 936), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (934, 936), False, 'from django.utils import timezone\n'), ((1288, 1302), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1300, 1302), False, 'from django.utils import timezone\n'), ((1310, 1324), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1322, 1324), False, 'from django.utils import timezone\n'), ((4055, 4067), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4065, 4067), False, 'import uuid\n'), ((4270, 4282), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4280, 4282), False, 'import uuid\n'), ((5520, 5534), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (5532, 5534), False, 'from django.utils import timezone\n'), ((4011, 4025), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4023, 4025), False, 'from django.utils import timezone\n'), ((4033, 4047), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4045, 4047), False, 'from django.utils import timezone\n'), ((4226, 4240), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4238, 4240), False, 'from django.utils import timezone\n'), ((4248, 4262), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4260, 4262), False, 'from django.utils import 
timezone\n')] |
from banco import con
from time import sleep
import os
# Validação de Valor Inteiro
def leiaint(valor):
    """Prompt with *valor* until the user types a valid integer; return it.

    Re-prompts on non-numeric input instead of raising.
    """
    while True:
        try:
            ent = int(input(valor))
        except ValueError:
            # Only bad numeric input is retried.  The previous bare
            # ``except:`` also swallowed EOFError / KeyboardInterrupt,
            # which made the loop spin forever on a closed stdin.
            print('\033[1;33mDigite um valor inteiro\033[m')
        else:
            return ent
# Validação de String
def leiaTexto(txt):
    """Prompt with *txt* until the user enters text that is not purely
    numeric; return the string read.

    Fixed: the original validated inside an ``except`` block that
    ``input`` never triggers, so the success path never broke out of the
    loop and the function prompted forever.
    """
    while True:
        ent = str(input(txt))
        if ent.isnumeric():
            print('\033[1;33mDigite um texto válido\033[m')
        else:
            return ent
# Cabecalho
def cabecalho(msg):
    """Print *msg* upper-cased and centred between two 40-dash rules."""
    rule = '-' * 40
    print(rule)
    print(msg.center(40).upper())
    print(rule)
# Menu Principal
def menuprincipal():
    """Print the main menu options."""
    opcoes = '''
    [1] - Inserir Contato
    [2] - Listar Contatos
    [3] - Consultar Contato
    [4] - Editar Contato
    [5] - Excluir
    [6] - Sair
    '''
    print(opcoes)
# Inserir Contato
def insertContato():
    """Interactively create a new contact.

    Prompts for every field with basic length/format validation, checks
    that the registro is not already present, then inserts the row and
    commits.
    """
    cabecalho('NOVO CONTATO')
    try:
        # registro must be exactly 5 characters.
        while True:
            regs = leiaTexto('Nº REGISTRO: ').strip()
            if len(regs) < 5 or len(regs) > 5:
                print('\033[1;33mPor favor, insira um registro válido\033[m')
            else:
                break
        while True:
            nome = leiaTexto('NOME: ').strip().title()
            if len(nome) == 0 or nome.isnumeric():
                print('\033[1;33mPreencha o campo\033[m')
            else:
                break
        # CHAPA (badge) must be exactly 5 characters.
        while True:
            matr = leiaTexto('CHAPA: ').strip().upper()
            if len(matr) <= 4 or len(matr) > 5:
                print('\033[1;33mPor favor, insira uma matricula válida\033[m')
            else:
                break
        while True:
            func = leiaTexto('FUNÇÃO: ').strip().title()
            if len(func) == 0 or func.isnumeric():
                print('\033[1;33mPreencha o campo\033[m')
            else:
                break
        # Shift/period: only 1 or 2 are accepted.
        while True:
            period = leiaint('PERÍODO: ')
            if period < 1 or period > 2:
                print('\033[1;33mPor favor, insira um período corretamente\033[m')
            else:
                break
        while True:
            tel = leiaTexto('TELEFONE 1: ').strip()
            if len(tel) < 11 or len(tel) > 14:
                print('\033[1;33mPor favor, Insira um telefone válido\033[m')
            else:
                break
        # Second phone number is optional (may be empty).
        while True:
            tel_2 = leiaTexto('TELEFONE 2: ').strip()
            if len(tel_2) > 14:
                print('\033[1;33mTelefone Inválido\033[m')
            else:
                break
    except:
        print('\033[1;31mErro na Inserção de dados\033[m')
    else:
        try:
            c = con.cursor()
        except ConnectionError:
            print('\033[1;31mErro na conexão com o banco de dados\033[m')
        else:
            try:
                # WARNING: user input is interpolated directly into the SQL
                # string -- vulnerable to SQL injection.  Prefer parameterized
                # queries, e.g. c.execute(sql, params).
                ssql = 'SELECT * FROM contato WHERE registro= "'+regs+'"'
                c.execute(ssql)
                inserir = c.fetchall()
            except:
                print('\033[1;33mErro na conferência\033[m')
            else:
                if inserir:
                    print('\033[1;33mCONTATO JÁ EXISTE\033[m')
                else:
                    try:
                        # Same injection caveat applies here.
                        sql = 'INSERT INTO contato(registro, nome, matricula, funcao, periodo, telefone, telefone_2) SELECT "'+regs+'", "'+nome+'", "'+matr+'", "'+func+'", "'+str(period)+'", "'+tel+'", "'+tel_2+'" WHERE NOT EXISTS (SELECT 1 FROM contato WHERE registro = "'+regs+'")'
                        c.execute(sql)
                    except:
                        print(f'Erro ao inserir contato')
                    else:
                        print('\033[1;32mCONTATO INSERIDO COM SUCESSO!\033[m')
                        con.commit()
# Listar Contatos
def listarContatos():
    """List every contact ordered by registro, paginating 30 rows at a
    time (Windows ``pause`` / ``cls``), then wait for 8 to return to the
    menu.
    """
    cabecalho('LISTAR CONTATOS')
    try:
        c = con.cursor()
    except ConnectionError:
        print('\033[1;31mErro na conexão com o banco de dados\033[m')
    else:
        try:
            lsql = 'SELECT * FROM contato ORDER BY registro asc'
            c.execute(lsql)
        except:
            print('\033[1;33mErro ao listar contatos\033[m')
        else:
            dados = c.fetchall()
            contador = 0
            limite = 30
            for d in dados:
                print(f'\033[1;36mNº REGISTRO:\033[m{d[1]} \033[1;36mNOME:\033[m{d[2]:<32} \033[1;36mCHAPA:\033[m{d[3]} \033[1;36mFUNÇÃO:\033[m{d[4]:<10} \033[1;36mPERÍODO:\033[m{d[5]} \033[1;36mTELEFONE:\033[m{d[6]} \033[1;36mTELEFONE 2:\033[m{d[7]}')
                print()
                contador += 1
                # Pause/clear the console every ``limite`` rows.
                if contador > limite:
                    contador = 0
                    os.system('pause')
                    os.system('cls')
            # NOTE(review): commit after a plain SELECT looks unnecessary --
            # confirm the driver's transaction semantics.
            con.commit()
            while True:
                v = leiaint('PRESSIONE 8 PARA VOLTAR AO MENU: ')
                if v < 8 or v > 8 :
                    print('\033[1;33mpressione a tecla correta\033[m')
                else:
                    break
            os.system('cls')
# Consultar Contato
def consContato():
    """Look up and print a single contact by its 5-character registro."""
    cabecalho('CONSULTAR CONTATO')
    try:
        while True:
            regs = leiaTexto('Nº REGISTRO: ').strip()
            if len(regs) < 5 or len(regs) > 5:
                print('\033[1;33mPor favor, insira um registro válido\033[m')
            else:
                break
    except:
        print('\033[1;31mErro na consulta do contato\033[m')
    else:
        try:
            c = con.cursor()
        except ConnectionError:
            print('\033[1;31mErro na conexão com o banco de dados\033[m')
        else:
            try:
                # WARNING: registro is interpolated directly into the SQL --
                # SQL injection risk; prefer parameterized queries.
                csql = 'SELECT * FROM contato WHERE registro = "'+regs+'"'
                c.execute(csql)
                mostra = c.fetchall()
            except:
                print('\033[1;33mErro ao Consultar Contato\033[m')
            else:
                if mostra:
                    for m in mostra:
                        print(f'\033[1;36mNº REGISTRO:\033[m{m[1]} \033[1;36mNOME:\033[m{m[2]} \033[1;36mCHAPA:\033[m{m[3]} \033[1;36mFUNÇÃO:\033[m{m[4]:^<8} \033[1;36mPERÍODO:\033[m{m[5]} \033[1;36mTELEFONE:\033[m{m[6]} \033[1;36mTELEFONE 2:\033[m{m[7]}')
                else:
                    print('\033[1;33mESSE CONTATO NÃO ESTÁ CADASTRADO\033[m')
                con.commit()
# Editar Contato
def editContato():
    """Edit an existing contact found by registro.

    Only the period and the two phone numbers can be changed; the update
    is committed immediately.
    """
    cabecalho('EDITAR CONTATO')
    try:
        while True:
            regs = leiaTexto('Nº REGISTRO: ').strip()
            if len(regs) < 5 or len(regs) > 5:
                print('\033[1;33mPor favor, digite um registro válido\033[m')
            else:
                break
    except:
        print('\033[1;33mErro no contato\033[m')
    else:
        try:
            c = con.cursor()
        except:
            print('\033[1;31mErro na Conexão com Banco de Dados\033[m')
        else:
            try:
                # WARNING: SQL built by string concatenation from user input
                # (here and in the UPDATE below) -- SQL injection risk.
                sql = 'SELECT * FROM contato WHERE registro = "'+regs+'"'
                c.execute(sql)
                mostra = c.fetchall()
            except:
                print('\033[1;33mErro na busca do contato\033[m')
            else:
                if mostra:
                    while True:
                        period = leiaint('PERÍODO: ')
                        if period < 1 or period > 2:
                            print('\033[1;33mPor favor, insira um período corretamente\033[m')
                        else:
                            break
                    while True:
                        tel = leiaTexto('TELEFONE 1: ').strip()
                        if len(tel) < 11 or len(tel) > 14:
                            print('\033[1;33mPor favor, Insira um telefone válido\033[m')
                        else:
                            break
                    while True:
                        tel_2 = leiaTexto('TELEFONE 2: ').strip()
                        if len(tel_2) > 14:
                            print('\033[1;33mTelefone Inválido\033[m')
                        else:
                            break
                    esql = 'UPDATE contato SET periodo="'+str(period)+'", telefone="'+tel+'", telefone_2="'+tel_2+'" WHERE registro= "'+regs+'"'
                    c.execute(esql)
                    con.commit()
                    print('\033[1;32mCONTATO ALTERADO COM SUCESSO!\033[m')
                    sleep(1)
                else:
                    print('\033[1;33mCONTATO NÃO ESTÁ CADASTRADO\033[m')
# Deletar Contato
def apagaContato():
    """Delete a contact by registro after an S/N confirmation prompt."""
    cabecalho('APAGAR CONTATO')
    try:
        while True:
            regs = leiaTexto('Nº Registro que deseja apagar o contato: ').strip()
            if len(regs) < 5 or len(regs) > 5:
                print('\033[1;33mPor favor, digite um registro válido\033[m')
            else:
                break
    except:
        print('\033[1;33mErro na busca do contato\033[m')
    else:
        try:
            c = con.cursor()
        except ConnectionError:
            print('\033[1;31mErro na conexão com o banco de dados\033[m')
        else:
            try:
                # WARNING: SQL built by concatenating user input (here and in
                # the DELETE below) -- SQL injection risk.
                sql = 'SELECT * FROM contato WHERE registro = "'+regs+'"'
                c.execute(sql)
                mostra = c.fetchall()
            except:
                print('\033[1;33mErro na busca do contato\033[m')
            else:
                # Confirmation happens even when the contact was not found;
                # the existence check only occurs after an "S" answer.
                while True:
                    # NOTE(review): [0] raises IndexError when the answer is
                    # empty -- confirm intended behaviour.
                    resp = leiaTexto('Tem certeza que deseja apagar o registro [S/N] ?: ').strip().upper()[0]
                    if resp not in 'SN':
                        print('Responda')
                    else:
                        break
                if resp in 'S':
                    if mostra:
                        try:
                            dsql = 'DELETE FROM contato WHERE registro = "'+regs+'"'
                            c.execute(dsql)
                        except:
                            print('\033[1;33mErro ao deletar contato\033[m')
                        else:
                            print('\033[1;32mCONTATO DELETADO COM SUCESSO!\033[m')
                            con.commit()
                    else:
                        print('\033[1;33mCONTATO NÃO ESTÁ CADASTRADO\033[m')
                else:
                    print('nada deletado')
| [
"banco.con.commit",
"os.system",
"time.sleep",
"banco.con.cursor"
] | [((3905, 3917), 'banco.con.cursor', 'con.cursor', ([], {}), '()\n', (3915, 3917), False, 'from banco import con\n'), ((2721, 2733), 'banco.con.cursor', 'con.cursor', ([], {}), '()\n', (2731, 2733), False, 'from banco import con\n'), ((4799, 4811), 'banco.con.commit', 'con.commit', ([], {}), '()\n', (4809, 4811), False, 'from banco import con\n'), ((5068, 5084), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (5077, 5084), False, 'import os\n'), ((5521, 5533), 'banco.con.cursor', 'con.cursor', ([], {}), '()\n', (5531, 5533), False, 'from banco import con\n'), ((6791, 6803), 'banco.con.cursor', 'con.cursor', ([], {}), '()\n', (6801, 6803), False, 'from banco import con\n'), ((8950, 8962), 'banco.con.cursor', 'con.cursor', ([], {}), '()\n', (8960, 8962), False, 'from banco import con\n'), ((6361, 6373), 'banco.con.commit', 'con.commit', ([], {}), '()\n', (6371, 6373), False, 'from banco import con\n'), ((4731, 4749), 'os.system', 'os.system', (['"""pause"""'], {}), "('pause')\n", (4740, 4749), False, 'import os\n'), ((4770, 4786), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (4779, 4786), False, 'import os\n'), ((8282, 8294), 'banco.con.commit', 'con.commit', ([], {}), '()\n', (8292, 8294), False, 'from banco import con\n'), ((8390, 8398), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (8395, 8398), False, 'from time import sleep\n'), ((3796, 3808), 'banco.con.commit', 'con.commit', ([], {}), '()\n', (3806, 3808), False, 'from banco import con\n'), ((10111, 10123), 'banco.con.commit', 'con.commit', ([], {}), '()\n', (10121, 10123), False, 'from banco import con\n')] |
import os
import os.path
import subprocess
import sys
if __name__ == "__main__":
dirname = sys.argv[1]
for x in os.listdir(dirname):
if x.endswith('.crt'):
try:
filename = os.path.join(dirname, x)
filehash = subprocess.check_output(['openssl', 'x509', '-noout', '-hash', '-in', filename]).strip()
filehash += '.0'
hash_filename = os.path.join(dirname, filehash)
if os.path.exists(hash_filename):
print(x, filehash)
os.remove(hash_filename)
os.symlink(x, hash_filename)
except:
print("error in handling file:", filename)
| [
"subprocess.check_output",
"os.path.exists",
"os.listdir",
"os.path.join",
"os.symlink",
"os.remove"
] | [((121, 140), 'os.listdir', 'os.listdir', (['dirname'], {}), '(dirname)\n', (131, 140), False, 'import os\n'), ((217, 241), 'os.path.join', 'os.path.join', (['dirname', 'x'], {}), '(dirname, x)\n', (229, 241), False, 'import os\n'), ((423, 454), 'os.path.join', 'os.path.join', (['dirname', 'filehash'], {}), '(dirname, filehash)\n', (435, 454), False, 'import os\n'), ((474, 503), 'os.path.exists', 'os.path.exists', (['hash_filename'], {}), '(hash_filename)\n', (488, 503), False, 'import os\n'), ((605, 633), 'os.symlink', 'os.symlink', (['x', 'hash_filename'], {}), '(x, hash_filename)\n', (615, 633), False, 'import os\n'), ((564, 588), 'os.remove', 'os.remove', (['hash_filename'], {}), '(hash_filename)\n', (573, 588), False, 'import os\n'), ((269, 354), 'subprocess.check_output', 'subprocess.check_output', (["['openssl', 'x509', '-noout', '-hash', '-in', filename]"], {}), "(['openssl', 'x509', '-noout', '-hash', '-in', filename]\n )\n", (292, 354), False, 'import subprocess\n')] |
# Copyright 2022 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test for augment contrast node
"""
import numpy as np
import pytest
from peekingduck.pipeline.nodes.augment.contrast import Node
@pytest.fixture
def contrast_same():
    """Contrast node configured with alpha=1.0 (identity transform)."""
    return Node({"input": ["img"], "output": ["img"], "alpha": 1.0})
@pytest.fixture
def contrast_increase():
    """Contrast node configured with alpha=2.0 (doubles pixel values)."""
    return Node({"input": ["img"], "output": ["img"], "alpha": 2.0})
class TestContrast:
    """Unit tests for the augment.contrast Node."""

    def test_no_change(self, contrast_same, create_image):
        # alpha=1.0 must leave the image untouched.
        original_img = create_image((28, 28, 3))
        input1 = {"img": original_img}
        results = contrast_same.run(input1)
        np.testing.assert_equal(original_img, results["img"])

    def test_increase_contrast(self, contrast_increase):
        # alpha=2.0 doubles every pixel value while preserving the shape.
        original_img = np.ones(shape=(28, 28, 3), dtype=np.uint8)
        input1 = {"img": original_img}
        results = contrast_increase.run(input1)
        assert original_img.shape == results["img"].shape
        with pytest.raises(AssertionError):
            np.testing.assert_equal(original_img, results["img"])
        np.testing.assert_equal(results["img"][0][0], original_img[0][0] * 2)

    def test_overflow(self, contrast_increase):
        # Test positive overflow - any values that sum up to higher than 255 will
        # be clipped at 255
        bright_img = np.ones(shape=(28, 28, 3), dtype=np.uint8) * 250
        bright_input = {"img": bright_img}
        results = contrast_increase.run(bright_input)
        np.testing.assert_equal(results["img"][0][0], np.array([255, 255, 255]))

    def test_beta_range(self):
        # NOTE(review): the name says "beta" but the option exercised is
        # "alpha" -- consider renaming for clarity.
        with pytest.raises(ValueError) as excinfo:
            Node({"input": ["img"], "output": ["img"], "alpha": -0.5})
        assert str(excinfo.value) == "alpha must be between [0.0, 3.0]"
        with pytest.raises(ValueError) as excinfo:
            Node({"input": ["img"], "output": ["img"], "alpha": 3.1})
        assert str(excinfo.value) == "alpha must be between [0.0, 3.0]"
| [
"numpy.ones",
"numpy.testing.assert_equal",
"numpy.array",
"pytest.raises",
"peekingduck.pipeline.nodes.augment.contrast.Node"
] | [((763, 820), 'peekingduck.pipeline.nodes.augment.contrast.Node', 'Node', (["{'input': ['img'], 'output': ['img'], 'alpha': 1.0}"], {}), "({'input': ['img'], 'output': ['img'], 'alpha': 1.0})\n", (767, 820), False, 'from peekingduck.pipeline.nodes.augment.contrast import Node\n'), ((891, 948), 'peekingduck.pipeline.nodes.augment.contrast.Node', 'Node', (["{'input': ['img'], 'output': ['img'], 'alpha': 2.0}"], {}), "({'input': ['img'], 'output': ['img'], 'alpha': 2.0})\n", (895, 948), False, 'from peekingduck.pipeline.nodes.augment.contrast import Node\n'), ((1186, 1239), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['original_img', "results['img']"], {}), "(original_img, results['img'])\n", (1209, 1239), True, 'import numpy as np\n'), ((1321, 1363), 'numpy.ones', 'np.ones', ([], {'shape': '(28, 28, 3)', 'dtype': 'np.uint8'}), '(shape=(28, 28, 3), dtype=np.uint8)\n', (1328, 1363), True, 'import numpy as np\n'), ((1628, 1697), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["results['img'][0][0]", '(original_img[0][0] * 2)'], {}), "(results['img'][0][0], original_img[0][0] * 2)\n", (1651, 1697), True, 'import numpy as np\n'), ((1523, 1552), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1536, 1552), False, 'import pytest\n'), ((1566, 1619), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['original_img', "results['img']"], {}), "(original_img, results['img'])\n", (1589, 1619), True, 'import numpy as np\n'), ((1878, 1920), 'numpy.ones', 'np.ones', ([], {'shape': '(28, 28, 3)', 'dtype': 'np.uint8'}), '(shape=(28, 28, 3), dtype=np.uint8)\n', (1885, 1920), True, 'import numpy as np\n'), ((2078, 2103), 'numpy.array', 'np.array', (['[255, 255, 255]'], {}), '([255, 255, 255])\n', (2086, 2103), True, 'import numpy as np\n'), ((2150, 2175), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2163, 2175), False, 'import pytest\n'), ((2200, 2258), 
'peekingduck.pipeline.nodes.augment.contrast.Node', 'Node', (["{'input': ['img'], 'output': ['img'], 'alpha': -0.5}"], {}), "({'input': ['img'], 'output': ['img'], 'alpha': -0.5})\n", (2204, 2258), False, 'from peekingduck.pipeline.nodes.augment.contrast import Node\n'), ((2345, 2370), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2358, 2370), False, 'import pytest\n'), ((2395, 2452), 'peekingduck.pipeline.nodes.augment.contrast.Node', 'Node', (["{'input': ['img'], 'output': ['img'], 'alpha': 3.1}"], {}), "({'input': ['img'], 'output': ['img'], 'alpha': 3.1})\n", (2399, 2452), False, 'from peekingduck.pipeline.nodes.augment.contrast import Node\n')] |
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main training protocol used for unsupervised disentanglement models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from disentanglement_lib.data.ground_truth import named_data
from disentanglement_lib.data.ground_truth import util
from disentanglement_lib.data.ground_truth.ground_truth_data import *
from disentanglement_lib.methods.shared import losses
from disentanglement_lib.methods.unsupervised import gaussian_encoder_model
from disentanglement_lib.methods.unsupervised import model # pylint: disable=unused-import
from disentanglement_lib.methods.unsupervised.gaussian_encoder_model import GaussianModel
from disentanglement_lib.methods.unsupervised.model import gaussian_log_density
from disentanglement_lib.utils import results
from disentanglement_lib.evaluation.metrics import mig
import numpy as np
from argparse import ArgumentParser
import pytorch_lightning as pl
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
import gin
import pathlib, shutil
import wandb
from disentanglement_lib.utils.hub import convert_model
from disentanglement_lib.utils.mi_estimators import estimate_entropies
from disentanglement_lib.visualize.visualize_util import plt_sample_traversal
@gin.configurable("train", denylist=[])
class Train(pl.LightningModule):
"""Trains the estimator and exports the snapshot and the gin config.
The use of this function requires the gin binding 'dataset.name' to be
specified as that determines the data set used for training.
Args:
model: GaussianEncoderModel that should be trained and exported.
training_steps: Integer with number of training steps.
random_seed: Integer with random seed used for training.
batch_size: Integer with the batch size.
name: Optional string with name of the model (can be used to name models).
model_num: Optional integer with model number (can be used to identify
models).
"""
def __init__(self,
model=gin.REQUIRED,
training_steps=gin.REQUIRED,
random_seed=gin.REQUIRED,
batch_size=gin.REQUIRED,
opt_name=torch.optim.Adam,
lr=5e-4,
eval_numbers=10,
name="",
model_num=None):
super().__init__()
self.training_steps = training_steps
self.random_seed = random_seed
self.batch_size = batch_size
self.lr = lr
self.name = name
self.model_num = model_num
self.eval_numbers = eval_numbers
wandb.config['dataset'] = gin.query_parameter('dataset.name')
self.save_hyperparameters()
self.opt_name = opt_name
self.data = named_data.get_named_ground_truth_data()
img_shape = np.array(self.data.observation_shape)[[2, 0, 1]].tolist()
# img_shape = [1,64,64]
self.ae = model(img_shape)
def training_step(self, batch, batch_idx):
if (self.global_step + 1) % (self.training_steps // self.eval_numbers) == 0:
self.evaluate()
x = batch
loss, summary = self.ae.model_fn(x.float(), None)
self.log_dict(summary)
return loss
def evaluate(self) -> None:
model = self.ae
model.cpu()
model.eval()
dic_log = {}
dic_log.update(self.visualize_model(model))
wandb.log(dic_log)
model.cuda()
model.train()
def visualize_model(self, model) -> dict:
_encoder, _decoder = convert_model(model)
num_latent = self.ae.num_latent
mu = torch.zeros(1, num_latent)
fig = plt_sample_traversal(mu, _decoder, 8, range(num_latent), 2)
return {'traversal': wandb.Image(fig)}
def train_dataloader(self) -> DataLoader:
dl = DataLoader(self.data,
batch_size=self.batch_size,
num_workers=4,
shuffle=True,
pin_memory=True)
return dl
def configure_optimizers(self):
optimizer = self.opt_name(self.parameters(), lr=self.lr)
return optimizer
def save_model(self, file):
dir = '/tmp/models/' + str(np.random.randint(99999))
file_path = os.path.join(dir, file)
pathlib.Path(dir).mkdir(parents=True, exist_ok=True)
torch.save(self.ae.state_dict(), file_path)
wandb.save(file_path, base_path=dir)
| [
"wandb.log",
"disentanglement_lib.methods.unsupervised.model.train",
"disentanglement_lib.methods.unsupervised.model.cuda",
"wandb.save",
"wandb.Image",
"disentanglement_lib.data.ground_truth.named_data.get_named_ground_truth_data",
"disentanglement_lib.methods.unsupervised.model.cpu",
"gin.query_para... | [((1989, 2027), 'gin.configurable', 'gin.configurable', (['"""train"""'], {'denylist': '[]'}), "('train', denylist=[])\n", (2005, 2027), False, 'import gin\n'), ((3411, 3446), 'gin.query_parameter', 'gin.query_parameter', (['"""dataset.name"""'], {}), "('dataset.name')\n", (3430, 3446), False, 'import gin\n'), ((3536, 3576), 'disentanglement_lib.data.ground_truth.named_data.get_named_ground_truth_data', 'named_data.get_named_ground_truth_data', ([], {}), '()\n', (3574, 3576), False, 'from disentanglement_lib.data.ground_truth import named_data\n'), ((3705, 3721), 'disentanglement_lib.methods.unsupervised.model', 'model', (['img_shape'], {}), '(img_shape)\n', (3710, 3721), False, 'from disentanglement_lib.methods.unsupervised import model\n'), ((4075, 4086), 'disentanglement_lib.methods.unsupervised.model.cpu', 'model.cpu', ([], {}), '()\n', (4084, 4086), False, 'from disentanglement_lib.methods.unsupervised import model\n'), ((4095, 4107), 'disentanglement_lib.methods.unsupervised.model.eval', 'model.eval', ([], {}), '()\n', (4105, 4107), False, 'from disentanglement_lib.methods.unsupervised import model\n'), ((4189, 4207), 'wandb.log', 'wandb.log', (['dic_log'], {}), '(dic_log)\n', (4198, 4207), False, 'import wandb\n'), ((4216, 4228), 'disentanglement_lib.methods.unsupervised.model.cuda', 'model.cuda', ([], {}), '()\n', (4226, 4228), False, 'from disentanglement_lib.methods.unsupervised import model\n'), ((4237, 4250), 'disentanglement_lib.methods.unsupervised.model.train', 'model.train', ([], {}), '()\n', (4248, 4250), False, 'from disentanglement_lib.methods.unsupervised import model\n'), ((4327, 4347), 'disentanglement_lib.utils.hub.convert_model', 'convert_model', (['model'], {}), '(model)\n', (4340, 4347), False, 'from disentanglement_lib.utils.hub import convert_model\n'), ((4401, 4427), 'torch.zeros', 'torch.zeros', (['(1)', 'num_latent'], {}), '(1, num_latent)\n', (4412, 4427), False, 'import torch\n'), ((4609, 4709), 
'torch.utils.data.DataLoader', 'DataLoader', (['self.data'], {'batch_size': 'self.batch_size', 'num_workers': '(4)', 'shuffle': '(True)', 'pin_memory': '(True)'}), '(self.data, batch_size=self.batch_size, num_workers=4, shuffle=\n True, pin_memory=True)\n', (4619, 4709), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((5060, 5083), 'os.path.join', 'os.path.join', (['dir', 'file'], {}), '(dir, file)\n', (5072, 5083), False, 'import os\n'), ((5205, 5241), 'wandb.save', 'wandb.save', (['file_path'], {'base_path': 'dir'}), '(file_path, base_path=dir)\n', (5215, 5241), False, 'import wandb\n'), ((4531, 4547), 'wandb.Image', 'wandb.Image', (['fig'], {}), '(fig)\n', (4542, 4547), False, 'import wandb\n'), ((5014, 5038), 'numpy.random.randint', 'np.random.randint', (['(99999)'], {}), '(99999)\n', (5031, 5038), True, 'import numpy as np\n'), ((5092, 5109), 'pathlib.Path', 'pathlib.Path', (['dir'], {}), '(dir)\n', (5104, 5109), False, 'import pathlib, shutil\n'), ((3597, 3634), 'numpy.array', 'np.array', (['self.data.observation_shape'], {}), '(self.data.observation_shape)\n', (3605, 3634), True, 'import numpy as np\n')] |
from django.db import models
from django.db.models import Model, TextField, DateTimeField, ForeignKey, CASCADE
from accounts.models import User
from rooms.models import Room
# Create your models here.
class MessageModel(Model):
"""
This class represents a chat message. It has a owner (user), timestamp and
the message body.
"""
user = ForeignKey(User, on_delete=CASCADE, verbose_name='user',
related_name='from_user', db_index=True)
room = ForeignKey(Room, on_delete=CASCADE, verbose_name='room',
related_name='to_room', db_index=True)
timestamp = DateTimeField('timestamp', auto_now_add=True, editable=False,
db_index=True)
body = TextField('body')
| [
"django.db.models.DateTimeField",
"django.db.models.TextField",
"django.db.models.ForeignKey"
] | [((357, 459), 'django.db.models.ForeignKey', 'ForeignKey', (['User'], {'on_delete': 'CASCADE', 'verbose_name': '"""user"""', 'related_name': '"""from_user"""', 'db_index': '(True)'}), "(User, on_delete=CASCADE, verbose_name='user', related_name=\n 'from_user', db_index=True)\n", (367, 459), False, 'from django.db.models import Model, TextField, DateTimeField, ForeignKey, CASCADE\n'), ((488, 588), 'django.db.models.ForeignKey', 'ForeignKey', (['Room'], {'on_delete': 'CASCADE', 'verbose_name': '"""room"""', 'related_name': '"""to_room"""', 'db_index': '(True)'}), "(Room, on_delete=CASCADE, verbose_name='room', related_name=\n 'to_room', db_index=True)\n", (498, 588), False, 'from django.db.models import Model, TextField, DateTimeField, ForeignKey, CASCADE\n'), ((627, 703), 'django.db.models.DateTimeField', 'DateTimeField', (['"""timestamp"""'], {'auto_now_add': '(True)', 'editable': '(False)', 'db_index': '(True)'}), "('timestamp', auto_now_add=True, editable=False, db_index=True)\n", (640, 703), False, 'from django.db.models import Model, TextField, DateTimeField, ForeignKey, CASCADE\n'), ((745, 762), 'django.db.models.TextField', 'TextField', (['"""body"""'], {}), "('body')\n", (754, 762), False, 'from django.db.models import Model, TextField, DateTimeField, ForeignKey, CASCADE\n')] |
# Copyright © 2021 Ingram Micro Inc. All rights reserved.
from dj_cqrs.constants import SignalType
from dj_cqrs.controller.consumer import route_signal_to_replica_model
from dj_cqrs.mixins import ReplicaMixin
import pytest
def test_bad_model(caplog):
route_signal_to_replica_model(SignalType.SAVE, 'invalid', {})
assert 'No model with such CQRS_ID: invalid.' in caplog.text
@pytest.mark.django_db
def test_bad_signal(caplog):
route_signal_to_replica_model('invalid', 'basic', {})
assert 'Bad signal type "invalid" for CQRS_ID "basic".' in caplog.text
@pytest.mark.django_db
def test_save_model(mocker):
cqrs_save_mock = mocker.patch.object(ReplicaMixin, 'cqrs_save')
route_signal_to_replica_model(SignalType.SAVE, 'basic', {}, {})
cqrs_save_mock.assert_called_once_with({}, previous_data={})
@pytest.mark.django_db
def test_delete_model(mocker):
cqrs_delete_mock = mocker.patch.object(ReplicaMixin, 'cqrs_delete')
route_signal_to_replica_model(SignalType.DELETE, 'basic', {'id': 1})
cqrs_delete_mock.assert_called_once_with({'id': 1})
| [
"dj_cqrs.controller.consumer.route_signal_to_replica_model"
] | [((260, 321), 'dj_cqrs.controller.consumer.route_signal_to_replica_model', 'route_signal_to_replica_model', (['SignalType.SAVE', '"""invalid"""', '{}'], {}), "(SignalType.SAVE, 'invalid', {})\n", (289, 321), False, 'from dj_cqrs.controller.consumer import route_signal_to_replica_model\n'), ((445, 498), 'dj_cqrs.controller.consumer.route_signal_to_replica_model', 'route_signal_to_replica_model', (['"""invalid"""', '"""basic"""', '{}'], {}), "('invalid', 'basic', {})\n", (474, 498), False, 'from dj_cqrs.controller.consumer import route_signal_to_replica_model\n'), ((700, 763), 'dj_cqrs.controller.consumer.route_signal_to_replica_model', 'route_signal_to_replica_model', (['SignalType.SAVE', '"""basic"""', '{}', '{}'], {}), "(SignalType.SAVE, 'basic', {}, {})\n", (729, 763), False, 'from dj_cqrs.controller.consumer import route_signal_to_replica_model\n'), ((962, 1030), 'dj_cqrs.controller.consumer.route_signal_to_replica_model', 'route_signal_to_replica_model', (['SignalType.DELETE', '"""basic"""', "{'id': 1}"], {}), "(SignalType.DELETE, 'basic', {'id': 1})\n", (991, 1030), False, 'from dj_cqrs.controller.consumer import route_signal_to_replica_model\n')] |
"""Models and utilities for processing SMIRNOFF data."""
import abc
import copy
import functools
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
Dict,
List,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from openff.toolkit.topology import Molecule
from openff.toolkit.typing.engines.smirnoff.parameters import (
AngleHandler,
BondHandler,
ChargeIncrementModelHandler,
ConstraintHandler,
ElectrostaticsHandler,
ImproperTorsionHandler,
LibraryChargeHandler,
ParameterHandler,
ProperTorsionHandler,
ToolkitAM1BCCHandler,
UnassignedProperTorsionParameterException,
UnassignedValenceParameterException,
VirtualSiteHandler,
vdWHandler,
)
from openff.units import unit
from openff.units.openmm import from_openmm
from openmm import unit as omm_unit
from pydantic import Field
from typing_extensions import Literal
from openff.interchange.components.potentials import (
Potential,
PotentialHandler,
WrappedPotential,
)
from openff.interchange.exceptions import (
InvalidParameterHandlerError,
MissingParametersError,
SMIRNOFFParameterAttributeNotImplementedError,
)
from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey
from openff.interchange.types import FloatQuantity
kcal_mol = omm_unit.kilocalorie_per_mole
kcal_mol_angstroms = kcal_mol / omm_unit.angstrom ** 2
kcal_mol_radians = kcal_mol / omm_unit.radian ** 2
if TYPE_CHECKING:
from openff.toolkit.topology import Topology
from openff.interchange.components.mdtraj import _OFFBioTop
ElectrostaticsHandlerType = Union[
ElectrostaticsHandler,
ChargeIncrementModelHandler,
LibraryChargeHandler,
ToolkitAM1BCCHandler,
]
T = TypeVar("T", bound="SMIRNOFFPotentialHandler")
TP = TypeVar("TP", bound="PotentialHandler")
class SMIRNOFFPotentialHandler(PotentialHandler, abc.ABC):
"""Base class for handlers storing potentials produced by SMIRNOFF force fields."""
@classmethod
@abc.abstractmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
raise NotImplementedError()
@classmethod
@abc.abstractmethod
def supported_parameters(cls):
"""Return a list of parameter attributes supported by this handler."""
raise NotImplementedError()
# @classmethod
# @abc.abstractmethod
# def valence_terms(cls, topology):
# """Return an interable of all of one type of valence term in this topology."""
# raise NotImplementedError()
@classmethod
def check_supported_parameters(cls, parameter_handler: ParameterHandler):
"""Verify that a parameter handler is in an allowed list of handlers."""
for parameter in parameter_handler.parameters:
for parameter_attribute in parameter._get_defined_parameter_attributes():
if parameter_attribute not in cls.supported_parameters():
raise SMIRNOFFParameterAttributeNotImplementedError(
parameter_attribute,
)
def store_matches(
self,
parameter_handler: ParameterHandler,
topology: Union["Topology", "_OFFBioTop"],
) -> None:
"""Populate self.slot_map with key-val pairs of [TopologyKey, PotentialKey]."""
parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
if self.slot_map:
# TODO: Should the slot_map always be reset, or should we be able to partially
# update it? Also Note the duplicated code in the child classes
self.slot_map = dict()
matches = parameter_handler.find_matches(topology)
for key, val in matches.items():
topology_key = TopologyKey(atom_indices=key)
potential_key = PotentialKey(
id=val.parameter_type.smirks, associated_handler=parameter_handler_name
)
self.slot_map[topology_key] = potential_key
if self.__class__.__name__ in ["SMIRNOFFBondHandler", "SMIRNOFFAngleHandler"]:
valence_terms = self.valence_terms(topology) # type: ignore[attr-defined]
parameter_handler._check_all_valence_terms_assigned(
assigned_terms=matches,
valence_terms=valence_terms,
exception_cls=UnassignedValenceParameterException,
)
@classmethod
def _from_toolkit(
cls: Type[T],
parameter_handler: TP,
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFPotentialHandler from toolkit data.
"""
if type(parameter_handler) not in cls.allowed_parameter_handlers():
raise InvalidParameterHandlerError(type(parameter_handler))
handler = cls()
if hasattr(handler, "fractional_bond_order_method"):
if getattr(parameter_handler, "fractional_bondorder_method", None):
handler.fractional_bond_order_method = ( # type: ignore[attr-defined]
parameter_handler.fractional_bondorder_method # type: ignore[attr-defined]
)
handler.fractional_bond_order_interpolation = ( # type: ignore[attr-defined]
parameter_handler.fractional_bondorder_interpolation # type: ignore[attr-defined]
)
handler.store_matches(parameter_handler=parameter_handler, topology=topology)
handler.store_potentials(parameter_handler=parameter_handler)
return handler
class SMIRNOFFBondHandler(SMIRNOFFPotentialHandler):
"""Handler storing bond potentials as produced by a SMIRNOFF force field."""
type: Literal["Bonds"] = "Bonds"
expression: Literal["k/2*(r-length)**2"] = "k/2*(r-length)**2"
fractional_bond_order_method: Literal["AM1-Wiberg"] = "AM1-Wiberg"
fractional_bond_order_interpolation: Literal["linear"] = "linear"
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [BondHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attribute names."""
return ["smirks", "id", "k", "length", "k_bondorder", "length_bondorder"]
@classmethod
def valence_terms(cls, topology):
"""Return all bonds in this topology."""
return [list(b.atoms) for b in topology.topology_bonds]
def store_matches(
self,
parameter_handler: ParameterHandler,
topology: Union["Topology", "_OFFBioTop"],
) -> None:
"""
Populate self.slot_map with key-val pairs of slots and unique potential identifiers.
"""
parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
if self.slot_map:
# TODO: Should the slot_map always be reset, or should we be able to partially
# update it? Also Note the duplicated code in the child classes
self.slot_map = dict()
matches = parameter_handler.find_matches(topology)
for key, val in matches.items():
param = val.parameter_type
if param.k_bondorder or param.length_bondorder:
top_bond = topology.get_bond_between(*key)
fractional_bond_order = top_bond.bond.fractional_bond_order
if not fractional_bond_order:
raise RuntimeError(
"Bond orders should already be assigned at this point"
)
else:
fractional_bond_order = None
topology_key = TopologyKey(
atom_indices=key, bond_order=fractional_bond_order
)
potential_key = PotentialKey(
id=val.parameter_type.smirks,
associated_handler=parameter_handler_name,
bond_order=fractional_bond_order,
)
self.slot_map[topology_key] = potential_key
valence_terms = self.valence_terms(topology)
parameter_handler._check_all_valence_terms_assigned(
assigned_terms=matches,
valence_terms=valence_terms,
exception_cls=UnassignedValenceParameterException,
)
def store_potentials(self, parameter_handler: "BondHandler") -> None:
"""
Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
"""
if self.potentials:
self.potentials = dict()
for topology_key, potential_key in self.slot_map.items():
smirks = potential_key.id
parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
if topology_key.bond_order: # type: ignore[union-attr]
bond_order = topology_key.bond_order # type: ignore[union-attr]
if parameter.k_bondorder:
data = parameter.k_bondorder
else:
data = parameter.length_bondorder
coeffs = _get_interpolation_coeffs(
fractional_bond_order=bond_order,
data=data,
)
pots = []
map_keys = [*data.keys()]
for map_key in map_keys:
pots.append(
Potential(
parameters={
"k": parameter.k_bondorder[map_key],
"length": parameter.length_bondorder[map_key],
},
map_key=map_key,
)
)
potential = WrappedPotential(
{pot: coeff for pot, coeff in zip(pots, coeffs)}
)
else:
potential = Potential( # type: ignore[assignment]
parameters={
"k": parameter.k,
"length": parameter.length,
},
)
self.potentials[potential_key] = potential
@classmethod
def _from_toolkit(
cls: Type[T],
parameter_handler: "BondHandler",
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFBondHandler from toolkit data.
"""
# TODO: This method overrides SMIRNOFFPotentialHandler.from_toolkit in order to gobble up
# a ConstraintHandler. This seems like a good solution for the interdependence, but is also
# not a great practice. A better solution would involve not overriding the method with a
# different function signature.
if type(parameter_handler) not in cls.allowed_parameter_handlers():
raise InvalidParameterHandlerError
handler: T = cls(type="Bonds", expression="k/2*(r-length)**2")
if (
any(
getattr(p, "k_bondorder", None) is not None
for p in parameter_handler.parameters
)
) or (
any(
getattr(p, "length_bondorder", None) is not None
for p in parameter_handler.parameters
)
):
for ref_mol in topology.reference_molecules:
# TODO: expose conformer generation and fractional bond order assigment
# knobs to user via API
ref_mol.generate_conformers(n_conformers=1)
ref_mol.assign_fractional_bond_orders(
bond_order_model=handler.fractional_bond_order_method.lower(), # type: ignore[attr-defined]
)
handler.store_matches(parameter_handler=parameter_handler, topology=topology)
handler.store_potentials(parameter_handler=parameter_handler)
return handler
class SMIRNOFFConstraintHandler(SMIRNOFFPotentialHandler):
"""Handler storing constraint potentials as produced by a SMIRNOFF force field."""
type: Literal["Constraints"] = "Constraints"
expression: Literal[""] = ""
constraints: Dict[
PotentialKey, bool
] = dict() # should this be named potentials for consistency?
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [BondHandler, ConstraintHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attribute names."""
return ["smirks", "id", "k", "length", "distance"]
@classmethod
def _from_toolkit( # type: ignore[override]
cls: Type[T],
parameter_handler: List,
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFPotentialHandler from toolkit data.
"""
if isinstance(parameter_handler, list):
parameter_handlers = parameter_handler
else:
parameter_handlers = [parameter_handler]
for parameter_handler in parameter_handlers:
if type(parameter_handler) not in cls.allowed_parameter_handlers():
raise InvalidParameterHandlerError(type(parameter_handler))
handler = cls()
handler.store_constraints( # type: ignore[attr-defined]
parameter_handlers=parameter_handlers, topology=topology
)
return handler
def store_constraints(
self,
parameter_handlers: Any,
topology: "_OFFBioTop",
) -> None:
"""Store constraints."""
if self.slot_map:
self.slot_map = dict()
constraint_handler = [
p for p in parameter_handlers if type(p) == ConstraintHandler
][0]
constraint_matches = constraint_handler.find_matches(topology)
if any([type(p) == BondHandler for p in parameter_handlers]):
bond_handler = [p for p in parameter_handlers if type(p) == BondHandler][0]
bonds = SMIRNOFFBondHandler._from_toolkit(
parameter_handler=bond_handler,
topology=topology,
)
else:
bond_handler = None
bonds = None
for key, match in constraint_matches.items():
topology_key = TopologyKey(atom_indices=key)
smirks = match.parameter_type.smirks
distance = match.parameter_type.distance
if distance is not None:
# This constraint parameter is fully specified
potential_key = PotentialKey(
id=smirks, associated_handler="Constraints"
)
distance = match.parameter_type.distance
else:
# This constraint parameter depends on the BondHandler ...
if bond_handler is None:
raise MissingParametersError(
f"Constraint with SMIRKS pattern {smirks} found with no distance "
"specified, and no corresponding bond parameters were found. The distance "
"of this constraint is not specified."
)
# ... so use the same PotentialKey instance as the BondHandler to look up the distance
potential_key = bonds.slot_map[topology_key] # type: ignore[union-attr]
self.slot_map[topology_key] = potential_key
distance = bonds.potentials[potential_key].parameters["length"] # type: ignore[union-attr]
potential = Potential(
parameters={
"distance": distance,
}
)
self.constraints[potential_key] = potential # type: ignore[assignment]
class SMIRNOFFAngleHandler(SMIRNOFFPotentialHandler):
"""Handler storing angle potentials as produced by a SMIRNOFF force field."""
type: Literal["Angles"] = "Angles"
expression: Literal["k/2*(theta-angle)**2"] = "k/2*(theta-angle)**2"
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [AngleHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attributes."""
return ["smirks", "id", "k", "angle"]
@classmethod
def valence_terms(cls, topology):
"""Return all angles in this topology."""
return list(topology.angles)
def store_potentials(self, parameter_handler: "AngleHandler") -> None:
"""
Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
"""
for potential_key in self.slot_map.values():
smirks = potential_key.id
# ParameterHandler.get_parameter returns a list, although this
# should only ever be length 1
parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
potential = Potential(
parameters={
"k": parameter.k,
"angle": parameter.angle,
},
)
self.potentials[potential_key] = potential
@classmethod
def f_from_toolkit(
cls: Type[T],
parameter_handler: "AngleHandler",
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFAngleHandler from toolkit data.
"""
handler = cls()
handler.store_matches(parameter_handler=parameter_handler, topology=topology)
handler.store_potentials(parameter_handler=parameter_handler)
return handler
class SMIRNOFFProperTorsionHandler(SMIRNOFFPotentialHandler):
    """Handler storing proper torsions potentials as produced by a SMIRNOFF force field."""
    type: Literal["ProperTorsions"] = "ProperTorsions"
    expression: Literal[
        "k*(1+cos(periodicity*theta-phase))"
    ] = "k*(1+cos(periodicity*theta-phase))"
    # Settings for torsions whose force constant is interpolated from the
    # fractional bond order of the central bond (``k_bondorder`` parameters).
    fractional_bond_order_method: Literal["AM1-Wiberg"] = "AM1-Wiberg"
    fractional_bond_order_interpolation: Literal["linear"] = "linear"
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [ProperTorsionHandler]
    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attribute names."""
        return ["smirks", "id", "k", "periodicity", "phase", "idivf", "k_bondorder"]
    def store_matches(
        self,
        parameter_handler: "ProperTorsionHandler",
        topology: "_OFFBioTop",
    ) -> None:
        """
        Populate self.slot_map with key-val pairs of slots and unique potential identifiers.

        Each matched torsion may carry several periodicity terms; one
        TopologyKey/PotentialKey pair is stored per term, distinguished by
        ``mult``.
        """
        if self.slot_map:
            self.slot_map = dict()
        matches = parameter_handler.find_matches(topology)
        for key, val in matches.items():
            param = val.parameter_type
            # One entry per periodicity term; the number of phases defines it.
            n_terms = len(val.parameter_type.phase)
            for n in range(n_terms):
                smirks = param.smirks
                if param.k_bondorder:
                    # The relevant bond order is that of the _central_ bond in the torsion
                    top_bond = topology.get_bond_between(key[1], key[2])
                    fractional_bond_order = top_bond.bond.fractional_bond_order
                    if not fractional_bond_order:
                        raise RuntimeError(
                            "Bond orders should already be assigned at this point"
                        )
                else:
                    fractional_bond_order = None
                topology_key = TopologyKey(
                    atom_indices=key, mult=n, bond_order=fractional_bond_order
                )
                potential_key = PotentialKey(
                    id=smirks,
                    mult=n,
                    associated_handler="ProperTorsions",
                    bond_order=fractional_bond_order,
                )
                self.slot_map[topology_key] = potential_key
        # Raises if any proper torsion in the topology was left unparametrized.
        parameter_handler._check_all_valence_terms_assigned(
            assigned_terms=matches,
            valence_terms=list(topology.propers),
            exception_cls=UnassignedProperTorsionParameterException,
        )
    def store_potentials(self, parameter_handler: "ProperTorsionHandler") -> None:
        """
        Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].

        For bond-order-interpolated parameters, a WrappedPotential combining the
        bracketing tabulated points (weighted by interpolation coefficients) is
        stored; otherwise a plain Potential is stored.
        """
        for topology_key, potential_key in self.slot_map.items():
            smirks = potential_key.id
            n = potential_key.mult
            parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
            # n_terms = len(parameter.k)
            if topology_key.bond_order:  # type: ignore[union-attr]
                bond_order = topology_key.bond_order  # type: ignore[union-attr]
                data = parameter.k_bondorder[n]
                # Linear weights for the two tabulated bond orders bracketing
                # this torsion's fractional bond order.
                coeffs = _get_interpolation_coeffs(
                    fractional_bond_order=bond_order,
                    data=data,
                )
                pots = []
                map_keys = [*data.keys()]
                for map_key in map_keys:
                    parameters = {
                        "k": parameter.k_bondorder[n][map_key],
                        "periodicity": parameter.periodicity[n] * unit.dimensionless,
                        "phase": parameter.phase[n],
                        "idivf": parameter.idivf[n] * unit.dimensionless,
                    }
                    pots.append(
                        Potential(
                            parameters=parameters,
                            map_key=map_key,
                        )
                    )
                potential = WrappedPotential(
                    {pot: coeff for pot, coeff in zip(pots, coeffs)}
                )
            else:
                parameters = {
                    "k": parameter.k[n],
                    "periodicity": parameter.periodicity[n] * unit.dimensionless,
                    "phase": parameter.phase[n],
                    "idivf": parameter.idivf[n] * unit.dimensionless,
                }
                potential = Potential(parameters=parameters)  # type: ignore[assignment]
            self.potentials[potential_key] = potential
class SMIRNOFFImproperTorsionHandler(SMIRNOFFPotentialHandler):
    """Handler storing improper torsions potentials as produced by a SMIRNOFF force field."""
    type: Literal["ImproperTorsions"] = "ImproperTorsions"
    expression: Literal[
        "k*(1+cos(periodicity*theta-phase))"
    ] = "k*(1+cos(periodicity*theta-phase))"
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [ImproperTorsionHandler]
    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attribute names."""
        return ["smirks", "id", "k", "periodicity", "phase", "idivf"]
    def store_matches(
        self, parameter_handler: "ImproperTorsionHandler", topology: "_OFFBioTop"
    ) -> None:
        """
        Populate self.slot_map with key-val pairs of slots and unique potential identifiers.

        Each match is expanded into the three cyclic ("trefoil") permutations of
        the non-central atoms, with the central atom (key[1]) kept first.
        """
        if self.slot_map:
            self.slot_map = dict()
        matches = parameter_handler.find_matches(topology)
        for key, val in matches.items():
            # Sanity-check that the match is central-atom-second, as produced
            # by the toolkit's improper matching.
            parameter_handler._assert_correct_connectivity(
                val,
                [
                    (0, 1),
                    (1, 2),
                    (1, 3),
                ],
            )
            n_terms = len(val.parameter_type.k)
            for n in range(n_terms):
                smirks = val.parameter_type.smirks
                non_central_indices = [key[0], key[2], key[3]]
                # Cyclic permutations only — the SMIRNOFF convention applies
                # the improper over the three trefoil orderings.
                for permuted_key in [
                    (
                        non_central_indices[i],
                        non_central_indices[j],
                        non_central_indices[k],
                    )
                    for (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]
                ]:
                    topology_key = TopologyKey(
                        atom_indices=(key[1], *permuted_key), mult=n
                    )
                    potential_key = PotentialKey(
                        id=smirks, mult=n, associated_handler="ImproperTorsions"
                    )
                    self.slot_map[topology_key] = potential_key
    def store_potentials(self, parameter_handler: "ImproperTorsionHandler") -> None:
        """
        Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
        """
        for potential_key in self.slot_map.values():
            smirks = potential_key.id
            n = potential_key.mult
            parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
            parameters = {
                "k": parameter.k[n],
                "periodicity": parameter.periodicity[n] * unit.dimensionless,
                "phase": parameter.phase[n],
                # idivf is fixed at 3 so that k is split evenly over the three
                # trefoil permutations stored in store_matches.
                "idivf": 3.0 * unit.dimensionless,
            }
            potential = Potential(parameters=parameters)
            self.potentials[potential_key] = potential
class _SMIRNOFFNonbondedHandler(SMIRNOFFPotentialHandler, abc.ABC):
    """Base class for handlers storing non-bonded potentials produced by SMIRNOFF force fields."""
    type: Literal["nonbonded"] = "nonbonded"
    # Shared settings for all non-bonded handlers (vdW, electrostatics).
    cutoff: FloatQuantity["angstrom"] = Field(  # type: ignore
        9.0 * unit.angstrom,
        description="The distance at which pairwise interactions are truncated",
    )
    # Intramolecular scale factors: pairwise interactions between atoms
    # separated by 2 (1-3), 3 (1-4), or 4 (1-5) bonds are scaled by these.
    # 1-2 interactions are implicitly excluded entirely.
    scale_13: float = Field(
        0.0, description="The scaling factor applied to 1-3 interactions"
    )
    scale_14: float = Field(
        0.5, description="The scaling factor applied to 1-4 interactions"
    )
    scale_15: float = Field(
        1.0, description="The scaling factor applied to 1-5 interactions"
    )
class SMIRNOFFvdWHandler(_SMIRNOFFNonbondedHandler):
    """Handler storing vdW potentials as produced by a SMIRNOFF force field."""
    type: Literal["vdW"] = "vdW"  # type: ignore[assignment]
    expression: Literal[
        "4*epsilon*((sigma/r)**12-(sigma/r)**6)"
    ] = "4*epsilon*((sigma/r)**12-(sigma/r)**6)"
    method: Literal["cutoff", "pme", "no-cutoff"] = Field("cutoff")
    mixing_rule: Literal["lorentz-berthelot", "geometric"] = Field(
        "lorentz-berthelot",
        description="The mixing rule (combination rule) used in computing pairwise vdW interactions",
    )
    switch_width: FloatQuantity["angstrom"] = Field(  # type: ignore
        1.0 * unit.angstrom,
        description="The width over which the switching function is applied",
    )
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [vdWHandler]
    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attributes."""
        return ["smirks", "id", "sigma", "epsilon", "rmin_half"]
    def store_potentials(self, parameter_handler: vdWHandler) -> None:
        """
        Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].

        Parameters that specify ``rmin_half`` instead of ``sigma`` are
        converted on the fly via sigma = 2 * rmin_half / 2**(1/6).
        """
        self.method = parameter_handler.method.lower()
        self.cutoff = parameter_handler.cutoff
        for potential_key in self.slot_map.values():
            smirks = potential_key.id
            parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
            try:
                potential = Potential(
                    parameters={
                        "sigma": parameter.sigma,
                        "epsilon": parameter.epsilon,
                    },
                )
            except AttributeError:
                # The parameter defines rmin_half rather than sigma; convert.
                # rmin = 2 * rmin_half is the minimum of the LJ potential, and
                # rmin = 2**(1/6) * sigma, so sigma = 2 * rmin_half / 2**(1/6).
                # (Pending upstream handling in
                # https://github.com/openforcefield/openff-toolkit/pull/750)
                sigma = 2.0 * parameter.rmin_half / (2.0 ** (1.0 / 6.0))
                potential = Potential(
                    parameters={
                        "sigma": sigma,
                        "epsilon": parameter.epsilon,
                    },
                )
            self.potentials[potential_key] = potential
    @classmethod
    def _from_toolkit(
        cls: Type[T],
        parameter_handler: "vdWHandler",
        topology: "Topology",
    ) -> T:
        """
        Create a SMIRNOFFvdWHandler from toolkit data.

        Accepts either a single vdWHandler or a list containing one; raises
        InvalidParameterHandlerError for unsupported handler types.
        """
        if isinstance(parameter_handler, list):
            parameter_handlers = parameter_handler
        else:
            parameter_handlers = [parameter_handler]
        for parameter_handler in parameter_handlers:
            if type(parameter_handler) not in cls.allowed_parameter_handlers():
                raise InvalidParameterHandlerError(
                    f"Found parameter handler type {type(parameter_handler)}, which is not "
                    f"supported by potential type {type(cls)}"
                )
        handler = cls(
            scale_13=parameter_handler.scale13,
            scale_14=parameter_handler.scale14,
            scale_15=parameter_handler.scale15,
            cutoff=parameter_handler.cutoff,
            mixing_rule=parameter_handler.combining_rules.lower(),
            method=parameter_handler.method.lower(),
            switch_width=parameter_handler.switch_width,
        )
        handler.store_matches(parameter_handler=parameter_handler, topology=topology)
        handler.store_potentials(parameter_handler=parameter_handler)
        return handler
    @classmethod
    def parameter_handler_precedence(cls) -> List[str]:
        """
        Return the order in which parameter handlers take precedence when computing charges.
        """
        return ["vdw", "VirtualSites"]
    def _from_toolkit_virtual_sites(
        self,
        parameter_handler: "VirtualSiteHandler",
        topology: "Topology",
    ):
        """Store vdW parameters (sigma/epsilon) for matched virtual sites."""
        # TODO: Merge this logic into _from_toolkit
        if not all(
            isinstance(
                p,
                (
                    VirtualSiteHandler.VirtualSiteBondChargeType,
                    VirtualSiteHandler.VirtualSiteMonovalentLonePairType,
                    VirtualSiteHandler.VirtualSiteDivalentLonePairType,
                    VirtualSiteHandler.VirtualSiteTrivalentLonePairType,
                ),
            )
            for p in parameter_handler.parameters
        ):
            raise NotImplementedError("Found unsupported virtual site types")
        matches = parameter_handler.find_matches(topology)
        for atoms, parameter_match in matches.items():
            virtual_site_type = parameter_match[0].parameter_type
            top_key = VirtualSiteKey(
                atom_indices=atoms,
                type=virtual_site_type.type,
                match=virtual_site_type.match,
            )
            pot_key = PotentialKey(
                id=virtual_site_type.smirks, associated_handler=virtual_site_type.type
            )
            pot = Potential(
                parameters={
                    "sigma": virtual_site_type.sigma,
                    "epsilon": virtual_site_type.epsilon,
                    # "distance": virtual_site_type.distance,
                }
            )
            # if virtual_site_type.type in {"MonovalentLonePair", "DivalentLonePair"}:
            #     pot.parameters.update(
            #         {
            #             "outOfPlaneAngle": virtual_site_type.outOfPlaneAngle,
            #         }
            #     )
            # if virtual_site_type.type in {"MonovalentLonePair"}:
            #     pot.parameters.update(
            #         {
            #             "inPlaneAngle": virtual_site_type.inPlaneAngle,
            #         }
            #     )
            self.slot_map.update({top_key: pot_key})
            self.potentials.update({pot_key: pot})
class SMIRNOFFElectrostaticsHandler(_SMIRNOFFNonbondedHandler):
    """
    A handler which stores any electrostatic parameters applied to a topology.
    This handler is responsible for grouping together
    * global settings for the electrostatic interactions such as the cutoff distance
    and the intramolecular scale factors.
    * partial charges which have been assigned by a ``ToolkitAM1BCC``,
    ``LibraryCharges``, or a ``ChargeIncrementModel`` parameter
    handler.
    * charge corrections applied by a ``SMIRNOFFChargeIncrementHandler``.
    rather than having each in their own handler.
    """
    type: Literal["Electrostatics"] = "Electrostatics"  # type: ignore[assignment]
    expression: Literal["coul"] = "coul"
    method: Literal["pme", "cutoff", "reaction-field", "no-cutoff"] = Field("pme")
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [
            LibraryChargeHandler,
            ChargeIncrementModelHandler,
            ToolkitAM1BCCHandler,
            ElectrostaticsHandler,
        ]
    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attribute names."""
        pass
    @property
    def charges(self) -> Dict[Union[TopologyKey, VirtualSiteKey], unit.Quantity]:
        """Get the total partial charge on each atom, excluding virtual sites."""
        return self.get_charges(include_virtual_sites=False)
    @property
    def charges_with_virtual_sites(
        self,
    ) -> Dict[Union[VirtualSiteKey, TopologyKey], unit.Quantity]:
        """Get the total partial charge on each atom, including virtual sites."""
        return self.get_charges(include_virtual_sites=True)
    def get_charges(
        self, include_virtual_sites=False
    ) -> Dict[Union[VirtualSiteKey, TopologyKey], unit.Quantity]:
        """Get the total partial charge on each atom or particle."""
        # Accumulate contributions per particle; a particle may receive both a
        # base charge and one or more charge increments (distinct mults).
        charges: DefaultDict[
            Union[TopologyKey, VirtualSiteKey], FloatQuantity
        ] = defaultdict(lambda: 0.0 * unit.e)
        for topology_key, potential_key in self.slot_map.items():
            potential = self.potentials[potential_key]
            for parameter_key, parameter_value in potential.parameters.items():
                if parameter_key == "charge_increments":
                    if type(topology_key) != VirtualSiteKey:
                        raise RuntimeError
                    # A virtual site's charge is minus the sum of the
                    # increments it pulls from its parent atoms.
                    charge = -1.0 * np.sum(parameter_value)
                    # assumes virtual sites can only have charges determined in one step
                    # also, topology_key is actually a VirtualSiteKey
                    charges[topology_key] = charge
                elif parameter_key in ["charge", "charge_increment"]:
                    charge = parameter_value
                    charges[topology_key.atom_indices[0]] += charge  # type: ignore
                else:
                    raise NotImplementedError()
        returned_charges: Dict[
            Union[VirtualSiteKey, TopologyKey], unit.Quantity
        ] = dict()
        for index, charge in charges.items():
            if isinstance(index, int):
                returned_charges[TopologyKey(atom_indices=(index,))] = charge
            else:
                if include_virtual_sites:
                    returned_charges[index] = charge
        return returned_charges
    @classmethod
    def parameter_handler_precedence(cls) -> List[str]:
        """
        Return the order in which parameter handlers take precedence when computing charges.
        """
        return ["LibraryCharges", "ChargeIncrementModel", "ToolkitAM1BCC"]
    @classmethod
    def _from_toolkit(
        cls: Type[T],
        parameter_handler: Any,
        topology: "Topology",
    ) -> T:
        """
        Create a SMIRNOFFElectrostaticsHandler from toolkit data.
        """
        if isinstance(parameter_handler, list):
            parameter_handlers = parameter_handler
        else:
            parameter_handlers = [parameter_handler]
        # The ElectrostaticsHandler instance carries the global settings
        # (cutoff, scale factors); the other handlers only supply charges.
        toolkit_handler_with_metadata = [
            p for p in parameter_handlers if type(p) == ElectrostaticsHandler
        ][0]
        handler = cls(
            type=toolkit_handler_with_metadata._TAGNAME,
            scale_13=toolkit_handler_with_metadata.scale13,
            scale_14=toolkit_handler_with_metadata.scale14,
            scale_15=toolkit_handler_with_metadata.scale15,
            cutoff=toolkit_handler_with_metadata.cutoff,
            method=toolkit_handler_with_metadata.method.lower(),
        )
        handler.store_matches(parameter_handlers, topology)
        return handler
    def _from_toolkit_virtual_sites(
        self,
        parameter_handler: "VirtualSiteHandler",
        topology: "Topology",
    ):
        """Store charges and charge increments for matched virtual sites."""
        # TODO: Merge this logic into _from_toolkit
        if not all(
            isinstance(
                p,
                (
                    VirtualSiteHandler.VirtualSiteBondChargeType,
                    VirtualSiteHandler.VirtualSiteMonovalentLonePairType,
                    VirtualSiteHandler.VirtualSiteDivalentLonePairType,
                    VirtualSiteHandler.VirtualSiteTrivalentLonePairType,
                ),
            )
            for p in parameter_handler.parameters
        ):
            raise NotImplementedError("Found unsupported virtual site types")
        matches = parameter_handler.find_matches(topology)
        for atom_indices, parameter_match in matches.items():
            virtual_site_type = parameter_match[0].parameter_type
            virtual_site_key = VirtualSiteKey(
                atom_indices=atom_indices,
                type=virtual_site_type.type,
                match=virtual_site_type.match,
            )
            virtual_site_potential_key = PotentialKey(
                id=virtual_site_type.smirks,
                associated_handler="VirtualSiteHandler",
            )
            virtual_site_potential = Potential(
                parameters={
                    "charge_increments": from_openmm(
                        virtual_site_type.charge_increment
                    ),
                }
            )
            matches = {}
            potentials = {}
            self.slot_map.update({virtual_site_key: virtual_site_potential_key})
            self.potentials.update({virtual_site_potential_key: virtual_site_potential})
            # TODO: Counter-intuitive that toolkit regression tests pass by using the counter
            # variable i as if it was the atom index - shouldn't it just use atom_index?
            for i, atom_index in enumerate(atom_indices):  # noqa
                topology_key = TopologyKey(atom_indices=(i,), mult=2)
                potential_key = PotentialKey(
                    id=virtual_site_type.smirks,
                    mult=i,
                    associated_handler="VirtualSiteHandler",
                )
                charge_increment = getattr(
                    virtual_site_type, f"charge_increment{i + 1}"
                )
                potential = Potential(
                    parameters={"charge_increment": from_openmm(charge_increment)}
                )
                matches[topology_key] = potential_key
                potentials[potential_key] = potential
            self.slot_map.update(matches)
            self.potentials.update(potentials)
    @classmethod
    @functools.lru_cache(None)
    def _compute_partial_charges(cls, molecule: Molecule, method: str) -> unit.Quantity:
        """Call out to the toolkit's toolkit wrappers to generate partial charges."""
        # Deep-copy so charge assignment never mutates the caller's molecule.
        molecule = copy.deepcopy(molecule)
        molecule.assign_partial_charges(method)
        return from_openmm(molecule.partial_charges)
    @classmethod
    def _library_charge_to_potentials(
        cls,
        atom_indices: Tuple[int, ...],
        parameter: LibraryChargeHandler.LibraryChargeType,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """
        Map a matched library charge parameter to a set of potentials.
        """
        matches = {}
        potentials = {}
        for i, (atom_index, charge) in enumerate(zip(atom_indices, parameter.charge)):
            topology_key = TopologyKey(atom_indices=(atom_index,))
            potential_key = PotentialKey(
                id=parameter.smirks, mult=i, associated_handler="LibraryCharges"
            )
            potential = Potential(parameters={"charge": from_openmm(charge)})
            matches[topology_key] = potential_key
            potentials[potential_key] = potential
        return matches, potentials
    @classmethod
    def _charge_increment_to_potentials(
        cls,
        atom_indices: Tuple[int, ...],
        parameter: ChargeIncrementModelHandler.ChargeIncrementType,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """
        Map a matched charge increment parameter to a set of potentials.
        """
        matches = {}
        potentials = {}
        for i, atom_index in enumerate(atom_indices):
            topology_key = TopologyKey(atom_indices=(atom_index,))
            potential_key = PotentialKey(
                id=parameter.smirks, mult=i, associated_handler="ChargeIncrementModel"
            )
            # TODO: Handle the cases where n - 1 charge increments have been defined,
            # maybe by implementing this in the TK?
            charge_increment = getattr(parameter, f"charge_increment{i + 1}")
            potential = Potential(
                parameters={"charge_increment": from_openmm(charge_increment)}
            )
            matches[topology_key] = potential_key
            potentials[potential_key] = potential
        return matches, potentials
    @classmethod
    def _find_slot_matches(
        cls,
        parameter_handler: Union["LibraryChargeHandler", "ChargeIncrementModelHandler"],
        reference_molecule: Molecule,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """
        Construct a slot and potential map for a slot based parameter handler.
        """
        # Ideally this would be made redundant by OpenFF TK #971
        # Deduplicate symmetric matches: keep one match per sorted atom tuple.
        unique_parameter_matches = {
            tuple(sorted(key)): (key, val)
            for key, val in parameter_handler.find_matches(
                reference_molecule.to_topology()
            ).items()
        }
        parameter_matches = {key: val for key, val in unique_parameter_matches.values()}
        matches, potentials = {}, {}
        for key, val in parameter_matches.items():
            parameter = val.parameter_type
            if isinstance(parameter_handler, LibraryChargeHandler):
                (
                    parameter_matches,
                    parameter_potentials,
                ) = cls._library_charge_to_potentials(key, parameter)
            elif isinstance(parameter_handler, ChargeIncrementModelHandler):
                (
                    parameter_matches,
                    parameter_potentials,
                ) = cls._charge_increment_to_potentials(key, parameter)
            else:
                raise NotImplementedError()
            matches.update(parameter_matches)
            potentials.update(parameter_potentials)
        return matches, potentials
    @classmethod
    def _find_am1_matches(
        cls,
        parameter_handler: Union["ToolkitAM1BCCHandler", ChargeIncrementModelHandler],
        reference_molecule: Molecule,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """Construct a slot and potential map for a charge model based parameter handler."""
        reference_molecule = copy.deepcopy(reference_molecule)
        # The mapped SMILES serves as a stable identifier for this molecule's
        # computed charges in the PotentialKey ids.
        reference_smiles = reference_molecule.to_smiles(
            isomeric=True, explicit_hydrogens=True, mapped=True
        )
        method = getattr(parameter_handler, "partial_charge_method", "am1bcc")
        partial_charges = cls._compute_partial_charges(
            reference_molecule, method=method
        )
        matches = {}
        potentials = {}
        for i, partial_charge in enumerate(partial_charges):
            potential_key = PotentialKey(
                id=reference_smiles, mult=i, associated_handler="ToolkitAM1BCC"
            )
            potentials[potential_key] = Potential(parameters={"charge": partial_charge})
            matches[TopologyKey(atom_indices=(i,))] = potential_key
        return matches, potentials
    @classmethod
    def _find_reference_matches(
        cls,
        parameter_handlers: Dict[str, "ElectrostaticsHandlerType"],
        reference_molecule: Molecule,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """
        Construct a slot and potential map for a particular reference molecule and set of parameter handlers.

        Handlers are tried in precedence order; the first one that assigns
        charges to every atom of the molecule wins.
        """
        matches = {}
        potentials = {}
        expected_matches = {i for i in range(reference_molecule.n_atoms)}
        for handler_type in cls.parameter_handler_precedence():
            if handler_type not in parameter_handlers:
                continue
            parameter_handler = parameter_handlers[handler_type]
            slot_matches, am1_matches = None, None
            slot_potentials: Dict = {}
            am1_potentials: Dict = {}
            if handler_type in ["LibraryCharges", "ChargeIncrementModel"]:
                slot_matches, slot_potentials = cls._find_slot_matches(
                    parameter_handler, reference_molecule
                )
            if handler_type in ["ToolkitAM1BCC", "ChargeIncrementModel"]:
                am1_matches, am1_potentials = cls._find_am1_matches(
                    parameter_handler, reference_molecule
                )
            if slot_matches is None and am1_matches is None:
                raise NotImplementedError()
            elif slot_matches is not None and am1_matches is not None:
                # Both sources apply (ChargeIncrementModel): disambiguate the
                # two contributions per atom with mult=0 (base AM1 charge) and
                # mult=1 (slot-based increment).
                am1_matches = {
                    TopologyKey(
                        atom_indices=topology_key.atom_indices, mult=0
                    ): potential_key
                    for topology_key, potential_key in am1_matches.items()
                }
                slot_matches = {
                    TopologyKey(
                        atom_indices=topology_key.atom_indices, mult=1
                    ): potential_key
                    for topology_key, potential_key in slot_matches.items()
                }
                matched_atom_indices = {
                    index for key in slot_matches for index in key.atom_indices
                }
                matched_atom_indices.intersection_update(
                    {index for key in am1_matches for index in key.atom_indices}
                )
            elif slot_matches is not None:
                matched_atom_indices = {
                    index for key in slot_matches for index in key.atom_indices
                }
            else:
                matched_atom_indices = {
                    index for key in am1_matches for index in key.atom_indices  # type: ignore[union-attr]
                }
            if matched_atom_indices != expected_matches:
                # Handle the case where a handler could not fully assign the charges
                # to the whole molecule.
                continue
            matches.update(slot_matches if slot_matches is not None else {})
            matches.update(am1_matches if am1_matches is not None else {})
            potentials.update(slot_potentials)
            potentials.update(am1_potentials)
            break
        found_matches = {index for key in matches for index in key.atom_indices}
        if found_matches != expected_matches:
            raise RuntimeError(
                f"{reference_molecule.to_smiles(explicit_hydrogens=False)} could "
                f"not be fully assigned charges."
            )
        return matches, potentials
    def store_matches(
        self,
        parameter_handler: Union[
            "ElectrostaticsHandlerType", List["ElectrostaticsHandlerType"]
        ],
        topology: Union["Topology", "_OFFBioTop"],
    ) -> None:
        """
        Populate self.slot_map with key-val pairs of slots and unique potential identifiers.

        Charges are computed once per unique reference molecule, then copied
        onto every topology copy of that molecule.
        """
        # Reshape the parameter handlers into a dictionary for easier referencing.
        parameter_handlers = {
            handler._TAGNAME: handler
            for handler in (
                parameter_handler
                if isinstance(parameter_handler, list)
                else [parameter_handler]
            )
        }
        self.potentials = dict()
        self.slot_map = dict()
        reference_molecules = [*topology.reference_molecules]
        for reference_molecule in reference_molecules:
            matches, potentials = self._find_reference_matches(
                parameter_handlers, reference_molecule
            )
            # Record which mults exist per reference-atom slot so the same
            # set can be replicated for each topology copy below.
            match_mults = defaultdict(set)
            for top_key in matches:
                match_mults[top_key.atom_indices].add(top_key.mult)
            self.potentials.update(potentials)
            for top_mol in topology._reference_molecule_to_topology_molecules[
                reference_molecule
            ]:
                for topology_particle in top_mol.atoms:
                    reference_index = topology_particle.atom.molecule_particle_index
                    topology_index = topology_particle.topology_particle_index
                    for mult in match_mults[(reference_index,)]:
                        top_key = TopologyKey(atom_indices=(topology_index,), mult=mult)
                        self.slot_map[top_key] = matches[
                            TopologyKey(atom_indices=(reference_index,), mult=mult)
                        ]
    def store_potentials(
        self,
        parameter_handler: Union[
            "ElectrostaticsHandlerType", List["ElectrostaticsHandlerType"]
        ],
    ) -> None:
        """
        Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
        """
        # This logic is handled by ``store_matches`` as we may need to create potentials
        # to store depending on the handler type.
        pass
class SMIRNOFFVirtualSiteHandler(SMIRNOFFPotentialHandler):
    """
    A handler which stores the information necessary to construct virtual sites (virtual particles).
    """
    # NOTE(review): type/expression are placeholders here — this handler does
    # not itself evaluate an energy expression; confirm against the base class.
    type: Literal["Bonds"] = "Bonds"
    expression: Literal[""] = ""
    virtual_site_key_topology_index_map: Dict["VirtualSiteKey", int] = Field(
        dict(),
        description="A mapping between VirtualSiteKey objects (stored analogously to TopologyKey objects"
        "in other handlers) and topology indices describing the associated virtual site",
    )
    exclusion_policy: Literal["parents"] = "parents"
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [VirtualSiteHandler]
    @classmethod
    def supported_parameters(cls):
        """Return a list of parameter attributes supported by this handler."""
        return ["distance", "outOfPlaneAngle", "inPlaneAngle"]
    def store_matches(
        self,
        parameter_handler: ParameterHandler,
        topology: Union["Topology", "_OFFBioTop"],
    ) -> None:
        """
        Populate self.slot_map with key-val pairs of [TopologyKey, PotentialKey].
        Differs from SMIRNOFFPotentialHandler.store_matches because each key
        can point to multiple potentials (?); each value in the dict is a
        list of parametertypes, whereas conventional handlers don't have lists
        """
        # Virtual particles are appended after all real atoms, so their
        # topology indices start at n_topology_atoms.
        virtual_site_index = topology.n_topology_atoms
        parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
        if self.slot_map:
            self.slot_map = dict()
        matches = parameter_handler.find_matches(topology)
        for key, val_list in matches.items():
            for val in val_list:
                virtual_site_key = VirtualSiteKey(
                    atom_indices=key,
                    type=val.parameter_type.type,
                    match=val.parameter_type.match,
                )
                potential_key = PotentialKey(
                    id=val.parameter_type.smirks,
                    associated_handler=parameter_handler_name,
                )
                self.slot_map[virtual_site_key] = potential_key
                self.virtual_site_key_topology_index_map[
                    virtual_site_key
                ] = virtual_site_index
                virtual_site_index += 1
    def store_potentials(self, parameter_handler: ParameterHandler) -> None:
        """Store VirtualSite-specific parameter-like data."""
        if self.potentials:
            self.potentials = dict()
        for potential_key in self.slot_map.values():
            smirks = potential_key.id
            parameter_type = parameter_handler.get_parameter({"smirks": smirks})[0]
            potential = Potential(
                parameters={
                    "distance": parameter_type.distance,
                },
            )
            # Angles are optional; only some virtual site types define them.
            for attr in ["outOfPlaneAngle", "inPlaneAngle"]:
                if hasattr(parameter_type, attr):
                    potential.parameters.update(
                        {attr: from_openmm(getattr(parameter_type, attr))}
                    )
            self.potentials[potential_key] = potential
    def _get_local_frame_weights(self, virtual_site_key: "VirtualSiteKey"):
        # Weights defining the virtual site's local coordinate frame in terms
        # of its parent atoms: origin, x-axis, and y-axis contributions.
        # NOTE(review): an unrecognized virtual site type falls through every
        # branch and raises UnboundLocalError on return — confirm callers only
        # pass the four types handled here.
        if virtual_site_key.type == "BondCharge":
            origin_weight = [1.0, 0.0]
            x_direction = [-1.0, 1.0]
            y_direction = [-1.0, 1.0]
        elif virtual_site_key.type == "MonovalentLonePair":
            origin_weight = [1, 0.0, 0.0]
            x_direction = [-1.0, 1.0, 0.0]
            y_direction = [-1.0, 0.0, 1.0]
        elif virtual_site_key.type == "DivalentLonePair":
            origin_weight = [0.0, 1.0, 0.0]
            x_direction = [0.5, -1.0, 0.5]
            y_direction = [1.0, -1.0, 1.0]
        elif virtual_site_key.type == "TrivalentLonePair":
            origin_weight = [0.0, 1.0, 0.0, 0.0]
            x_direction = [1 / 3, -1.0, 1 / 3, 1 / 3]
            y_direction = [1.0, -1.0, 0.0, 0.0]
        return origin_weight, x_direction, y_direction
    def _get_local_frame_position(self, virtual_site_key: "VirtualSiteKey"):
        # Position of the virtual site expressed in its local frame, derived
        # from the stored distance (and angles, where the type defines them).
        potential_key = self.slot_map[virtual_site_key]
        potential = self.potentials[potential_key]
        if virtual_site_key.type == "BondCharge":
            distance = potential.parameters["distance"]
            local_frame_position = np.asarray([-1.0, 0.0, 0.0]) * distance
        elif virtual_site_key.type == "MonovalentLonePair":
            distance = potential.parameters["distance"]
            theta = potential.parameters["inPlaneAngle"].m_as(unit.radian)  # type: ignore
            psi = potential.parameters["outOfPlaneAngle"].m_as(unit.radian)  # type: ignore
            # Spherical-coordinate direction from the in-plane/out-of-plane angles.
            factor = np.array(
                [np.cos(theta) * np.cos(psi), np.sin(theta) * np.cos(psi), np.sin(psi)]
            )
            local_frame_position = factor * distance
        elif virtual_site_key.type == "DivalentLonePair":
            distance = potential.parameters["distance"]
            theta = potential.parameters["inPlaneAngle"].m_as(unit.radian)  # type: ignore
            factor = np.asarray([-1.0 * np.cos(theta), 0.0, np.sin(theta)])
            local_frame_position = factor * distance
        elif virtual_site_key.type == "TrivalentLonePair":
            distance = potential.parameters["distance"]
            local_frame_position = np.asarray([-1.0, 0.0, 0.0]) * distance
        return local_frame_position
def library_charge_from_molecule(
    molecule: "Molecule",
) -> LibraryChargeHandler.LibraryChargeType:
    """Given an OpenFF Molecule with charges, generate a corresponding LibraryChargeType.

    Raises
    ------
    ValueError
        If the molecule carries no partial charges.
    """
    partial_charges = molecule.partial_charges
    if partial_charges is None:
        raise ValueError("Input molecule is missing partial charges.")
    # A mapped SMILES pins the SMIRKS pattern to this molecule's atom ordering,
    # so the per-atom charges line up with the pattern's indices.
    return LibraryChargeHandler.LibraryChargeType(
        smirks=molecule.to_smiles(mapped=True),
        charge=partial_charges,
    )
def _get_interpolation_coeffs(fractional_bond_order, data):
x1, x2 = data.keys()
coeff1 = (x2 - fractional_bond_order) / (x2 - x1)
coeff2 = (fractional_bond_order - x1) / (x2 - x1)
return coeff1, coeff2
# Registry of the SMIRNOFF potential handler classes defined in this module.
# NOTE(review): SMIRNOFFVirtualSiteHandler is absent — confirm whether that is
# intentional (virtual sites handled separately) or an omission.
SMIRNOFF_POTENTIAL_HANDLERS = [
    SMIRNOFFBondHandler,
    SMIRNOFFConstraintHandler,
    SMIRNOFFAngleHandler,
    SMIRNOFFProperTorsionHandler,
    SMIRNOFFImproperTorsionHandler,
    SMIRNOFFvdWHandler,
    SMIRNOFFElectrostaticsHandler,
]
| [
"openff.units.openmm.from_openmm",
"pydantic.Field",
"openff.interchange.exceptions.SMIRNOFFParameterAttributeNotImplementedError",
"numpy.asarray",
"openff.interchange.models.TopologyKey",
"openff.toolkit.typing.engines.smirnoff.parameters.LibraryChargeHandler.LibraryChargeType",
"numpy.sum",
"openff... | [((1814, 1860), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': '"""SMIRNOFFPotentialHandler"""'}), "('T', bound='SMIRNOFFPotentialHandler')\n", (1821, 1860), False, 'from typing import TYPE_CHECKING, Any, DefaultDict, Dict, List, Tuple, Type, TypeVar, Union\n'), ((1866, 1905), 'typing.TypeVar', 'TypeVar', (['"""TP"""'], {'bound': '"""PotentialHandler"""'}), "('TP', bound='PotentialHandler')\n", (1873, 1905), False, 'from typing import TYPE_CHECKING, Any, DefaultDict, Dict, List, Tuple, Type, TypeVar, Union\n'), ((25522, 25626), 'pydantic.Field', 'Field', (['(9.0 * unit.angstrom)'], {'description': '"""The distance at which pairwise interactions are truncated"""'}), "(9.0 * unit.angstrom, description=\n 'The distance at which pairwise interactions are truncated')\n", (25527, 25626), False, 'from pydantic import Field\n'), ((25684, 25756), 'pydantic.Field', 'Field', (['(0.0)'], {'description': '"""The scaling factor applied to 1-3 interactions"""'}), "(0.0, description='The scaling factor applied to 1-3 interactions')\n", (25689, 25756), False, 'from pydantic import Field\n'), ((25793, 25865), 'pydantic.Field', 'Field', (['(0.5)'], {'description': '"""The scaling factor applied to 1-4 interactions"""'}), "(0.5, description='The scaling factor applied to 1-4 interactions')\n", (25798, 25865), False, 'from pydantic import Field\n'), ((25902, 25974), 'pydantic.Field', 'Field', (['(1.0)'], {'description': '"""The scaling factor applied to 1-5 interactions"""'}), "(1.0, description='The scaling factor applied to 1-5 interactions')\n", (25907, 25974), False, 'from pydantic import Field\n'), ((26363, 26378), 'pydantic.Field', 'Field', (['"""cutoff"""'], {}), "('cutoff')\n", (26368, 26378), False, 'from pydantic import Field\n'), ((26441, 26571), 'pydantic.Field', 'Field', (['"""lorentz-berthelot"""'], {'description': '"""The mixing rule (combination rule) used in computing pairwise vdW interactions"""'}), "('lorentz-berthelot', description=\n 'The mixing 
rule (combination rule) used in computing pairwise vdW interactions'\n )\n", (26446, 26571), False, 'from pydantic import Field\n'), ((26632, 26733), 'pydantic.Field', 'Field', (['(1.0 * unit.angstrom)'], {'description': '"""The width over which the switching function is applied"""'}), "(1.0 * unit.angstrom, description=\n 'The width over which the switching function is applied')\n", (26637, 26733), False, 'from pydantic import Field\n'), ((32709, 32721), 'pydantic.Field', 'Field', (['"""pme"""'], {}), "('pme')\n", (32714, 32721), False, 'from pydantic import Field\n'), ((39374, 39399), 'functools.lru_cache', 'functools.lru_cache', (['None'], {}), '(None)\n', (39393, 39399), False, 'import functools\n'), ((56282, 56351), 'openff.toolkit.typing.engines.smirnoff.parameters.LibraryChargeHandler.LibraryChargeType', 'LibraryChargeHandler.LibraryChargeType', ([], {'smirks': 'smirks', 'charge': 'charges'}), '(smirks=smirks, charge=charges)\n', (56320, 56351), False, 'from openff.toolkit.typing.engines.smirnoff.parameters import AngleHandler, BondHandler, ChargeIncrementModelHandler, ConstraintHandler, ElectrostaticsHandler, ImproperTorsionHandler, LibraryChargeHandler, ParameterHandler, ProperTorsionHandler, ToolkitAM1BCCHandler, UnassignedProperTorsionParameterException, UnassignedValenceParameterException, VirtualSiteHandler, vdWHandler\n'), ((33976, 34010), 'collections.defaultdict', 'defaultdict', (['(lambda : 0.0 * unit.e)'], {}), '(lambda : 0.0 * unit.e)\n', (33987, 34010), False, 'from collections import defaultdict\n'), ((39594, 39617), 'copy.deepcopy', 'copy.deepcopy', (['molecule'], {}), '(molecule)\n', (39607, 39617), False, 'import copy\n'), ((39682, 39719), 'openff.units.openmm.from_openmm', 'from_openmm', (['molecule.partial_charges'], {}), '(molecule.partial_charges)\n', (39693, 39719), False, 'from openff.units.openmm import from_openmm\n'), ((43721, 43754), 'copy.deepcopy', 'copy.deepcopy', (['reference_molecule'], {}), '(reference_molecule)\n', (43734, 
43754), False, 'import copy\n'), ((3870, 3899), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': 'key'}), '(atom_indices=key)\n', (3881, 3899), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((3928, 4018), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'val.parameter_type.smirks', 'associated_handler': 'parameter_handler_name'}), '(id=val.parameter_type.smirks, associated_handler=\n parameter_handler_name)\n', (3940, 4018), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((7738, 7801), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': 'key', 'bond_order': 'fractional_bond_order'}), '(atom_indices=key, bond_order=fractional_bond_order)\n', (7749, 7801), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((7860, 7984), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'val.parameter_type.smirks', 'associated_handler': 'parameter_handler_name', 'bond_order': 'fractional_bond_order'}), '(id=val.parameter_type.smirks, associated_handler=\n parameter_handler_name, bond_order=fractional_bond_order)\n', (7872, 7984), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((14302, 14331), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': 'key'}), '(atom_indices=key)\n', (14313, 14331), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((15563, 15607), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'distance': distance}"}), "(parameters={'distance': distance})\n", (15572, 15607), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((16977, 17043), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'k': 
parameter.k, 'angle': parameter.angle}"}), "(parameters={'k': parameter.k, 'angle': parameter.angle})\n", (16986, 17043), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((25178, 25210), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': 'parameters'}), '(parameters=parameters)\n', (25187, 25210), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((30700, 30799), 'openff.interchange.models.VirtualSiteKey', 'VirtualSiteKey', ([], {'atom_indices': 'atoms', 'type': 'virtual_site_type.type', 'match': 'virtual_site_type.match'}), '(atom_indices=atoms, type=virtual_site_type.type, match=\n virtual_site_type.match)\n', (30714, 30799), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((30880, 30969), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'virtual_site_type.smirks', 'associated_handler': 'virtual_site_type.type'}), '(id=virtual_site_type.smirks, associated_handler=\n virtual_site_type.type)\n', (30892, 30969), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((31013, 31111), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'sigma': virtual_site_type.sigma, 'epsilon': virtual_site_type.epsilon}"}), "(parameters={'sigma': virtual_site_type.sigma, 'epsilon':\n virtual_site_type.epsilon})\n", (31022, 31111), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((37560, 37665), 'openff.interchange.models.VirtualSiteKey', 'VirtualSiteKey', ([], {'atom_indices': 'atom_indices', 'type': 'virtual_site_type.type', 'match': 'virtual_site_type.match'}), '(atom_indices=atom_indices, type=virtual_site_type.type,\n match=virtual_site_type.match)\n', (37574, 37665), False, 'from openff.interchange.models import 
PotentialKey, TopologyKey, VirtualSiteKey\n'), ((37767, 37854), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'virtual_site_type.smirks', 'associated_handler': '"""VirtualSiteHandler"""'}), "(id=virtual_site_type.smirks, associated_handler=\n 'VirtualSiteHandler')\n", (37779, 37854), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((40223, 40262), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(atom_index,)'}), '(atom_indices=(atom_index,))\n', (40234, 40262), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((40291, 40369), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'parameter.smirks', 'mult': 'i', 'associated_handler': '"""LibraryCharges"""'}), "(id=parameter.smirks, mult=i, associated_handler='LibraryCharges')\n", (40303, 40369), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((41098, 41137), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(atom_index,)'}), '(atom_indices=(atom_index,))\n', (41109, 41137), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((41166, 41255), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'parameter.smirks', 'mult': 'i', 'associated_handler': '"""ChargeIncrementModel"""'}), "(id=parameter.smirks, mult=i, associated_handler=\n 'ChargeIncrementModel')\n", (41178, 41255), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((44216, 44293), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'reference_smiles', 'mult': 'i', 'associated_handler': '"""ToolkitAM1BCC"""'}), "(id=reference_smiles, mult=i, associated_handler='ToolkitAM1BCC')\n", (44228, 44293), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((44364, 44412), 
'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'charge': partial_charge}"}), "(parameters={'charge': partial_charge})\n", (44373, 44412), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((49027, 49043), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (49038, 49043), False, 'from collections import defaultdict\n'), ((53120, 53179), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'distance': parameter_type.distance}"}), "(parameters={'distance': parameter_type.distance})\n", (53129, 53179), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((9937, 10005), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'k': parameter.k, 'length': parameter.length}"}), "(parameters={'k': parameter.k, 'length': parameter.length})\n", (9946, 10005), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((14566, 14623), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'smirks', 'associated_handler': '"""Constraints"""'}), "(id=smirks, associated_handler='Constraints')\n", (14578, 14623), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((19637, 19708), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': 'key', 'mult': 'n', 'bond_order': 'fractional_bond_order'}), '(atom_indices=key, mult=n, bond_order=fractional_bond_order)\n', (19648, 19708), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((19779, 19885), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'smirks', 'mult': 'n', 'associated_handler': '"""ProperTorsions"""', 'bond_order': 'fractional_bond_order'}), "(id=smirks, mult=n, 
associated_handler='ProperTorsions',\n bond_order=fractional_bond_order)\n", (19791, 19885), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((22197, 22229), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': 'parameters'}), '(parameters=parameters)\n', (22206, 22229), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((27609, 27687), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'sigma': parameter.sigma, 'epsilon': parameter.epsilon}"}), "(parameters={'sigma': parameter.sigma, 'epsilon': parameter.epsilon})\n", (27618, 27687), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((38649, 38687), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(i,)', 'mult': '(2)'}), '(atom_indices=(i,), mult=2)\n', (38660, 38687), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((38720, 38815), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'virtual_site_type.smirks', 'mult': 'i', 'associated_handler': '"""VirtualSiteHandler"""'}), "(id=virtual_site_type.smirks, mult=i, associated_handler=\n 'VirtualSiteHandler')\n", (38732, 38815), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((44434, 44464), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(i,)'}), '(atom_indices=(i,))\n', (44445, 44464), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((52127, 52226), 'openff.interchange.models.VirtualSiteKey', 'VirtualSiteKey', ([], {'atom_indices': 'key', 'type': 'val.parameter_type.type', 'match': 'val.parameter_type.match'}), '(atom_indices=key, type=val.parameter_type.type, match=val.\n parameter_type.match)\n', (52141, 52226), 
False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((52333, 52423), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'val.parameter_type.smirks', 'associated_handler': 'parameter_handler_name'}), '(id=val.parameter_type.smirks, associated_handler=\n parameter_handler_name)\n', (52345, 52423), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((54772, 54800), 'numpy.asarray', 'np.asarray', (['[-1.0, 0.0, 0.0]'], {}), '([-1.0, 0.0, 0.0])\n', (54782, 54800), True, 'import numpy as np\n'), ((3086, 3152), 'openff.interchange.exceptions.SMIRNOFFParameterAttributeNotImplementedError', 'SMIRNOFFParameterAttributeNotImplementedError', (['parameter_attribute'], {}), '(parameter_attribute)\n', (3131, 3152), False, 'from openff.interchange.exceptions import InvalidParameterHandlerError, MissingParametersError, SMIRNOFFParameterAttributeNotImplementedError\n'), ((14879, 15088), 'openff.interchange.exceptions.MissingParametersError', 'MissingParametersError', (['f"""Constraint with SMIRKS pattern {smirks} found with no distance specified, and no corresponding bond parameters were found. The distance of this constraint is not specified."""'], {}), "(\n f'Constraint with SMIRKS pattern {smirks} found with no distance specified, and no corresponding bond parameters were found. 
The distance of this constraint is not specified.'\n )\n", (14901, 15088), False, 'from openff.interchange.exceptions import InvalidParameterHandlerError, MissingParametersError, SMIRNOFFParameterAttributeNotImplementedError\n'), ((24181, 24238), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(key[1], *permuted_key)', 'mult': 'n'}), '(atom_indices=(key[1], *permuted_key), mult=n)\n', (24192, 24238), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((24321, 24391), 'openff.interchange.models.PotentialKey', 'PotentialKey', ([], {'id': 'smirks', 'mult': 'n', 'associated_handler': '"""ImproperTorsions"""'}), "(id=smirks, mult=n, associated_handler='ImproperTorsions')\n", (24333, 24391), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((27962, 28040), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'sigma': parameter.sigma, 'epsilon': parameter.epsilon}"}), "(parameters={'sigma': parameter.sigma, 'epsilon': parameter.epsilon})\n", (27971, 28040), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((35148, 35182), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(index,)'}), '(atom_indices=(index,))\n', (35159, 35182), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((9434, 9561), 'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': "{'k': parameter.k_bondorder[map_key], 'length': parameter.length_bondorder[\n map_key]}", 'map_key': 'map_key'}), "(parameters={'k': parameter.k_bondorder[map_key], 'length':\n parameter.length_bondorder[map_key]}, map_key=map_key)\n", (9443, 9561), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((21572, 21621), 
'openff.interchange.components.potentials.Potential', 'Potential', ([], {'parameters': 'parameters', 'map_key': 'map_key'}), '(parameters=parameters, map_key=map_key)\n', (21581, 21621), False, 'from openff.interchange.components.potentials import Potential, PotentialHandler, WrappedPotential\n'), ((34412, 34435), 'numpy.sum', 'np.sum', (['parameter_value'], {}), '(parameter_value)\n', (34418, 34435), True, 'import numpy as np\n'), ((38016, 38063), 'openff.units.openmm.from_openmm', 'from_openmm', (['virtual_site_type.charge_increment'], {}), '(virtual_site_type.charge_increment)\n', (38027, 38063), False, 'from openff.units.openmm import from_openmm\n'), ((40456, 40475), 'openff.units.openmm.from_openmm', 'from_openmm', (['charge'], {}), '(charge)\n', (40467, 40475), False, 'from openff.units.openmm import from_openmm\n'), ((41588, 41617), 'openff.units.openmm.from_openmm', 'from_openmm', (['charge_increment'], {}), '(charge_increment)\n', (41599, 41617), False, 'from openff.units.openmm import from_openmm\n'), ((46040, 46099), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': 'topology_key.atom_indices', 'mult': '(0)'}), '(atom_indices=topology_key.atom_indices, mult=0)\n', (46051, 46099), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((46307, 46366), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': 'topology_key.atom_indices', 'mult': '(1)'}), '(atom_indices=topology_key.atom_indices, mult=1)\n', (46318, 46366), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((49650, 49704), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(topology_index,)', 'mult': 'mult'}), '(atom_indices=(topology_index,), mult=mult)\n', (49661, 49704), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((55217, 55228), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', 
(55223, 55228), True, 'import numpy as np\n'), ((39111, 39140), 'openff.units.openmm.from_openmm', 'from_openmm', (['charge_increment'], {}), '(charge_increment)\n', (39122, 39140), False, 'from openff.units.openmm import from_openmm\n'), ((49792, 49847), 'openff.interchange.models.TopologyKey', 'TopologyKey', ([], {'atom_indices': '(reference_index,)', 'mult': 'mult'}), '(atom_indices=(reference_index,), mult=mult)\n', (49803, 49847), False, 'from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey\n'), ((55159, 55172), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (55165, 55172), True, 'import numpy as np\n'), ((55175, 55186), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (55181, 55186), True, 'import numpy as np\n'), ((55188, 55201), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (55194, 55201), True, 'import numpy as np\n'), ((55204, 55215), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (55210, 55215), True, 'import numpy as np\n'), ((55562, 55575), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (55568, 55575), True, 'import numpy as np\n'), ((55781, 55809), 'numpy.asarray', 'np.asarray', (['[-1.0, 0.0, 0.0]'], {}), '([-1.0, 0.0, 0.0])\n', (55791, 55809), True, 'import numpy as np\n'), ((55542, 55555), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (55548, 55555), True, 'import numpy as np\n')] |
from __future__ import print_function
import pylab as plt
import numpy as np
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, QueryDict
from django.shortcuts import render_to_response, get_object_or_404, redirect, render
from django.template import Context, RequestContext, loader
from astrometry.net.models import *
from astrometry.util.resample import *
from astrometry.net.tmpfile import *
def simple_histeq(pixels, getinverse=False, mx=256):
    """Histogram-equalize integer pixel values into [0, 1] quantiles.

    Each pixel value is mapped to the (middle) quantile of that value in
    the histogram of *pixels*.  With getinverse=True, also returns a
    callable mapping quantiles back to (fractional) pixel values.

    :param pixels: integer pixel array (uint8 or uint16).
        NOTE(review): np.bincount requires 1-D input -- presumably callers
        pass flattened images; confirm.
    :param getinverse: if True, return (equalized, inverse_function).
    :param mx: number of distinct pixel values (histogram length).
    """
    assert(pixels.dtype in [np.uint8, np.uint16])
    if not getinverse:
        h = np.bincount(pixels, minlength=mx)
        # pixel value -> quantile map.
        # If you imagine jittering the pixels so there are no repeats,
        # this assigns the middle quantile to a pixel value.
        quant = h * 0.5
        cs = np.cumsum(h)
        quant[1:] += cs[:-1]
        quant /= float(cs[-1])
        # quant = np.cumsum(h / float(h.sum()))
        return quant[pixels]
    # This inverse function has slightly weird properties -- it
    # puts a ramp across each pixel value, so inv(0.) may produce
    # values as small as -0.5, and inv(1.) may produce 255.5
    h = np.bincount(pixels.astype(int)+1, minlength=mx+1)
    quant = h[1:] * 0.5
    cs = np.cumsum(h)
    quant[1:] += cs[1:-1]
    quant /= float(cs[-1])
    # interp1d is fragile -- remove duplicate "yy" values that
    # otherwise cause nans.
    yy = cs / float(cs[-1])
    xx = np.arange(mx + 1) - 0.5
    I = np.append([0], 1 + np.flatnonzero(np.diff(yy)))
    # Debug output; NOTE(review): consider routing through logmsg().
    print('mx:', mx)
    print('xx:', len(xx))
    print('yy:', len(yy))
    print('I:', I.min(), I.max())
    yy = yy[I]
    xx = xx[I]
    xx[-1] = mx-0.5
    # print 'yy', yy[0], yy[-1]
    # print 'xx', xx[0], xx[-1]
    # NOTE(review): interp1d is not imported explicitly in this file; it
    # must arrive via one of the star imports (scipy's interp1d) -- confirm.
    inv = interp1d(yy, xx, kind='linear')
    return quant[pixels], inv
def enhanced_ui(req, user_image_id=None):
    """Render the enhanced view for a UserImage at display size.

    Looks up the UserImage, picks its best solved job, and delegates the
    actual rendering to enhanced_image().
    """
    user_image = UserImage.objects.get(id=user_image_id)
    best_job = user_image.get_best_job()
    return enhanced_image(req, job_id=best_job.id, size='display')
def enhanced_image(req, job_id=None, size=None):
    """Render the 'enhanced' composite for a calibration job as a PNG.

    Finds every EnhancedImage healpix tile (version 'v4') touching the
    job's WCS, resamples the tiles into the image frame, and -- when the
    original JPEG pixels are available -- histogram-matches each band of
    the enhanced mosaic to the original image's pixel distribution.

    :param req: Django HttpRequest (unused; required by the URL dispatcher)
    :param job_id: primary key of the Job whose calibration defines the WCS
    :param size: if 'display', render at the display-image scale
    :returns: HttpResponse containing the rendered PNG
    """
    job = get_object_or_404(Job, pk=job_id)
    ui = job.user_image
    cal = job.calibration
    tan = cal.raw_tan
    nside,hh = get_healpixes_touching_wcs(tan)
    tt = 'hello %s, job %s, nside %s, hh %s' % (ui, job, nside, hh)
    ver = EnhanceVersion.objects.get(name='v4')
    print('Using', ver)
    EIms = EnhancedImage.objects.filter(version=ver)
    # Collect all enhanced tiles at the native healpix resolution.
    ens = []
    for hp in hh:
        en = EIms.filter(nside=nside, healpix=hp, version=ver)
        if len(en):
            ens.extend(list(en))
    # Fall back to progressively coarser healpix grids if nothing matched.
    for dnside in range(1, 3):
        if len(ens) == 0:
            # BUGFIX: integer division -- plain '/' yields a float nside
            # under Python 3, which can never match the integer DB field.
            bignside = nside // (2**dnside)
            nil,hh = get_healpixes_touching_wcs(tan, nside=bignside)
            tt += 'bigger healpixes: %s: %s' % (bignside, hh)
            for hp in hh:
                en = EIms.filter(nside=bignside, healpix=hp)
                if len(en):
                    ens.extend(list(en))
    tt = tt + ', EnhancedImages: ' + ', '.join('%s'%e for e in ens)
    img = ui.image
    W,H = img.width, img.height
    tt = tt + 'image size %ix%i' % (W,H)
    targetwcs = tan.to_tanwcs()
    logmsg('wcs:', str(targetwcs))
    if size == 'display':
        # Scale the WCS down to the display-sized version of the image.
        scale = float(ui.image.get_display_image().width)/ui.image.width
        logmsg('scaling:', scale)
        targetwcs = targetwcs.scale(scale)
        logmsg('scaled wcs:', str(targetwcs))
        H,W = targetwcs.get_height(), targetwcs.get_width()
        img = ui.image.get_display_image()
    print(tt)
    ee = np.zeros((H,W,3), np.float32)
    imgdata = None
    df = img.disk_file
    ft = df.file_type
    fn = df.get_path()
    if 'JPEG' in ft:
        print('Reading', fn)
        I = plt.imread(fn)
        print('Read', I.shape, I.dtype)
        if len(I.shape) == 2:
            # Grayscale: replicate to three bands.
            I = I[:,:,np.newaxis].repeat(3, axis=2)
        assert(len(I.shape) == 3)
        if I.shape[2] > 3:
            # BUGFIX: drop the alpha channel from the ARRAY, not from its
            # .shape tuple (the original 'I = I.shape[:,:,:3]' raised a
            # TypeError: tuples cannot be sliced with a multi-dim index).
            I = I[:,:,:3]
        # vertical FLIP to match WCS
        I = I[::-1,:,:]
        imgdata = I
    mapped = np.zeros_like(imgdata)
    for en in ens:
        logmsg('Resampling %s' % en)
        wcs = en.wcs.to_tanwcs()
        try:
            Yo,Xo,Yi,Xi,nil = resample_with_wcs(targetwcs, wcs, [], 3)
        except OverlapError:
            continue
        enI,enW = en.read_files()
        if imgdata is not None:
            # Only histogram-match where the enhanced image has coverage.
            mask = (enW[Yi,Xi] > 0)
        for b in range(3):
            enI[:,:,b] /= enI[:,:,b].max()
            if imgdata is not None:
                # Histogram-match: rank the original pixels (with tiny
                # jitter to break ties) and the enhanced pixels, then map
                # enhanced ranks onto the sorted original values.
                idata = imgdata[Yo[mask],Xo[mask],b]
                DI = np.argsort((idata + np.random.uniform(size=idata.shape))/255.)
                EI = np.argsort(enI[Yi[mask], Xi[mask], b])
                Erank = np.zeros_like(EI)
                Erank[EI] = np.arange(len(Erank))
                mapped[Yo[mask],Xo[mask],b] = idata[DI[Erank]]
            else:
                # Might have to average the coverage here...
                ee[Yo,Xo,b] += enI[Yi,Xi,b]
    tempfn = get_temp_file(suffix='.png')
    if imgdata is not None:
        im = mapped
    else:
        im = np.clip(ee, 0., 1.)
    dpi = 100
    figsize = [x / float(dpi) for x in im.shape[:2][::-1]]
    fig = plt.figure(figsize=figsize, frameon=False, dpi=dpi)
    plt.clf()
    plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
    plt.imshow(im, interpolation='nearest')
    plt.savefig(tempfn)
    print('Wrote', tempfn)
    # BUGFIX: read the PNG back in binary mode (text mode corrupts the
    # bytes under Python 3) and close the handle instead of leaking it.
    with open(tempfn, 'rb') as f:
        res = HttpResponse(f.read())
    res['Content-Type'] = 'image/png'
    return res
| [
"numpy.clip",
"pylab.subplots_adjust",
"pylab.imread",
"django.http.HttpResponse",
"pylab.savefig",
"django.shortcuts.get_object_or_404",
"numpy.zeros_like",
"numpy.diff",
"pylab.figure",
"numpy.argsort",
"numpy.zeros",
"numpy.random.uniform",
"numpy.cumsum",
"numpy.bincount",
"pylab.clf... | [((1246, 1258), 'numpy.cumsum', 'np.cumsum', (['h'], {}), '(h)\n', (1255, 1258), True, 'import numpy as np\n'), ((2058, 2091), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Job'], {'pk': 'job_id'}), '(Job, pk=job_id)\n', (2075, 2091), False, 'from django.shortcuts import render_to_response, get_object_or_404, redirect, render\n'), ((3611, 3642), 'numpy.zeros', 'np.zeros', (['(H, W, 3)', 'np.float32'], {}), '((H, W, 3), np.float32)\n', (3619, 3642), True, 'import numpy as np\n'), ((5543, 5594), 'pylab.figure', 'plt.figure', ([], {'figsize': 'figsize', 'frameon': '(False)', 'dpi': 'dpi'}), '(figsize=figsize, frameon=False, dpi=dpi)\n', (5553, 5594), True, 'import pylab as plt\n'), ((5599, 5608), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (5606, 5608), True, 'import pylab as plt\n'), ((5613, 5666), 'pylab.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0)', 'right': '(1)', 'bottom': '(0)', 'top': '(1)'}), '(left=0, right=1, bottom=0, top=1)\n', (5632, 5666), True, 'import pylab as plt\n'), ((5671, 5710), 'pylab.imshow', 'plt.imshow', (['im'], {'interpolation': '"""nearest"""'}), "(im, interpolation='nearest')\n", (5681, 5710), True, 'import pylab as plt\n'), ((5886, 5905), 'pylab.savefig', 'plt.savefig', (['tempfn'], {}), '(tempfn)\n', (5897, 5905), True, 'import pylab as plt\n'), ((5965, 5980), 'django.http.HttpResponse', 'HttpResponse', (['f'], {}), '(f)\n', (5977, 5980), False, 'from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, QueryDict\n'), ((570, 603), 'numpy.bincount', 'np.bincount', (['pixels'], {'minlength': 'mx'}), '(pixels, minlength=mx)\n', (581, 603), True, 'import numpy as np\n'), ((812, 824), 'numpy.cumsum', 'np.cumsum', (['h'], {}), '(h)\n', (821, 824), True, 'import numpy as np\n'), ((1441, 1458), 'numpy.arange', 'np.arange', (['(mx + 1)'], {}), '(mx + 1)\n', (1450, 1458), True, 'import numpy as np\n'), ((3791, 3805), 'pylab.imread', 'plt.imread', (['fn'], {}), '(fn)\n', (3801, 
3805), True, 'import pylab as plt\n'), ((4119, 4141), 'numpy.zeros_like', 'np.zeros_like', (['imgdata'], {}), '(imgdata)\n', (4132, 4141), True, 'import numpy as np\n'), ((5440, 5461), 'numpy.clip', 'np.clip', (['ee', '(0.0)', '(1.0)'], {}), '(ee, 0.0, 1.0)\n', (5447, 5461), True, 'import numpy as np\n'), ((1507, 1518), 'numpy.diff', 'np.diff', (['yy'], {}), '(yy)\n', (1514, 1518), True, 'import numpy as np\n'), ((4937, 4975), 'numpy.argsort', 'np.argsort', (['enI[Yi[mask], Xi[mask], b]'], {}), '(enI[Yi[mask], Xi[mask], b])\n', (4947, 4975), True, 'import numpy as np\n'), ((5000, 5017), 'numpy.zeros_like', 'np.zeros_like', (['EI'], {}), '(EI)\n', (5013, 5017), True, 'import numpy as np\n'), ((4872, 4907), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'idata.shape'}), '(size=idata.shape)\n', (4889, 4907), True, 'import numpy as np\n')] |
# Generated by Django 3.2.5 on 2021-07-26 18:46
from django.db import migrations
class Migration(migrations.Migration):
    """Schema migration: drop the obsolete Neighborhood.occupants_count column."""

    dependencies = [
        ('myhood', '0009_business_created_on'),
    ]

    operations = [
        # Remove the now-unused occupants_count field from Neighborhood.
        migrations.RemoveField(model_name='neighborhood', name='occupants_count'),
    ]
| [
"django.db.migrations.RemoveField"
] | [((227, 300), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""neighborhood"""', 'name': '"""occupants_count"""'}), "(model_name='neighborhood', name='occupants_count')\n", (249, 300), False, 'from django.db import migrations\n')] |
import collections
import subprocess
class Runner(object):
    """Base object to run linters."""

    # Registry of named runners; unknown names fall back to the default
    # per-file runner.  (defaultdict defers the lookup, so ByFileRunner
    # need not exist yet at class-creation time.)
    _runners = collections.defaultdict(lambda: ByFileRunner)

    def __init__(self):
        """Runner constructor."""
        pass

    @classmethod
    def new_runner(cls, name):
        """Return an instance of the Runner registered under ``name``.

        :param name: name of a registered runner.
        :return: an instance of the specified runner, or the default one
                 when the name is unknown.
        """
        return cls._runners[name]()

    def run(self, *args, **kwargs):
        """Run the linter; concrete subclasses must override this."""
        where = (self.__class__.__module__, self.__class__.__name__)
        raise NotImplementedError("%s.%s must override run()." % where)


class ByFileRunner(Runner):
    """Runner that invokes the linter binary once per file."""

    def __init__(self):
        super(ByFileRunner, self).__init__()

    def _execute(self, cmd, files, cb=None):
        """Run ``cmd`` once per file and collect each run's outcome.

        Blocks until every invocation finishes (no timeout).  If any
        element of ``cmd`` contains the ``%file_path%`` placeholder, it is
        substituted with the file path; otherwise the path is appended as
        the last argument.

        :param cmd: list of str, Popen-style command line.
        :param files: list of str paths to lint.
        :param cb: optional callable invoked with (returncode, stdout,
            stderr) after each run.
        :return: OrderedDict mapping each file path to its
            (returncode, stdout, stderr) tuple.
        """
        results = collections.OrderedDict()
        uses_placeholder = any('%file_path%' in part for part in cmd)
        for path in files:
            if uses_placeholder:
                command = [part.replace('%file_path%', path) for part in cmd]
            else:
                command = cmd + [path]
            proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            stdout, stderr = proc.communicate()
            outcome = (proc.returncode, stdout, stderr)
            if cb is not None:
                cb(*outcome)
            results[path] = outcome
        return results

    def run(self, linter_configuration, files, cb):
        """Run the linter described by ``linter_configuration``.

        :param linter_configuration: dict with the linter's 'cmd' and
            optional 'flags' list, parsed from the autolint config file.
        :param files: iterable of file paths to lint in this run.
        :param cb: per-run callback, forwarded to :meth:`_execute`.
        :return: see :meth:`_execute`.
        """
        cmd = [linter_configuration['cmd']]
        cmd.extend(linter_configuration.get('flags', ()))
        return self._execute(cmd, files, cb)
| [
"subprocess.Popen",
"collections.OrderedDict",
"collections.defaultdict"
] | [((114, 160), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : ByFileRunner)'], {}), '(lambda : ByFileRunner)\n', (137, 160), False, 'import collections\n'), ((1783, 1808), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (1806, 1808), False, 'import collections\n'), ((2181, 2254), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (2197, 2254), False, 'import subprocess\n')] |
from django.db import models
# Create your models here.
class TestModel(models.Model):
    """Minimal model used for testing: a single integer field defaulting to 0."""
    test_field = models.IntegerField(default=0)
| [
"django.db.models.IntegerField"
] | [((106, 136), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (125, 136), False, 'from django.db import models\n')] |
import functools
from typing import Callable, TypeVar
import jax
import jax.numpy as jnp
def scan(f, s, as_):
    """Sequentially fold ``f`` over ``as_``, collecting per-step outputs.

    Pure-Python stand-in for ``jax.lax.scan``: ``f(carry, a)`` must return
    ``(new_carry, output)``.  Returns the final carry and the outputs
    concatenated along their leading axis.
    """
    outputs = []
    carry = s
    for element in as_:
        carry, out = f(carry, element)
        outputs.append(out)
    return carry, jnp.concatenate(outputs)
# Type variable used to annotate the pass-through **kwargs of the integrators.
KwArg = TypeVar('KwArg')
@functools.partial(jax.jit, static_argnums=(0, 1, 2, 3, 4, 5, 6, 7))
def _runge_kutta_4(f: Callable[[float, jnp.ndarray, KwArg], jnp.ndarray],
                   step_size,
                   num_steps,
                   dampening_rate,
                   lyapunov_scale,
                   clip,
                   unconstrain_fn,
                   constrain_fn,
                   rng_key: jnp.ndarray,
                   y0: jnp.ndarray,
                   **kwargs: KwArg):
    """JIT-compiled fixed-step RK4 integration of dy/dt = f(t, y, **kwargs).

    The first eight arguments are static under jax.jit (static_argnums
    above); only rng_key, y0 and the kwarg values are traced.  Returns
    (res, lyapunov_loss) where res stacks the state after each of the
    num_steps steps.  NOTE(review): the Lyapunov-loss machinery below is
    currently disabled (commented out), so lyapunov_loss is always 0.0.
    """
    def step(t, y, **kwargs):
        # One classical 4th-order Runge-Kutta step; each stage and the
        # combined increment pass through the user-supplied clip().
        k1 = clip(step_size * f(t, y, **kwargs))
        k2 = clip(step_size * f(t + step_size / 2, y + k1 / 2, **kwargs))
        k3 = clip(step_size * f(t + step_size / 2, y + k2 / 2, **kwargs))
        k4 = clip(step_size * f(t + step_size, y + k3, **kwargs))
        dy = clip((k1 + 2 * k2 + 2 * k3 + k4) / 6)
        return y + dy
    k1, rng_key = jax.random.split(rng_key)
    # Perturbed copies of the kwargs (perturbation applied in
    # unconstrained space) for the disabled Lyapunov estimate; nkwargs is
    # currently never consumed below.
    nkwargs = {}
    for kwa, kwv in kwargs.items():
        k1, rng_key = jax.random.split(rng_key)
        kwn = jax.random.normal(k1, jnp.shape(kwv)) * lyapunov_scale
        nkwargs[kwa] = constrain_fn(kwa, unconstrain_fn(kwa, kwv) + kwn)
    def body_fn(s, i):
        y, rng_key, lyapunov_loss = s
        t = i * step_size
        k1, rng_key = jax.random.split(rng_key)
        # noise = jax.random.normal(k1, jnp.shape(y)) * lyapunov_scale
        # ly_prev = constrain_fn('y', unconstrain_fn('y', y) + noise)
        # ly = step(t, ly_prev, **nkwargs)
        y_und = step(t, y, **kwargs)
        # Blend with a stop-gradient copy: gradients only flow through a
        # dampening_rate fraction of each update.
        y = (1 - dampening_rate) * jax.lax.stop_gradient(y_und) + dampening_rate * y_und
        # ll = jnp.sum(jnp.abs(y - ly)) / jnp.sum(jnp.abs(noise))
        lyapunov_loss = 0.0 # lyapunov_loss + jnp.maximum(0.0, jnp.log(ll))
        return ((y, rng_key, lyapunov_loss), y)
    s = (y0, rng_key, jnp.array(0.))
    (_, _, lyapunov_loss), res = jax.lax.scan(body_fn, s, jnp.arange(num_steps))
    return res, lyapunov_loss
def runge_kutta_4(f: Callable[[float, jnp.ndarray], jnp.ndarray], step_size=0.1, num_steps=10, dampening_rate=0.9,
                  lyapunov_scale=1e-3,
                  clip=lambda x: x, unconstrain_fn=lambda k, v: v, constrain_fn=lambda k, v: v):
    """Bind the integrator configuration into the jitted RK4 kernel.

    Returns _runge_kutta_4 with its eight static arguments pre-applied,
    i.e. a callable taking (rng_key, y0, **kwargs).
    """
    bound = functools.partial(
        _runge_kutta_4,
        f,
        step_size,
        num_steps,
        dampening_rate,
        lyapunov_scale,
        clip,
        unconstrain_fn,
        constrain_fn,
    )
    return bound
| [
"jax.numpy.concatenate",
"jax.numpy.arange",
"jax.lax.stop_gradient",
"jax.numpy.array",
"jax.numpy.shape",
"functools.partial",
"typing.TypeVar",
"jax.random.split"
] | [((231, 247), 'typing.TypeVar', 'TypeVar', (['"""KwArg"""'], {}), "('KwArg')\n", (238, 247), False, 'from typing import Callable, TypeVar\n'), ((251, 318), 'functools.partial', 'functools.partial', (['jax.jit'], {'static_argnums': '(0, 1, 2, 3, 4, 5, 6, 7)'}), '(jax.jit, static_argnums=(0, 1, 2, 3, 4, 5, 6, 7))\n', (268, 318), False, 'import functools\n'), ((1115, 1140), 'jax.random.split', 'jax.random.split', (['rng_key'], {}), '(rng_key)\n', (1131, 1140), False, 'import jax\n'), ((2434, 2564), 'functools.partial', 'functools.partial', (['_runge_kutta_4', 'f', 'step_size', 'num_steps', 'dampening_rate', 'lyapunov_scale', 'clip', 'unconstrain_fn', 'constrain_fn'], {}), '(_runge_kutta_4, f, step_size, num_steps, dampening_rate,\n lyapunov_scale, clip, unconstrain_fn, constrain_fn)\n', (2451, 2564), False, 'import functools\n'), ((201, 220), 'jax.numpy.concatenate', 'jnp.concatenate', (['bs'], {}), '(bs)\n', (216, 220), True, 'import jax.numpy as jnp\n'), ((1216, 1241), 'jax.random.split', 'jax.random.split', (['rng_key'], {}), '(rng_key)\n', (1232, 1241), False, 'import jax\n'), ((1494, 1519), 'jax.random.split', 'jax.random.split', (['rng_key'], {}), '(rng_key)\n', (1510, 1519), False, 'import jax\n'), ((2044, 2058), 'jax.numpy.array', 'jnp.array', (['(0.0)'], {}), '(0.0)\n', (2053, 2058), True, 'import jax.numpy as jnp\n'), ((2117, 2138), 'jax.numpy.arange', 'jnp.arange', (['num_steps'], {}), '(num_steps)\n', (2127, 2138), True, 'import jax.numpy as jnp\n'), ((1278, 1292), 'jax.numpy.shape', 'jnp.shape', (['kwv'], {}), '(kwv)\n', (1287, 1292), True, 'import jax.numpy as jnp\n'), ((1776, 1804), 'jax.lax.stop_gradient', 'jax.lax.stop_gradient', (['y_und'], {}), '(y_und)\n', (1797, 1804), False, 'import jax\n')] |
#!/usr/bin/env python3
from __future__ import print_function
import sys
import errno
import statistics
import serial
from binho.utils import log_silent, log_verbose, binhoArgumentParser
from binho.errors import DeviceNotFoundError, CapabilityError
def main():
    """CLI entry point: sample the Binho host adapter's ADC and report readings.

    Locates the adapter (exiting with ENODEV on any connection problem),
    takes ``--samples`` readings from the selected IO pin in either
    ``voltage`` or ``raw`` format, and logs min/mean/max/range statistics
    when verbose output is enabled.
    """
    # Set up a simple argument parser.
    parser = binhoArgumentParser(description="utility for reading from Binho host adapter's ADC")
    parser.add_argument(
        "-f",
        "--format",
        dest="format",
        type=str,
        default="voltage",
        choices=["voltage", "raw"],
        help="Format to output in.\nVoltage string, or raw fraction returned by the ADC.",
    )
    parser.add_argument(
        "-s", "--samples", dest="sample_count", type=int, default=1, help="The number of samples to read. (default: 1)",
    )
    # NOTE: the default is the int 0 (falsy), but a user-supplied value is a
    # string -- the branches below rely on that distinction.
    parser.add_argument("-n", "--iopin", default=0, help="Use the given IO pin number for the ADC input")
    args = parser.parse_args()
    log_function = log_verbose if args.verbose else log_silent
    try:
        log_function("Trying to find a Binho host adapter...")
        device = parser.find_specified_device()
        # The device cannot be sampled while in DFU or DAPLink mode.
        if device.inBootloaderMode:
            print(
                "{} found on {}, but it cannot be used now because it's in DFU mode".format(
                    device.productName, device.commPort
                )
            )
            sys.exit(errno.ENODEV)
        elif device.inDAPLinkMode:
            print(
                "{} found on {}, but it cannot be used now because it's in DAPlink mode".format(
                    device.productName, device.commPort
                )
            )
            print("Tip: Exit DAPLink mode using 'binho daplink -q' command")
            sys.exit(errno.ENODEV)
        else:
            log_function("{} found on {}. (Device ID: {})".format(device.productName, device.commPort, device.deviceID))
    except serial.SerialException:
        print(
            "The target Binho host adapter was found, but failed to connect because another application already has an\
 open connection to it."
        )
        print("Please close the connection in the other application and try again.")
        sys.exit(errno.ENODEV)
    except DeviceNotFoundError:
        if args.serial:
            print(
                "No Binho host adapter found matching Device ID '{}'.".format(args.serial), file=sys.stderr,
            )
        else:
            print("No Binho host adapter found!", file=sys.stderr)
        sys.exit(errno.ENODEV)
    # if we fail before here, no connection to the device was opened yet.
    # however, if we fail after this point, we need to make sure we don't
    # leave the serial port open.
    try:
        # Resolve the ADC input pin: numeric -> "IO<n>", otherwise use the
        # given pin name, falling back to the adapter's default ADC pin.
        adcPin = {}
        if args.iopin:
            if args.iopin.isnumeric():
                adcPin = "IO" + str(args.iopin)
            else:
                adcPin = args.iopin.upper()
        else:
            adcPin = device.adc.getDefaultADCPin()
        if args.sample_count == 0:
            raise CapabilityError("Cannot take 0 samples! Samples must be >= 1.")
        if args.sample_count > 1:
            log_function("Taking {} samples...".format(args.sample_count))
        else:
            log_function("Taking {} sample...".format(args.sample_count))
        log_function("")
        samples = []
        for x in range(args.sample_count):
            if args.format == "voltage":
                sample = device.adc.readInputVoltage(adcPin)
                log_function("[{}] ADC channel {} reads {} Volts".format(x + 1, adcPin, sample))
            else:
                sample = device.adc.readInputRaw(adcPin)
                log_function("[{}] ADC channel {} reads {}".format(x + 1, adcPin, sample))
            samples.append(sample)
        log_function("")
        # Summary statistics over all collected samples.
        if args.format == "voltage":
            log_function(
                "Stats: Min = {} V, Mean = {} V, Max = {} V, Range = {} V (n = {})".format(
                    min(samples),
                    statistics.mean(samples),
                    max(samples),
                    "%.3f" % (max(samples) - min(samples)),
                    len(samples),
                )
            )
        else:
            log_function(
                "Stats: Min = {}, Mean = {}, Max = {}, Range = {} (n = {})".format(
                    min(samples),
                    statistics.mean(samples),
                    max(samples),
                    "%.3f" % (max(samples) - min(samples)),
                    len(samples),
                )
            )
    finally:
        # close the connection to the host adapter
        device.close()
if __name__ == "__main__":
    main()
| [
"statistics.mean",
"binho.errors.CapabilityError",
"binho.utils.binhoArgumentParser",
"sys.exit"
] | [((318, 407), 'binho.utils.binhoArgumentParser', 'binhoArgumentParser', ([], {'description': '"""utility for reading from Binho host adapter\'s ADC"""'}), '(description=\n "utility for reading from Binho host adapter\'s ADC")\n', (337, 407), False, 'from binho.utils import log_silent, log_verbose, binhoArgumentParser\n'), ((1387, 1409), 'sys.exit', 'sys.exit', (['errno.ENODEV'], {}), '(errno.ENODEV)\n', (1395, 1409), False, 'import sys\n'), ((2209, 2231), 'sys.exit', 'sys.exit', (['errno.ENODEV'], {}), '(errno.ENODEV)\n', (2217, 2231), False, 'import sys\n'), ((2520, 2542), 'sys.exit', 'sys.exit', (['errno.ENODEV'], {}), '(errno.ENODEV)\n', (2528, 2542), False, 'import sys\n'), ((3048, 3111), 'binho.errors.CapabilityError', 'CapabilityError', (['"""Cannot take 0 samples! Samples must be >= 1."""'], {}), "('Cannot take 0 samples! Samples must be >= 1.')\n", (3063, 3111), False, 'from binho.errors import DeviceNotFoundError, CapabilityError\n'), ((1739, 1761), 'sys.exit', 'sys.exit', (['errno.ENODEV'], {}), '(errno.ENODEV)\n', (1747, 1761), False, 'import sys\n'), ((4041, 4065), 'statistics.mean', 'statistics.mean', (['samples'], {}), '(samples)\n', (4056, 4065), False, 'import statistics\n'), ((4405, 4429), 'statistics.mean', 'statistics.mean', (['samples'], {}), '(samples)\n', (4420, 4429), False, 'import statistics\n')] |
from viper import *
import inspect
def GetSource(func):
    """Print and return the source code of *func*.

    The original only printed the source and discarded it; returning the
    text as well lets callers reuse it, while remaining backward compatible
    (existing callers simply ignore the return value).

    Args:
        func: any object supported by ``inspect.getsource`` (function,
            method, class, module, ...).

    Returns:
        The source text of *func* as a single string.

    Raises:
        TypeError / OSError: propagated from ``inspect.getsource`` when the
            source is unavailable (e.g. built-ins or interactive input).
    """
    lines = inspect.getsource(func)
    print(lines)
    return lines
"inspect.getsource"
] | [((69, 92), 'inspect.getsource', 'inspect.getsource', (['func'], {}), '(func)\n', (86, 92), False, 'import inspect\n')] |
""" django models utilities"""
from django.db import models
class CRideModel(models.Model):
    """Comparte Ride base model.

    CRideModel acts as an abstract base class from which every other model
    in the project will inherit. It provides every table with the
    following attributes:

        + created (DateTime): stores the datetime at which the object was created.
        + modified (DateTime): stores the datetime at which the object was last modified.
    """
    # Set once, on INSERT (auto_now_add).
    created = models.DateTimeField(
        'created at',
        auto_now_add=True,
        help_text='Date time on which object was created'
    )
    # Refreshed on every save() (auto_now).
    modified = models.DateTimeField(
        'updated at',
        auto_now=True,
        help_text='Date time on which the object was last modified'
    )
    class Meta:
        """Meta options."""
        abstract = True  # no table is created for this base model itself
        get_latest_by = 'created'
        # Newest-first by creation time, ties broken by modification time.
        ordering = ['-created', '-modified']
| [
"django.db.models.DateTimeField"
] | [((484, 593), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""created at"""'], {'auto_now_add': '(True)', 'help_text': '"""Date time on which object was created"""'}), "('created at', auto_now_add=True, help_text=\n 'Date time on which object was created')\n", (504, 593), False, 'from django.db import models\n'), ((635, 750), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""updated at"""'], {'auto_now': '(True)', 'help_text': '"""Date time on which the object was last modified"""'}), "('updated at', auto_now=True, help_text=\n 'Date time on which the object was last modified')\n", (655, 750), False, 'from django.db import models\n')] |
import boto3
import json
import uuid
from datetime import datetime
import logging
# Update the root logger to get messages at DEBUG and above
logging.getLogger().setLevel(logging.DEBUG)
# Silence the chatty AWS SDK / HTTP client loggers so only this handler's
# own messages show up at DEBUG level.
logging.getLogger("botocore").setLevel(logging.CRITICAL)
logging.getLogger("boto3").setLevel(logging.CRITICAL)
logging.getLogger("urllib3").setLevel(logging.CRITICAL)
def handler(event, context):
    """Route a CRUD-style request to DynamoDB (plus echo/ping helpers).

    Expected ``event`` keys:
      - operation: create | read | update | delete | list | echo | ping
      - tableName: required for the operations that touch DynamoDB
      - payload:   keyword arguments forwarded to the chosen boto3 call
      - timestampField (optional): attribute name to stamp with the
        current local time on create/update

    Raises:
        ValueError: for an unrecognized operation.
    """
    # Lazy %-style argument; the rendered message is identical.
    logging.info("Received event: %s", json.dumps(event, indent=2))
    op = event["operation"]
    if "tableName" in event:
        dynamo = boto3.resource("dynamodb").Table(event["tableName"])
    if op == "create":
        # Server-side primary-key generation for new items.
        event["payload"]["Item"]["UUID"] = str(uuid.uuid4())
    if "timestampField" in event:
        stamp_field = event["timestampField"]
        stamp = datetime.now().strftime("%Y-%m-%d,%H:%M")
        if op == "create":
            event["payload"]["Item"][stamp_field] = stamp
        elif op == "update":
            event["payload"].update(
                {"AttributeUpdates": {stamp_field: {"Value": stamp}}})
    payload = event.get("payload")
    # Explicit dispatch chain; `dynamo` is intentionally touched only on
    # the branches that required a tableName.
    if op == "create":
        return dynamo.put_item(**payload)
    if op == "read":
        return dynamo.get_item(**payload)
    if op == "update":
        return dynamo.update_item(**payload)
    if op == "delete":
        return dynamo.delete_item(**payload)
    if op == "list":
        return dynamo.scan(**payload)
    if op == "echo":
        return payload
    if op == "ping":
        return "pong"
    raise ValueError(f"Unrecognized operation {op}")
| [
"logging.getLogger",
"json.dumps",
"uuid.uuid4",
"datetime.datetime.now",
"boto3.resource"
] | [((143, 162), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (160, 162), False, 'import logging\n'), ((187, 216), 'logging.getLogger', 'logging.getLogger', (['"""botocore"""'], {}), "('botocore')\n", (204, 216), False, 'import logging\n'), ((244, 270), 'logging.getLogger', 'logging.getLogger', (['"""boto3"""'], {}), "('boto3')\n", (261, 270), False, 'import logging\n'), ((298, 326), 'logging.getLogger', 'logging.getLogger', (['"""urllib3"""'], {}), "('urllib3')\n", (315, 326), False, 'import logging\n'), ((701, 728), 'json.dumps', 'json.dumps', (['event'], {'indent': '(2)'}), '(event, indent=2)\n', (711, 728), False, 'import json\n'), ((944, 956), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (954, 956), False, 'import uuid\n'), ((813, 839), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (827, 839), False, 'import boto3\n'), ((1091, 1105), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1103, 1105), False, 'from datetime import datetime\n'), ((1263, 1277), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1275, 1277), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python3
from argparse import ArgumentParser, ArgumentError
import re, sys
# Define a custom argument type `interval_int` to properly parse arguments
# https://docs.python.org/3/library/argparse.html#type
def interval_int(arg):
    """Parse one command line token as an integer interval.

    Accepts forms like ``1,3``, ``[1, 3]`` or ``- 1 , 5`` (whitespace and
    the surrounding brackets are optional) and returns the two endpoints
    as a sorted two-element list, e.g. ``[1, 3]``.

    Raises:
        argparse.ArgumentError: if *arg* is not a valid interval.
    """
    # Raw string: \s and \d are regex escapes, and were previously invalid
    # string escapes (a SyntaxWarning on modern Python).
    pattern = re.compile(r"^\s*\[?\s*(-?\s*(\d\s*)+,\s*-?\s*(\d\s*)+)\]?\s*$")
    match = pattern.match(arg)
    if not match:
        raise ArgumentError(None, f"argument interval: invalid interval value: '{arg}'")
    # Convert comma-separated list (of strings) to a sorted list of integers
    arg = match.group(1).replace(" ", "")
    return sorted(int(i) for i in arg.split(","))
def merge(intervals):
    """Merge possibly overlapping intervals into non-overlapping ones.

    Args:
        intervals: iterable of ``[low, high]`` pairs.

    Returns:
        A new, sorted list of merged ``[low, high]`` intervals. Touching
        intervals (e.g. ``[1,2]`` and ``[2,3]``) are merged, matching the
        original ``<=`` comparison.

    Unlike the previous implementation, the caller's interval lists are
    never mutated (the old code wrote the merged upper bound back into the
    input sublists).
    """
    merged = []
    for low, high in sorted(intervals):
        if merged and low <= merged[-1][1]:
            # Overlaps (or touches) the previous merged interval: extend it.
            merged[-1][1] = max(high, merged[-1][1])
        else:
            # Disjoint from everything so far: start a new merged interval.
            merged.append([low, high])
    return merged
if __name__ == '__main__':
    # argparse has issues with parameters starting with a negative integer value,
    # thus a little workaround is required (by adding a space in front)
    # https://stackoverflow.com/questions/9025204/python-argparse-issue-with-optional-arguments-which-are-negative-numbers
    # NOTE(review): a bare "-" argument would raise IndexError on arg[1] -- confirm
    # whether that input is possible here.
    for i, arg in enumerate(sys.argv):
        if (arg[0] == '-') and arg[1].isdigit(): sys.argv[i] = ' ' + arg
    # Define and parse arguments
    parser = ArgumentParser(description='Merge probably overlapping intervals into non-overlapping intervals.')
    parser.add_argument('intervals', metavar='interval', type=interval_int, nargs='+',
                        help='list of intervals to merge (example: -1,3 3,9)')
    parser.add_argument('--verbose', action='store_true', help='Print merge intervals to stdout')
    args = parser.parse_args()
    # Merge intervals
    merged_intervals = merge(args.intervals)
    if args.verbose:
        print(merged_intervals)
| [
"argparse.ArgumentError",
"argparse.ArgumentParser",
"re.compile"
] | [((322, 397), 're.compile', 're.compile', (['"""^\\\\s*\\\\[?\\\\s*(-?\\\\s*(\\\\d\\\\s*)+,\\\\s*-?\\\\s*(\\\\d\\\\s*)+)\\\\]?\\\\s*$"""'], {}), "('^\\\\s*\\\\[?\\\\s*(-?\\\\s*(\\\\d\\\\s*)+,\\\\s*-?\\\\s*(\\\\d\\\\s*)+)\\\\]?\\\\s*$')\n", (332, 397), False, 'import re, sys\n'), ((2220, 2323), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Merge probably overlapping intervals into non-overlapping intervals."""'}), "(description=\n 'Merge probably overlapping intervals into non-overlapping intervals.')\n", (2234, 2323), False, 'from argparse import ArgumentParser, ArgumentError\n'), ((450, 524), 'argparse.ArgumentError', 'ArgumentError', (['None', 'f"""argument interval: invalid interval value: \'{arg}\'"""'], {}), '(None, f"argument interval: invalid interval value: \'{arg}\'")\n', (463, 524), False, 'from argparse import ArgumentParser, ArgumentError\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[8]:
import requests
import re
import time
import json
def get_one_page(url):
    """Fetch *url* with the yonyoucloud API credentials.

    Returns the response body decoded as UTF-8 on HTTP 200, or None for
    any other status code.
    """
    # Request headers reconstructed from inspecting the page source; the
    # User-Agent below was tried during development and kept for reference.
    api_headers = {
        # 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) '
        # 'Chrome/52.0.2743.116 Safari/537.36'
        "authoration": "apicode",
        "apicode": "2b33b36fb2564ecaaac6ae66226e995f",
    }
    resp = requests.get(url, headers=api_headers)
    if resp.status_code != 200:
        return None
    return resp.content.decode(encoding='utf-8')
# Notebook-exported driver: fetch the worldwide COVID-19 statistics once at
# import time and persist them to two local files.
url = 'https://api.yonyoucloud.com/apis/dst/ncov/wholeworld'
html = get_one_page(url)
print(html)
# In[15]:
# JSON-encode the response body (a str, so this just adds quoting/escaping)
# and save it. `with` guarantees the handle is closed even if the write
# fails -- the original left both files open on error. The duplicate local
# `import json` statements were dropped; json is imported at the top of
# the file.
b = json.dumps(html)
with open('json2.json', 'w', encoding='utf-8') as f2:
    f2.write(b)
# In[14]:
# Save the raw response body as-is.
# NOTE(review): if the request failed, `html` is None and this write raises
# TypeError -- same as the original behaviour; confirm that is acceptable.
with open('global_epidemic_statistics.json.json', 'w', encoding='utf-8') as f:
    f.write(html)
# In[ ]:
| [
"json.dumps",
"requests.get"
] | [((708, 724), 'json.dumps', 'json.dumps', (['html'], {}), '(html)\n', (718, 724), False, 'import json\n'), ((432, 466), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (444, 466), False, 'import requests\n')] |
# included from libs/mincostflow.py
"""
Min Cost Flow
"""
# derived: https://atcoder.jp/contests/practice2/submissions/16726003
from heapq import heappush, heappop
class MinCostFlow():
    """Minimum-cost flow via successive shortest paths with Johnson-style
    node potentials (``self.dual``), so Dijkstra works with negative edge
    costs after the first relabel. Derived from an AC Library port (see the
    header comment above).
    """
    def __init__(self, n):
        # n: number of nodes; graph is an adjacency list of mutable edge
        # records [to, rev_index, remaining_cap, cost].
        self.n = n
        self.graph = [[] for _ in range(n)]
        self.pos = []
    def add_edge(self, fr, to, cap, cost):
        """Add a directed edge fr->to with capacity `cap` and cost `cost`.
        Returns the edge id usable with get_edge(). A paired reverse edge
        with capacity 0 and cost -cost is added for residual updates."""
        #assert 0 <= fr < self.n
        #assert 0 <= to < self.n
        m = len(self.pos)
        self.pos.append((fr, len(self.graph[fr])))
        self.graph[fr].append([to, len(self.graph[to]), cap, cost])
        self.graph[to].append([fr, len(self.graph[fr]) - 1, 0, -cost])
        return m
    def get_edge(self, idx):
        """Return (from, to, cap, flow, cost) for edge id `idx`; the flow
        equals the reverse edge's accumulated capacity."""
        #assert 0 <= idx < len(self.pos)
        to, rev, cap, cost = self.graph[self.pos[idx][0]][self.pos[idx][1]]
        _rev_to, _rev_rev, rev_cap, _rev_cost = self.graph[to][rev]
        return self.pos[idx][0], to, cap + rev_cap, rev_cap, cost
    def edges(self):
        """Yield get_edge(i) for every added edge, in insertion order."""
        for i in range(len(self.pos)):
            yield self.get_edge(i)
    def dual_ref(self, s, t):
        """One Dijkstra pass on reduced costs from s; updates potentials
        and the predecessor arrays (pv/pe). Returns False when t is
        unreachable in the residual graph."""
        dist = [2**63 - 1] * self.n
        dist[s] = 0
        vis = [0] * self.n
        self.pv = [-1] * self.n
        self.pe = [-1] * self.n
        queue = []
        heappush(queue, (0, s))
        while queue:
            k, v = heappop(queue)
            if vis[v]:
                continue
            vis[v] = True
            if v == t:
                break
            for i in range(len(self.graph[v])):
                to, _rev, cap, cost = self.graph[v][i]
                if vis[to] or cap == 0:
                    continue
                # Reduced cost: non-negative once potentials are consistent.
                cost += self.dual[v] - self.dual[to]
                if dist[to] - dist[v] > cost:
                    dist[to] = dist[v] + cost
                    self.pv[to] = v
                    self.pe[to] = i
                    heappush(queue, (dist[to], to))
        if not vis[t]:
            return False
        # Fold the found distances into the potentials (visited nodes only).
        for v in range(self.n):
            if not vis[v]:
                continue
            self.dual[v] -= dist[t] - dist[v]
        return True
    def flow(self, s, t):
        """Max flow of minimum cost from s to t; returns (flow, cost)."""
        return self.flow_with_limit(s, t, 2**63 - 1)
    def flow_with_limit(self, s, t, limit):
        """Push at most `limit` units; returns the final (flow, cost)."""
        return self.slope_with_limit(s, t, limit)[-1]
    def slope(self, s, t):
        """Return the piecewise-linear (flow, cost) frontier up to max flow."""
        return self.slope_with_limit(s, t, 2**63 - 1)
    def slope_with_limit(self, s, t, limit):
        """Successive shortest paths: repeatedly saturate the cheapest
        residual s-t path, recording (flow, cost) breakpoints."""
        #assert 0 <= s < self.n
        #assert 0 <= t < self.n
        #assert s != t
        flow = 0
        cost = 0
        prev_cost = -1
        res = [(flow, cost)]
        self.dual = [0] * self.n
        while flow < limit:
            if not self.dual_ref(s, t):
                break
            # Bottleneck capacity along the predecessor path.
            c = limit - flow
            v = t
            while v != s:
                c = min(c, self.graph[self.pv[v]][self.pe[v]][2])
                v = self.pv[v]
            # Apply c units of flow along the path (and its reverse edges).
            v = t
            while v != s:
                _to, rev, _cap, _ = self.graph[self.pv[v]][self.pe[v]]
                self.graph[self.pv[v]][self.pe[v]][2] -= c
                self.graph[v][rev][2] += c
                v = self.pv[v]
            # With updated potentials, -dual[s] is the path's cost per unit.
            d = -self.dual[s]
            flow += c
            cost += c * d
            # NOTE(review): upstream AC Library compares the previous
            # cost-per-flow to d here, but prev_cost is later set to the
            # cumulative cost, so this dedup of collinear breakpoints may
            # never fire -- verify against the reference implementation.
            if prev_cost == d:
                res.pop()
            res.append((flow, cost))
            prev_cost = cost
        return res
# end of libs/mincostflow.py
# included from snippets/main.py
def debug(*values, msg=""):
    """Write *msg* followed by the given values to stderr (debugging aid)."""
    import sys
    print(msg, *values, file=sys.stderr)
def solve(N, M, AS, BS, RS):
    """Model the task as a min-cost-flow problem and return the best score.

    Layout: nodes 0..N-1 are the N items, followed by `start` (source),
    `goal` (sink) and three "round" nodes. Rewards are encoded as
    cost = INF - reward so that minimising cost maximises reward; the
    final line converts the flow cost back into a score.

    NOTE(review): the contest semantics of AS/BS/RS are not visible here;
    confirm against the problem statement before changing anything.
    """
    global mcf
    INF = 10 ** 5
    mcf = MinCostFlow(N + 5)
    start = N
    goal = N + 1
    round = N + 2  # NOTE(review): shadows the builtin `round` inside this function
    for i in range(3):
        # Each of the three round nodes may receive up to M units for free.
        mcf.add_edge(start, round + i, M, 0)
    for i in range(3):
        for j in range(N):
            r = AS[j] * (BS[j] ** (i + 1)) % RS[i]
            # Reward r for assigning item j in round i, encoded as INF - r.
            mcf.add_edge(round + i, j, 1, INF - r)
    for j in range(N):
        cs = [AS[j] * (BS[j] ** (k + 1)) for k in range(3)]
        cs.append(0)
        for k in range(3):
            # Marginal cost of the k-th use of item j (cs[-1] == 0, so the
            # first step costs cs[0]).
            c = cs[k] - cs[k-1]
            mcf.add_edge(j, goal, 1, c)
    return INF * (3 * M) - mcf.flow(start, goal)[-1]
def main():
    """Read the problem input from stdin and print the answer."""
    def read_ints():
        # `input` may be rebound to sys.stdin.buffer.readline (bytes);
        # int() accepts both str and bytes tokens.
        return [int(token) for token in input().split()]

    n, m = read_ints()
    a_values = read_ints()
    b_values = read_ints()
    r_values = read_ints()
    print(solve(n, m, a_values, b_values, r_values))
# tests
# Doctest fixtures: each Tn holds sample stdin; the matching TEST_Tn
# docstring feeds it to main() via as_input() and checks the printed
# answer. They are executed by _test() below.
T1 = """
2 1
3 2
3 3
100000 100000 100000
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
81
"""
T2 = """
4 2
2 4 3 3
4 2 3 3
100000 100000 100000
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
210
"""
T3 = """
20 19
3 2 3 4 3 3 2 3 2 2 3 3 4 3 2 4 4 3 3 4
2 3 4 2 4 3 3 2 4 2 4 3 3 2 3 4 4 4 2 2
3 4 5
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
-1417
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
print(k)
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
    """For tests: rebind module-level input()/read() to serve *s* as stdin.

    Mirrors the __main__ setup where both return bytes (ascii-encoded).
    """
    import io
    buffer = io.StringIO(s.strip())

    def fake_input():
        return bytes(buffer.readline(), "ascii")

    def fake_read():
        return bytes(buffer.read(), "ascii")

    module_globals = globals()
    module_globals["input"] = fake_input
    module_globals["read"] = fake_read
if __name__ == "__main__":
    import sys
    # Read stdin as raw bytes for speed; int() parses bytes tokens directly.
    input = sys.stdin.buffer.readline
    read = sys.stdin.buffer.read
    sys.setrecursionlimit(10 ** 6)
    # Passing "-t" as the last argument runs the doctest suite instead of solving.
    if sys.argv[-1] == "-t":
        print("testing")
        _test()
        sys.exit()
    main()
    sys.exit()
# end of snippets/main.py
| [
"sys.setrecursionlimit",
"heapq.heappop",
"doctest.testmod",
"sys.exit",
"heapq.heappush",
"doctest.run_docstring_examples"
] | [((4699, 4716), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (4714, 4716), False, 'import doctest\n'), ((5228, 5258), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 6)'], {}), '(10 ** 6)\n', (5249, 5258), False, 'import sys\n'), ((5363, 5373), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5371, 5373), False, 'import sys\n'), ((1226, 1249), 'heapq.heappush', 'heappush', (['queue', '(0, s)'], {}), '(queue, (0, s))\n', (1234, 1249), False, 'from heapq import heappush, heappop\n'), ((5337, 5347), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5345, 5347), False, 'import sys\n'), ((1290, 1304), 'heapq.heappop', 'heappop', (['queue'], {}), '(queue)\n', (1297, 1304), False, 'from heapq import heappush, heappop\n'), ((4826, 4873), 'doctest.run_docstring_examples', 'doctest.run_docstring_examples', (['g[k]', 'g'], {'name': 'k'}), '(g[k], g, name=k)\n', (4856, 4873), False, 'import doctest\n'), ((1833, 1864), 'heapq.heappush', 'heappush', (['queue', '(dist[to], to)'], {}), '(queue, (dist[to], to))\n', (1841, 1864), False, 'from heapq import heappush, heappop\n')] |
# Generated by Django 3.1.12 on 2021-07-13 00:37
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the six SiteModel emissions fields to explicitly name CO2.

    Pure column renames (no type changes, no data migration): each
    ``year_one_emissions_*`` field becomes ``year_one_CO2_emissions_*``,
    including the matching ``*_bau`` (business-as-usual) fields.
    """
    # Must be applied after the preceding reo migration.
    dependencies = [
        ('reo', '0111_auto_20210708_2144'),
    ]
    operations = [
        migrations.RenameField(
            model_name='sitemodel',
            old_name='year_one_emissions_from_elec_grid_purchase',
            new_name='year_one_CO2_emissions_from_elec_grid_purchase',
        ),
        migrations.RenameField(
            model_name='sitemodel',
            old_name='year_one_emissions_from_elec_grid_purchase_bau',
            new_name='year_one_CO2_emissions_from_elec_grid_purchase_bau',
        ),
        migrations.RenameField(
            model_name='sitemodel',
            old_name='year_one_emissions_from_fuelburn',
            new_name='year_one_CO2_emissions_from_fuelburn',
        ),
        migrations.RenameField(
            model_name='sitemodel',
            old_name='year_one_emissions_from_fuelburn_bau',
            new_name='year_one_CO2_emissions_from_fuelburn_bau',
        ),
        migrations.RenameField(
            model_name='sitemodel',
            old_name='year_one_emissions_offset_from_elec_exports',
            new_name='year_one_CO2_emissions_offset_from_elec_exports',
        ),
        migrations.RenameField(
            model_name='sitemodel',
            old_name='year_one_emissions_offset_from_elec_exports_bau',
            new_name='year_one_CO2_emissions_offset_from_elec_exports_bau',
        ),
    ]
| [
"django.db.migrations.RenameField"
] | [((224, 394), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""sitemodel"""', 'old_name': '"""year_one_emissions_from_elec_grid_purchase"""', 'new_name': '"""year_one_CO2_emissions_from_elec_grid_purchase"""'}), "(model_name='sitemodel', old_name=\n 'year_one_emissions_from_elec_grid_purchase', new_name=\n 'year_one_CO2_emissions_from_elec_grid_purchase')\n", (246, 394), False, 'from django.db import migrations\n'), ((441, 619), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""sitemodel"""', 'old_name': '"""year_one_emissions_from_elec_grid_purchase_bau"""', 'new_name': '"""year_one_CO2_emissions_from_elec_grid_purchase_bau"""'}), "(model_name='sitemodel', old_name=\n 'year_one_emissions_from_elec_grid_purchase_bau', new_name=\n 'year_one_CO2_emissions_from_elec_grid_purchase_bau')\n", (463, 619), False, 'from django.db import migrations\n'), ((666, 816), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""sitemodel"""', 'old_name': '"""year_one_emissions_from_fuelburn"""', 'new_name': '"""year_one_CO2_emissions_from_fuelburn"""'}), "(model_name='sitemodel', old_name=\n 'year_one_emissions_from_fuelburn', new_name=\n 'year_one_CO2_emissions_from_fuelburn')\n", (688, 816), False, 'from django.db import migrations\n'), ((863, 1021), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""sitemodel"""', 'old_name': '"""year_one_emissions_from_fuelburn_bau"""', 'new_name': '"""year_one_CO2_emissions_from_fuelburn_bau"""'}), "(model_name='sitemodel', old_name=\n 'year_one_emissions_from_fuelburn_bau', new_name=\n 'year_one_CO2_emissions_from_fuelburn_bau')\n", (885, 1021), False, 'from django.db import migrations\n'), ((1068, 1240), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""sitemodel"""', 'old_name': '"""year_one_emissions_offset_from_elec_exports"""', 'new_name': 
'"""year_one_CO2_emissions_offset_from_elec_exports"""'}), "(model_name='sitemodel', old_name=\n 'year_one_emissions_offset_from_elec_exports', new_name=\n 'year_one_CO2_emissions_offset_from_elec_exports')\n", (1090, 1240), False, 'from django.db import migrations\n'), ((1287, 1467), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""sitemodel"""', 'old_name': '"""year_one_emissions_offset_from_elec_exports_bau"""', 'new_name': '"""year_one_CO2_emissions_offset_from_elec_exports_bau"""'}), "(model_name='sitemodel', old_name=\n 'year_one_emissions_offset_from_elec_exports_bau', new_name=\n 'year_one_CO2_emissions_offset_from_elec_exports_bau')\n", (1309, 1467), False, 'from django.db import migrations\n')] |
from setuptools import setup
if __name__ == '__main__':
    # Minimal packaging stub: registers distribution metadata only when the
    # script is executed directly (e.g. `python setup.py ...`).
    setup(name='foo', version='1.0.0')
| [
"setuptools.setup"
] | [((60, 94), 'setuptools.setup', 'setup', ([], {'name': '"""foo"""', 'version': '"""1.0.0"""'}), "(name='foo', version='1.0.0')\n", (65, 94), False, 'from setuptools import setup\n')] |
import time
import requests
from core.utils.parser import Parser
from core.utils.helpers import Helpers
from core.models.plugin import BasePlugin
class HIBP(BasePlugin):
    """Have I Been Pwned plugin: reports known breaches for harvested e-mails."""

    def __init__(self, args):
        self.args = args
        self.base_url = "https://haveibeenpwned.com/api/v2/breachedaccount"
        self.url_parameters = "truncateResponse=true&includeUnverified=true"

    def execute(self, data):
        """Entry point: collect unique e-mails from *data* and query each one.

        Returns True when at least one e-mail was found (and checked),
        False otherwise.
        """
        Helpers.print_warning("Starting Have I Been Pwned plugin...", jumpline=True)
        unique_emails = Parser(self.args).all_unique_emails(data)
        if not unique_emails:
            return False
        self.check_all_emails(unique_emails)
        return True

    def check_authors(self, authors):
        """Query the breach API for every author's e-mail address."""
        for author in authors:
            time.sleep(2)  # pause between requests to respect rate limits
            self.check_email(author.email)

    def check_all_emails(self, emails):
        """Query the breach API for each address, pausing between requests."""
        for address in emails:
            time.sleep(2)  # pause between requests to respect rate limits
            self.check_email(address)

    def check_email(self, email):
        """Look up *email* against HIBP and print any breaches found.

        Returns True when breaches were printed; False otherwise (including
        DDoS-protection/throttling responses and any request error).
        """
        try:
            request_url = "{base}/{account}?{query}".format(
                base=self.base_url, account=email, query=self.url_parameters)
            response = requests.get(request_url)
            if response.status_code == 503:
                Helpers.print_error("hibp: IP got in DDoS protection by CloudFare")
            elif response.status_code == 429:
                Helpers.print_error("hibp: Throttled by HIBP API")
            elif response.text:
                breaches = response.json()
                print("\n" + email + " leaks:")
                for breach in breaches:
                    print("\t- {}".format(breach["Name"]))
                return True
            return False
        except Exception as error:  # plugin must never crash the host tool
            Helpers.print_error(error)
            return False
| [
"core.utils.parser.Parser",
"requests.get",
"time.sleep",
"core.utils.helpers.Helpers.print_error",
"core.utils.helpers.Helpers.print_warning"
] | [((419, 495), 'core.utils.helpers.Helpers.print_warning', 'Helpers.print_warning', (['"""Starting Have I Been Pwned plugin..."""'], {'jumpline': '(True)'}), "('Starting Have I Been Pwned plugin...', jumpline=True)\n", (440, 495), False, 'from core.utils.helpers import Helpers\n'), ((756, 769), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (766, 769), False, 'import time\n'), ((895, 908), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (905, 908), False, 'import time\n'), ((1088, 1105), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1100, 1105), False, 'import requests\n'), ((517, 534), 'core.utils.parser.Parser', 'Parser', (['self.args'], {}), '(self.args)\n', (523, 534), False, 'from core.utils.parser import Parser\n'), ((1159, 1226), 'core.utils.helpers.Helpers.print_error', 'Helpers.print_error', (['"""hibp: IP got in DDoS protection by CloudFare"""'], {}), "('hibp: IP got in DDoS protection by CloudFare')\n", (1178, 1226), False, 'from core.utils.helpers import Helpers\n'), ((1624, 1646), 'core.utils.helpers.Helpers.print_error', 'Helpers.print_error', (['e'], {}), '(e)\n', (1643, 1646), False, 'from core.utils.helpers import Helpers\n'), ((1282, 1332), 'core.utils.helpers.Helpers.print_error', 'Helpers.print_error', (['"""hibp: Throttled by HIBP API"""'], {}), "('hibp: Throttled by HIBP API')\n", (1301, 1332), False, 'from core.utils.helpers import Helpers\n')] |
from re import S
from flask import render_template, redirect, url_for, flash, request
from flask_paginate import Pagination, get_page_parameter
from flask_login import current_user
from flask_wtf import FlaskForm
from wtforms import SubmitField
from flask_babel import _, lazy_gettext as _l
from flask_login import current_user
from .models.purchase import Purchase, PurchaseSummary, PurchaseEntry
from .models.user import User
from flask import Blueprint
bp = Blueprint('purchases', __name__)
# Route to show a user's past purchases.
@bp.route('/purchases', methods=['GET', 'POST'])
def purchases():
    """Render the logged-in user's purchase history, paginated 10 per page."""
    # Set up pagination.
    page = request.args.get(get_page_parameter(), type=int, default=1)
    per_page = 10
    start = (page - 1) * per_page
    # If user is not logged in, redirect.
    if not current_user.is_authenticated:
        return redirect(url_for('users.login'))
    seller_status = User.sellerStatus(current_user.id)
    # Get list of past purchases.
    past_purchases = Purchase._get_purchases(current_user.id)
    # All rows are fetched, then sliced to the current page for rendering.
    pagination = Pagination(page=page, per_page=per_page, total=len(past_purchases), record_name='products')
    return render_template('purchases.html', purchases=past_purchases[start: start + per_page], pagination=pagination, seller_status=seller_status)
# Route to show a specific order.
# NOTE(review): the rule has no separator before the converter, so URLs look
# like /individual-purchase123 -- confirm this is intentional.
@bp.route('/individual-purchase<purchase_id>', methods=['GET', 'POST'])
def individual_purchase(purchase_id):
    """Render one order's line items and its total, formatted to 2 decimals."""
    # If user is not logged in, redirect.
    if not current_user.is_authenticated:
        return redirect(url_for('users.login'))
    seller_status = User.sellerStatus(current_user.id)
    # Get all entries in the purchase and the total price.
    purchase_entries = Purchase._get_individual_purchase(current_user.id, purchase_id)
    total_price_paid = Purchase._get_total_purchase_cost(current_user.id, purchase_id)
    total_price_paid = ('%.2f'%total_price_paid)
    return render_template('individualPurchase.html', purchase_id=purchase_id, purchase_entries=purchase_entries, total_price_paid=total_price_paid, seller_status=seller_status)
"flask.render_template",
"flask_paginate.get_page_parameter",
"flask.Blueprint",
"flask.url_for"
] | [((463, 495), 'flask.Blueprint', 'Blueprint', (['"""purchases"""', '__name__'], {}), "('purchases', __name__)\n", (472, 495), False, 'from flask import Blueprint\n'), ((1168, 1307), 'flask.render_template', 'render_template', (['"""purchases.html"""'], {'purchases': 'past_purchases[start:start + per_page]', 'pagination': 'pagination', 'seller_status': 'seller_status'}), "('purchases.html', purchases=past_purchases[start:start +\n per_page], pagination=pagination, seller_status=seller_status)\n", (1183, 1307), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((1933, 2107), 'flask.render_template', 'render_template', (['"""individualPurchase.html"""'], {'purchase_id': 'purchase_id', 'purchase_entries': 'purchase_entries', 'total_price_paid': 'total_price_paid', 'seller_status': 'seller_status'}), "('individualPurchase.html', purchase_id=purchase_id,\n purchase_entries=purchase_entries, total_price_paid=total_price_paid,\n seller_status=seller_status)\n", (1948, 2107), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((659, 679), 'flask_paginate.get_page_parameter', 'get_page_parameter', ([], {}), '()\n', (677, 679), False, 'from flask_paginate import Pagination, get_page_parameter\n'), ((863, 885), 'flask.url_for', 'url_for', (['"""users.login"""'], {}), "('users.login')\n", (870, 885), False, 'from flask import render_template, redirect, url_for, flash, request\n'), ((1559, 1581), 'flask.url_for', 'url_for', (['"""users.login"""'], {}), "('users.login')\n", (1566, 1581), False, 'from flask import render_template, redirect, url_for, flash, request\n')] |
# Copyright (c) 2019 Works Applications Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import fileinput
import logging
import os
import sys
from . import __version__
from . import Dictionary, SplitMode
def _set_default_subparser(self, name, args=None):
    """
    Make *name* the default subcommand when none appears on the command line.

    copy and modify code from https://bitbucket.org/ruamel/std.argparse
    """
    subparser_found = False
    for arg in sys.argv[1:]:
        if arg in ['-h', '--help']: # global help if no subparser
            break
    else:
        # No -h/--help given: scan the registered subparser names for a match.
        for x in self._subparsers._actions:
            if not isinstance(x, argparse._SubParsersAction):
                continue
            for sp_name in x._name_parser_map.keys():
                if sp_name in sys.argv[1:]:
                    subparser_found = True
    if not subparser_found:
        # insert default in first position, this implies no
        # global options without a sub_parsers specified
        if args is None:
            sys.argv.insert(1, name)
        else:
            args.insert(0, name)
# Monkey-patch ArgumentParser so main() can call
# parser.set_default_subparser("tokenize") after registering subparsers.
argparse.ArgumentParser.set_default_subparser = _set_default_subparser
def run(tokenizer, mode, input_, output, logger, print_all, enable_dump):
    """Tokenize every line of *input_* and write one morpheme per line.

    Each output line is the tab-separated surface form, part-of-speech and
    normalized form; with *print_all*, dictionary form, reading, dictionary
    id, synonym group ids and an "(OOV)" marker are appended. Every input
    line is terminated by an "EOS" line.
    """
    dump_logger = logger if enable_dump else None
    for raw_line in input_:
        text = raw_line.rstrip('\n')
        # Note: the current tokenizer version ignores the logger argument.
        for morpheme in tokenizer.tokenize(text, mode, dump_logger):
            fields = [
                morpheme.surface(),
                ",".join(morpheme.part_of_speech()),
                morpheme.normalized_form(),
            ]
            if print_all:
                synonym_ids = ','.join(str(gid) for gid in morpheme.synonym_group_ids())
                fields.append(morpheme.dictionary_form())
                fields.append(morpheme.reading_form())
                fields.append(str(morpheme.dictionary_id()))
                fields.append('[{}]'.format(synonym_ids))
                if morpheme.is_oov():
                    fields.append("(OOV)")
            output.write("\t".join(fields))
            output.write("\n")
        output.write("EOS\n")
def _input_files_checker(args, print_usage):
for file in args.in_files:
if not os.path.exists(file):
print_usage()
print('{}: error: {} doesn\'t exist'.format(
__name__, file), file=sys.stderr)
exit(1)
def _command_tokenize(args, print_usage):
if args.version:
print_version()
return
_input_files_checker(args, print_usage)
if args.mode == "A":
mode = SplitMode.A
elif args.mode == "B":
mode = SplitMode.B
else:
mode = SplitMode.C
output = sys.stdout
if args.fpath_out:
output = open(args.fpath_out, "w", encoding="utf-8")
stdout_logger = logging.getLogger(__name__)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
stdout_logger.addHandler(handler)
stdout_logger.setLevel(logging.DEBUG)
stdout_logger.propagate = False
print_all = args.a
enable_dump = args.d
try:
dict_ = Dictionary(config_path=args.fpath_setting,
dict_type=args.system_dict_type)
tokenizer_obj = dict_.create()
input_ = fileinput.input(
args.in_files, openhook=fileinput.hook_encoded("utf-8"))
run(tokenizer_obj, mode, input_, output,
stdout_logger, print_all, enable_dump)
finally:
if args.fpath_out:
output.close()
def _command_build(args, print_usage):
raise NotImplementedError(
"Build dictionary feature is not yet implemented. Please use sudachipy<0.6.")
def _command_user_build(args, print_usage):
raise NotImplementedError(
"Build dictionary feature is not yet implemented. Please use sudachipy<0.6.")
def print_version():
print('sudachipy {}'.format(__version__))
def main():
parser = argparse.ArgumentParser(
description="Japanese Morphological Analyzer")
subparsers = parser.add_subparsers(description='')
# root, tokenizer parser
parser_tk = subparsers.add_parser(
'tokenize', help='(default) see `tokenize -h`', description='Tokenize Text')
parser_tk.add_argument("-r", dest="fpath_setting",
metavar="file", help="the setting file in JSON format")
parser_tk.add_argument(
"-m", dest="mode", choices=["A", "B", "C"], default="C", help="the mode of splitting")
parser_tk.add_argument("-o", dest="fpath_out",
metavar="file", help="the output file")
parser_tk.add_argument("-s", dest="system_dict_type", metavar='string', choices=["small", "core", "full"],
help="sudachidict type")
parser_tk.add_argument("-a", action="store_true",
help="print all of the fields")
parser_tk.add_argument("-d", action="store_true",
help="print the debug information")
parser_tk.add_argument("-v", "--version", action="store_true",
dest="version", help="print sudachipy version")
parser_tk.add_argument("in_files", metavar="file",
nargs=argparse.ZERO_OR_MORE, help='text written in utf-8')
parser_tk.set_defaults(handler=_command_tokenize,
print_usage=parser_tk.print_usage)
# build dictionary parser
parser_bd = subparsers.add_parser(
'build', help='see `build -h`', description='Build Sudachi Dictionary')
parser_bd.add_argument('-o', dest='out_file', metavar='file', default='system.dic',
help='output file (default: system.dic)')
parser_bd.add_argument('-d', dest='description', default='', metavar='string', required=False,
help='description comment to be embedded on dictionary')
required_named_bd = parser_bd.add_argument_group(
'required named arguments')
required_named_bd.add_argument('-m', dest='matrix_file', metavar='file', required=True,
help='connection matrix file with MeCab\'s matrix.def format')
parser_bd.add_argument("in_files", metavar="file", nargs=argparse.ONE_OR_MORE,
help='source files with CSV format (one of more)')
parser_bd.set_defaults(handler=_command_build,
print_usage=parser_bd.print_usage)
# build user-dictionary parser
parser_ubd = subparsers.add_parser(
'ubuild', help='see `ubuild -h`', description='Build User Dictionary')
parser_ubd.add_argument('-d', dest='description', default='', metavar='string', required=False,
help='description comment to be embedded on dictionary')
parser_ubd.add_argument('-o', dest='out_file', metavar='file', default='user.dic',
help='output file (default: user.dic)')
parser_ubd.add_argument('-s', dest='system_dic', metavar='file', required=False,
help='system dictionary path (default: system core dictionary path)')
parser_ubd.add_argument("in_files", metavar="file", nargs=argparse.ONE_OR_MORE,
help='source files with CSV format (one or more)')
parser_ubd.set_defaults(handler=_command_user_build,
print_usage=parser_ubd.print_usage)
parser.set_default_subparser('tokenize')
args = parser.parse_args()
if hasattr(args, 'handler'):
args.handler(args, args.print_usage)
else:
parser.print_help()
if __name__ == '__main__':
main()
| [
"logging.getLogger",
"os.path.exists",
"logging.StreamHandler",
"argparse.ArgumentParser",
"fileinput.hook_encoded",
"sys.argv.insert"
] | [((3238, 3265), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3255, 3265), False, 'import logging\n'), ((3280, 3313), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (3301, 3313), False, 'import logging\n'), ((4370, 4440), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Japanese Morphological Analyzer"""'}), "(description='Japanese Morphological Analyzer')\n", (4393, 4440), False, 'import argparse\n'), ((2640, 2660), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (2654, 2660), False, 'import os\n'), ((1496, 1520), 'sys.argv.insert', 'sys.argv.insert', (['(1)', 'name'], {}), '(1, name)\n', (1511, 1520), False, 'import sys\n'), ((3753, 3784), 'fileinput.hook_encoded', 'fileinput.hook_encoded', (['"""utf-8"""'], {}), "('utf-8')\n", (3775, 3784), False, 'import fileinput\n')] |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
from branch_utility import BranchUtility
from fake_url_fetcher import FakeUrlFetcher
from object_store_creator import ObjectStoreCreator
class BranchUtilityTest(unittest.TestCase):
def setUp(self):
self._branch_util = BranchUtility(
os.path.join('branch_utility', 'first.json'),
os.path.join('branch_utility', 'second.json'),
FakeUrlFetcher(os.path.join(sys.path[0], 'test_data')),
ObjectStoreCreator.ForTest())
def testSplitChannelNameFromPath(self):
self.assertEquals(('stable', 'extensions/stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'stable/extensions/stuff.html'))
self.assertEquals(('dev', 'extensions/stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'dev/extensions/stuff.html'))
self.assertEquals(('beta', 'extensions/stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'beta/extensions/stuff.html'))
self.assertEquals(('trunk', 'extensions/stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'trunk/extensions/stuff.html'))
self.assertEquals((None, 'extensions/stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'extensions/stuff.html'))
self.assertEquals((None, 'apps/stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'apps/stuff.html'))
self.assertEquals((None, 'extensions/dev/stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'extensions/dev/stuff.html'))
self.assertEquals((None, 'stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'stuff.html'))
def testNewestChannel(self):
self.assertEquals('trunk',
self._branch_util.NewestChannel(('trunk', 'dev', 'beta', 'stable')))
self.assertEquals('trunk',
self._branch_util.NewestChannel(('stable', 'beta', 'dev', 'trunk')))
self.assertEquals('dev',
self._branch_util.NewestChannel(('stable', 'beta', 'dev')))
self.assertEquals('dev',
self._branch_util.NewestChannel(('dev', 'beta', 'stable')))
self.assertEquals('beta',
self._branch_util.NewestChannel(('beta', 'stable')))
self.assertEquals('beta',
self._branch_util.NewestChannel(('stable', 'beta')))
self.assertEquals('stable', self._branch_util.NewestChannel(('stable',)))
self.assertEquals('beta', self._branch_util.NewestChannel(('beta',)))
self.assertEquals('dev', self._branch_util.NewestChannel(('dev',)))
self.assertEquals('trunk', self._branch_util.NewestChannel(('trunk',)))
def testGetChannelInfo(self):
self.assertEquals('trunk',
self._branch_util.GetChannelInfo('trunk').channel)
self.assertEquals('trunk',
self._branch_util.GetChannelInfo('trunk').branch)
self.assertEquals('trunk',
self._branch_util.GetChannelInfo('trunk').version)
self.assertEquals('dev',
self._branch_util.GetChannelInfo('dev').channel)
self.assertEquals(1500,
self._branch_util.GetChannelInfo('dev').branch)
self.assertEquals(28,
self._branch_util.GetChannelInfo('dev').version)
self.assertEquals('beta',
self._branch_util.GetChannelInfo('beta').channel)
self.assertEquals(1453,
self._branch_util.GetChannelInfo('beta').branch)
self.assertEquals(27,
self._branch_util.GetChannelInfo('beta').version)
self.assertEquals('stable',
self._branch_util.GetChannelInfo('stable').channel)
self.assertEquals(1410,
self._branch_util.GetChannelInfo('stable').branch)
self.assertEquals(26,
self._branch_util.GetChannelInfo('stable').version)
def testGetLatestVersionNumber(self):
self.assertEquals(28, self._branch_util.GetLatestVersionNumber())
def testGetBranchForVersion(self):
self.assertEquals(1453,
self._branch_util.GetBranchForVersion(27))
self.assertEquals(1410,
self._branch_util.GetBranchForVersion(26))
self.assertEquals(1364,
self._branch_util.GetBranchForVersion(25))
self.assertEquals(1312,
self._branch_util.GetBranchForVersion(24))
self.assertEquals(1271,
self._branch_util.GetBranchForVersion(23))
self.assertEquals(1229,
self._branch_util.GetBranchForVersion(22))
self.assertEquals(1180,
self._branch_util.GetBranchForVersion(21))
self.assertEquals(1132,
self._branch_util.GetBranchForVersion(20))
self.assertEquals(1084,
self._branch_util.GetBranchForVersion(19))
self.assertEquals(1025,
self._branch_util.GetBranchForVersion(18))
self.assertEquals(963,
self._branch_util.GetBranchForVersion(17))
self.assertEquals(696,
self._branch_util.GetBranchForVersion(11))
self.assertEquals(396,
self._branch_util.GetBranchForVersion(5))
def testGetChannelForVersion(self):
self.assertEquals('trunk',
self._branch_util.GetChannelForVersion('trunk'))
self.assertEquals('dev',
self._branch_util.GetChannelForVersion(28))
self.assertEquals('beta',
self._branch_util.GetChannelForVersion(27))
self.assertEquals('stable',
self._branch_util.GetChannelForVersion(26))
self.assertEquals('stable',
self._branch_util.GetChannelForVersion(22))
self.assertEquals('stable',
self._branch_util.GetChannelForVersion(18))
self.assertEquals('stable',
self._branch_util.GetChannelForVersion(14))
self.assertEquals(None,
self._branch_util.GetChannelForVersion(30))
self.assertEquals(None,
self._branch_util.GetChannelForVersion(42))
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"object_store_creator.ObjectStoreCreator.ForTest",
"os.path.join"
] | [((6045, 6060), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6058, 6060), False, 'import unittest\n'), ((475, 519), 'os.path.join', 'os.path.join', (['"""branch_utility"""', '"""first.json"""'], {}), "('branch_utility', 'first.json')\n", (487, 519), False, 'import os\n'), ((529, 574), 'os.path.join', 'os.path.join', (['"""branch_utility"""', '"""second.json"""'], {}), "('branch_utility', 'second.json')\n", (541, 574), False, 'import os\n'), ((648, 676), 'object_store_creator.ObjectStoreCreator.ForTest', 'ObjectStoreCreator.ForTest', ([], {}), '()\n', (674, 676), False, 'from object_store_creator import ObjectStoreCreator\n'), ((599, 637), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""test_data"""'], {}), "(sys.path[0], 'test_data')\n", (611, 637), False, 'import os\n')] |
"""
Created on Dec 16 2021
@author: <NAME>
Poisson equation solver for the Hall effect.
Includes classes for Hall bars, Hall bars in a nonlocal geometry, and Corbino disks.
The Hall bar class has build in methods for longitudinal and Hall 4-probe resistance measurements.
Plotting functions assume coordinates are in microns, but the Poisson equation is scale-invariant.
"""
import time
import math
import numpy as np
import scipy.sparse as sp # import sparse matrix library
import matplotlib.pyplot as plt
from scipy.sparse.linalg import spsolve
# import the file where the differentiation matrix operators are defined
from diff_matrices import Diff_mat_1D, Diff_mat_2D
class hallbar():
"""The class for a Hall bar device
Source is the left terminal, drain is the right terminal.
Args:
Lx : length in x direction
Ly : length in y direction
Nx : number of points in grid along x
Ny : number of points in grid along y
"""
def __init__(self, Lx, Ly, Nx = 301, Ny = 201):
# Initiate with no contacts
self.contacts = []
# Define coordinate variables
self.Nx = Nx
self.Ny = Ny
self.Lx = Lx
self.Ly = Ly
self.x = np.linspace(0,self.Lx,self.Nx)
self.y = np.linspace(0,self.Ly,self.Ny)
self.dx = self.x[1] - self.x[0] # grid spacing along x direction
self.dy = self.y[1] - self.y[0] # grid spacing along y direction
self.X,self.Y = np.meshgrid(self.x,self.y) # 2D meshgrid
# 1D indexing
self.Xu = self.X.ravel() # Unravel 2D meshgrid to 1D array
self.Yu = self.Y.ravel()
# Search for boundary indices
start_time = time.time()
self.ind_unravel_L = np.squeeze(np.where(self.Xu==self.x[0])) # Left boundary
self.ind_unravel_R = np.squeeze(np.where(self.Xu==self.x[self.Nx-1])) # Right boundary
self.ind_unravel_B = np.squeeze(np.where(self.Yu==self.y[0])) # Bottom boundary
self.ind_unravel_T = np.squeeze(np.where(self.Yu==self.y[self.Ny-1])) # Top boundary
self.ind_boundary_unravel = np.squeeze(np.where((self.Xu==self.x[0]) | (self.Xu==self.x[self.Nx-1]) | (self.Yu==self.y[0]) | (self.Yu==self.y[self.Ny-1]))) # outer boundaries 1D unravel indices
self.ind_boundary = np.where((self.X==self.x[0]) | (self.X==self.x[self.Nx-1]) | (self.Y==self.y[0]) | (self.Y==self.y[self.Ny-1])) # outer boundary
print("Boundary search time = %1.4s" % (time.time()-start_time))
# Load finite difference matrix operators
self.Dx_2d, self.Dy_2d, self.D2x_2d, self.D2y_2d = Diff_mat_2D(self.Nx,self.Ny)
# Initiate empty solution matrix
self.u = 0
def solve(self, lmbda):
# constructs matrix problem and solves Poisson equation
# Args: lmbda : sigma_xy / sigma_xx. Must be finite
# Returns: self.u : electric potential
self.lmbda = lmbda
# Construct system matrix without boundary conditions
start_time = time.time()
I_sp = sp.eye(self.Nx*self.Ny).tocsr()
L_sys = self.D2x_2d/self.dx**2 + self.D2y_2d/self.dy**2
# Boundary operators
BD = I_sp # Dirichlet boundary operator
BNx = self.Dx_2d / (2 * self.dx) # Neumann boundary operator for x component
BNy = self.Dy_2d / (2 * self.dy) # Neumann boundary operator for y component
# DIRICHLET BOUNDARY CONDITIONS FOR CONTACTS
L_sys[self.ind_unravel_L,:] = BD[self.ind_unravel_L,:] # Boundaries at the left layer
L_sys[self.ind_unravel_R,:] = BD[self.ind_unravel_R,:] # Boundaries at the right edges
# CURRENT THROUGH EDGES
L_sys[self.ind_unravel_T,:] = BNy[self.ind_unravel_T,:] - lmbda * BNx[self.ind_unravel_T,:] # Boundaries at the top layer
L_sys[self.ind_unravel_B,:] = BNy[self.ind_unravel_B,:] - lmbda * BNx[self.ind_unravel_B,:] # Boundaries at the bottom layer
# Source function (right hand side vector)
g = np.zeros(self.Nx*self.Ny)
# Insert boundary values at the boundary points
g[self.ind_unravel_L] = 1 # Dirichlet boundary condition at source
g[self.ind_unravel_R] = 0 # Dirichlet boundary condition at drain
g[self.ind_unravel_T] = 0 # No current through top
g[self.ind_unravel_B] = 0 # No current through bottom
print("System matrix and right hand vector computation time = %1.6s" % (time.time()-start_time))
start_time = time.time()
self.u = spsolve(L_sys,g).reshape(self.Ny,self.Nx).T
print("spsolve() time = %1.6s" % (time.time()-start_time))
def voltage_measurement(self, x1, x2, side='top'):
# Args: x1 : point of V_A
# x2 : point of V_B
# side ('top', 'bottom', or 'hall') : which side of Hall bar to measure
# Returns: V_A - V_B
if np.all(self.u==0):
raise Exception('System has not been solved')
if x1 > self.Lx or x1 < 0 or x2 > self.Lx or x2 < 0:
raise Exception('Points out of bounds')
if side=='top':
ya = self.Ny-1
yb = self.Ny-1
elif side=='bottom':
ya = 0
yb = 0
elif side=='hall':
ya = 0
yb = self.Ny-1
else:
raise Exception('Side must be top or bottom')
# Find nearest index value to input coordinates
xa = np.searchsorted(self.x, x1, side='left')
xb = np.searchsorted(self.x, x2, side='left')
return self.u[xa, ya] - self.u[xb, yb]
def plot_potential(self):
if np.all(self.u==0):
raise Exception('System has not been solved')
fig = plt.figure(figsize = [8,5])
plt.contourf(self.x,self.y,self.u.T,41,cmap = 'inferno')
cbar = plt.colorbar(ticks = np.arange(0, 1.01, 0.2), label = r'$\phi / \phi_s$')
plt.xlabel(r'x ($\mu$m)');
plt.ylabel(r'y ($\mu$m)');
plt.show()
def plot_resistance(self):
if np.all(self.u==0):
raise Exception('System has not been solved')
r_top = (self.u[0:-1, -1] - self.u[1:, -1]) * 25812 * self.Ly / self.dx
r_bottom = (self.u[0:-1, 0] - self.u[1:, 0]) * 25812 * self.Ly / self.dx
rxx = 25812 / self.lmbda
fig = plt.figure(figsize = [8,5])
plt.plot(self.x[0:-1] - self.dx, r_top, 'r', label='top')
plt.plot(self.x[0:-1] - self.dx, r_bottom, 'b', label='bottom')
plt.hlines(rxx, self.x[0], self.x[-1], linestyle='dashed', color='grey', label=r'$\rho_{xx}$')
plt.xlabel(r'x ($\mu$m)');
plt.ylabel(r'$\rho_{xx}$ $(\Omega)$');
plt.legend()
plt.ylim([0, 12000]);
plt.show()
def add_contact(self, contact):
if contact.x1 > self.Lx or contact.x2 > self.Lx:
raise Exception('Contact out of bounds')
self.contacts.append(contact)
def measure_contact_voltageonly(self, contact):
# Args: contact instance
# Returns: measured resistivity
# Voltage is averaged across voltage tap
# THIS FUNCTION DOES NOT CHECK THE CURRENT!
# This method assumes 2terminal resistance is h/e2, which in general is wrong
if np.all(self.u==0):
raise Exception('System has not been solved')
if contact.side=='top':
y = self.Ny-1
elif contact.side=='bottom':
y = 0
else:
raise Exception('Side must be top or bottom')
# Average voltage A
A_indices = np.where(np.abs(self.x - contact.x1) < contact.width)[0]
A_voltage = self.u[A_indices, y].mean()
# Average voltage A
B_indices = np.where(np.abs(self.x - contact.x2) < contact.width)[0]
B_voltage = self.u[B_indices, y].mean()
# voltage difference
v = A_voltage - B_voltage
# length between contacts
dx = np.abs(contact.x1 - contact.x2)
# return apparent resistivity
return 25812 * v * self.Ly / dx
def measure_all_contacts_voltageonly(self):
# Args: none
# Returns: array; resistivity measurement of all contacts
if np.all(self.u==0):
raise Exception('System has not been solved')
result = []
for contact in self.contacts:
result.append(self.measure_contact_voltageonly(contact))
return result
def measure_contact(self, contact, sxx, sxy):
'''
Voltage is averaged across voltage tap
This method checks the current and outputs resistivity.
Args:
contact : contact instance
sxx : longitudinal
sxy : hall. sxy/sxx should match self.lmbda
Returns: measured resistivity
'''
if np.all(self.u==0):
raise Exception('System has not been solved')
if contact.side=='top':
ya = self.Ny-1
yb = self.Ny-1
elif contact.side=='bottom':
ya = 0
yb = 0
elif contact.side=='hall':
ya = 0
yb = self.Ny-1
else:
raise Exception('Side must be top or bottom')
# Average voltage A
A_indices = np.where(np.abs(self.x - contact.x1) < contact.width)[0]
A_voltage = self.u[A_indices, ya].mean()
# Average voltage B
B_indices = np.where(np.abs(self.x - contact.x2) < contact.width)[0]
B_voltage = self.u[B_indices, yb].mean()
# voltage difference
v = A_voltage - B_voltage
# length between contacts
dx = np.abs(contact.x1 - contact.x2)
i = self.measure_current(sxx, sxy)
# return apparent resistivity
if contact.side=='hall':
return v / i
else:
return v / i * self.Ly / dx
def measure_all_contacts(self, sxx, sxy):
# Args: none
# Returns: array; resistivity measurement of all contacts
if np.all(self.u==0):
raise Exception('System has not been solved')
result = []
for contact in self.contacts:
result.append(self.measure_contact(contact, sxx, sxy))
return result
def measure_current(self, sxx, sxy):
'''
ARGS : sxx and sxy : longitudinal and Hall conductivity. units e2/h
Returns : current moving through device
'''
# choose place to measure: halfway across Hallbar
ind_x = int(self.Nx/2)
# calculate electric field using E = -\nabla V
# x electric field, using second order central finite difference
E_x = 0.5 * (self.u[ind_x - 1, :] - self.u[ind_x + 1, :]) / self.dx
# y electric field, need forward/backward differences for edges
Dy_1d, D2y_1d = Diff_mat_1D(self.Ny)
E_y = - 0.5 * Dy_1d.dot(self.u[ind_x, :]) / self.dy
# calculate x current using j = sigma E; integrate and convert to SI units
current = np.sum(sxx * E_x + sxy * E_y) * self.dy / 25812
return current
class contact():
"""The class for a voltage contact
Args:
x1 : coordinate location of V_A
x2 : coordinate location of V_B
side ('top', 'bottom', or 'hall') : which side of the Hall bar to measure
width : width of voltage tap in microns
"""
def __init__(self, x1, x2, side='top', width=6):
self.x1 = x1
self.x2 = x2
self.side = side
self.width = width
class nonlocal_hb():
"""The class for nonlocal measurements
Contacts are on the bottom edge of the device
Args:
Lx : length in x direction
Ly : length in y direction
Nx : number of points in grid along x
Ny : number of points in grid along y
settings : positions of contacts
"""
def __init__(self, Lx, Ly, Nx = 301, Ny = 201, settings = {}):
# Initiate with no contacts
self.contacts = []
# Define coordinate variables
self.Nx = Nx
self.Ny = Ny
self.Lx = Lx
self.Ly = Ly
self.x = np.linspace(0,self.Lx,self.Nx)
self.y = np.linspace(0,self.Ly,self.Ny)
self.dx = self.x[1] - self.x[0] # grid spacing along x direction
self.dy = self.y[1] - self.y[0] # grid spacing along y direction
self.X,self.Y = np.meshgrid(self.x,self.y) # 2D meshgrid
# 1D indexing
self.Xu = self.X.ravel() # Unravel 2D meshgrid to 1D array
self.Yu = self.Y.ravel()
# Nonlocal contacts
self.source_x1 = settings.get("source_x1", Lx/4)
self.source_x2 = settings.get("source_x2", Lx/3)
self.drain_x1 = settings.get("drain_x1", 2*Lx/3)
self.drain_x2 = settings.get("drain_x2", 3*Lx/4)
# Search for boundary indices
start_time = time.time()
self.ind_unravel_L = np.squeeze(np.where(self.Xu==self.x[0])) # Left boundary
self.ind_unravel_R = np.squeeze(np.where(self.Xu==self.x[self.Nx-1])) # Right boundary
self.ind_unravel_B = np.squeeze(np.where(self.Yu==self.y[0])) # Bottom boundary
self.ind_unravel_T = np.squeeze(np.where(self.Yu==self.y[self.Ny-1])) # Top boundary
self.ind_boundary_unravel = np.squeeze(np.where((self.Xu==self.x[0]) | (self.Xu==self.x[self.Nx-1]) | (self.Yu==self.y[0]) | (self.Yu==self.y[self.Ny-1]))) # outer boundaries 1D unravel indices
self.ind_boundary = np.where((self.X==self.x[0]) | (self.X==self.x[self.Nx-1]) | (self.Y==self.y[0]) | (self.Y==self.y[self.Ny-1])) # outer boundary
self.ind_unravel_source = np.squeeze(np.where( (self.Yu==self.y[0]) & (self.Xu >= self.source_x1) & (self.Xu <= self.source_x2) )) # Source
self.ind_unravel_drain = np.squeeze(np.where( (self.Yu==self.y[0]) & (self.Xu >= self.drain_x1) & (self.Xu <= self.drain_x2) )) # Drain
print("Boundary search time = %1.4s" % (time.time()-start_time))
# Load finite difference matrix operators
self.Dx_2d, self.Dy_2d, self.D2x_2d, self.D2y_2d = Diff_mat_2D(self.Nx,self.Ny)
# Initiate empty solution matrix
self.u = 0
def solve(self, lmbda):
''' Constructs matrix problem and solves Poisson equation
# Args:
lmbda : sigma_xy / sigma_xx. Must be finite
# Returns:
self.u : electric potential
'''
self.lmbda = lmbda
# Construct system matrix without boundary conditions
start_time = time.time()
I_sp = sp.eye(self.Nx*self.Ny).tocsr()
L_sys = self.D2x_2d/self.dx**2 + self.D2y_2d/self.dy**2
# Boundary operators
BD = I_sp # Dirichlet boundary operator
BNx = self.Dx_2d / (2 * self.dx) # Neumann boundary operator for x component
BNy = self.Dy_2d / (2 * self.dy) # Neumann boundary operator for y component
# CURRENT THROUGH TOP/BOTTOM EDGES
L_sys[self.ind_unravel_T,:] = BNy[self.ind_unravel_T,:] - lmbda * BNx[self.ind_unravel_T,:] # Boundaries at the top layer
L_sys[self.ind_unravel_B,:] = BNy[self.ind_unravel_B,:] - lmbda * BNx[self.ind_unravel_B,:] # Boundaries at the bottom layer
# CURRENT THROUGH LEFT/RIGHT EDGES
L_sys[self.ind_unravel_L,:] = BNx[self.ind_unravel_L,:] + lmbda * BNy[self.ind_unravel_L,:]
L_sys[self.ind_unravel_R,:] = BNx[self.ind_unravel_R,:] + lmbda * BNy[self.ind_unravel_R,:]
# REPLACE WITH DIRICHLET BOUNDARY CONDITIONS FOR SOURCE/DRAIN
L_sys[self.ind_unravel_source,:] = BD[self.ind_unravel_source,:]
L_sys[self.ind_unravel_drain,:] = BD[self.ind_unravel_drain,:]
# Source function (right hand side vector)
g = np.zeros(self.Nx*self.Ny)
# No current boundary conditions
g[self.ind_unravel_L] = 0
g[self.ind_unravel_R] = 0
g[self.ind_unravel_T] = 0
g[self.ind_unravel_B] = 0
# Replace source with potential
g[self.ind_unravel_source] = 1
print("System matrix and right hand vector computation time = %1.6s" % (time.time()-start_time))
start_time = time.time()
self.u = spsolve(L_sys,g).reshape(self.Ny,self.Nx).T
print("spsolve() time = %1.6s" % (time.time()-start_time))
def voltage_measurement(self, x1, x2, side='top'):
# Args: x1 : point of V_A
# x2 : point of V_B
# side ('top' or 'bottom') : which side of Hall bar to measure
# Returns: V_A - V_B
if np.all(self.u==0):
raise Exception('System has not been solved')
if x1 > self.Lx or x1 < 0 or x2 > self.Lx or x2 < 0:
raise Exception('Points out of bounds')
if side=='top':
y = self.Ny-1
elif side=='bottom':
y = 0
else:
raise Exception('Side must be top or bottom')
# Find nearest index value to input coordinates
xa = np.searchsorted(self.x, x1, side='left')
xb = np.searchsorted(self.x, x2, side='left')
return self.u[xa, y] - self.u[xb, y]
def plot_potential(self):
if np.all(self.u==0):
raise Exception('System has not been solved')
fig = plt.figure(figsize = [8,5])
# plt.contour(self.x,self.y,self.u.T,41,cmap = 'viridis', vmin=0, vmax=1)
plt.pcolormesh(self.X, self.Y, self.u.T, cmap='inferno', vmin=0, vmax=1)
cbar = plt.colorbar(ticks = np.arange(0, 1.01, 0.2), label = r'$\phi / \phi_s$')
plt.xlabel(r'x ($\mu$m)');
plt.ylabel(r'y ($\mu$m)');
plt.show()
class corbino():
"""The class for a Corbino disk
Args:
ro : outer radius
ri : inner radius
Nx : number of points in grid along x
Ny : number of points in grid along y
"""
def __init__(self, ro, ri, Nx = 301, Ny = 201):
# Initiate with no contacts
self.contacts = []
# Define coordinate variables
self.Nx = Nx
self.Ny = Ny
self.ro = ro
self.ri = ri
self.x = np.linspace(-self.ro, self.ro, self.Nx)
self.y = np.linspace(-self.ro, self.ro, self.Ny)
self.dx = self.x[1] - self.x[0] # grid spacing along x direction
self.dy = self.y[1] - self.y[0] # grid spacing along y direction
self.X,self.Y = np.meshgrid(self.x,self.y) # 2D meshgrid
# 1D indexing
self.Xu = self.X.ravel() # Unravel 2D meshgrid to 1D array
self.Yu = self.Y.ravel()
# Search for boundary indices
start_time = time.time()
self.ind_unravel_outer = np.squeeze(np.where(self.Xu**2 + self.Yu**2 >= self.ro**2)) # outer boundary
self.ind_unravel_inner = np.squeeze(np.where(self.Xu**2 + self.Yu**2 <= self.ri**2)) # inner boundary
self.ind_boundary_unravel = np.squeeze(np.where((self.Xu**2 + self.Yu**2 >= self.ro**2) | (self.Xu**2 + self.Yu**2 <= self.ri**2))) # boundary 1D unravel indices
self.ind_boundary = np.where((self.Xu**2 + self.Yu**2 >= self.ro**2) | (self.Xu**2 + self.Yu**2 <= self.ri**2)) # boundary
print("Boundary search time = %1.4s" % (time.time()-start_time))
# Load finite difference matrix operators
self.Dx_2d, self.Dy_2d, self.D2x_2d, self.D2y_2d = Diff_mat_2D(self.Nx,self.Ny)
# Initiate empty solution matrix
self.u = 0
def solve(self, lmbda):
# constructs matrix problem and solves Poisson equation
# Args: lmbda : sigma_xy / sigma_xx. Must be finite
# Returns: self.u : electric potential
self.lmbda = lmbda
# Construct system matrix without boundary conditions
start_time = time.time()
I_sp = sp.eye(self.Nx*self.Ny).tocsr()
L_sys = self.D2x_2d/self.dx**2 + self.D2y_2d/self.dy**2
# Boundary operators
BD = I_sp # Dirichlet boundary operator
# DIRICHLET BOUNDARY CONDITIONS FOR CONTACTS
L_sys[self.ind_boundary_unravel,:] = BD[self.ind_boundary_unravel,:]
# Source function (right hand side vector)
g = np.zeros(self.Nx*self.Ny)
# Insert boundary values at the boundary points
g[self.ind_unravel_outer] = 1 # Dirichlet boundary condition at source
g[self.ind_unravel_inner] = 0 # Dirichlet boundary condition at drain
print("System matrix and right hand vector computation time = %1.6s" % (time.time()-start_time))
start_time = time.time()
self.u = spsolve(L_sys,g).reshape(self.Ny,self.Nx).T
print("spsolve() time = %1.6s" % (time.time()-start_time))
def plot_potential(self):
if np.all(self.u==0):
raise Exception('System has not been solved')
fig = plt.figure(figsize = [8,5])
plt.contourf(self.x,self.y,self.u.T,41,cmap = 'inferno')
cbar = plt.colorbar(ticks = np.arange(0, 1.01, 0.2), label = r'$\phi / \phi_s$')
plt.xlabel(r'x ($\mu$m)');
plt.ylabel(r'y ($\mu$m)');
plt.show() | [
"matplotlib.pyplot.ylabel",
"diff_matrices.Diff_mat_2D",
"matplotlib.pyplot.pcolormesh",
"numpy.arange",
"matplotlib.pyplot.contourf",
"scipy.sparse.eye",
"numpy.where",
"numpy.searchsorted",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.meshgrid",
"matplotli... | [((1161, 1193), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Lx', 'self.Nx'], {}), '(0, self.Lx, self.Nx)\n', (1172, 1193), True, 'import numpy as np\n'), ((1203, 1235), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Ly', 'self.Ny'], {}), '(0, self.Ly, self.Ny)\n', (1214, 1235), True, 'import numpy as np\n'), ((1419, 1446), 'numpy.meshgrid', 'np.meshgrid', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (1430, 1446), True, 'import numpy as np\n'), ((1639, 1650), 'time.time', 'time.time', ([], {}), '()\n', (1648, 1650), False, 'import time\n'), ((2239, 2366), 'numpy.where', 'np.where', (['((self.X == self.x[0]) | (self.X == self.x[self.Nx - 1]) | (self.Y == self.\n y[0]) | (self.Y == self.y[self.Ny - 1]))'], {}), '((self.X == self.x[0]) | (self.X == self.x[self.Nx - 1]) | (self.Y ==\n self.y[0]) | (self.Y == self.y[self.Ny - 1]))\n', (2247, 2366), True, 'import numpy as np\n'), ((2537, 2566), 'diff_matrices.Diff_mat_2D', 'Diff_mat_2D', (['self.Nx', 'self.Ny'], {}), '(self.Nx, self.Ny)\n', (2548, 2566), False, 'from diff_matrices import Diff_mat_1D, Diff_mat_2D\n'), ((2888, 2899), 'time.time', 'time.time', ([], {}), '()\n', (2897, 2899), False, 'import time\n'), ((3803, 3830), 'numpy.zeros', 'np.zeros', (['(self.Nx * self.Ny)'], {}), '(self.Nx * self.Ny)\n', (3811, 3830), True, 'import numpy as np\n'), ((4244, 4255), 'time.time', 'time.time', ([], {}), '()\n', (4253, 4255), False, 'import time\n'), ((4583, 4602), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (4589, 4602), True, 'import numpy as np\n'), ((5011, 5051), 'numpy.searchsorted', 'np.searchsorted', (['self.x', 'x1'], {'side': '"""left"""'}), "(self.x, x1, side='left')\n", (5026, 5051), True, 'import numpy as np\n'), ((5059, 5099), 'numpy.searchsorted', 'np.searchsorted', (['self.x', 'x2'], {'side': '"""left"""'}), "(self.x, x2, side='left')\n", (5074, 5099), True, 'import numpy as np\n'), ((5176, 5195), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 
0)\n', (5182, 5195), True, 'import numpy as np\n'), ((5253, 5279), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[8, 5]'}), '(figsize=[8, 5])\n', (5263, 5279), True, 'import matplotlib.pyplot as plt\n'), ((5283, 5341), 'matplotlib.pyplot.contourf', 'plt.contourf', (['self.x', 'self.y', 'self.u.T', '(41)'], {'cmap': '"""inferno"""'}), "(self.x, self.y, self.u.T, 41, cmap='inferno')\n", (5295, 5341), True, 'import matplotlib.pyplot as plt\n'), ((5425, 5450), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x ($\\\\mu$m)"""'], {}), "('x ($\\\\mu$m)')\n", (5435, 5450), True, 'import matplotlib.pyplot as plt\n'), ((5454, 5479), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y ($\\\\mu$m)"""'], {}), "('y ($\\\\mu$m)')\n", (5464, 5479), True, 'import matplotlib.pyplot as plt\n'), ((5483, 5493), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5491, 5493), True, 'import matplotlib.pyplot as plt\n'), ((5529, 5548), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (5535, 5548), True, 'import numpy as np\n'), ((5783, 5809), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[8, 5]'}), '(figsize=[8, 5])\n', (5793, 5809), True, 'import matplotlib.pyplot as plt\n'), ((5813, 5870), 'matplotlib.pyplot.plot', 'plt.plot', (['(self.x[0:-1] - self.dx)', 'r_top', '"""r"""'], {'label': '"""top"""'}), "(self.x[0:-1] - self.dx, r_top, 'r', label='top')\n", (5821, 5870), True, 'import matplotlib.pyplot as plt\n'), ((5873, 5936), 'matplotlib.pyplot.plot', 'plt.plot', (['(self.x[0:-1] - self.dx)', 'r_bottom', '"""b"""'], {'label': '"""bottom"""'}), "(self.x[0:-1] - self.dx, r_bottom, 'b', label='bottom')\n", (5881, 5936), True, 'import matplotlib.pyplot as plt\n'), ((5939, 6037), 'matplotlib.pyplot.hlines', 'plt.hlines', (['rxx', 'self.x[0]', 'self.x[-1]'], {'linestyle': '"""dashed"""', 'color': '"""grey"""', 'label': '"""$\\\\rho_{xx}$"""'}), "(rxx, self.x[0], self.x[-1], linestyle='dashed', color='grey',\n label='$\\\\rho_{xx}$')\n", (5949, 
6037), True, 'import matplotlib.pyplot as plt\n'), ((6036, 6061), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x ($\\\\mu$m)"""'], {}), "('x ($\\\\mu$m)')\n", (6046, 6061), True, 'import matplotlib.pyplot as plt\n'), ((6065, 6103), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\rho_{xx}$ $(\\\\Omega)$"""'], {}), "('$\\\\rho_{xx}$ $(\\\\Omega)$')\n", (6075, 6103), True, 'import matplotlib.pyplot as plt\n'), ((6106, 6118), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6116, 6118), True, 'import matplotlib.pyplot as plt\n'), ((6121, 6141), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 12000]'], {}), '([0, 12000])\n', (6129, 6141), True, 'import matplotlib.pyplot as plt\n'), ((6145, 6155), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6153, 6155), True, 'import matplotlib.pyplot as plt\n'), ((6606, 6625), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (6612, 6625), True, 'import numpy as np\n'), ((7174, 7205), 'numpy.abs', 'np.abs', (['(contact.x1 - contact.x2)'], {}), '(contact.x1 - contact.x2)\n', (7180, 7205), True, 'import numpy as np\n'), ((7400, 7419), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (7406, 7419), True, 'import numpy as np\n'), ((7898, 7917), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (7904, 7917), True, 'import numpy as np\n'), ((8555, 8586), 'numpy.abs', 'np.abs', (['(contact.x1 - contact.x2)'], {}), '(contact.x1 - contact.x2)\n', (8561, 8586), True, 'import numpy as np\n'), ((8865, 8884), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (8871, 8884), True, 'import numpy as np\n'), ((9571, 9591), 'diff_matrices.Diff_mat_1D', 'Diff_mat_1D', (['self.Ny'], {}), '(self.Ny)\n', (9582, 9591), False, 'from diff_matrices import Diff_mat_1D, Diff_mat_2D\n'), ((10693, 10725), 'numpy.linspace', 'np.linspace', (['(0)', 'self.Lx', 'self.Nx'], {}), '(0, self.Lx, self.Nx)\n', (10704, 10725), True, 'import numpy as np\n'), ((10735, 10767), 
'numpy.linspace', 'np.linspace', (['(0)', 'self.Ly', 'self.Ny'], {}), '(0, self.Ly, self.Ny)\n', (10746, 10767), True, 'import numpy as np\n'), ((10950, 10977), 'numpy.meshgrid', 'np.meshgrid', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (10961, 10977), True, 'import numpy as np\n'), ((11397, 11408), 'time.time', 'time.time', ([], {}), '()\n', (11406, 11408), False, 'import time\n'), ((11997, 12124), 'numpy.where', 'np.where', (['((self.X == self.x[0]) | (self.X == self.x[self.Nx - 1]) | (self.Y == self.\n y[0]) | (self.Y == self.y[self.Ny - 1]))'], {}), '((self.X == self.x[0]) | (self.X == self.x[self.Nx - 1]) | (self.Y ==\n self.y[0]) | (self.Y == self.y[self.Ny - 1]))\n', (12005, 12124), True, 'import numpy as np\n'), ((12576, 12605), 'diff_matrices.Diff_mat_2D', 'Diff_mat_2D', (['self.Nx', 'self.Ny'], {}), '(self.Nx, self.Ny)\n', (12587, 12605), False, 'from diff_matrices import Diff_mat_1D, Diff_mat_2D\n'), ((12941, 12952), 'time.time', 'time.time', ([], {}), '()\n', (12950, 12952), False, 'import time\n'), ((14059, 14086), 'numpy.zeros', 'np.zeros', (['(self.Nx * self.Ny)'], {}), '(self.Nx * self.Ny)\n', (14067, 14086), True, 'import numpy as np\n'), ((14418, 14429), 'time.time', 'time.time', ([], {}), '()\n', (14427, 14429), False, 'import time\n'), ((14748, 14767), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (14754, 14767), True, 'import numpy as np\n'), ((15097, 15137), 'numpy.searchsorted', 'np.searchsorted', (['self.x', 'x1'], {'side': '"""left"""'}), "(self.x, x1, side='left')\n", (15112, 15137), True, 'import numpy as np\n'), ((15145, 15185), 'numpy.searchsorted', 'np.searchsorted', (['self.x', 'x2'], {'side': '"""left"""'}), "(self.x, x2, side='left')\n", (15160, 15185), True, 'import numpy as np\n'), ((15260, 15279), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (15266, 15279), True, 'import numpy as np\n'), ((15337, 15363), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[8, 5]'}), 
'(figsize=[8, 5])\n', (15347, 15363), True, 'import matplotlib.pyplot as plt\n'), ((15443, 15515), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['self.X', 'self.Y', 'self.u.T'], {'cmap': '"""inferno"""', 'vmin': '(0)', 'vmax': '(1)'}), "(self.X, self.Y, self.u.T, cmap='inferno', vmin=0, vmax=1)\n", (15457, 15515), True, 'import matplotlib.pyplot as plt\n'), ((15601, 15626), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x ($\\\\mu$m)"""'], {}), "('x ($\\\\mu$m)')\n", (15611, 15626), True, 'import matplotlib.pyplot as plt\n'), ((15630, 15655), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y ($\\\\mu$m)"""'], {}), "('y ($\\\\mu$m)')\n", (15640, 15655), True, 'import matplotlib.pyplot as plt\n'), ((15659, 15669), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15667, 15669), True, 'import matplotlib.pyplot as plt\n'), ((16059, 16098), 'numpy.linspace', 'np.linspace', (['(-self.ro)', 'self.ro', 'self.Nx'], {}), '(-self.ro, self.ro, self.Nx)\n', (16070, 16098), True, 'import numpy as np\n'), ((16110, 16149), 'numpy.linspace', 'np.linspace', (['(-self.ro)', 'self.ro', 'self.Ny'], {}), '(-self.ro, self.ro, self.Ny)\n', (16121, 16149), True, 'import numpy as np\n'), ((16335, 16362), 'numpy.meshgrid', 'np.meshgrid', (['self.x', 'self.y'], {}), '(self.x, self.y)\n', (16346, 16362), True, 'import numpy as np\n'), ((16555, 16566), 'time.time', 'time.time', ([], {}), '()\n', (16564, 16566), False, 'import time\n'), ((16963, 17071), 'numpy.where', 'np.where', (['((self.Xu ** 2 + self.Yu ** 2 >= self.ro ** 2) | (self.Xu ** 2 + self.Yu **\n 2 <= self.ri ** 2))'], {}), '((self.Xu ** 2 + self.Yu ** 2 >= self.ro ** 2) | (self.Xu ** 2 + \n self.Yu ** 2 <= self.ri ** 2))\n', (16971, 17071), True, 'import numpy as np\n'), ((17235, 17264), 'diff_matrices.Diff_mat_2D', 'Diff_mat_2D', (['self.Nx', 'self.Ny'], {}), '(self.Nx, self.Ny)\n', (17246, 17264), False, 'from diff_matrices import Diff_mat_1D, Diff_mat_2D\n'), ((17586, 17597), 'time.time', 'time.time', ([], {}), 
'()\n', (17595, 17597), False, 'import time\n'), ((17940, 17967), 'numpy.zeros', 'np.zeros', (['(self.Nx * self.Ny)'], {}), '(self.Nx * self.Ny)\n', (17948, 17967), True, 'import numpy as np\n'), ((18280, 18291), 'time.time', 'time.time', ([], {}), '()\n', (18289, 18291), False, 'import time\n'), ((18442, 18461), 'numpy.all', 'np.all', (['(self.u == 0)'], {}), '(self.u == 0)\n', (18448, 18461), True, 'import numpy as np\n'), ((18519, 18545), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[8, 5]'}), '(figsize=[8, 5])\n', (18529, 18545), True, 'import matplotlib.pyplot as plt\n'), ((18549, 18607), 'matplotlib.pyplot.contourf', 'plt.contourf', (['self.x', 'self.y', 'self.u.T', '(41)'], {'cmap': '"""inferno"""'}), "(self.x, self.y, self.u.T, 41, cmap='inferno')\n", (18561, 18607), True, 'import matplotlib.pyplot as plt\n'), ((18691, 18716), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x ($\\\\mu$m)"""'], {}), "('x ($\\\\mu$m)')\n", (18701, 18716), True, 'import matplotlib.pyplot as plt\n'), ((18720, 18745), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y ($\\\\mu$m)"""'], {}), "('y ($\\\\mu$m)')\n", (18730, 18745), True, 'import matplotlib.pyplot as plt\n'), ((18749, 18759), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18757, 18759), True, 'import matplotlib.pyplot as plt\n'), ((1685, 1715), 'numpy.where', 'np.where', (['(self.Xu == self.x[0])'], {}), '(self.Xu == self.x[0])\n', (1693, 1715), True, 'import numpy as np\n'), ((1774, 1814), 'numpy.where', 'np.where', (['(self.Xu == self.x[self.Nx - 1])'], {}), '(self.Xu == self.x[self.Nx - 1])\n', (1782, 1814), True, 'import numpy as np\n'), ((1869, 1899), 'numpy.where', 'np.where', (['(self.Yu == self.y[0])'], {}), '(self.Yu == self.y[0])\n', (1877, 1899), True, 'import numpy as np\n'), ((1960, 2000), 'numpy.where', 'np.where', (['(self.Yu == self.y[self.Ny - 1])'], {}), '(self.Yu == self.y[self.Ny - 1])\n', (1968, 2000), True, 'import numpy as np\n'), ((2061, 2193), 'numpy.where', 
'np.where', (['((self.Xu == self.x[0]) | (self.Xu == self.x[self.Nx - 1]) | (self.Yu ==\n self.y[0]) | (self.Yu == self.y[self.Ny - 1]))'], {}), '((self.Xu == self.x[0]) | (self.Xu == self.x[self.Nx - 1]) | (self.\n Yu == self.y[0]) | (self.Yu == self.y[self.Ny - 1]))\n', (2069, 2193), True, 'import numpy as np\n'), ((11443, 11473), 'numpy.where', 'np.where', (['(self.Xu == self.x[0])'], {}), '(self.Xu == self.x[0])\n', (11451, 11473), True, 'import numpy as np\n'), ((11532, 11572), 'numpy.where', 'np.where', (['(self.Xu == self.x[self.Nx - 1])'], {}), '(self.Xu == self.x[self.Nx - 1])\n', (11540, 11572), True, 'import numpy as np\n'), ((11627, 11657), 'numpy.where', 'np.where', (['(self.Yu == self.y[0])'], {}), '(self.Yu == self.y[0])\n', (11635, 11657), True, 'import numpy as np\n'), ((11718, 11758), 'numpy.where', 'np.where', (['(self.Yu == self.y[self.Ny - 1])'], {}), '(self.Yu == self.y[self.Ny - 1])\n', (11726, 11758), True, 'import numpy as np\n'), ((11819, 11951), 'numpy.where', 'np.where', (['((self.Xu == self.x[0]) | (self.Xu == self.x[self.Nx - 1]) | (self.Yu ==\n self.y[0]) | (self.Yu == self.y[self.Ny - 1]))'], {}), '((self.Xu == self.x[0]) | (self.Xu == self.x[self.Nx - 1]) | (self.\n Yu == self.y[0]) | (self.Yu == self.y[self.Ny - 1]))\n', (11827, 11951), True, 'import numpy as np\n'), ((12169, 12265), 'numpy.where', 'np.where', (['((self.Yu == self.y[0]) & (self.Xu >= self.source_x1) & (self.Xu <= self.\n source_x2))'], {}), '((self.Yu == self.y[0]) & (self.Xu >= self.source_x1) & (self.Xu <=\n self.source_x2))\n', (12177, 12265), True, 'import numpy as np\n'), ((12310, 12404), 'numpy.where', 'np.where', (['((self.Yu == self.y[0]) & (self.Xu >= self.drain_x1) & (self.Xu <= self.\n drain_x2))'], {}), '((self.Yu == self.y[0]) & (self.Xu >= self.drain_x1) & (self.Xu <=\n self.drain_x2))\n', (12318, 12404), True, 'import numpy as np\n'), ((16605, 16658), 'numpy.where', 'np.where', (['(self.Xu ** 2 + self.Yu ** 2 >= self.ro ** 2)'], {}), '(self.Xu ** 2 + 
self.Yu ** 2 >= self.ro ** 2)\n', (16613, 16658), True, 'import numpy as np\n'), ((16709, 16762), 'numpy.where', 'np.where', (['(self.Xu ** 2 + self.Yu ** 2 <= self.ri ** 2)'], {}), '(self.Xu ** 2 + self.Yu ** 2 <= self.ri ** 2)\n', (16717, 16762), True, 'import numpy as np\n'), ((16817, 16925), 'numpy.where', 'np.where', (['((self.Xu ** 2 + self.Yu ** 2 >= self.ro ** 2) | (self.Xu ** 2 + self.Yu **\n 2 <= self.ri ** 2))'], {}), '((self.Xu ** 2 + self.Yu ** 2 >= self.ro ** 2) | (self.Xu ** 2 + \n self.Yu ** 2 <= self.ri ** 2))\n', (16825, 16925), True, 'import numpy as np\n'), ((2909, 2934), 'scipy.sparse.eye', 'sp.eye', (['(self.Nx * self.Ny)'], {}), '(self.Nx * self.Ny)\n', (2915, 2934), True, 'import scipy.sparse as sp\n'), ((5370, 5393), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.2)'], {}), '(0, 1.01, 0.2)\n', (5379, 5393), True, 'import numpy as np\n'), ((9736, 9765), 'numpy.sum', 'np.sum', (['(sxx * E_x + sxy * E_y)'], {}), '(sxx * E_x + sxy * E_y)\n', (9742, 9765), True, 'import numpy as np\n'), ((12962, 12987), 'scipy.sparse.eye', 'sp.eye', (['(self.Nx * self.Ny)'], {}), '(self.Nx * self.Ny)\n', (12968, 12987), True, 'import scipy.sparse as sp\n'), ((15546, 15569), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.2)'], {}), '(0, 1.01, 0.2)\n', (15555, 15569), True, 'import numpy as np\n'), ((17607, 17632), 'scipy.sparse.eye', 'sp.eye', (['(self.Nx * self.Ny)'], {}), '(self.Nx * self.Ny)\n', (17613, 17632), True, 'import scipy.sparse as sp\n'), ((18636, 18659), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.2)'], {}), '(0, 1.01, 0.2)\n', (18645, 18659), True, 'import numpy as np\n'), ((2414, 2425), 'time.time', 'time.time', ([], {}), '()\n', (2423, 2425), False, 'import time\n'), ((4200, 4211), 'time.time', 'time.time', ([], {}), '()\n', (4209, 4211), False, 'import time\n'), ((4267, 4284), 'scipy.sparse.linalg.spsolve', 'spsolve', (['L_sys', 'g'], {}), '(L_sys, g)\n', (4274, 4284), False, 'from scipy.sparse.linalg import spsolve\n'), 
((4347, 4358), 'time.time', 'time.time', ([], {}), '()\n', (4356, 4358), False, 'import time\n'), ((6861, 6888), 'numpy.abs', 'np.abs', (['(self.x - contact.x1)'], {}), '(self.x - contact.x1)\n', (6867, 6888), True, 'import numpy as np\n'), ((6997, 7024), 'numpy.abs', 'np.abs', (['(self.x - contact.x2)'], {}), '(self.x - contact.x2)\n', (7003, 7024), True, 'import numpy as np\n'), ((8240, 8267), 'numpy.abs', 'np.abs', (['(self.x - contact.x1)'], {}), '(self.x - contact.x1)\n', (8246, 8267), True, 'import numpy as np\n'), ((8377, 8404), 'numpy.abs', 'np.abs', (['(self.x - contact.x2)'], {}), '(self.x - contact.x2)\n', (8383, 8404), True, 'import numpy as np\n'), ((12453, 12464), 'time.time', 'time.time', ([], {}), '()\n', (12462, 12464), False, 'import time\n'), ((14374, 14385), 'time.time', 'time.time', ([], {}), '()\n', (14383, 14385), False, 'import time\n'), ((14441, 14458), 'scipy.sparse.linalg.spsolve', 'spsolve', (['L_sys', 'g'], {}), '(L_sys, g)\n', (14448, 14458), False, 'from scipy.sparse.linalg import spsolve\n'), ((14521, 14532), 'time.time', 'time.time', ([], {}), '()\n', (14530, 14532), False, 'import time\n'), ((17112, 17123), 'time.time', 'time.time', ([], {}), '()\n', (17121, 17123), False, 'import time\n'), ((18236, 18247), 'time.time', 'time.time', ([], {}), '()\n', (18245, 18247), False, 'import time\n'), ((18303, 18320), 'scipy.sparse.linalg.spsolve', 'spsolve', (['L_sys', 'g'], {}), '(L_sys, g)\n', (18310, 18320), False, 'from scipy.sparse.linalg import spsolve\n'), ((18383, 18394), 'time.time', 'time.time', ([], {}), '()\n', (18392, 18394), False, 'import time\n')] |
from django import forms
from core.models import Profile
def get_sender_choices():
    """Build (pk, inn) choice pairs for every Profile.

    Passed as a callable to ChoiceField so the queryset is evaluated at
    form-render time rather than at import time.
    """
    queryset = Profile.objects.all().values_list('pk', 'inn')
    return list(queryset)
class TransactionForm(forms.Form):
    """Money-transfer form: one sender, many receivers identified by INN.

    Validation guarantees that every receiver INN exists and that the
    sender's balance covers the requested amount.
    """

    sender = forms.ChoiceField(
        label='Отправитель',
        help_text='Выберите ИНН отправителя',
        choices=get_sender_choices,
    )
    receiver_list = forms.CharField(
        label='Список получателей',
        help_text='Укажите ИНН получателей через запятую',
    )
    amount = forms.DecimalField(
        max_digits=12,
        decimal_places=2,
        label='Сумма перевода',
        help_text='С точностью до 2 знаков',
    )

    def clean_receiver_list(self):
        """Return the set of receiver INNs, or raise if any are unknown.

        Returns None (Django convention) when the field is absent from
        cleaned_data because an earlier validation step already failed.
        """
        try:
            raw = self.cleaned_data['receiver_list'].split(',')
            # Strip first, then drop empties, so whitespace-only entries
            # (e.g. "a, ,b") no longer produce a bogus '' INN lookup.
            receiver_list = {item.strip() for item in raw if item.strip()}
            rel_receiver_list = set(
                Profile.objects.filter(inn__in=receiver_list).values_list('inn', flat=True)
            )
            subtract = receiver_list - rel_receiver_list
            if not subtract:
                return receiver_list
            raise forms.ValidationError(
                message='Users with this INN {} not found'.format(sorted(subtract)),
                code='some_users_not_found',
            )
        except KeyError:
            pass

    def clean(self):
        """Cross-field check: the sender must exist and afford the amount."""
        try:
            profile = Profile.objects.get(pk=self.cleaned_data['sender'])
            if profile.balance < self.cleaned_data['amount']:
                raise forms.ValidationError(
                    message='Not enough funds',
                    code='not_enough_funds',
                )
            return self.cleaned_data
        except Profile.DoesNotExist:
            raise forms.ValidationError(
                message='Profile not found',
                code='profile_not_found',
            )
        except KeyError:
            pass
| [
"core.models.Profile.objects.filter",
"django.forms.CharField",
"django.forms.ValidationError",
"django.forms.ChoiceField",
"core.models.Profile.objects.get",
"core.models.Profile.objects.all",
"django.forms.DecimalField"
] | [((200, 308), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'label': '"""Отправитель"""', 'help_text': '"""Выберите ИНН отправителя"""', 'choices': 'get_sender_choices'}), "(label='Отправитель', help_text='Выберите ИНН отправителя',\n choices=get_sender_choices)\n", (217, 308), False, 'from django import forms\n'), ((356, 455), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Список получателей"""', 'help_text': '"""Укажите ИНН получателей через запятую"""'}), "(label='Список получателей', help_text=\n 'Укажите ИНН получателей через запятую')\n", (371, 455), False, 'from django import forms\n'), ((487, 603), 'django.forms.DecimalField', 'forms.DecimalField', ([], {'max_digits': '(12)', 'decimal_places': '(2)', 'label': '"""Сумма перевода"""', 'help_text': '"""С точностью до 2 знаков"""'}), "(max_digits=12, decimal_places=2, label='Сумма перевода',\n help_text='С точностью до 2 знаков')\n", (505, 603), False, 'from django import forms\n'), ((1381, 1432), 'core.models.Profile.objects.get', 'Profile.objects.get', ([], {'pk': "self.cleaned_data['sender']"}), "(pk=self.cleaned_data['sender'])\n", (1400, 1432), False, 'from core.models import Profile\n'), ((102, 123), 'core.models.Profile.objects.all', 'Profile.objects.all', ([], {}), '()\n', (121, 123), False, 'from core.models import Profile\n'), ((1518, 1592), 'django.forms.ValidationError', 'forms.ValidationError', ([], {'message': '"""Not enough funds"""', 'code': '"""not_enough_funds"""'}), "(message='Not enough funds', code='not_enough_funds')\n", (1539, 1592), False, 'from django import forms\n'), ((1746, 1822), 'django.forms.ValidationError', 'forms.ValidationError', ([], {'message': '"""Profile not found"""', 'code': '"""profile_not_found"""'}), "(message='Profile not found', code='profile_not_found')\n", (1767, 1822), False, 'from django import forms\n'), ((882, 927), 'core.models.Profile.objects.filter', 'Profile.objects.filter', ([], {'inn__in': 'receiver_list'}), 
'(inn__in=receiver_list)\n', (904, 927), False, 'from core.models import Profile\n')] |
from fabric2 import Connection
from fabric2 import task
from fabric2 import config
import os
import time
from xml.etree import ElementTree as ET
import uuid
import glob
import json
import urllib.parse
import io
# The five XML files that together define a ProteoSAFe workflow; used as the
# default component set when a workflow entry does not list its own.
workflow_components = ['input.xml', 'binding.xml', 'flow.xml', 'result.xml', 'tool.xml']
@task
def release_text(c, workflow_name):
    """Regenerate the autogenerated header of README.md for a workflow.

    Writes name, version, description, dependency status (compared against
    sibling tool checkouts in '..') and a last-updated stamp above a break
    marker; any hand-written content below the marker is preserved.
    """
    base_dir = '.'
    tools = read_all_tools('..')
    dependencies = output_tool_dependencies(workflow_name, base_dir)
    makefile = read_makefile(base_dir)
    readme = 'README.md'
    previous_readme_lines = []
    if os.path.isfile(readme):
        with open(readme) as f:
            for previous_readme_line in f:
                previous_readme_lines.append(previous_readme_line)
                # Everything above the break marker is autogenerated; drop it
                # so only the hand-written tail is kept for re-emission.
                if "CCMS_DEPLOYMENTS_HEADER_BREAK_ELEMENT_CAUTION_ANYTHING_ABOVE_WILL_BE_AUTOGENERATED" in previous_readme_line:
                    previous_readme_lines = []
    version = makefile["WORKFLOW_VERSION"]
    name = makefile.get("WORKFLOW_LABEL")
    if name:
        # Makefile labels carry surrounding quotes; strip them.
        name = name[1:-1]
    else:
        name = workflow_name
    description = makefile.get("WORKFLOW_DESCRIPTION")
    update_text = "Last updated: {}.".format(makefile['LAST_UPDATED'])
    dependency_text = []
    seen = {}
    for (dependency, dependency_version) in dependencies:
        status = "N/V"
        # Only report each (dependency, version) pair once.
        if dependency not in seen or (dependency in seen and seen[dependency] != dependency_version):
            if dependency in tools:
                local_version, workflow = tools[dependency]
                if dependency_version == local_version:
                    status = "({})".format(dependency_version)
                else:
                    status = "({}, latest is {})".format(dependency_version, local_version)
                dependency_text.append("* {} {}".format(dependency, status))
            else:
                dependency_text.append("* {} (untracked)".format(dependency))
        seen[dependency] = dependency_version
    with open(readme, 'w') as w:
        w.write('## {}\n\n'.format(name))
        w.write('#### Version: {}\n\n'.format(version))
        if description:
            w.write('#### Description: \n{}\n\n'.format(description[1:-1]))
        if len(dependency_text) > 0:
            w.write('#### Dependencies: \n{}\n\n'.format("\n".join(dependency_text)))
        w.write('_{}_\n\n'.format(update_text))
        w.write('<data id=CCMS_DEPLOYMENTS_HEADER_BREAK_ELEMENT_CAUTION_ANYTHING_ABOVE_WILL_BE_AUTOGENERATED />\n\n')
        # Re-append the preserved hand-written tail.
        for previous_readme_line in previous_readme_lines:
            w.write(previous_readme_line)
@task
def read_branch(c, workflow_name):
    """Return the checked-out git branch of *workflow_name*'s directory.

    Returns None for master/main checkouts or a detached HEAD, so callers
    can treat those as "no feature-branch suffix needed".
    """
    with io.StringIO() as captured:
        c.local('cd {} && git branch | grep \*'.format(workflow_name), out_stream = captured)
        current = captured.getvalue().replace('\n', '').replace('* ', '')
    if 'HEAD detached' in current or 'master' in current or 'main' in current:
        return None
    return current
def read_makefile(workflow_name):
    """Parse KEY=VALUE pairs from <workflow_name>/Makefile.

    Lines without '=' are skipped; a value may itself contain '=' signs.
    The special key 'LAST_UPDATED' is added with the Makefile's mtime as a
    ctime string.
    """
    makefile_path = os.path.join(workflow_name, 'Makefile')
    params = {}
    with open(makefile_path) as handle:
        for raw_line in handle:
            key, separator, value = raw_line.rstrip().partition('=')
            if separator:
                params[key] = value
    params['LAST_UPDATED'] = time.ctime(os.path.getmtime(makefile_path))
    return params
@task
def update_workflow_from_makefile(c, workflow_name, subcomponents):
    """Deploy one workflow using the metadata in its Makefile.

    Reads WORKFLOW_VERSION / WORKFLOW_NAME / TOOL_FOLDER_NAME /
    WORKFLOW_LABEL / WORKFLOW_DESCRIPTION and forwards them to update_all.
    """
    params = read_makefile(workflow_name)
    # Bug fix: the label/description keys were previously misspelled as
    # "WORKLFLOW_LABEL" / "WORKLFLOW_DESCRIPTION", so both were always None
    # and never reached the deployed XML. NOTE(review): Makefile label values
    # may carry surrounding quotes (release_text strips them) — confirm
    # whether downstream XML should receive them quoted.
    update_all(c, params["WORKFLOW_VERSION"], params.get("WORKFLOW_NAME"),
               params.get("TOOL_FOLDER_NAME"), params.get("WORKFLOW_LABEL"),
               params.get("WORKFLOW_DESCRIPTION"), workflow_name,
               subcomponents=subcomponents)
@task
def update_all(c, workflow_version, workflow_name=None, tool_name=None, workflow_label=None, workflow_description=None, base_dir=".", subcomponents=None, force_update_string='yes'):
    """Deploy workflow XML and/or tool binaries to the configured server.

    On non-production hosts the version gains a "+<branch>" suffix when the
    checkout is on a feature branch. Prints ProteoSAFe launch URLs on success.
    """
    production = "production" in c
    # Idiom fix: was `workflow_version == None`; identity comparison is the
    # correct test for None.
    if workflow_version is None:
        exit("A workflow cannot be deployed without a version.")
    branch_name = read_branch(c, base_dir)
    if branch_name and not production:
        workflow_version = '{}+{}'.format(workflow_version, branch_name.replace(' ', '_'))
    if workflow_name:
        update_workflow_xml(c, workflow_name, tool_name, workflow_version, workflow_label, workflow_description, base_dir=base_dir, subcomponents=subcomponents, force_update_string=force_update_string)
    if tool_name:
        update_tools(c, tool_name, workflow_version, base_dir)
    if workflow_name:
        server_url_base = "https://{}/ProteoSAFe/index.jsp?params=".format(c.host)
        workflow_url = server_url_base + urllib.parse.quote(json.dumps({"workflow": workflow_name.upper(), "workflow_version": workflow_version}))
        print("SUCCESS:\n\n{} updated at with version:\n\n{}\n\n".format(workflow_name, workflow_url))
        if force_update_string == 'yes':
            # Also link the default (unversioned) entry point, which was
            # force-updated above.
            server_url_base = "https://{}/ProteoSAFe/index.jsp?params=".format(c.host)
            workflow_url = server_url_base + urllib.parse.quote(json.dumps({"workflow": workflow_name.upper()}))
            print("And default version :\n\n{}\n\n".format(workflow_url))
@task
def read_workflows_from_yml(c):
    """Return (workflow_name, subcomponents) pairs from the fabric config.

    Each "workflows" entry is either a bare name (deploys the default
    workflow_components) or a {name: [component, ...]} mapping.
    """
    if "workflows" not in c:
        exit("Deploy all only works if a list of workflows to deploy is specified.")
    deploy_list = []
    for entry in c["workflows"]:
        name = None
        components = workflow_components
        if isinstance(entry, dict):
            for entry_name, entry_components in entry.items():
                name = entry_name
                components = entry_components
        else:
            name = entry
        deploy_list.append((name, components))
    return deploy_list
def read_all_tools(base_dir = '.'):
    """Scan sibling checkouts for deployable tools.

    Returns {tool_folder_name: (version, submodule_path)} for every
    directory under *base_dir* (excluding CCMSDeployments) whose Makefile
    declares both TOOL_FOLDER_NAME and WORKFLOW_VERSION. Directories
    without a usable Makefile are silently skipped (best-effort scan).
    """
    all_tools = {}
    all_submodules = glob.glob(os.path.join(base_dir, '*'))
    for submodule in all_submodules:
        if 'CCMSDeployments' not in submodule and os.path.isdir(submodule):
            # Narrowed from a bare `except: pass`: a missing/unreadable
            # Makefile raises OSError and a missing WORKFLOW_VERSION raises
            # KeyError; any other exception should surface as a real bug.
            try:
                submodule_params = read_makefile(submodule)
                tool_name = submodule_params.get("TOOL_FOLDER_NAME")
                version = submodule_params["WORKFLOW_VERSION"]
            except (OSError, KeyError):
                continue
            if tool_name:
                all_tools[tool_name] = (version, submodule)
    return all_tools
@task
def deploy_all(c):
    """Deploy every workflow listed under "workflows" in the fabric config."""
    for name, components in read_workflows_from_yml(c):
        update_workflow_from_makefile(c, name, components)
@task
def read_dependencies(c, workflow_name, rewrite_string = 'no', base_dir = '.'):
    """Report dependency status for one workflow; optionally rewrite tool.xml.

    Pass rewrite_string='yes' to update out-of-date dependency versions.
    """
    local_tools = read_all_tools('..')
    should_rewrite = (rewrite_string == 'yes')
    output_updates(c, workflow_name, tool_name=None, base_dir=base_dir,
                   tools=local_tools, seen={}, rewrite=should_rewrite)
    print('')
@task
def is_on_server(c, tool_name, tool_version):
    """Check whether <tools>/<tool_name>/<tool_version> exists on the server.

    Uses `test -e` remotely (via sudo as the production workflow user when
    configured) and returns True when the path exists.
    """
    remote_path = os.path.join(c["paths"]["tools"], tool_name, tool_version)
    deploy_user = c["production"]["workflow_user"] if "production" in c else None
    if deploy_user:
        result = c.sudo("test -e {}".format(remote_path), user=deploy_user, pty=True)
    else:
        result = c.run("test -e {}".format(remote_path))
    # `test -e` exits 0 when the path exists.
    return not result.return_code
def output_updates(c, workflow_name = None, tool_name = None, base_dir = '.', tools = None, seen = None, rewrite = False):
    """Print dependency status for *workflow_name* and optionally rewrite.

    Compares each tool.xml dependency against the local checkouts in *tools*
    and against what is deployed on the server. With rewrite=True, only
    changed/deployable entries are printed and tool.xml is rewritten with
    the updated versions.

    *seen* tracks (dependency -> version) pairs already reported, so callers
    iterating several workflows can share one dict to suppress duplicates.
    """
    # Bug fix: `seen` was a mutable default argument ({}) that this function
    # also writes into, so state leaked between calls that omitted it.
    if seen is None:
        seen = {}
    updates = {}
    if workflow_name:
        dependencies = output_tool_dependencies(workflow_name, base_dir)
        outputs = []
        for (dependency, version) in dependencies:
            status = "N/V"
            if dependency not in seen or (dependency in seen and seen[dependency] != version):
                update = False
                deployed = False
                if dependency in tools:
                    local_version, workflow = tools[dependency]
                    if version == local_version:
                        status = "{}".format(version)
                    else:
                        update = True
                        updates[dependency] = local_version
                        status = "{}->{}".format(version, local_version)
                    if version and is_on_server(c, dependency, local_version):
                        deployed = True
                    deployed_str = " (deployed)" if deployed else " (needs deployment)"
                    status += deployed_str
                    # First tuple element marks "interesting" rows shown in
                    # rewrite mode.
                    outputs.append((update or deployed, "\t{} {}".format(dependency, status)))
                else:
                    outputs.append((update or deployed, "\t{} untracked".format(dependency)))
            seen[dependency] = version
        if not rewrite:
            print('\nDependencies for {}:'.format(workflow_name))
            for output in outputs:
                print(output[1])
        else:
            print('\nUpdated dependencies for {}:'.format(workflow_name))
            for output in outputs:
                if output[0]:
                    print(output[1])
            rewrite_tool_w_new_dependencies(workflow_name, updates, base_dir = base_dir)
def output_tool_dependencies(workflow_name, base_dir = '.'):
    """Parse a workflow's tool.xml and list its external tool dependencies.

    Every pathSet whose base does not contain '$base' is split on '/': the
    last segment is taken as the version (or "NV" when there is only one
    segment) and everything before it as the tool name.
    Returns a list of (tool_name, tool_version) tuples.
    """
    tool_xml = os.path.join(base_dir, workflow_name, 'tool.xml')
    root = ET.parse(tool_xml).getroot()
    found = []
    for path_set in root.findall('pathSet'):
        base = path_set.attrib['base']
        if '$base' in base:
            continue  # workflow-local path, not an external dependency
        pieces = base.split('/')
        if len(pieces) >= 2:
            found.append(('/'.join(pieces[:-1]), pieces[-1]))
        else:
            found.append((pieces[0], "NV"))
    return found
def rewrite_tool_w_new_dependencies(workflow_name, updates, rewrite = False, base_dir = '.'):
    """Rewrite dependency versions inside a workflow's tool.xml, in place.

    For every pathSet whose base is "<tool>/<version>[/subpath...]" and whose
    tool appears in *updates* with a truthy new version, the version segment
    is replaced. The file is only written back when something changed.

    NOTE(review): the *rewrite* parameter is unused; kept for interface
    compatibility with existing callers.
    """
    tool_xml = os.path.join(base_dir, workflow_name, 'tool.xml')
    tree = ET.parse(tool_xml)
    changes_made = False
    # Removed an unused `dependencies = []` local from the original.
    for path in tree.getroot().findall('pathSet'):
        base = path.get('base')
        if '$base' in base:
            continue  # workflow-local path, nothing to version-bump
        segments = base.split('/')
        tool_name = segments[0]
        new_version = updates.get(tool_name)
        if not new_version:
            continue
        changes_made = True
        subpath = segments[2:]  # everything after <tool>/<version>
        if subpath:
            path.set('base', os.path.join(tool_name, new_version, '/'.join(subpath)))
        else:
            path.set('base', os.path.join(tool_name, new_version))
    if changes_made:
        tree.write(tool_xml)
@task
def generate_manifest(c):
    """Print a one-line summary (name, kind, version, mtime) per workflow."""
    for workflow, _components in read_workflows_from_yml(c):
        params = read_makefile(workflow)
        if "WORKFLOW_NAME" not in params:
            flag = " (Tool only)"
        elif "TOOL_FOLDER_NAME" not in params:
            flag = " (Workflow only)"
        else:
            flag = ""
        print('{}{}, version: {}, last updated: {}'.format(
            workflow, flag, params['WORKFLOW_VERSION'], params['LAST_UPDATED']))
@task
def update_workflow_xml(c, workflow_name, tool_name, workflow_version, workflow_label, workflow_description, base_dir=".", subcomponents=None, force_update_string='yes'):
    """Stage, validate and upload a workflow's XML components to the server.

    Components are rewritten into a local /tmp staging dir (IDs, versions,
    labels substituted), validated, then uploaded to
    <workflows>/<name>/versions/<version>/ — and, when force_update_string
    is 'yes', also to the active default location <workflows>/<name>/.
    """
    if not subcomponents:
        subcomponents = workflow_components
    force_update = force_update_string == 'yes'
    production = "production" in c
    production_user = c["production"]["workflow_user"] if production else None
    # Unique local staging directory for the rewritten XML files.
    local_temp_path = os.path.join("/tmp/{}_{}_{}".format(workflow_name, workflow_version, str(uuid.uuid4())))
    c.local("mkdir -p {}".format(local_temp_path))
    for component in subcomponents:
        rewrite_workflow_component(component, base_dir, workflow_name, tool_name, workflow_version, workflow_label, workflow_description, local_temp_path)
    #Performing Workflow Files Validation
    # Validation is advisory: failures are reported but do not block deploy.
    try:
        validate_workflow_xml(local_temp_path)
    except:
        print("Validation Failed in Exception")
    base_workflow_path = os.path.join(c["paths"]["workflows"], workflow_name, "versions")
    versioned_workflow_path = os.path.join(c["paths"]["workflows"], workflow_name, "versions", workflow_version)
    if production_user:
        c.sudo("mkdir -p {}".format(base_workflow_path), user=production_user, pty=True)
        c.sudo("mkdir -p {}".format(versioned_workflow_path), user=production_user, pty=True)
    else:
        c.run("mkdir -p {}".format(base_workflow_path))
        c.run("mkdir -p {}".format(versioned_workflow_path))
    for component in subcomponents:
        # print(component)
        if force_update:
            update_workflow_component(c, local_temp_path, workflow_name, component, production_user=production_user) #Adding to active default version
        update_workflow_component(c, local_temp_path, workflow_name, component, workflow_version=workflow_version, production_user=production_user) #Explicitly adding versioned
    # Non-production servers have no privileged user, so open permissions
    # wide instead.
    if not production_user:
        c.run("chmod 777 {}".format(versioned_workflow_path))
        c.run("chmod -R 777 {}".format(versioned_workflow_path))
        for xml_filename in workflow_components:
            c.run("chmod 777 {}".format(os.path.join(c["paths"]["workflows"], workflow_name, xml_filename)))
#Uploading the actual tools to the server
@task
def update_tools(c, workflow_name, workflow_version, base_dir="."):
    """Upload <base_dir>/tools/<workflow_name> to the versioned tool dir.

    On production the copy runs as the configured tool user; otherwise the
    remote directory is opened up with chmod 777 afterwards.
    """
    deploy_user = c["production"]["tool_user"] if "production" in c else None
    remote_dir = os.path.join(c["paths"]["tools"], workflow_name, workflow_version)
    if deploy_user:
        c.sudo("mkdir -p {}".format(remote_dir), user=deploy_user, pty=True)
    else:
        c.run("mkdir -p {}".format(remote_dir))
    source_dir = os.path.join(base_dir, 'tools', workflow_name)
    update_folder(c, source_dir, remote_dir, production_user=deploy_user)
    if not deploy_user:
        c.run("chmod 777 {}".format(remote_dir))
        c.run("chmod -R 777 {}".format(remote_dir))
#Utility Functions
def rewrite_workflow_component(component, base_dir, workflow_name, tool_name, workflow_version, workflow_label, workflow_description, local_temp_path):
    """Rewrite one workflow XML component into the local staging directory.

    Substitutes the workflow id/version/label, injects a description block
    into input.xml, and resolves '$base' tool paths to
    <tool_name>/<workflow_version> in tool.xml. The rewritten file is
    written to <local_temp_path>/<component>; the source is untouched.
    """
    local = os.path.join(base_dir, workflow_name, component)
    temp = os.path.join(local_temp_path,component)
    tree = ET.parse(local)
    root = tree.getroot()
    if component in ['input.xml','result.xml']:
        root.set('id', workflow_name)
        root.set('version', workflow_version)
    if component in ['input.xml']:
        for path in root.findall('workflow-id'):
            # ProteoSAFe workflow ids are upper-case by convention here.
            path.text = workflow_name.upper()
        for path in root.findall('workflow-label'):
            if workflow_label:
                path.text = workflow_label
        if workflow_description is not None:
            # Prepend a rendered description block to the input form.
            description_block = ET.Element("block")
            root.insert(0, description_block)
            description_block.attrib["label"] = "Workflow Description"
            description_row = ET.SubElement(description_block, "row")
            description_cell = ET.SubElement(description_row, "cell")
            description_label = ET.SubElement(description_cell, "label")
            description_label.attrib["prefix"] = "false"
            description_content = ET.SubElement(description_label, "content")
            description_content.text = '<div style="5px;padding:1px; border:2px;margin-left:8%;margin-right:8%;text-align:left">\
<br><strong>{}</strong> \
<hr style="margin-top:5px;margin-bottom:5px"> \
{} \
<hr style="margin-top:5px;margin-bottom:5px"> \
<small>Workflow version {} </small> \
</div>'.format(workflow_label if workflow_label else workflow_name.upper(), workflow_description, workflow_version)
    elif component in ['flow.xml']:
        root.set('name', workflow_name)
    elif component in ['tool.xml']:
        for path in root.findall('pathSet'):
            if '$base' in path.get('base'):
                if tool_name:
                    path.set('base',path.get('base').replace('$base',os.path.join(tool_name,workflow_version)))
                else:
                    exit("Cannot rewrite tool.xml without specifying tool name.")
    tree.write(temp)
def validate_workflow_xml(local_temp_path):
    """Run the CCMS workflow validator over the staged XML files and print
    any errors it reports."""
    import workflow_validator
    staged = [os.path.join(local_temp_path, name)
              for name in ("flow.xml", "binding.xml", "tool.xml")]
    workflow_obj = workflow_validator.Workflow(*staged)
    workflow_obj.validate()
    print(workflow_obj.printerrors())
#TODO: Validate that the xml is also a valid workflow
def update_workflow_component(c, local_temp_path, workflow_filename, component, workflow_version=None, production_user=None):
    """Upload one staged XML component to the server.

    With *workflow_version* set, the file lands under versions/<version>/;
    otherwise it replaces the active default copy.
    """
    staged = os.path.join(local_temp_path, component)
    workflow_root = os.path.join(c["paths"]["workflows"], workflow_filename)
    if workflow_version:
        destination = os.path.join(workflow_root, "versions", workflow_version, component)
    else:
        destination = os.path.join(workflow_root, component)
    update_file(c, staged, destination, production_user=production_user)
#Update File
def update_file(c, local_path, final_path, production_user = None):
    """Upload *local_path* to *final_path* on the host behind connection *c*.

    :param c: Fabric-style connection object providing ``put``/``sudo``/``run``.
    :param local_path: local file to upload.
    :param final_path: destination path on the remote host.
    :param production_user: if set, stage the file in /tmp and ``sudo cp`` it
        into place as this user (the connection user may lack write access).
    """
    if production_user:
        # Stage under a unique /tmp name, then copy into place as the
        # production user.
        remote_temp_path = os.path.join("/tmp/{}_{}".format(local_path.replace("/", "_"), str(uuid.uuid4())))
        c.put(local_path, remote_temp_path, preserve_mode=True)
        c.sudo('cp {} {}'.format(remote_temp_path, final_path), user=production_user, pty=True)
        # Only delete the staging file if it really lives under /tmp.
        if os.path.split(os.path.normpath(remote_temp_path))[0] == '/tmp':
            c.run('rm {}'.format(remote_temp_path))
    else:
        try:
            c.put(local_path, final_path, preserve_mode=True)
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit).  Some targets reject mode
            # preservation; retry the upload without it.
            c.put(local_path, final_path, preserve_mode=False)
#TODO: update this to work with rsync
def update_folder(c, local_path, final_path, production_user = None):
    """Upload the directory *local_path* to *final_path* on the host behind *c*.

    The folder is tarred locally (``-h`` follows symlinks), uploaded to a
    uniquely named temp path, unpacked in a remote temp directory, and then
    rsynced into place -- via sudo when *production_user* is given.

    :param c: Fabric-style connection object providing ``put``/``run``/``sudo``.
    :param local_path: local directory whose contents are uploaded.
    :param final_path: destination directory on the remote host.
    :param production_user: if set, the final rsync runs as this user.
    """
    # Tar up local folder and upload to temporary space on server and untar
    local_temp_path = os.path.join("/tmp/{}_{}.tar".format(local_path.replace("/", "_"), str(uuid.uuid4())))
    cmd = "tar -C {} -chf {} .".format(local_path, local_temp_path)
    # print(cmd)
    os.system(cmd)
    remote_temp_tar_path = os.path.join("/tmp/{}_{}.tar".format(local_path.replace("/", "_"), str(uuid.uuid4())))
    c.put(local_temp_path, remote_temp_tar_path, preserve_mode=True)
    # The local tarball is no longer needed once uploaded; previously it was
    # leaked in /tmp.  Guard the removal by checking it really is under /tmp.
    if os.path.split(os.path.normpath(local_temp_path))[0] == '/tmp':
        os.remove(local_temp_path)
    remote_temp_path = os.path.join("/tmp/{}_{}".format(local_path.replace("/", "_"), str(uuid.uuid4())))
    c.run("mkdir {}".format(remote_temp_path))
    c.run("tar -C {} -xf {}".format(remote_temp_path, remote_temp_tar_path))
    # Trailing slash on the rsync source copies the directory *contents*
    # into final_path rather than nesting a subdirectory.
    if production_user:
        c.sudo('rsync -rlptD {}/ {}'.format(remote_temp_path, final_path), user=production_user, pty=True)
    else:
        c.run('rsync -rlptD {}/ {}'.format(remote_temp_path, final_path))
    # Clean up the remote staging artifacts, again guarding on /tmp.
    if os.path.split(os.path.normpath(remote_temp_path))[0] == '/tmp':
        c.run('rm -rf {}'.format(remote_temp_path))
    if os.path.split(os.path.normpath(remote_temp_tar_path))[0] == '/tmp':
        c.run('rm {}'.format(remote_temp_tar_path))
| [
"xml.etree.ElementTree.parse",
"workflow_validator.Workflow",
"os.path.join",
"os.path.getmtime",
"uuid.uuid4",
"os.path.isfile",
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.SubElement",
"os.path.isdir",
"os.path.normpath",
"os.system",
"io.StringIO"
] | [((567, 589), 'os.path.isfile', 'os.path.isfile', (['readme'], {}), '(readme)\n', (581, 589), False, 'import os\n'), ((3061, 3100), 'os.path.join', 'os.path.join', (['workflow_name', '"""Makefile"""'], {}), "(workflow_name, 'Makefile')\n", (3073, 3100), False, 'import os\n'), ((6911, 6969), 'os.path.join', 'os.path.join', (["c['paths']['tools']", 'tool_name', 'tool_version'], {}), "(c['paths']['tools'], tool_name, tool_version)\n", (6923, 6969), False, 'import os\n'), ((9667, 9716), 'os.path.join', 'os.path.join', (['base_dir', 'workflow_name', '"""tool.xml"""'], {}), "(base_dir, workflow_name, 'tool.xml')\n", (9679, 9716), False, 'import os\n'), ((9728, 9743), 'xml.etree.ElementTree.parse', 'ET.parse', (['local'], {}), '(local)\n', (9736, 9743), True, 'from xml.etree import ElementTree as ET\n'), ((10406, 10455), 'os.path.join', 'os.path.join', (['base_dir', 'workflow_name', '"""tool.xml"""'], {}), "(base_dir, workflow_name, 'tool.xml')\n", (10418, 10455), False, 'import os\n'), ((10467, 10482), 'xml.etree.ElementTree.parse', 'ET.parse', (['local'], {}), '(local)\n', (10475, 10482), True, 'from xml.etree import ElementTree as ET\n'), ((12496, 12560), 'os.path.join', 'os.path.join', (["c['paths']['workflows']", 'workflow_name', '"""versions"""'], {}), "(c['paths']['workflows'], workflow_name, 'versions')\n", (12508, 12560), False, 'import os\n'), ((12591, 12677), 'os.path.join', 'os.path.join', (["c['paths']['workflows']", 'workflow_name', '"""versions"""', 'workflow_version'], {}), "(c['paths']['workflows'], workflow_name, 'versions',\n workflow_version)\n", (12603, 12677), False, 'import os\n'), ((13986, 14052), 'os.path.join', 'os.path.join', (["c['paths']['tools']", 'workflow_name', 'workflow_version'], {}), "(c['paths']['tools'], workflow_name, workflow_version)\n", (13998, 14052), False, 'import os\n'), ((14234, 14280), 'os.path.join', 'os.path.join', (['base_dir', '"""tools"""', 'workflow_name'], {}), "(base_dir, 'tools', workflow_name)\n", (14246, 
14280), False, 'import os\n'), ((14676, 14724), 'os.path.join', 'os.path.join', (['base_dir', 'workflow_name', 'component'], {}), '(base_dir, workflow_name, component)\n', (14688, 14724), False, 'import os\n'), ((14736, 14776), 'os.path.join', 'os.path.join', (['local_temp_path', 'component'], {}), '(local_temp_path, component)\n', (14748, 14776), False, 'import os\n'), ((14787, 14802), 'xml.etree.ElementTree.parse', 'ET.parse', (['local'], {}), '(local)\n', (14795, 14802), True, 'from xml.etree import ElementTree as ET\n'), ((16935, 16976), 'os.path.join', 'os.path.join', (['local_temp_path', '"""flow.xml"""'], {}), "(local_temp_path, 'flow.xml')\n", (16947, 16976), False, 'import os\n'), ((16996, 17040), 'os.path.join', 'os.path.join', (['local_temp_path', '"""binding.xml"""'], {}), "(local_temp_path, 'binding.xml')\n", (17008, 17040), False, 'import os\n'), ((17057, 17098), 'os.path.join', 'os.path.join', (['local_temp_path', '"""tool.xml"""'], {}), "(local_temp_path, 'tool.xml')\n", (17069, 17098), False, 'import os\n'), ((17119, 17182), 'workflow_validator.Workflow', 'workflow_validator.Workflow', (['flow_path', 'binding_path', 'tool_path'], {}), '(flow_path, binding_path, tool_path)\n', (17146, 17182), False, 'import workflow_validator\n'), ((17444, 17484), 'os.path.join', 'os.path.join', (['local_temp_path', 'component'], {}), '(local_temp_path, component)\n', (17456, 17484), False, 'import os\n'), ((18839, 18853), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (18848, 18853), False, 'import os\n'), ((2673, 2686), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2684, 2686), False, 'import io\n'), ((3348, 3383), 'os.path.getmtime', 'os.path.getmtime', (['makefile_location'], {}), '(makefile_location)\n', (3364, 3383), False, 'import os\n'), ((5887, 5914), 'os.path.join', 'os.path.join', (['base_dir', '"""*"""'], {}), "(base_dir, '*')\n", (5899, 5914), False, 'import os\n'), ((17527, 17628), 'os.path.join', 'os.path.join', (["c['paths']['workflows']", 
'workflow_filename', '"""versions"""', 'workflow_version', 'component'], {}), "(c['paths']['workflows'], workflow_filename, 'versions',\n workflow_version, component)\n", (17539, 17628), False, 'import os\n'), ((17652, 17719), 'os.path.join', 'os.path.join', (["c['paths']['workflows']", 'workflow_filename', 'component'], {}), "(c['paths']['workflows'], workflow_filename, component)\n", (17664, 17719), False, 'import os\n'), ((6003, 6027), 'os.path.isdir', 'os.path.isdir', (['submodule'], {}), '(submodule)\n', (6016, 6027), False, 'import os\n'), ((12052, 12064), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12062, 12064), False, 'import uuid\n'), ((15326, 15345), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""block"""'], {}), "('block')\n", (15336, 15345), True, 'from xml.etree import ElementTree as ET\n'), ((15505, 15544), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['description_block', '"""row"""'], {}), "(description_block, 'row')\n", (15518, 15544), True, 'from xml.etree import ElementTree as ET\n'), ((15580, 15618), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['description_row', '"""cell"""'], {}), "(description_row, 'cell')\n", (15593, 15618), True, 'from xml.etree import ElementTree as ET\n'), ((15655, 15695), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['description_cell', '"""label"""'], {}), "(description_cell, 'label')\n", (15668, 15695), True, 'from xml.etree import ElementTree as ET\n'), ((15795, 15838), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['description_label', '"""content"""'], {}), "(description_label, 'content')\n", (15808, 15838), True, 'from xml.etree import ElementTree as ET\n'), ((18734, 18746), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (18744, 18746), False, 'import uuid\n'), ((18953, 18965), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (18963, 18965), False, 'import uuid\n'), ((19129, 19141), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (19139, 19141), False, 'import uuid\n'), 
((19507, 19541), 'os.path.normpath', 'os.path.normpath', (['remote_temp_path'], {}), '(remote_temp_path)\n', (19523, 19541), False, 'import os\n'), ((19630, 19668), 'os.path.normpath', 'os.path.normpath', (['remote_temp_tar_path'], {}), '(remote_temp_tar_path)\n', (19646, 19668), False, 'import os\n'), ((13671, 13737), 'os.path.join', 'os.path.join', (["c['paths']['workflows']", 'workflow_name', 'xml_filename'], {}), "(c['paths']['workflows'], workflow_name, xml_filename)\n", (13683, 13737), False, 'import os\n'), ((17990, 18002), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (18000, 18002), False, 'import uuid\n'), ((18191, 18225), 'os.path.normpath', 'os.path.normpath', (['remote_temp_path'], {}), '(remote_temp_path)\n', (18207, 18225), False, 'import os\n'), ((10877, 10920), 'os.path.join', 'os.path.join', (['tool_name', 'updates[tool_name]'], {}), '(tool_name, updates[tool_name])\n', (10889, 10920), False, 'import os\n'), ((16675, 16716), 'os.path.join', 'os.path.join', (['tool_name', 'workflow_version'], {}), '(tool_name, workflow_version)\n', (16687, 16716), False, 'import os\n')] |
import unittest
from electropy.charge import Charge
import numpy as np
from electropy import volume
class VolumeTest(unittest.TestCase):
    """Unit tests for the ``electropy.volume`` helpers.

    Unless a test overrides it, volumes are sampled over the cube
    [-10, 10]^3 with step h=1, so volume index ``i`` corresponds to
    coordinate ``-10 + i`` along each axis.
    """

    #: default sampling range along every axis
    _RANGE = [-10, 10]

    def setUp(self):
        self.position_1 = [0, 0, 0]
        self.position_2 = [-2, 4, 1]
        self.charge = 7e-9

    def tearDown(self):
        pass

    # --- helpers (factored out of the previously duplicated test bodies) ---

    def _charge_pair(self):
        """Return the standard +q / -q pair used by the two-charge tests."""
        return [Charge(self.position_1, self.charge),
                Charge(self.position_2, -self.charge)]

    def _potential_volume(self, charges):
        """Potential sampled over the default [-10, 10]^3 cube with h=1."""
        return volume.potential(
            charges,
            x_range=self._RANGE,
            y_range=self._RANGE,
            z_range=self._RANGE,
            h=1,
        )

    def _field_volume(self, charges, **kwargs):
        """Field sampled over the default [-10, 10]^3 cube with h=1."""
        return volume.field(
            charges,
            x_range=self._RANGE,
            y_range=self._RANGE,
            z_range=self._RANGE,
            h=1,
            **kwargs
        )

    # Potential function volume tests
    def test_potential_volume_at_point_equal_class_potential(self):
        """Single-charge volume matches Charge.potential at one point."""
        charge = Charge(self.position_1, self.charge)
        potential_volume = self._potential_volume([charge])

        # Index [4][4][4] is the point [-6, -6, -6]
        potential_at_point = potential_volume[4][4][4]
        expected_potential = charge.potential([-6, -6, -6])

        np.testing.assert_equal(potential_at_point, expected_potential)

    def test_two_charge_potential_volume_eq_sum_of_class_potential(self):
        """Two-charge volume equals the superposition of the potentials."""
        charges = self._charge_pair()
        potential_volume = self._potential_volume(charges)

        # Index [4][5][7] is the point [-6, -5, -3]
        potential_at_point = potential_volume[4][5][7]
        expected_potential = np.add(
            charges[0].potential([-6, -5, -3]),
            charges[1].potential([-6, -5, -3]),
        )

        np.testing.assert_equal(potential_at_point, expected_potential)

    # Field function volume tests
    def test_field_volume_at_point_equal_class_field(self):
        """Single-charge volume matches Charge.field at one point."""
        charge = Charge(self.position_1, self.charge)
        field_volume = self._field_volume([charge])

        # Index [0][4][7] is the point [-10, -6, -3]
        field_at_point = field_volume[0][4][7]
        expected_field = charge.field([-10, -6, -3])

        np.testing.assert_equal(field_at_point, expected_field)

    def test_two_charge_field_volume_eq_sum_of_class_field(self):
        """Two-charge volume equals the superposition of the fields."""
        charges = self._charge_pair()
        field_volume = self._field_volume(charges)

        # Index [4][5][7] is the point [-6, -5, -3]
        field_at_point = field_volume[4][5][7]
        expected_field = np.add(
            charges[0].field([-6, -5, -3]), charges[1].field([-6, -5, -3])
        )

        np.testing.assert_equal(field_at_point, expected_field)

    def _check_field_component(self, component):
        """Shared body of the per-component (x/y/z) field tests."""
        charges = self._charge_pair()
        field_volume = self._field_volume(charges, component=component)

        # Index [4][5][7] is the point [-6, -5, -3]
        field_at_point = field_volume[4][5][7]
        expected_field = np.add(
            charges[0].field([-6, -5, -3], component=component),
            charges[1].field([-6, -5, -3], component=component),
        )

        np.testing.assert_equal(field_at_point, expected_field)

    def test_charge_field_volume_x_components_eq_sum_of_class_field_x(self):
        self._check_field_component("x")

    def test_charge_field_volume_y_components_eq_sum_of_class_field_y(self):
        self._check_field_component("y")

    def test_charge_field_volume_z_components_eq_sum_of_class_field_z(self):
        self._check_field_component("z")

    def test_field_returns_singleton_dim_for_single_slice(self):
        """A degenerate y-range keeps a singleton axis instead of dropping it."""
        charge = Charge(self.position_1, self.charge)
        field_volume = volume.field(
            [charge],
            x_range=[-10, 10],
            y_range=[1, 1],
            z_range=[-10, 10],
            h=0.1,
        )

        expected_shape = (201, 1, 201)
        actual_shape = field_volume.shape

        np.testing.assert_equal(actual_shape, expected_shape)

    def test__arange_almost_equals_numpy_arange(self):
        """volume._arange is stop-inclusive, hence numpy's ``10 + 0.1``."""
        actual = volume._arange(-10, 10, 0.1)  # Mine is rounder anyways =)
        expected = np.arange(-10, 10 + 0.1, 0.1)

        np.testing.assert_almost_equal(actual, expected)
| [
"numpy.testing.assert_equal",
"electropy.volume.field",
"electropy.charge.Charge",
"numpy.testing.assert_almost_equal",
"electropy.volume.potential",
"electropy.volume._arange",
"numpy.arange"
] | [((425, 461), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (431, 461), False, 'from electropy.charge import Charge\n'), ((490, 583), 'electropy.volume.potential', 'volume.potential', (['[charge]'], {'x_range': '[-10, 10]', 'y_range': '[-10, 10]', 'z_range': '[-10, 10]', 'h': '(1)'}), '([charge], x_range=[-10, 10], y_range=[-10, 10], z_range=[-\n 10, 10], h=1)\n', (506, 583), False, 'from electropy import volume\n'), ((807, 870), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['potential_at_point', 'expected_potential'], {}), '(potential_at_point, expected_potential)\n', (830, 870), True, 'import numpy as np\n'), ((1094, 1186), 'electropy.volume.potential', 'volume.potential', (['charges'], {'x_range': '[-10, 10]', 'y_range': '[-10, 10]', 'z_range': '[-10, 10]', 'h': '(1)'}), '(charges, x_range=[-10, 10], y_range=[-10, 10], z_range=[-\n 10, 10], h=1)\n', (1110, 1186), False, 'from electropy import volume\n'), ((1493, 1556), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['potential_at_point', 'expected_potential'], {}), '(potential_at_point, expected_potential)\n', (1516, 1556), True, 'import numpy as np\n'), ((1670, 1706), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (1676, 1706), False, 'from electropy.charge import Charge\n'), ((1731, 1820), 'electropy.volume.field', 'volume.field', (['[charge]'], {'x_range': '[-10, 10]', 'y_range': '[-10, 10]', 'z_range': '[-10, 10]', 'h': '(1)'}), '([charge], x_range=[-10, 10], y_range=[-10, 10], z_range=[-10, \n 10], h=1)\n', (1743, 1820), False, 'from electropy import volume\n'), ((2030, 2085), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['field_at_point', 'expected_field'], {}), '(field_at_point, expected_field)\n', (2053, 2085), True, 'import numpy as np\n'), ((2297, 2385), 'electropy.volume.field', 'volume.field', (['charges'], {'x_range': '[-10, 
10]', 'y_range': '[-10, 10]', 'z_range': '[-10, 10]', 'h': '(1)'}), '(charges, x_range=[-10, 10], y_range=[-10, 10], z_range=[-10, \n 10], h=1)\n', (2309, 2385), False, 'from electropy import volume\n'), ((2659, 2714), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['field_at_point', 'expected_field'], {}), '(field_at_point, expected_field)\n', (2682, 2714), True, 'import numpy as np\n'), ((2937, 3040), 'electropy.volume.field', 'volume.field', (['charges'], {'x_range': '[-10, 10]', 'y_range': '[-10, 10]', 'z_range': '[-10, 10]', 'h': '(1)', 'component': '"""x"""'}), "(charges, x_range=[-10, 10], y_range=[-10, 10], z_range=[-10, \n 10], h=1, component='x')\n", (2949, 3040), False, 'from electropy import volume\n'), ((3369, 3424), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['field_at_point', 'expected_field'], {}), '(field_at_point, expected_field)\n', (3392, 3424), True, 'import numpy as np\n'), ((3647, 3750), 'electropy.volume.field', 'volume.field', (['charges'], {'x_range': '[-10, 10]', 'y_range': '[-10, 10]', 'z_range': '[-10, 10]', 'h': '(1)', 'component': '"""y"""'}), "(charges, x_range=[-10, 10], y_range=[-10, 10], z_range=[-10, \n 10], h=1, component='y')\n", (3659, 3750), False, 'from electropy import volume\n'), ((4079, 4134), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['field_at_point', 'expected_field'], {}), '(field_at_point, expected_field)\n', (4102, 4134), True, 'import numpy as np\n'), ((4357, 4460), 'electropy.volume.field', 'volume.field', (['charges'], {'x_range': '[-10, 10]', 'y_range': '[-10, 10]', 'z_range': '[-10, 10]', 'h': '(1)', 'component': '"""z"""'}), "(charges, x_range=[-10, 10], y_range=[-10, 10], z_range=[-10, \n 10], h=1, component='z')\n", (4369, 4460), False, 'from electropy import volume\n'), ((4789, 4844), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['field_at_point', 'expected_field'], {}), '(field_at_point, expected_field)\n', (4812, 4844), True, 'import numpy as np\n'), 
((4929, 4965), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (4935, 4965), False, 'from electropy.charge import Charge\n'), ((4990, 5077), 'electropy.volume.field', 'volume.field', (['[charge]'], {'x_range': '[-10, 10]', 'y_range': '[1, 1]', 'z_range': '[-10, 10]', 'h': '(0.1)'}), '([charge], x_range=[-10, 10], y_range=[1, 1], z_range=[-10, 10],\n h=0.1)\n', (5002, 5077), False, 'from electropy import volume\n'), ((5236, 5289), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['actual_shape', 'expected_shape'], {}), '(actual_shape, expected_shape)\n', (5259, 5289), True, 'import numpy as np\n'), ((5364, 5392), 'electropy.volume._arange', 'volume._arange', (['(-10)', '(10)', '(0.1)'], {}), '(-10, 10, 0.1)\n', (5378, 5392), False, 'from electropy import volume\n'), ((5442, 5471), 'numpy.arange', 'np.arange', (['(-10)', '(10 + 0.1)', '(0.1)'], {}), '(-10, 10 + 0.1, 0.1)\n', (5451, 5471), True, 'import numpy as np\n'), ((5480, 5528), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (5510, 5528), True, 'import numpy as np\n'), ((966, 1002), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (972, 1002), False, 'from electropy.charge import Charge\n'), ((1027, 1064), 'electropy.charge.Charge', 'Charge', (['self.position_2', '(-self.charge)'], {}), '(self.position_2, -self.charge)\n', (1033, 1064), False, 'from electropy.charge import Charge\n'), ((2173, 2209), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (2179, 2209), False, 'from electropy.charge import Charge\n'), ((2234, 2271), 'electropy.charge.Charge', 'Charge', (['self.position_2', '(-self.charge)'], {}), '(self.position_2, -self.charge)\n', (2240, 2271), False, 'from electropy.charge import Charge\n'), ((2813, 2849), 
'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (2819, 2849), False, 'from electropy.charge import Charge\n'), ((2874, 2911), 'electropy.charge.Charge', 'Charge', (['self.position_2', '(-self.charge)'], {}), '(self.position_2, -self.charge)\n', (2880, 2911), False, 'from electropy.charge import Charge\n'), ((3523, 3559), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (3529, 3559), False, 'from electropy.charge import Charge\n'), ((3584, 3621), 'electropy.charge.Charge', 'Charge', (['self.position_2', '(-self.charge)'], {}), '(self.position_2, -self.charge)\n', (3590, 3621), False, 'from electropy.charge import Charge\n'), ((4233, 4269), 'electropy.charge.Charge', 'Charge', (['self.position_1', 'self.charge'], {}), '(self.position_1, self.charge)\n', (4239, 4269), False, 'from electropy.charge import Charge\n'), ((4294, 4331), 'electropy.charge.Charge', 'Charge', (['self.position_2', '(-self.charge)'], {}), '(self.position_2, -self.charge)\n', (4300, 4331), False, 'from electropy.charge import Charge\n')] |
import unittest
from concourse_common import testutil
import out
class TestOut(unittest.TestCase):
    """Exercise the failure paths of ``out.execute``."""

    def _assert_rejected(self, payload):
        # Feed the payload to the resource on stdin; execute('/') is
        # expected to signal failure by returning -1.
        testutil.put_stdin(payload)
        self.assertEqual(out.execute('/'), -1)

    def test_invalid_json(self):
        # "params" is present but empty.
        self._assert_rejected(
            """
            {
                "source": {
                    "user": "user",
                    "password": "password",
                    "host": "hostname"
                },
                "params": {
                }
            }
            """)

    def test_params_required_json(self):
        # No "params" key at all.
        self._assert_rejected(
            """
            {
                "source": {
                    "user": "user",
                    "password": "password",
                    "host": "hostname"
                }
            }
            """)
| [
"concourse_common.testutil.put_stdin",
"out.execute"
] | [((144, 413), 'concourse_common.testutil.put_stdin', 'testutil.put_stdin', (['"""\n {\n "source": {\n "user": "user",\n "password": "password",\n "host": "hostname"\n },\n "params": {\n }\n }\n """'], {}), '(\n """\n {\n "source": {\n "user": "user",\n "password": "password",\n "host": "hostname"\n },\n "params": {\n }\n }\n """\n )\n', (162, 413), False, 'from concourse_common import testutil\n'), ((515, 741), 'concourse_common.testutil.put_stdin', 'testutil.put_stdin', (['"""\n {\n "source": {\n "user": "user",\n "password": "password",\n "host": "hostname"\n }\n }\n """'], {}), '(\n """\n {\n "source": {\n "user": "user",\n "password": "password",\n "host": "hostname"\n }\n }\n """\n )\n', (533, 741), False, 'from concourse_common import testutil\n'), ((443, 459), 'out.execute', 'out.execute', (['"""/"""'], {}), "('/')\n", (454, 459), False, 'import out\n'), ((771, 787), 'out.execute', 'out.execute', (['"""/"""'], {}), "('/')\n", (782, 787), False, 'import out\n')] |
"""These are utilities designed for carefully handling communication between
processes while multithreading.
The code for ``pool_imap_unordered`` is copied nearly wholesale from GrantJ's
`Stack Overflow answer here
<https://stackoverflow.com/questions/5318936/python-multiprocessing-pool-lazy-iteration?noredirect=1&lq=1>`_.
It allows for a lazy imap over an iterable and the return of very large objects
"""
from multiprocessing import Process, Queue, cpu_count
try:
from Queue import Full as QueueFull
from Queue import Empty as QueueEmpty
except ImportError: # python3
from queue import Full as QueueFull
from queue import Empty as QueueEmpty
__all__ = ["pool_imap_unordered"]
def worker(func, recvq, sendq):
    """Worker-process loop: apply *func* to argument tuples from *recvq*.

    Each item pulled from *recvq* is an args tuple whose last element is an
    index (e.g. a variable index).  The result posted to *sendq* is the pair
    ``(args[-1], func(*args))`` so the consumer can match results to inputs
    even though completion order is arbitrary.  A ``None`` item is the
    shutdown sentinel.
    """
    while True:
        args = recvq.get()
        if args is None:
            break
        sendq.put((args[-1], func(*args)))
def pool_imap_unordered(func, iterable, procs=cpu_count()):
    """Lazily imaps in an unordered manner over an iterable in parallel as a
    generator

    :Author: <NAME> <https://stackoverflow.com/users/232571/grantj>

    :param func: function to perform on each iterable
    :param iterable: iterable which has items to map over
    :param procs: number of workers in the pool. Defaults to the cpu count
    :yields: the results of the mapping
    """
    # Create queues for sending/receiving items from iterable.
    # sendq is bounded at `procs` so the feeder blocks (with a timeout,
    # below) once every worker has pending work; recvq is unbounded.
    sendq = Queue(procs)
    recvq = Queue()
    # Start worker processes.
    # Each worker consumes args tuples from sendq until it sees the None
    # sentinel posted at the end of this function.
    for rpt in range(procs):
        Process(target=worker, args=(func, sendq, recvq)).start()
    # Iterate iterable and communicate with worker processes.
    # send_len/recv_len track items in flight so we know when all results
    # have been collected.
    send_len = 0
    recv_len = 0
    itr = iter(iterable)
    try:
        value = next(itr)
        while True:
            try:
                # Block up to 0.1s; QueueFull means the workers are busy,
                # so drain finished results before feeding more work.
                sendq.put(value, True, 0.1)
                send_len += 1
                value = next(itr)
            except QueueFull:
                # Yield everything already available without blocking,
                # then go back to feeding the send queue.
                while True:
                    try:
                        result = recvq.get(False)
                        recv_len += 1
                        yield result
                    except QueueEmpty:
                        break
    except StopIteration:
        # Input exhausted; fall through to collect outstanding results.
        pass
    # Collect all remaining results.
    while recv_len < send_len:
        result = recvq.get()
        recv_len += 1
        yield result
    # Terminate worker processes.
    # One None sentinel per worker stops each worker's receive loop.
    for rpt in range(procs):
        sendq.put(None)
| [
"multiprocessing.Queue",
"multiprocessing.cpu_count",
"multiprocessing.Process"
] | [((1066, 1077), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (1075, 1077), False, 'from multiprocessing import Process, Queue, cpu_count\n'), ((1552, 1564), 'multiprocessing.Queue', 'Queue', (['procs'], {}), '(procs)\n', (1557, 1564), False, 'from multiprocessing import Process, Queue, cpu_count\n'), ((1577, 1584), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (1582, 1584), False, 'from multiprocessing import Process, Queue, cpu_count\n'), ((1654, 1703), 'multiprocessing.Process', 'Process', ([], {'target': 'worker', 'args': '(func, sendq, recvq)'}), '(target=worker, args=(func, sendq, recvq))\n', (1661, 1703), False, 'from multiprocessing import Process, Queue, cpu_count\n')] |
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_flcdrpf_ckl_mmp_cds
@file marine-integrations/mi/dataset/parser/test/test_flcdrpf_ckl_mmp_cds.py
@author <NAME>
@brief Test code for a flcdrpf_ckl_mmp_cds data parser
"""
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import SampleException
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.flntu_x.mmp_cds.resource import RESOURCE_PATH
from mi.dataset.parser.mmp_cds_base import MmpCdsParser
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
@attr('UNIT', group='mi')
class FlntuXMmpCdsParserUnitTestCase(ParserUnitTestCase):
    """
    flntu_x_mmp_cds Parser unit test suite
    """

    def setUp(self):
        ParserUnitTestCase.setUp(self)

        self.config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.flntu_x_mmp_cds',
            DataSetDriverConfigKeys.PARTICLE_CLASS: 'FlntuXMmpCdsParserDataParticle'
        }

    def _create_parser(self, stream_handle):
        """Build an MmpCdsParser wired to this test's config and callback."""
        return MmpCdsParser(self.config, stream_handle, self.exception_callback)

    def test_simple(self):
        """
        This test reads in a small number of particles and verifies the result of one of the particles.
        """
        with open(os.path.join(RESOURCE_PATH, 'flntu_1_20131124T005004_458.mpk'), 'rb') as stream_handle:
            parser = self._create_parser(stream_handle)

            particles = parser.get_records(6)

            # this yml file only has particle 0 in it
            self.assert_particles(particles[0:1], 'first.yml', RESOURCE_PATH)

            # this yml file only has particle 1 in it
            self.assert_particles(particles[1:2], 'second.yml', RESOURCE_PATH)

            # this yml file only has particle 5 in it
            self.assert_particles(particles[5:6], 'good.yml', RESOURCE_PATH)

    def test_get_many(self):
        """
        This test exercises retrieving 20 particles, verifying the 20th particle, then retrieves 30 particles
        and verifies the 30th particle.
        """
        with open(os.path.join(RESOURCE_PATH, 'flntu_1_20131124T005004_458.mpk'), 'rb') as stream_handle:
            parser = self._create_parser(stream_handle)

            particles = parser.get_records(20)

            # Should end up with 20 particles
            self.assertEqual(len(particles), 20)

            # this yml file only has particle 0 in it
            self.assert_particles(particles[0:1], 'first.yml', RESOURCE_PATH)

            # this yml file only has particle 19 in it
            self.assert_particles(particles[19:20], 'get_many_one.yml', RESOURCE_PATH)

            particles = parser.get_records(30)

            # Should end up with 30 particles
            self.assertEqual(len(particles), 30)

            # this yml file only has particle 29 in it
            self.assert_particles(particles[29:30], 'get_many_two.yml', RESOURCE_PATH)

    def test_long_stream(self):
        """
        This test exercises retrieve approximately 200 particles.
        """
        # Using two concatenated msgpack files to simulate two chunks to get more particles.
        with open(os.path.join(RESOURCE_PATH, 'flntu_concat.mpk'), 'rb') as stream_handle:
            parser = self._create_parser(stream_handle)

            # Attempt to retrieve 200 particles, but we will retrieve less
            particles = parser.get_records(200)

            # Should end up with 184 particles (the old comment said 172,
            # which disagreed with the asserted count)
            self.assertEqual(len(particles), 184)

    def test_bad_data_one(self):
        """
        This test verifies that a SampleException is raised when msgpack data is malformed.
        """
        with open(os.path.join(RESOURCE_PATH, 'flntu_1_20131124T005004_458-BAD.mpk'), 'rb') as stream_handle:
            parser = self._create_parser(stream_handle)

            parser.get_records(1)

            self.assertEqual(len(self.exception_callback_value), 1)
            # assertIsInstance replaces the TestCase.assert_ alias, which is
            # deprecated and removed in Python 3.12
            self.assertIsInstance(self.exception_callback_value[0], SampleException)

    def test_bad_data_two(self):
        """
        This test verifies that a SampleException is raised when an entire msgpack buffer is not msgpack.
        """
        with open(os.path.join(RESOURCE_PATH, 'not-msg-pack.mpk'), 'rb') as stream_handle:
            parser = self._create_parser(stream_handle)

            parser.get_records(1)

            self.assertGreaterEqual(len(self.exception_callback_value), 1)
            self.assertIsInstance(self.exception_callback_value[0], SampleException)
| [
"nose.plugins.attrib.attr",
"os.path.join",
"mi.core.log.get_logger",
"mi.dataset.parser.mmp_cds_base.MmpCdsParser",
"mi.dataset.test.test_parser.ParserUnitTestCase.setUp"
] | [((642, 654), 'mi.core.log.get_logger', 'get_logger', ([], {}), '()\n', (652, 654), False, 'from mi.core.log import get_logger\n'), ((661, 685), 'nose.plugins.attrib.attr', 'attr', (['"""UNIT"""'], {'group': '"""mi"""'}), "('UNIT', group='mi')\n", (665, 685), False, 'from nose.plugins.attrib import attr\n'), ((840, 870), 'mi.dataset.test.test_parser.ParserUnitTestCase.setUp', 'ParserUnitTestCase.setUp', (['self'], {}), '(self)\n', (864, 870), False, 'from mi.dataset.test.test_parser import ParserUnitTestCase\n'), ((1378, 1443), 'mi.dataset.parser.mmp_cds_base.MmpCdsParser', 'MmpCdsParser', (['self.config', 'stream_handle', 'self.exception_callback'], {}), '(self.config, stream_handle, self.exception_callback)\n', (1390, 1443), False, 'from mi.dataset.parser.mmp_cds_base import MmpCdsParser\n'), ((2245, 2310), 'mi.dataset.parser.mmp_cds_base.MmpCdsParser', 'MmpCdsParser', (['self.config', 'stream_handle', 'self.exception_callback'], {}), '(self.config, stream_handle, self.exception_callback)\n', (2257, 2310), False, 'from mi.dataset.parser.mmp_cds_base import MmpCdsParser\n'), ((3379, 3444), 'mi.dataset.parser.mmp_cds_base.MmpCdsParser', 'MmpCdsParser', (['self.config', 'stream_handle', 'self.exception_callback'], {}), '(self.config, stream_handle, self.exception_callback)\n', (3391, 3444), False, 'from mi.dataset.parser.mmp_cds_base import MmpCdsParser\n'), ((3966, 4031), 'mi.dataset.parser.mmp_cds_base.MmpCdsParser', 'MmpCdsParser', (['self.config', 'stream_handle', 'self.exception_callback'], {}), '(self.config, stream_handle, self.exception_callback)\n', (3978, 4031), False, 'from mi.dataset.parser.mmp_cds_base import MmpCdsParser\n'), ((4516, 4581), 'mi.dataset.parser.mmp_cds_base.MmpCdsParser', 'MmpCdsParser', (['self.config', 'stream_handle', 'self.exception_callback'], {}), '(self.config, stream_handle, self.exception_callback)\n', (4528, 4581), False, 'from mi.dataset.parser.mmp_cds_base import MmpCdsParser\n'), ((1266, 1328), 'os.path.join', 
'os.path.join', (['RESOURCE_PATH', '"""flntu_1_20131124T005004_458.mpk"""'], {}), "(RESOURCE_PATH, 'flntu_1_20131124T005004_458.mpk')\n", (1278, 1328), False, 'import os\n'), ((2133, 2195), 'os.path.join', 'os.path.join', (['RESOURCE_PATH', '"""flntu_1_20131124T005004_458.mpk"""'], {}), "(RESOURCE_PATH, 'flntu_1_20131124T005004_458.mpk')\n", (2145, 2195), False, 'import os\n'), ((3282, 3329), 'os.path.join', 'os.path.join', (['RESOURCE_PATH', '"""flntu_concat.mpk"""'], {}), "(RESOURCE_PATH, 'flntu_concat.mpk')\n", (3294, 3329), False, 'import os\n'), ((3850, 3916), 'os.path.join', 'os.path.join', (['RESOURCE_PATH', '"""flntu_1_20131124T005004_458-BAD.mpk"""'], {}), "(RESOURCE_PATH, 'flntu_1_20131124T005004_458-BAD.mpk')\n", (3862, 3916), False, 'import os\n'), ((4419, 4466), 'os.path.join', 'os.path.join', (['RESOURCE_PATH', '"""not-msg-pack.mpk"""'], {}), "(RESOURCE_PATH, 'not-msg-pack.mpk')\n", (4431, 4466), False, 'import os\n')] |
# Generated by Django 3.0.8 on 2020-08-16 18:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (2020-08-16): creates the ``Resume`` model.

    Depends on the configured user model and on app_jumanji migration 0006.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('app_jumanji', '0006_auto_20200815_2218'),
    ]
    operations = [
        migrations.CreateModel(
            name='Resume',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('surname', models.CharField(max_length=100)),
                # Job-search status; labels are Russian ("not looking" / "considering offers" / "looking").
                ('status', models.CharField(choices=[('not_in_search', 'Не ищу работу'),
('consideration', 'Рассматриваю предложения'), ('in_search', 'Ищу работу')], max_length=100)),
                ('salary', models.FloatField()),
                # Professional specialty; labels are Russian display names.
                ('specialty', models.CharField(choices=[('frontend', 'Фронтенд'), ('backend', 'Бэкенд'),
('gamedev', 'Геймдев'), ('devops', 'Девопс'), ('design', 'Дизайн'), ('products', 'Продукты'),
('management', 'Менеджмент'), ('testing', 'Тестирование')], max_length=100)),
                ('grade', models.CharField(choices=[('intern', 'intern'), ('junior', 'junior'), ('middle', 'middle'),
('senior', 'senior'), ('lead', 'lead')], max_length=100)),
                # Education level; labels are Russian ("none" ... "higher").
                ('education', models.CharField(choices=[('missing', 'Отсутствует'), ('secondary', 'Среднее'),
('vocational', 'Средне-специальное'), ('incomplete_higher', 'Неполное высшее'),
('higher', 'Высшее')], max_length=100)),
                ('experience', models.CharField(max_length=500)),
                ('portfolio', models.CharField(max_length=500)),
                # One-to-many: a user may own several resumes (reverse accessor "resume").
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='resume', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"django.db.models.FloatField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((467, 560), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (483, 560), False, 'from django.db import migrations, models\n'), ((584, 616), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (600, 616), False, 'from django.db import migrations, models\n'), ((647, 679), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (663, 679), False, 'from django.db import migrations, models\n'), ((709, 872), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('not_in_search', 'Не ищу работу'), ('consideration',\n 'Рассматриваю предложения'), ('in_search', 'Ищу работу')]", 'max_length': '(100)'}), "(choices=[('not_in_search', 'Не ищу работу'), (\n 'consideration', 'Рассматриваю предложения'), ('in_search',\n 'Ищу работу')], max_length=100)\n", (725, 872), False, 'from django.db import migrations, models\n'), ((917, 936), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (934, 936), False, 'from django.db import migrations, models\n'), ((969, 1226), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('frontend', 'Фронтенд'), ('backend', 'Бэкенд'), ('gamedev', 'Геймдев'), (\n 'devops', 'Девопс'), ('design', 'Дизайн'), ('products', 'Продукты'), (\n 'management', 'Менеджмент'), ('testing', 'Тестирование')]", 'max_length': '(100)'}), "(choices=[('frontend', 'Фронтенд'), ('backend', 'Бэкенд'),\n ('gamedev', 'Геймдев'), ('devops', 'Девопс'), ('design', 'Дизайн'), (\n 'products', 'Продукты'), ('management', 
'Менеджмент'), ('testing',\n 'Тестирование')], max_length=100)\n", (985, 1226), False, 'from django.db import migrations, models\n'), ((1290, 1447), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('intern', 'intern'), ('junior', 'junior'), ('middle', 'middle'), (\n 'senior', 'senior'), ('lead', 'lead')]", 'max_length': '(100)'}), "(choices=[('intern', 'intern'), ('junior', 'junior'), (\n 'middle', 'middle'), ('senior', 'senior'), ('lead', 'lead')],\n max_length=100)\n", (1306, 1447), False, 'from django.db import migrations, models\n'), ((1523, 1729), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('missing', 'Отсутствует'), ('secondary', 'Среднее'), ('vocational',\n 'Средне-специальное'), ('incomplete_higher', 'Неполное высшее'), (\n 'higher', 'Высшее')]", 'max_length': '(100)'}), "(choices=[('missing', 'Отсутствует'), ('secondary',\n 'Среднее'), ('vocational', 'Средне-специальное'), ('incomplete_higher',\n 'Неполное высшее'), ('higher', 'Высшее')], max_length=100)\n", (1539, 1729), False, 'from django.db import migrations, models\n'), ((1835, 1867), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (1851, 1867), False, 'from django.db import migrations, models\n'), ((1900, 1932), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (1916, 1932), False, 'from django.db import migrations, models\n'), ((1960, 2079), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""resume"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='resume', to=settings.AUTH_USER_MODEL)\n", (1977, 2079), False, 'from django.db import migrations, models\n')] |
import os
import pytest
import json
import testinfra.utils.ansible_runner
# Hosts under test come from the Molecule-generated Ansible inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.fixture()
def waited_failed_task_name(host):
    """Expected name of the task that should have failed, read from host vars."""
    return host.ansible.get_variables()['waited_failed_task_name']
@pytest.fixture()
def waited_failed_result_msg(host):
    """Expected error message of the failed task, read from host vars."""
    return host.ansible.get_variables()['waited_failed_result_msg']
@pytest.fixture()
def failure_info(host):
    """Load the failure record JSON written for this host during converge."""
    hostname = host.ansible.get_variables()['inventory_hostname']
    json_file_path = "{}/failure_{}.json".format(
        os.environ['MOLECULE_EPHEMERAL_DIRECTORY'], hostname)
    with open(json_file_path) as json_file:
        return json.load(json_file)
def test_failed_task_name(host, failure_info, waited_failed_task_name):
    """The recorded failure must originate from the expected task."""
    actual_name = failure_info['task_name']
    assert actual_name == waited_failed_task_name
def test_failed_message(host, failure_info, waited_failed_result_msg):
    """The recorded failure must carry the expected error message."""
    actual_msg = failure_info['return']['msg']
    assert actual_msg == waited_failed_result_msg
| [
"pytest.fixture",
"json.load"
] | [((203, 219), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (217, 219), False, 'import pytest\n'), ((359, 375), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (373, 375), False, 'import pytest\n'), ((517, 533), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (531, 533), False, 'import pytest\n'), ((818, 838), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (827, 838), False, 'import json\n')] |
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render,redirect, render_to_response
from .models import *
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
from django.urls import reverse
from django.http import HttpResponseRedirect
def loginView(request):
    """Serve the login page.

    The GET and POST branches are placeholders: both methods currently
    fall through to rendering the same template.
    """
    method = request.method
    if method == "GET":
        pass
    elif method == 'POST':
        pass
    return render(request, 'myApp/login.html')
| [
"django.shortcuts.render"
] | [((461, 496), 'django.shortcuts.render', 'render', (['request', '"""myApp/login.html"""'], {}), "(request, 'myApp/login.html')\n", (467, 496), False, 'from django.shortcuts import render, redirect, render_to_response\n')] |
# -*- encoding: utf-8 -*-
"""
search_indexex.py: Creacion de los indices de busqueda.
@author <NAME>
@contact <EMAIL>
<EMAIL>
@camilortte on Twitter
@copyright Copyright 2014-2015, RecomendadorUD
@license GPL
@date 2014-10-10
@satus Pre-Alpha
@version= 0..215
"""
#import datetime
from haystack import indexes
from .models import Establecimiento, Categoria
class EstablecimientoIndex(indexes.SearchIndex, indexes.Indexable):
    # Haystack search index for Establecimiento records.
    # NOTE(review): the document field lacks use_template=True, unlike
    # CategoriaIndex below — confirm whether a prepare_text/template exists.
    text = indexes.CharField(document=True)
    nombre = indexes.EdgeNgramField(model_attr='nombre')
    #nombre_auto = indexes.EdgeNgramField(model_attr='nombre')
    email = indexes.EdgeNgramField(model_attr='email')
    web_page = indexes.EdgeNgramField(model_attr='web_page')
    address= indexes.EdgeNgramField(model_attr='address')
    sub_categorias = indexes.EdgeNgramField(model_attr='sub_categorias')
    # content_auto = indexes.EdgeNgramField(model_attr='nombre')
    def get_model(self):
        # Model class whose rows populate this index.
        return Establecimiento
    def index_queryset(self, using=None):
        # using select_related here should avoid an extra query for getting
        # the manufacturer when indexing
        return self.get_model().objects.all().select_related('sub_categorias')
    # def search(self):
    #     if hasattr(self,'cleaned_data') and self.cleaned_data['q']:
    #         self.cleaned_data['q']=self.cleaned_data['q'].encode('translit/one/ascii', 'replace')
    #     sqs = super(RtSearchForm, self).search()
    #     return sqs
class CategoriaIndex(indexes.SearchIndex, indexes.Indexable):
    # Haystack search index for Categoria records; the document field is
    # rendered from a search template (use_template=True).
    text = indexes.CharField(document=True, use_template=True)
    nombre_categoria= indexes.EdgeNgramField(model_attr='nombre')
    def get_model(self):
        # Model class whose rows populate this index.
        return Categoria
    def index_queryset(self, using=None):
        """Used when the entire index for model is updated."""
        #return self.get_model().objects.filter(pub_date__lte=datetime.datetime.now())
        return self.get_model().objects.all()
"haystack.indexes.CharField",
"haystack.indexes.EdgeNgramField"
] | [((536, 568), 'haystack.indexes.CharField', 'indexes.CharField', ([], {'document': '(True)'}), '(document=True)\n', (553, 568), False, 'from haystack import indexes\n'), ((583, 626), 'haystack.indexes.EdgeNgramField', 'indexes.EdgeNgramField', ([], {'model_attr': '"""nombre"""'}), "(model_attr='nombre')\n", (605, 626), False, 'from haystack import indexes\n'), ((704, 746), 'haystack.indexes.EdgeNgramField', 'indexes.EdgeNgramField', ([], {'model_attr': '"""email"""'}), "(model_attr='email')\n", (726, 746), False, 'from haystack import indexes\n'), ((762, 807), 'haystack.indexes.EdgeNgramField', 'indexes.EdgeNgramField', ([], {'model_attr': '"""web_page"""'}), "(model_attr='web_page')\n", (784, 807), False, 'from haystack import indexes\n'), ((821, 865), 'haystack.indexes.EdgeNgramField', 'indexes.EdgeNgramField', ([], {'model_attr': '"""address"""'}), "(model_attr='address')\n", (843, 865), False, 'from haystack import indexes\n'), ((891, 942), 'haystack.indexes.EdgeNgramField', 'indexes.EdgeNgramField', ([], {'model_attr': '"""sub_categorias"""'}), "(model_attr='sub_categorias')\n", (913, 942), False, 'from haystack import indexes\n'), ((1649, 1700), 'haystack.indexes.CharField', 'indexes.CharField', ([], {'document': '(True)', 'use_template': '(True)'}), '(document=True, use_template=True)\n', (1666, 1700), False, 'from haystack import indexes\n'), ((1723, 1766), 'haystack.indexes.EdgeNgramField', 'indexes.EdgeNgramField', ([], {'model_attr': '"""nombre"""'}), "(model_attr='nombre')\n", (1745, 1766), False, 'from haystack import indexes\n')] |
''' Handling the data io '''
from torchvision import transforms, datasets
import numpy as np
import zipfile
from io import open
import glob
from PIL import Image, ImageOps
import os
import string
# Read data
def extractZipFiles(zip_file, extract_to):
    """Unpack every member of *zip_file* into the *extract_to* directory."""
    with zipfile.ZipFile(zip_file) as archive:
        archive.extractall(extract_to)
    print('done')
data_dir = 'data/captcha_images_v2/*.png'
def findFiles(path):
    """Return every filesystem path matching the glob pattern *path*."""
    return glob.glob(path)
# Find a letter's index in the alphabet string.
def letterToIndex(letter):
    # NOTE(review): `all_letters` is not defined in this chunk; presumably it
    # is built from the imported `string` module (e.g. string.ascii_letters)
    # earlier/elsewhere in the file — confirm. str.find returns -1 when the
    # letter is absent.
    return all_letters.find(letter)
# print(letterToIndex('l'))
# Inverse mapping: alphabet index -> letter.
# NOTE(review): `all_letters` is referenced here but not defined in this
# chunk — this would raise NameError at import unless defined elsewhere.
indexToLetter = {letterToIndex(i):i for i in all_letters}
# Image paths and their labels (label = file name without the ".png" suffix).
data = [img for img in findFiles(data_dir)]
targets = [os.path.basename(x)[:-4] for x in glob.glob(data_dir)]
# abcde -> [a, b, c, d, e]
pre_targets_flat = [[c for c in x] for x in targets]
# Per-label sequence of alphabet indices, shape (num_images, label_len).
encoded_targets = np.array([[letterToIndex(c) for c in x] for x in pre_targets_flat])
# All label characters flattened, and the set of distinct characters.
targets_flat = [char for word in pre_targets_flat for char in word]
unique_letters = set(char for word in targets for char in word)
class CaptchaDataset(Dataset):
    """
    Args:
        data (string): Path to the file with all the images.
        target (string): Path to the file with annotations.
        transform (callable, optional): Optional transform to be applied
            on a sample.
    """
    # NOTE(review): `Dataset` and `torch` are not imported in this chunk;
    # presumably `from torch.utils.data import Dataset` and `import torch`
    # are needed — confirm against the full file.
    def __init__(self, data, target=None, transform=None):
        self.data = data
        self.target = target
        self.transform = transform
    def __getitem__(self, index):
        # read image
        x = Image.open(self.data[index]).convert('RGB')
        y = self.target[index]
        # resize, turn to 0,1
        if self.transform:
            x = self.transform(x)
            return x, torch.tensor(y, dtype=torch.long)
        # NOTE(review): when no transform is set, y is returned as-is (not a
        # tensor) — the return type differs between the two paths; confirm
        # whether that asymmetry is intentional.
        return x, y
    def __len__(self):
        return len(self.data)
| [
"zipfile.ZipFile",
"os.path.basename",
"glob.glob",
"PIL.Image.open"
] | [((476, 491), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (485, 491), False, 'import glob\n'), ((294, 324), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_file', '"""r"""'], {}), "(zip_file, 'r')\n", (309, 324), False, 'import zipfile\n'), ((759, 778), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (775, 778), False, 'import os\n'), ((793, 812), 'glob.glob', 'glob.glob', (['data_dir'], {}), '(data_dir)\n', (802, 812), False, 'import glob\n'), ((1647, 1675), 'PIL.Image.open', 'Image.open', (['self.data[index]'], {}), '(self.data[index])\n', (1657, 1675), False, 'from PIL import Image, ImageOps\n')] |
# -*- coding: utf-8 -*-
"""Tests for the metaregistry."""
import unittest
import bioregistry
from bioregistry.export.rdf_export import metaresource_to_rdf_str
from bioregistry.schema import Registry
class TestMetaregistry(unittest.TestCase):
    """Tests for the metaregistry."""
    def test_minimum_metadata(self):
        """Test the metaregistry entries have a minimum amount of data."""
        for metaprefix, registry_pydantic in bioregistry.read_metaregistry().items():
            self.assertIsInstance(registry_pydantic, Registry)
            data = registry_pydantic.dict()
            with self.subTest(metaprefix=metaprefix):
                # Required descriptive fields for every registry entry.
                self.assertIn('name', data)
                self.assertIn('homepage', data)
                self.assertIn('example', data)
                self.assertIn('description', data)
                # When a registry is a provider, it means it
                # provides for its entries
                self.assertIn('provider', data)
                if data['provider']:
                    # Provider URL must contain the $1 placeholder (identifier slot).
                    self.assertIn('provider_url', data)
                    self.assertIn('$1', data['provider_url'])
                # When a registry is a resolver, it means it
                # can resolve entries (prefixes) + identifiers
                self.assertIn('resolver', data)
                if data['resolver']:
                    # Resolver URL needs $1 (prefix) and $2 (identifier) slots.
                    self.assertIn('resolver_url', data)
                    self.assertIn('$1', data['resolver_url'])
                    self.assertIn('$2', data['resolver_url'])
                # No keys outside the known metadata schema may appear.
                invalid_keys = set(data).difference({
                    'prefix', 'name', 'homepage', 'download',
                    'provider', 'resolver', 'description', 'provider_url',
                    'example', 'resolver_url', 'contact',
                })
                self.assertEqual(set(), invalid_keys, msg='invalid metadata')
    def test_get_registry(self):
        """Test getting a registry."""
        # Unknown metaprefixes resolve to None for every accessor.
        self.assertIsNone(bioregistry.get_registry('nope'))
        self.assertIsNone(bioregistry.get_registry_name('nope'))
        self.assertIsNone(bioregistry.get_registry_homepage('nope'))
        self.assertIsNone(bioregistry.get_registry_url('nope', ...))
        self.assertIsNone(bioregistry.get_registry_example('nope'))
        self.assertIsNone(bioregistry.get_registry_description('nope'))
        self.assertIsNone(bioregistry.get_registry_url('n2t', ...))  # no provider available for N2T registry
        # Known registry: accessors must agree with the Registry object fields.
        metaprefix = 'uniprot'
        registry = bioregistry.get_registry(metaprefix)
        self.assertIsInstance(registry, Registry)
        self.assertEqual(metaprefix, registry.prefix)
        self.assertEqual(registry.description, bioregistry.get_registry_description(metaprefix))
        homepage = 'https://www.uniprot.org/database/'
        self.assertEqual(homepage, registry.homepage)
        self.assertEqual(homepage, bioregistry.get_registry_homepage(metaprefix))
        name = 'UniProt Cross-ref database'
        self.assertEqual(name, registry.name)
        self.assertEqual(name, bioregistry.get_registry_name(metaprefix))
        example = '0174'
        self.assertEqual(example, registry.example)
        self.assertEqual(example, bioregistry.get_registry_example(metaprefix))
        url = bioregistry.get_registry_url(metaprefix, example)
        self.assertEqual('https://www.uniprot.org/database/DB-0174', url)
    def test_resolver(self):
        """Test generating resolver URLs."""
        # Can't resolve since nope isn't a valid registry
        self.assertIsNone(bioregistry.get_registry_resolve_url('nope', 'chebi', '1234'))
        # Can't resolve since GO isn't a resolver
        self.assertIsNone(bioregistry.get_registry_resolve_url('go', 'chebi', '1234'))
        url = bioregistry.get_registry_resolve_url('bioregistry', 'chebi', '1234')
        self.assertEqual('https://bioregistry.io/chebi:1234', url)
    def test_get_rdf(self):
        """Test conversion to RDF."""
        s = metaresource_to_rdf_str('uniprot')
        self.assertIsInstance(s, str)
| [
"bioregistry.get_registry_url",
"bioregistry.get_registry_resolve_url",
"bioregistry.get_registry_name",
"bioregistry.get_registry",
"bioregistry.read_metaregistry",
"bioregistry.export.rdf_export.metaresource_to_rdf_str",
"bioregistry.get_registry_homepage",
"bioregistry.get_registry_description",
... | [((2517, 2553), 'bioregistry.get_registry', 'bioregistry.get_registry', (['metaprefix'], {}), '(metaprefix)\n', (2541, 2553), False, 'import bioregistry\n'), ((3286, 3335), 'bioregistry.get_registry_url', 'bioregistry.get_registry_url', (['metaprefix', 'example'], {}), '(metaprefix, example)\n', (3314, 3335), False, 'import bioregistry\n'), ((3784, 3852), 'bioregistry.get_registry_resolve_url', 'bioregistry.get_registry_resolve_url', (['"""bioregistry"""', '"""chebi"""', '"""1234"""'], {}), "('bioregistry', 'chebi', '1234')\n", (3820, 3852), False, 'import bioregistry\n'), ((3999, 4033), 'bioregistry.export.rdf_export.metaresource_to_rdf_str', 'metaresource_to_rdf_str', (['"""uniprot"""'], {}), "('uniprot')\n", (4022, 4033), False, 'from bioregistry.export.rdf_export import metaresource_to_rdf_str\n'), ((1979, 2011), 'bioregistry.get_registry', 'bioregistry.get_registry', (['"""nope"""'], {}), "('nope')\n", (2003, 2011), False, 'import bioregistry\n'), ((2039, 2076), 'bioregistry.get_registry_name', 'bioregistry.get_registry_name', (['"""nope"""'], {}), "('nope')\n", (2068, 2076), False, 'import bioregistry\n'), ((2104, 2145), 'bioregistry.get_registry_homepage', 'bioregistry.get_registry_homepage', (['"""nope"""'], {}), "('nope')\n", (2137, 2145), False, 'import bioregistry\n'), ((2173, 2214), 'bioregistry.get_registry_url', 'bioregistry.get_registry_url', (['"""nope"""', '...'], {}), "('nope', ...)\n", (2201, 2214), False, 'import bioregistry\n'), ((2242, 2282), 'bioregistry.get_registry_example', 'bioregistry.get_registry_example', (['"""nope"""'], {}), "('nope')\n", (2274, 2282), False, 'import bioregistry\n'), ((2310, 2354), 'bioregistry.get_registry_description', 'bioregistry.get_registry_description', (['"""nope"""'], {}), "('nope')\n", (2346, 2354), False, 'import bioregistry\n'), ((2382, 2422), 'bioregistry.get_registry_url', 'bioregistry.get_registry_url', (['"""n2t"""', '...'], {}), "('n2t', ...)\n", (2410, 2422), False, 'import bioregistry\n'), 
((2706, 2754), 'bioregistry.get_registry_description', 'bioregistry.get_registry_description', (['metaprefix'], {}), '(metaprefix)\n', (2742, 2754), False, 'import bioregistry\n'), ((2901, 2946), 'bioregistry.get_registry_homepage', 'bioregistry.get_registry_homepage', (['metaprefix'], {}), '(metaprefix)\n', (2934, 2946), False, 'import bioregistry\n'), ((3070, 3111), 'bioregistry.get_registry_name', 'bioregistry.get_registry_name', (['metaprefix'], {}), '(metaprefix)\n', (3099, 3111), False, 'import bioregistry\n'), ((3225, 3269), 'bioregistry.get_registry_example', 'bioregistry.get_registry_example', (['metaprefix'], {}), '(metaprefix)\n', (3257, 3269), False, 'import bioregistry\n'), ((3569, 3630), 'bioregistry.get_registry_resolve_url', 'bioregistry.get_registry_resolve_url', (['"""nope"""', '"""chebi"""', '"""1234"""'], {}), "('nope', 'chebi', '1234')\n", (3605, 3630), False, 'import bioregistry\n'), ((3708, 3767), 'bioregistry.get_registry_resolve_url', 'bioregistry.get_registry_resolve_url', (['"""go"""', '"""chebi"""', '"""1234"""'], {}), "('go', 'chebi', '1234')\n", (3744, 3767), False, 'import bioregistry\n'), ((443, 474), 'bioregistry.read_metaregistry', 'bioregistry.read_metaregistry', ([], {}), '()\n', (472, 474), False, 'import bioregistry\n')] |
from __future__ import print_function
import gzip
import os
import sys
from decimal import Decimal
def calculate_gc(inpath):
    """Count GC bases and total bases over the sequence lines of a FASTQ file.

    Sequence lines are every 4th line starting at line index 1 (FASTQ layout,
    assuming no blank lines).

    Args:
        inpath: Path to a FASTQ file; files ending in ``.gz`` are
            transparently decompressed.

    Returns:
        Tuple ``(gc_bases, ttl_bases)``.

    Fixes over the original:
    - ``gzip.open`` defaulted to binary mode, so lines were ``bytes`` and
      ``s.count('G')`` raised TypeError on Python 3; open in text mode ('rt').
    - The file handle was never closed; use a ``with`` block.
    """
    opener = gzip.open(inpath, 'rt') if inpath.endswith('.gz') else open(inpath)
    ttl_bases = 0
    gc_bases = 0
    with opener as inf:
        for i, l in enumerate(inf):
            if i % 4 == 1:  # FASTQ sequence line
                s = l.strip().upper()
                ttl_bases += len(s)
                gc_bases += (s.count('G') + s.count('C'))
    return gc_bases, ttl_bases
if __name__ == '__main__':
    # Require exactly one argument naming an existing fastq[.gz] file.
    if len(sys.argv) < 2 or not os.path.isfile(sys.argv[1]):
        sys.stderr.write('Usage: gc.py <fastq[.gz] file with no blank lines>\n')
        sys.exit(1)
    gc_count, total = calculate_gc(sys.argv[1])
    # The original pipeline returns 12 decimal places, so round for consistency.
    fraction = round(Decimal(gc_count / float(total)), 12)
    print(gc_count, total, fraction)
| [
"os.path.isfile",
"sys.stderr.write",
"sys.exit",
"gzip.open"
] | [((138, 155), 'gzip.open', 'gzip.open', (['inpath'], {}), '(inpath)\n', (147, 155), False, 'import gzip\n'), ((543, 615), 'sys.stderr.write', 'sys.stderr.write', (['"""Usage: gc.py <fastq[.gz] file with no blank lines>\n"""'], {}), "('Usage: gc.py <fastq[.gz] file with no blank lines>\\n')\n", (559, 615), False, 'import sys\n'), ((624, 635), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (632, 635), False, 'import sys\n'), ((506, 533), 'os.path.isfile', 'os.path.isfile', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (520, 533), False, 'import os\n')] |
import os
import time
import sys
import random
# Built-in password wordlist shipped with the tool, one candidate per line.
# Kept verbatim: these exact bytes are appended to user_pass.txt by password().
user_pass = ('''<PASSWORD>
admi
admin
universo
html
veggeta
Admin
bados
free-fire
royale
clang
free
fire
anonimo
anonimous
anoni
bills
anonymous
Aanonimous
pass
password
wordlist
kali
linux
kali-linux
start
Hacker
parrot
ubuntu
blacken
redhat
deepin
lubuntu
depin
gogeta
hacker
tor
2000
error
2001
2002
1999
root
home
space
2003
2004
2005
red-tor
redtor
enero
2006
2007
2008
home
2009
2010
2020
goku
febrero
user
usuario
xmr7
marzo
administrador
abril
mayo
junio
administrativo
2011
homme
2013
2012
security
2014
wine
seguridad
2015
2016
2017
2018
2019
hack
black
hackblack
julio
anonsurf
decsec
agosto
metasploit
supersu
super
user-root
septiembre
octubre
october
novienbre
juan
adrian
diciembre
cuarenta
curentena
1234
4321
0000
docker
python
aaaa
dead
deat
muerte
sudo
sudosu
sudo su
we are hacker
2222
1010
wearehacker
123456
1111
12345
mexico
peru
amor
123
vida
love
loveyou
you
live
5678
scan
56789
mylife
estudio
mrhacker
mr hacker
jhom
jhon
fores
benjamin
mr-rebot
mr robot
mr-roboth
mr roboth
roboth
scryp
1010
tool
nombre
anom''')
def colores():
    """Switch the terminal text color by printing a random ANSI escape code.

    Improvement: ``random.choice`` replaces the manual
    randrange-then-index pattern.
    """
    palette = ['\033[1;36m', '\033[1;31m', '\033[1;34m', '\033[1;30m',
               '\033[1;37m', '\033[1;35m', '\033[1;32m', '\033[1;33m']
    print(random.choice(palette))
# Built-in username wordlist, one candidate per line.
# Kept verbatim: these exact bytes are appended to user_user.txt by usuarios().
user_user = ('''admin
admi
admin
Admin
anonimo
anonimous
anoni
anonymous
benjamin
mr-rebot
mr robot
mr-roboth
mr roboth
roboth
Aanonimous
pass
password
wordlist
kali
linux
kali-linux
start
Hacker
parrot
ubuntu
redhat
deepin
depin
gogeta
hacker
tor
2000
2001
2002
1999
root
home
space
2003
2004
2005
red-tor
redtor
enero
2006
2007
2008
2009
2010
2020
goku
febrero
user
usuario
marzo
administrador
abril
mayo
junio
administrativo
2011
2013
2012
2014
2015
2016
2017
2018
2019
hack
black
hackblack
julio
anonsurf
agosto
metasploit
supersu
super
user-root
septiembre
octubre
october
novienbre
juan
diciembre
cuarenta
curentena
1234
4321
0000
docker
aaaa
dead
deat
muerte
sudo
sudosu
sudo su
we are hacker
2222
1010
wearehacker
123456
1111
12345
mexico
peru
amor
123
vida
love
loveyou
you
live
5678
scan
56789
mylife
estudio
mrhacker
mr hacker
jhom
jhon
fores
scryp
1010
tool
anom
''')
#print (user)
def slowprint(s):
    """Write *s* plus a trailing newline one character at a time.

    Every character is flushed immediately and followed by a short
    delay, producing a typewriter effect on the terminal.
    """
    delay = 1. / 70
    for ch in s + '\n':
        sys.stdout.write(ch)
        sys.stdout.flush()
        time.sleep(delay)
def fth ():
    # Pick a random color, then type out one randomly chosen ASCII-art banner
    # ("fth") character by character via slowprint.
    colores()
    listalinuxs = ['''
 ██████ █████ █████
███░░███ ░░███ ░░███
░███ ░░░ ███████ ░███████
███████ ░░░███░ ░███░░███
░░░███░ ░███ ░███ ░███
░███ ░███ ███ ░███ ░███
█████ ██ ░░█████ ██ ████ █████
░░░░░ ░░ ░░░░░ ░░ ░░░░ ░░░░░
''','''
'########::::::'########::::::'##::::'##:
 ##.....:::::::... ##..::::::: ##:::: ##:
 ##::::::::::::::: ##::::::::: ##:::: ##:
 ######::::::::::: ##::::::::: #########:
 ##...:::::::::::: ##::::::::: ##.... ##:
 ##:::::::'###:::: ##::::'###: ##:::: ##:
 ##::::::: ###:::: ##:::: ###: ##:::: ##:
..::::::::...:::::..:::::...::..:::::..::
''','''
███████╗████████╗██╗ ██╗
██╔════╝╚══██╔══╝██║ ██║
█████╗ ██║ ███████║
██╔══╝ ██║ ██╔══██║
██║██╗ ██║██╗██║ ██║
╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═╝
''','''
 O)) O)) O))
O) O)) O)) O)) O)
O)O) O)O)O) O)O)) O)) O)) O)) O)) O))O)) O))
O)) O)) O) O) O))O)) O)) O))O)) O)) O) O))
O)) O)) O)) O)) O))O)) O)) O))O)) O)) O)
O)) O)) O) O)) O))O)) O)) O))O)) O)) O) O))
O)) O)) O)) O))O)))O))O))) O)) O))O))O)) O))
''','''
.%%%%%%..%%%%%%..%%..%%..%%......%%%%%%..%%..%%..%%..%%..%%..%%.
.%%........%%....%%..%%..%%........%%....%%%.%%..%%..%%...%%%%..
.%%%%......%%....%%%%%%..%%........%%....%%.%%%..%%..%%....%%...
.%%........%%....%%..%%..%%........%%....%%..%%..%%..%%...%%%%..
.%%........%%....%%..%%..%%%%%%..%%%%%%..%%..%%...%%%%...%%..%%.
................................................................
''','''
@@@@@@@@ @@@@@@@ @@@ @@@ @@@ @@@ @@@ @@@ @@@ @@@ @@@ @@@
@@@@@@@@ @@@@@@@ @@@ @@@ @@@ @@@ @@@@ @@@ @@@ @@@ @@@ @@@
@@! @@! @@! @@@ @@! @@! @@!@!@@@ @@! @@@ @@! !@@
!@! !@! !@! @!@ !@! !@! !@!!@!@! !@! @!@ !@! @!!
@!!!:! @!! @!@!@!@! @!@!@!@!@ @!! !!@ @!@ !!@! @!@ !@! !@@!@!
!!!!!: !!! !!!@!!!! !!!@!@!!! !!! !!! !@! !!! !@! !!! @!!!
!!: !!: !!: !!! !!: !!: !!: !!! !!: !!! !: :!!
:!: :!: :!: !:! :!: :!: :!: !:! :!: !:! :!: !:!
:: :: :: ::: :: :::: :: :: :: ::::: :: :: :::
: : : : : : :: : : : :: : : : : : ::
''']
    # Uniformly pick one banner and animate it.
    indice=random.randrange(len(listalinuxs))
    listalinux=listalinuxs[indice]
    slowprint(listalinux)
fth()
def clear():
    """Wipe the terminal screen via the shell ``clear`` command."""
    os.system('clear')
def help ():
    # Print the (Spanish) usage guide for the interactive menu.
    # NOTE(review): this shadows the builtin help(); kept to preserve the
    # tool's command names.
    print ('\n ------Menu_de ayuda_------\n')
    ayuda = '''
 Creacion de usuario = use user [ no son util ]
 Creacion de password = use pass [ no son util ]\n
 create_new_pass [Crea una nueva lista de deccionario manual]\n\n
 create_new_user [Crea una nueva lista de diccionario manual ]\n
 create pass list [crea la lista de diccionario creada por el usuario]\n
 create user list [crea la lista de diccionario creada por el usuario]\n
 clear [LIMPIA LA PANTALLA ]\n
 use force_brute [con este comando podra hacer fuerza bruta a ftp,ssh.etc]
 open msfconsole [podras tambien ejecutar metasploit desde la misma scryp]
 \n
 use hacker_git [como instancia con solo este comando se instalara una tools]
 creada por el usuario disponible en github
 \n
 create automated payload [ crearas un payload ya automatizado [windows/android][1]]
 \n
 Tambien podras llamar cualquier herramienta del sistema..
 escribiendo el nombre de la herramienta
 Ejemplo [ nmap 127.0.0.1 ] etc
 \n\n
 ------Nota------
 \n\n
 Para ahorrarte trabajo el mismo creador del scrypt a creado un diccionario
 con posibles password y user puedes crearlos y darles usos en el apartado
 de contraseña y usuario --------------- tambien el creador de contraseña
 te generara y podras guardar cada [user y pass] en la lista de diccionarios
 del scrypt para darle uso en otra ocacion --------------------------------\n\n
 Tambien tendra el privilejio de editar sus diccionarios desde el directorio
 de la herramienta.. ya que automaticamente el scrypt le heredara privilegios
 --------------------------------super-user----------------------------------
 '''
    print (ayuda)
def create_new_Pass():
    """Open nano so the user can hand-write a fresh password wordlist."""
    print('Creando .txt ....')
    time.sleep(2)
    os.system('nano New_Pass.txt')
    print('Creado con exito ....')
def create_new_user():
    """Open nano so the user can hand-write a fresh username wordlist."""
    print('Creando .txt ....\n')
    time.sleep(2)
    os.system('nano New_user.txt')
    print('Creado con exito ....')
def password():
    """Append the built-in ``user_pass`` wordlist to user_pass.txt.

    Fix: the original opened the file and never closed it; a ``with``
    block guarantees the buffer is flushed and the descriptor released
    even if the write fails. Note the file is opened in append mode, so
    repeated calls duplicate the wordlist (original behavior preserved).
    """
    with open('user_pass.txt', 'a') as diccionaripass:
        diccionaripass.write(user_pass)
def usuarios():
    """Append the built-in ``user_user`` wordlist to user_user.txt.

    Fix: the original opened the file and never closed it; a ``with``
    block guarantees the buffer is flushed and the descriptor released
    even if the write fails. Note the file is opened in append mode, so
    repeated calls duplicate the wordlist (original behavior preserved).
    """
    with open('user_user.txt', 'a') as diccionariuser:
        diccionariuser.write(user_user)
def menu ():
    # Print the welcome banner and the top-level menu options (Spanish),
    # in a random terminal color.
    colores()
    Menu_de_inicio = ('''
  [Escribe [help] para ver modo de uso ]
\n
 Generar diccionario\n
 Crear nuevo diccionario\n
 Editar diccionario existente
''')
    print('\n -----------------------------------welcome------------------------------\n')
    print(Menu_de_inicio)
menu()
def chmod ():
    # SECURITY NOTE(review): recursively grants read/write/execute to every
    # user on every file under the current working directory. 777 is
    # dangerous anywhere outside a throwaway sandbox; left as-is to preserve
    # the tool's behavior.
    os.system('chmod 777 -R *')
def creacion ():
    """Main interactive command loop.

    Reads one command per call from stdin and dispatches on it; almost every
    branch ends by calling creacion() again, so the menu is driven by
    recursion rather than a while-loop (deep sessions can eventually hit the
    recursion limit). Prompts and messages are Spanish and kept verbatim.
    """
    chmod()
    usr_pas = input('menu $ ')
    # Submenu: edit the user wordlists (generate first on 'y', skip on 'n').
    if usr_pas == 'use user' :
        print('''\n\n
 para agregar tus propias palabras\n
 debes crear generar el diccionario user-pas \n\n
 espera
 \n\n''')
        print ('\nSi ya lo isiste antes preciona [n]\n\n')
        dic = input('[y/n] $ ')
        if dic == 'y' :
            usuarios()
            password()
            print('Listo quieres agregar palabras de [use](pass/user) escribe ')
            pausr = input('[pass/user] $ ')
            if pausr == 'pass' :
                os.system ('nano user_pass.txt')
                creacion()
            elif pausr == 'user' :
                os.system('nano user_user.txt')
                creacion()
            elif pausr == 'create_new pass' :
                os.system('nano New_Pass.txt')
                creacion()
            elif pausr == 'create_new user' :
                os.system('nano New_user.txt')
                creacion()
            else:
                print('ERROR OPCION INVALIDA ... ')
        elif dic == 'n' :
            pausr = input('[pass/user] $ ')
            if pausr == 'pass' :
                os.system ('nano user_pass.txt')
                creacion()
            elif pausr == 'user' :
                os.system('nano user_user.txt')
                creacion()
            elif pausr == 'create_new pass' :
                os.system('nano New_Pass.txt')
                creacion()
            elif pausr == 'create_new user' :
                os.system('nano New_user.txt')
                creacion()
            elif pausr == 'clear' :
                clear()
                creacion()
            else:
                print('ERROR OPCION INVALIDA ... ')
    # Submenu: same wordlist editor, reached via 'use pass' (adds help/exit).
    elif usr_pas == 'use pass' :
        print('''\n\n
 para agregar tus propias palabras\n
 debes crear generar el diccionario user-pas \n\n
 espera
 \n\n''')
        print ('\nSi ya lo isiste antes preciona [n]\n\n')
        dic = input('[y/n] $ ')
        if dic == 'y' :
            usuarios()
            password()
            print('Listo quieres agregar palabras de [use](pass/user) escribe \n\n')
            pausr = input('[pass/user] $ ')
            if pausr == 'pass' :
                os.system ('nano user_pass.txt')
                creacion()
            elif pausr == 'user' :
                os.system('nano user_user.txt')
                creacion()
            elif pausr == 'create_new pass' :
                os.system('nano New_Pass.txt')
                creacion()
            elif pausr == 'create_new user' :
                os.system('nano New_user.txt')
                creacion()
            elif pausr == 'help' :
                help()
                creacion()
            elif pausr == 'exit' :
                print ('Saludes un gusto adios')
                os.system('exit')
            elif pausr == 'clear' :
                clear()
                creacion()
            else:
                print('ERROR OPCION INVALIDA ... ')
        elif dic == 'n' :
            pausr = input('[pass/user] $ ')
            if pausr == 'pass' :
                os.system ('nano user_pass.txt')
                creacion()
            elif pausr == 'user' :
                os.system('nano user_user.txt')
                creacion()
            elif pausr == 'create_new pass' :
                os.system('nano New_Pass.txt')
                creacion()
            elif pausr == 'create_new user' :
                os.system('nano New_user.txt')
                creacion()
            elif pausr == 'help' :
                help()
                creacion()
            elif pausr == 'exit' :
                print ('Saludes un gusto adios')
                os.system('exit')
            elif pausr == 'clear' :
                clear()
                creacion()
            else:
                print('ERROR OPCION INVALIDA ... ')
    elif usr_pas == 'help' :
        clear()
        colores()
        help()
        colores()
        creacion()
    # Clone the author's companion tool from GitHub into a user-chosen path.
    elif usr_pas == 'use hacker_git' :
        colores()
        print (' Instalando ')
        time.sleep(1)
        print('Instalacion..escribe la ruta a guardar la herramienta .. \n')
        instalacion = input('$rute $ ')
        os.system ('git clone https://github.com/anonymous-sys19/Hacker.git')
        os.system ("mv Hacker " + instalacion)
        print('\n listo se instalo con exito\n')
        creacion()
    elif usr_pas == 'create_new_pass' :
        create_new_Pass()
    elif usr_pas == 'create_new_user' :
        create_new_user()
    elif usr_pas == 'exit' :
        clear()
        colores()
        print ('\n--------Saludes un gusto adios-----\n\n')
        os.system('exit')
    elif usr_pas == 'clear' :
        clear()
        colores()
        creacion()
    # Launch the brute-force module with a small animated transition.
    elif usr_pas == 'use force_brute' :
        colores()
        print ('..........')
        time.sleep(0.50)
        print (' ..........' )
        time.sleep(0.50)
        print ('-----abriendo modulo-----')
        colores()
        time.sleep(0.50)
        print ('..........')
        time.sleep(0.50)
        print ('..........')
        time.sleep(0.50)
        clear()
        os.system('python3 modulos/mod_force.py')
        colores()
        creacion()
    elif usr_pas == 'create user list' :
        usuarios()
        print('Creado con exito ....')
        time.sleep(2)
        chmod()
        clear()
        creacion()
    elif usr_pas == 'create pass list' :
        password()
        print('Creado con exito .... ')
        time.sleep(2)
        chmod()
        clear()
        creacion()
    elif usr_pas == 'open msfconsole' :
        print('pleace wait ... execute msfconsole \n\n')
        os.system('msfconsole')
        creacion()
    elif usr_pas == 'create automated payload' :
        os.system('bash modulos/modsh/modulo_payload.sh')
        colores()
        creacion()
    # SECURITY NOTE(review): this branch always matches (x == x), so any
    # unrecognized input is passed verbatim to the shell — deliberate
    # "run any system tool" feature, but it is arbitrary command execution.
    # The final else below is therefore unreachable.
    elif usr_pas == usr_pas :
        os.system(usr_pas)
        colores()
        creacion()
    else:
        clear()
        print('\n\n error ')
        creacion()
creacion()
"os.system",
"sys.stdout.flush",
"time.sleep",
"sys.stdout.write"
] | [((5726, 5744), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (5735, 5744), False, 'import os\n'), ((7620, 7633), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7630, 7633), False, 'import time\n'), ((7638, 7668), 'os.system', 'os.system', (['"""nano New_Pass.txt"""'], {}), "('nano New_Pass.txt')\n", (7647, 7668), False, 'import os\n'), ((7772, 7785), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7782, 7785), False, 'import time\n'), ((7790, 7820), 'os.system', 'os.system', (['"""nano New_user.txt"""'], {}), "('nano New_user.txt')\n", (7799, 7820), False, 'import os\n'), ((8466, 8493), 'os.system', 'os.system', (['"""chmod 777 -R *"""'], {}), "('chmod 777 -R *')\n", (8475, 8493), False, 'import os\n'), ((2259, 2278), 'sys.stdout.write', 'sys.stdout.write', (['c'], {}), '(c)\n', (2275, 2278), False, 'import sys\n'), ((2287, 2305), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2303, 2305), False, 'import sys\n'), ((2314, 2334), 'time.sleep', 'time.sleep', (['(1.0 / 70)'], {}), '(1.0 / 70)\n', (2324, 2334), False, 'import time\n'), ((9098, 9129), 'os.system', 'os.system', (['"""nano user_pass.txt"""'], {}), "('nano user_pass.txt')\n", (9107, 9129), False, 'import os\n'), ((9209, 9240), 'os.system', 'os.system', (['"""nano user_user.txt"""'], {}), "('nano user_user.txt')\n", (9218, 9240), False, 'import os\n'), ((9697, 9728), 'os.system', 'os.system', (['"""nano user_pass.txt"""'], {}), "('nano user_pass.txt')\n", (9706, 9728), False, 'import os\n'), ((10813, 10844), 'os.system', 'os.system', (['"""nano user_pass.txt"""'], {}), "('nano user_pass.txt')\n", (10822, 10844), False, 'import os\n'), ((12699, 12712), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (12709, 12712), False, 'import time\n'), ((12838, 12906), 'os.system', 'os.system', (['"""git clone https://github.com/anonymous-sys19/Hacker.git"""'], {}), "('git clone https://github.com/anonymous-sys19/Hacker.git')\n", (12847, 12906), False, 'import os\n'), 
((12916, 12953), 'os.system', 'os.system', (["('mv Hacker ' + instalacion)"], {}), "('mv Hacker ' + instalacion)\n", (12925, 12953), False, 'import os\n'), ((9330, 9360), 'os.system', 'os.system', (['"""nano New_Pass.txt"""'], {}), "('nano New_Pass.txt')\n", (9339, 9360), False, 'import os\n'), ((9808, 9839), 'os.system', 'os.system', (['"""nano user_user.txt"""'], {}), "('nano user_user.txt')\n", (9817, 9839), False, 'import os\n'), ((10924, 10955), 'os.system', 'os.system', (['"""nano user_user.txt"""'], {}), "('nano user_user.txt')\n", (10933, 10955), False, 'import os\n'), ((11718, 11749), 'os.system', 'os.system', (['"""nano user_pass.txt"""'], {}), "('nano user_pass.txt')\n", (11727, 11749), False, 'import os\n'), ((9450, 9480), 'os.system', 'os.system', (['"""nano New_user.txt"""'], {}), "('nano New_user.txt')\n", (9459, 9480), False, 'import os\n'), ((9929, 9959), 'os.system', 'os.system', (['"""nano New_Pass.txt"""'], {}), "('nano New_Pass.txt')\n", (9938, 9959), False, 'import os\n'), ((11045, 11075), 'os.system', 'os.system', (['"""nano New_Pass.txt"""'], {}), "('nano New_Pass.txt')\n", (11054, 11075), False, 'import os\n'), ((11829, 11860), 'os.system', 'os.system', (['"""nano user_user.txt"""'], {}), "('nano user_user.txt')\n", (11838, 11860), False, 'import os\n'), ((10049, 10079), 'os.system', 'os.system', (['"""nano New_user.txt"""'], {}), "('nano New_user.txt')\n", (10058, 10079), False, 'import os\n'), ((11165, 11195), 'os.system', 'os.system', (['"""nano New_user.txt"""'], {}), "('nano New_user.txt')\n", (11174, 11195), False, 'import os\n'), ((11950, 11980), 'os.system', 'os.system', (['"""nano New_Pass.txt"""'], {}), "('nano New_Pass.txt')\n", (11959, 11980), False, 'import os\n'), ((13292, 13309), 'os.system', 'os.system', (['"""exit"""'], {}), "('exit')\n", (13301, 13309), False, 'import os\n'), ((12070, 12100), 'os.system', 'os.system', (['"""nano New_user.txt"""'], {}), "('nano New_user.txt')\n", (12079, 12100), False, 'import os\n'), 
((11408, 11425), 'os.system', 'os.system', (['"""exit"""'], {}), "('exit')\n", (11417, 11425), False, 'import os\n'), ((13488, 13503), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (13498, 13503), False, 'import time\n'), ((13544, 13559), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (13554, 13559), False, 'import time\n'), ((13631, 13646), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (13641, 13646), False, 'import time\n'), ((13685, 13700), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (13695, 13700), False, 'import time\n'), ((13739, 13754), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (13749, 13754), False, 'import time\n'), ((13780, 13821), 'os.system', 'os.system', (['"""python3 modulos/mod_force.py"""'], {}), "('python3 modulos/mod_force.py')\n", (13789, 13821), False, 'import os\n'), ((12313, 12330), 'os.system', 'os.system', (['"""exit"""'], {}), "('exit')\n", (12322, 12330), False, 'import os\n'), ((13966, 13979), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (13976, 13979), False, 'import time\n'), ((14139, 14152), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (14149, 14152), False, 'import time\n'), ((14309, 14332), 'os.system', 'os.system', (['"""msfconsole"""'], {}), "('msfconsole')\n", (14318, 14332), False, 'import os\n'), ((14409, 14458), 'os.system', 'os.system', (['"""bash modulos/modsh/modulo_payload.sh"""'], {}), "('bash modulos/modsh/modulo_payload.sh')\n", (14418, 14458), False, 'import os\n'), ((14534, 14552), 'os.system', 'os.system', (['usr_pas'], {}), '(usr_pas)\n', (14543, 14552), False, 'import os\n')] |
import itertools
import math
import pprint
import sys
import typing
import map_funcs
# Calibration data for the Google Earth airport screenshot: image size,
# metres-per-pixel scale, the datum point used as coordinate origin, and
# pixel positions of every landmark measured off the image.
GOOGLE_EARTH_AIRPORT_IMAGES = {
    'GoogleEarth_AirportCamera_C.jpg' : {
        'path': 'video_images/GoogleEarth_AirportCamera_C.jpg',
        'width': 4800,
        'height': 3011,
        # Originally measured on the 100m legend as 181 px
        # 'm_per_px': 100 / (4786 - 4605),
        # Corrected to 185 to give runway length of 1650.5 m
        'm_per_px': 100 / (4786 - 4601),
        'datum': 'runway_23_start',
        # Landmark pixel coordinates (x, y) measured on the image.
        'measurements': {
            # 'datum_1': 'runway_23_end',
            'runway_23_start': map_funcs.Point(3217, 204),
            'runway_23_end': map_funcs.Point((1310 + 1356) / 2, (2589 + 2625) / 2),
            'perimeter_fence': map_funcs.Point(967, 2788),
            'red_building': map_funcs.Point(914, 2827),
            'helicopter': map_funcs.Point(2630, 1236),
            'camera_B': map_funcs.Point(2890, 1103),
            'buildings_apron_edge': map_funcs.Point(2213, 1780),
            # The next three are from camera B frame 850
            # Dark smudge on right
            'right_dark_grass': map_funcs.Point(2742, 1137),
            # Pale smudge on right where tarmac meets grass
            'right_light_grass': map_funcs.Point(2755, 1154),
            # Pale smudge on left where tarmac taxiway meets grass
            # 'left_light_grass': map_funcs.Point(2492, 1488),
            # Bright roofed house
            'bright_roofed_house': map_funcs.Point(1067, 2243),
        }
    },
}
# Estimated absolute positional error (metres) of any single measured point.
ABSOLUTE_POSITION_ERROR_M = 10.0
# Two points separated by this baseline distance are assumed accurate to
# ABSOLUTE_POSITION_ERROR_M; nearer pairs are proportionally more accurate,
# farther pairs proportionally less so.
RELATIVE_POSITION_ERROR_BASELINE_M = 1000.0
RELATIVE_BEARING_ERROR_DEG = 0.5


def relative_position_error(distance_between: float) -> float:
    """Estimate the relative position error (metres) of two points that are
    *distance_between* metres apart.

    The estimate scales linearly with separation: two nearby points are very
    unlikely to carry extreme relative errors, while the likely error grows
    as they move apart.
    """
    scale = distance_between / RELATIVE_POSITION_ERROR_BASELINE_M
    return scale * ABSOLUTE_POSITION_ERROR_M
# Runway length in metres, derived from the two measured runway end points
# and the calibrated metres-per-pixel scale of the image.
RUNWAY_LENGTH_M = map_funcs.distance(
    GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']['runway_23_start'],
    GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']['runway_23_end'],
    GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['m_per_px'],
)
# Runway heading in degrees, computed from the same two end points.
RUNWAY_HEADING_DEG = map_funcs.bearing(
    GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']['runway_23_start'],
    GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']['runway_23_end'],
)
def measurements_relative_to_runway() -> typing.Dict[str, map_funcs.Point]:
    """Reduce every measured point to runway-axis coordinates, in metres."""
    image = GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']
    measurements = image['measurements']
    # The configured datum point becomes the origin of the rotated frame.
    origin = measurements[image['datum']]
    scale = image['m_per_px']
    result: typing.Dict[str, map_funcs.Point] = {}
    for name, px_point in measurements.items():
        # Rotate/translate into the runway frame, then convert px -> metres.
        rotated = map_funcs.translate_rotate(px_point, RUNWAY_HEADING_DEG, origin)
        result[name] = map_funcs.Point(scale * rotated.x, scale * rotated.y)
    return result
def bearings_from_camera_b() -> typing.Dict[str, typing.Tuple[float, float, float]]:
    """Return the bearing from camera B to every other measured point.

    Each value is a ``(bearing, bearing_min, bearing_max)`` tuple in degrees,
    where the min/max bound the bearing given an absolute position error of
    ABSOLUTE_POSITION_ERROR_M on the target point.

    Note: the original annotation claimed ``Dict[str, float]`` although
    3-tuples are stored; the annotation is corrected here.
    """
    ret: typing.Dict[str, typing.Tuple[float, float, float]] = {}
    camera_b = GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']['camera_B']
    m_per_px = GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements'] and \
        GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['m_per_px']
    for k, v in GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements'].items():
        if k != 'camera_B':
            b = map_funcs.bearing(camera_b, v)
            # Position error in pixels drives the bearing uncertainty bounds.
            b_min, b_max = map_funcs.bearing_min_max(camera_b, v, ABSOLUTE_POSITION_ERROR_M / m_per_px)
            ret[k] = b, b_min, b_max
    return ret
def main() -> int:
    """Print the image calibration, runway-relative measurements, bearings
    from camera B, and pairwise bearing differences. Returns 0 on success."""
    # Check scale and runway length
    m_per_px = GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['m_per_px']
    print(f'GoogleEarth_AirportCamera_C.jpg scale {m_per_px:0.4f} (m/pixel)')
    print(f'GoogleEarth_AirportCamera_C.jpg runway length {RUNWAY_LENGTH_M:.1f} (m)')
    print(f'GoogleEarth_AirportCamera_C.jpg runway heading {RUNWAY_HEADING_DEG:.2f} (degrees)')
    # Runway-axis coordinates of every measured landmark.
    measurements = measurements_relative_to_runway()
    print('X-Y Relative')
    for k in measurements:
        print(f'{k:24} : x={measurements[k].x:8.1f} y={measurements[k].y:8.1f}')
    # Bearings (with uncertainty bounds) from camera B to each landmark.
    bearings = bearings_from_camera_b()
    print('Bearings')
    for k in bearings:
        # print(f'{k:24} : {bearings[k]:8.1f}')
        b, b_min, b_max = bearings[k]
        # print(f'{k:24} : {bearings[k]}')
        print(f'{k:24} : {b:8.2f} ± {b_max - b:.2f}/{b_min - b:.2f}')
    # Pairwise bearing differences (nominal and worst-case bounds).
    for a, b in itertools.combinations(('red_building', 'helicopter', 'buildings_apron_edge'), 2):
        ba, ba_min, ba_max = bearings[a]
        bb, bb_min, bb_max = bearings[b]
        print(a, '<->', b)
        print(f'{ba - bb:4.2f} {ba_max - bb_min:4.2f} {ba_min - bb_max:4.2f}')
    return 0
# Script entry point: print calibration, measurements and bearings.
if __name__ == '__main__':
    sys.exit(main())
"map_funcs.bearing_min_max",
"map_funcs.translate_rotate",
"map_funcs.distance",
"itertools.combinations",
"map_funcs.bearing",
"map_funcs.Point"
] | [((2377, 2686), 'map_funcs.distance', 'map_funcs.distance', (["GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements'][\n 'runway_23_start']", "GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements'][\n 'runway_23_end']", "GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['m_per_px']"], {}), "(GOOGLE_EARTH_AIRPORT_IMAGES[\n 'GoogleEarth_AirportCamera_C.jpg']['measurements']['runway_23_start'],\n GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg'][\n 'measurements']['runway_23_end'], GOOGLE_EARTH_AIRPORT_IMAGES[\n 'GoogleEarth_AirportCamera_C.jpg']['m_per_px'])\n", (2395, 2686), False, 'import map_funcs\n'), ((2705, 2932), 'map_funcs.bearing', 'map_funcs.bearing', (["GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements'][\n 'runway_23_start']", "GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements'][\n 'runway_23_end']"], {}), "(GOOGLE_EARTH_AIRPORT_IMAGES[\n 'GoogleEarth_AirportCamera_C.jpg']['measurements']['runway_23_start'],\n GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg'][\n 'measurements']['runway_23_end'])\n", (2722, 2932), False, 'import map_funcs\n'), ((5303, 5388), 'itertools.combinations', 'itertools.combinations', (["('red_building', 'helicopter', 'buildings_apron_edge')", '(2)'], {}), "(('red_building', 'helicopter',\n 'buildings_apron_edge'), 2)\n", (5325, 5388), False, 'import itertools\n'), ((3632, 3690), 'map_funcs.translate_rotate', 'map_funcs.translate_rotate', (['pt', 'RUNWAY_HEADING_DEG', 'origin'], {}), '(pt, RUNWAY_HEADING_DEG, origin)\n', (3658, 3690), False, 'import map_funcs\n'), ((3708, 3765), 'map_funcs.Point', 'map_funcs.Point', (['(m_per_px * new_pt.x)', '(m_per_px * new_pt.y)'], {}), '(m_per_px * new_pt.x, m_per_px * new_pt.y)\n', (3723, 3765), False, 'import map_funcs\n'), ((611, 637), 'map_funcs.Point', 'map_funcs.Point', (['(3217)', '(204)'], {}), '(3217, 204)\n', (626, 637), False, 'import 
map_funcs\n'), ((668, 721), 'map_funcs.Point', 'map_funcs.Point', (['((1310 + 1356) / 2)', '((2589 + 2625) / 2)'], {}), '((1310 + 1356) / 2, (2589 + 2625) / 2)\n', (683, 721), False, 'import map_funcs\n'), ((754, 780), 'map_funcs.Point', 'map_funcs.Point', (['(967)', '(2788)'], {}), '(967, 2788)\n', (769, 780), False, 'import map_funcs\n'), ((810, 836), 'map_funcs.Point', 'map_funcs.Point', (['(914)', '(2827)'], {}), '(914, 2827)\n', (825, 836), False, 'import map_funcs\n'), ((864, 891), 'map_funcs.Point', 'map_funcs.Point', (['(2630)', '(1236)'], {}), '(2630, 1236)\n', (879, 891), False, 'import map_funcs\n'), ((917, 944), 'map_funcs.Point', 'map_funcs.Point', (['(2890)', '(1103)'], {}), '(2890, 1103)\n', (932, 944), False, 'import map_funcs\n'), ((982, 1009), 'map_funcs.Point', 'map_funcs.Point', (['(2213)', '(1780)'], {}), '(2213, 1780)\n', (997, 1009), False, 'import map_funcs\n'), ((1135, 1162), 'map_funcs.Point', 'map_funcs.Point', (['(2742)', '(1137)'], {}), '(2742, 1137)\n', (1150, 1162), False, 'import map_funcs\n'), ((1257, 1284), 'map_funcs.Point', 'map_funcs.Point', (['(2755)', '(1154)'], {}), '(2755, 1154)\n', (1272, 1284), False, 'import map_funcs\n'), ((1485, 1512), 'map_funcs.Point', 'map_funcs.Point', (['(1067)', '(2243)'], {}), '(1067, 2243)\n', (1500, 1512), False, 'import map_funcs\n'), ((4222, 4252), 'map_funcs.bearing', 'map_funcs.bearing', (['camera_b', 'v'], {}), '(camera_b, v)\n', (4239, 4252), False, 'import map_funcs\n'), ((4280, 4356), 'map_funcs.bearing_min_max', 'map_funcs.bearing_min_max', (['camera_b', 'v', '(ABSOLUTE_POSITION_ERROR_M / m_per_px)'], {}), '(camera_b, v, ABSOLUTE_POSITION_ERROR_M / m_per_px)\n', (4305, 4356), False, 'import map_funcs\n')] |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils.validation import check_random_state
from sklearn.datasets import fetch_olivetti_faces
from sklearn.externals import joblib
# Fixed seed so the random selection of test faces is reproducible.
rng = check_random_state(21)
# NOTE(review): fetch_olivetti_faces may download the dataset on first use.
dataset = fetch_olivetti_faces()
# Flatten each image into one row of pixels.
X = dataset.images.reshape(dataset.images.shape[0], -1)
# Split by subject id: lower-numbered subjects train, the rest are held out.
train = X[dataset.target < 30]
test = X[dataset.target >= 30]
# Pick a few random held-out faces for visualisation.
n_faces = 3
face_ids = rng.randint(test.shape[0], size=(n_faces,))
test = test[face_ids, :]
n_pixels = X.shape[1]
# Upper half of the faces
X_train = train[:, :(n_pixels + 1) // 2]
# Lower half of the faces
y_train = train[:, n_pixels // 2:]
X_test = test[:, :(n_pixels + 1) // 2]
y_test = test[:, n_pixels // 2:]
# Plot layout: true faces on the first row, predictions on the second.
n_rows = 2
imshape = (64, 64,)
def test_model(y_pred, model_name):
    """Plot the true faces (top row) against faces whose lower half was
    predicted by a model (bottom row).

    y_pred: predicted lower-half pixel rows, one per test face.
    model_name: label appended to the figure title.
    """
    plt.figure(figsize=(1.7 * n_faces, 4))
    plt.suptitle('Face completion with ' + model_name, size=12)
    # Top row: upper half stitched to the true lower half.
    for i in range(n_faces):
        # Pass row/column counts directly instead of building a digit string
        # with '{}{}{}'.format(...), which silently breaks once any count
        # reaches 10.
        plt.subplot(n_rows, n_faces, i + 1)
        plt.axis('off')
        plt.imshow(np.hstack((X_test[i], y_test[i])).reshape(imshape),
                   cmap=plt.cm.gray, interpolation='nearest')
    # Bottom row: upper half stitched to the predicted lower half.
    for i in range(n_faces):
        plt.subplot(n_rows, n_faces, i + n_faces + 1)
        plt.axis('off')
        plt.imshow(np.hstack((X_test[i], y_pred[i])).reshape(imshape),
                   cmap=plt.cm.gray, interpolation='nearest')
# Evaluate each persisted estimator on the held-out upper-half faces.
# test_model() already prefixes the figure title with 'Face completion with ',
# so pass only the estimator name (the original labels doubled the prefix,
# producing 'Face completion with Face completion with ...').
test_model(joblib.load('../trained_models/nn_face_completion.pkl').predict(X_test), 'a Neural Network')
test_model(joblib.load('../trained_models/knn_face_completion.pkl').predict(X_test), 'a k-Nearest Neighbors')
test_model(joblib.load('../trained_models/dt_face_completion.pkl').predict(X_test), 'a Decision Tree')
plt.show()
| [
"numpy.hstack",
"sklearn.utils.validation.check_random_state",
"sklearn.externals.joblib.load",
"sklearn.datasets.fetch_olivetti_faces",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show"
] | [((202, 224), 'sklearn.utils.validation.check_random_state', 'check_random_state', (['(21)'], {}), '(21)\n', (220, 224), False, 'from sklearn.utils.validation import check_random_state\n'), ((235, 257), 'sklearn.datasets.fetch_olivetti_faces', 'fetch_olivetti_faces', ([], {}), '()\n', (255, 257), False, 'from sklearn.datasets import fetch_olivetti_faces\n'), ((1812, 1822), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1820, 1822), True, 'import matplotlib.pyplot as plt\n'), ((769, 807), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1.7 * n_faces, 4)'}), '(figsize=(1.7 * n_faces, 4))\n', (779, 807), True, 'import matplotlib.pyplot as plt\n'), ((810, 869), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('Face completion with ' + model_name)"], {'size': '(12)'}), "('Face completion with ' + model_name, size=12)\n", (822, 869), True, 'import matplotlib.pyplot as plt\n'), ((1011, 1026), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1019, 1026), True, 'import matplotlib.pyplot as plt\n'), ((1300, 1315), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1308, 1315), True, 'import matplotlib.pyplot as plt\n'), ((1443, 1498), 'sklearn.externals.joblib.load', 'joblib.load', (['"""../trained_models/nn_face_completion.pkl"""'], {}), "('../trained_models/nn_face_completion.pkl')\n", (1454, 1498), False, 'from sklearn.externals import joblib\n'), ((1568, 1624), 'sklearn.externals.joblib.load', 'joblib.load', (['"""../trained_models/knn_face_completion.pkl"""'], {}), "('../trained_models/knn_face_completion.pkl')\n", (1579, 1624), False, 'from sklearn.externals import joblib\n'), ((1699, 1754), 'sklearn.externals.joblib.load', 'joblib.load', (['"""../trained_models/dt_face_completion.pkl"""'], {}), "('../trained_models/dt_face_completion.pkl')\n", (1710, 1754), False, 'from sklearn.externals import joblib\n'), ((1046, 1079), 'numpy.hstack', 'np.hstack', (['(X_test[i], y_test[i])'], {}), 
'((X_test[i], y_test[i]))\n', (1055, 1079), True, 'import numpy as np\n'), ((1335, 1368), 'numpy.hstack', 'np.hstack', (['(X_test[i], y_pred[i])'], {}), '((X_test[i], y_pred[i]))\n', (1344, 1368), True, 'import numpy as np\n')] |
import newspaper
from newspaper import Article
def getarticle(url):
    """Download and parse the article at *url* and return its plain text.

    Returns a fixed fallback message when the page cannot be fetched or
    parsed.
    """
    article = Article(url)
    try:
        article.download()
        article.parse()
        return article.text
    # Catch Exception rather than using a bare except, so that
    # KeyboardInterrupt and SystemExit still propagate.
    except Exception:
        return "this website is not available"
| [
"newspaper.Article"
] | [((111, 130), 'newspaper.Article', 'Article', (['articleurl'], {}), '(articleurl)\n', (118, 130), False, 'from newspaper import Article\n')] |
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras import models
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
import numpy as np
import cv2
# prebuild model with pre-trained weights on imagenet
# NOTE(review): the weights are downloaded on first use if not cached locally.
base_model = VGG16(weights='imagenet', include_top=True)
print (base_model)
# List every layer with its index so an extraction point can be chosen.
for i, layer in enumerate(base_model.layers):
    print (i, layer.name, layer.output_shape)
# extract features from block4_pool block
model = models.Model(inputs=base_model.input,
    outputs=base_model.get_layer('block4_pool').output)
# Load one image at the 224x224 input size VGG16 expects, add a leading
# batch dimension and apply the VGG16-specific preprocessing.
img_path = 'cat.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# get the features from this block
features = model.predict(x)
print(features)
| [
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.applications.vgg16.preprocess_input",
"numpy.expand_dims",
"tensorflow.keras.applications.vgg16.VGG16",
"tensorflow.keras.preprocessing.image.img_to_array"
] | [((336, 379), 'tensorflow.keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), "(weights='imagenet', include_top=True)\n", (341, 379), False, 'from tensorflow.keras.applications.vgg16 import VGG16\n'), ((669, 717), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (683, 717), False, 'from tensorflow.keras.preprocessing import image\n'), ((723, 746), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (741, 746), False, 'from tensorflow.keras.preprocessing import image\n'), ((752, 777), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (766, 777), True, 'import numpy as np\n'), ((783, 802), 'tensorflow.keras.applications.vgg16.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (799, 802), False, 'from tensorflow.keras.applications.vgg16 import preprocess_input\n')] |
"""
call in shell: python evaluate.py --dir <rootdir/experiment/> --epoch <epoch to>
e.g. in shell: python evaluate.py --dir Runs/se_resnet_trained_final/ --epoch 149
loops over all folds and calculates + stores the accuracies in a file in the root folder of the experiment
you might change the model in line 45 from resnet to se_resnet (see comment)
"""
import torch
from torch.utils.data import Dataset, DataLoader
#import matplotlib.pyplot as plt
#import seaborn as sns; sns.set()
import numpy as np
import os
from os.path import join
import argparse
import Training_custom.load_dataset
from senet.baseline import resnet20
from senet.se_resnet import se_resnet20
parser = argparse.ArgumentParser()
# Root folder of the experiment, containing one sub-directory per fold.
parser.add_argument('-d', '--dir', type=str, metavar='', required=True, help='Directory of the x_folds.')
# Epoch number of the checkpoint to load from each fold.
parser.add_argument('-e', '--epoch', type=int, metavar='', required=True, help='from which epoch should the model be loaded?')
args = parser.parse_args()
working_dir = os.getcwd()
# Experiment root resolved against the current working directory.
rootpath = join(working_dir, args.dir)
def evaluate(fold_i):
    """Evaluate the checkpointed model of fold *fold_i* on the test set.

    Loads the snapshot saved for ``args.epoch``, restores the model weights,
    computes image-wise accuracy over the whole test set and returns a
    one-line summary string (``"folder: <fold>, accuracy: <acc> \\n"``).
    """
    # Path to the training snapshot of the requested epoch.  The .tar file is
    # a dict holding the training state; only 'model_state_dict' (plus the
    # per-epoch 'train_loss'/'val_loss' tensors) is of interest here.
    CHKPT = f"{args.dir}/fold_{fold_i}/checkpoints/train_chkpt_{args.epoch}.tar"
    train_status = torch.load(CHKPT, map_location='cpu')
    # Restore the model and switch to inference mode.
    model = resnet20(num_classes=4)  # alternatively: se_resnet20(num_classes=4, reduction=16)
    model.load_state_dict(train_status['model_state_dict'])
    model.eval()
    test_data = Training_custom.load_dataset.imagewise_dataset(datadir='/home/vbarth/HIWI/classificationDataValentin/mixed_cropped/test')
    correct = 0
    # no_grad: model.eval() alone does not disable autograd bookkeeping.
    with torch.no_grad():
        for i, (x, y) in enumerate(test_data, start=1):
            x = x.unsqueeze(0)  # add the missing batch dimension -> 4-d tensor
            y_pred = model(x).squeeze()
            pred, ind = torch.max(y_pred, 0)
            if y.item() == ind.item():
                correct += 1
            # Progress trace: dump every 3000th sample.
            if i % 3000 == 0:
                print("Sample: ", i, "\n y_pred: ", y_pred, "\n pred: ", pred, "\n ind: ", ind, "\n y: ", y.item())
    acc = correct / len(test_data)
    return f"folder: {fold_i}, accuracy: {acc} \n"
if __name__ == "__main__":
n_files = (len([name for name in os.listdir(rootpath)]))
#print(n_files)
accs = []
for fold in range(n_files):
print(f"Processing folder number {fold}")
acc_str = evaluate(fold+1)
accs.append(acc_str)
with open(join(rootpath, "accuracies"), 'w') as f:
for string in accs:
f.write(string)
| [
"os.listdir",
"argparse.ArgumentParser",
"senet.baseline.resnet20",
"torch.load",
"torch.max",
"os.path.join",
"os.getcwd"
] | [((680, 705), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (703, 705), False, 'import argparse\n'), ((982, 993), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (991, 993), False, 'import os\n'), ((1006, 1033), 'os.path.join', 'join', (['working_dir', 'args.dir'], {}), '(working_dir, args.dir)\n', (1010, 1033), False, 'from os.path import join\n'), ((1518, 1555), 'torch.load', 'torch.load', (['CHKPT'], {'map_location': '"""cpu"""'}), "(CHKPT, map_location='cpu')\n", (1528, 1555), False, 'import torch\n'), ((1627, 1650), 'senet.baseline.resnet20', 'resnet20', ([], {'num_classes': '(4)'}), '(num_classes=4)\n', (1635, 1650), False, 'from senet.baseline import resnet20\n'), ((2353, 2373), 'torch.max', 'torch.max', (['y_pred', '(0)'], {}), '(y_pred, 0)\n', (2362, 2373), False, 'import torch\n'), ((3129, 3157), 'os.path.join', 'join', (['rootpath', '"""accuracies"""'], {}), "(rootpath, 'accuracies')\n", (3133, 3157), False, 'from os.path import join\n'), ((2901, 2921), 'os.listdir', 'os.listdir', (['rootpath'], {}), '(rootpath)\n', (2911, 2921), False, 'import os\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import json
import hmac
import hashlib
import time
import socket
import ssl
from bson import ObjectId
from tg import expose, validate, redirect, flash, config
from tg.decorators import with_trailing_slash, without_trailing_slash
from tg import tmpl_context as c
from tg import response, request
from formencode import validators as fev, schema, Invalid
from ming.odm import session
from webob import exc
from pymongo.errors import DuplicateKeyError
from paste.deploy.converters import asint, aslist
from allura.app import AdminControllerMixin
from allura.controllers import BaseController
from allura.lib import helpers as h
import requests
from allura.lib import validators as v
from allura.lib.decorators import require_post, task
from allura.lib.utils import DateJSONEncoder
from allura import model as M
import six
from six.moves import map
# Module-level logger, named after this module for hierarchical filtering.
log = logging.getLogger(__name__)
class WebhookValidator(fev.FancyValidator):
    """Formencode validator that resolves a value to a Webhook document.

    Accepts a Webhook instance, an ObjectId, or an id string, and only
    accepts webhooks whose type matches the expected sender and whose
    app_config matches the expected app.
    """

    def __init__(self, sender, app, **kw):
        self.sender = sender
        self.app = app
        super(WebhookValidator, self).__init__(**kw)

    def _lookup(self, value):
        # Resolve the raw value (Webhook / ObjectId / id string) to a
        # Webhook document; return None when it cannot be resolved.
        if isinstance(value, M.Webhook):
            return value
        if isinstance(value, ObjectId):
            return M.Webhook.query.get(_id=value)
        try:
            return M.Webhook.query.get(_id=ObjectId(value))
        except Exception:
            return None

    def _to_python(self, value, state):
        webhook = self._lookup(value)
        if webhook and webhook.type == self.sender.type \
                and webhook.app_config_id == self.app.config._id:
            return webhook
        raise Invalid('Invalid webhook', value, state)
class WebhookCreateForm(schema.Schema):
    """Validation schema for creating a webhook."""
    # Payload URL the webhook will target; must be a well-formed, non-empty URL.
    url = fev.URL(not_empty=True)
    # Optional shared secret; a random one is generated when left blank
    # (see WebhookController.update_webhook).
    secret = v.UnicodeString()
class WebhookEditForm(WebhookCreateForm):
    """Create-form fields plus a 'webhook' field resolving the hook being edited."""

    def __init__(self, sender, app):
        super(WebhookEditForm, self).__init__()
        # The webhook field must validate against this sender/app pair.
        self.add_field('webhook', WebhookValidator(
            sender=sender, app=app, not_empty=True))
class WebhookControllerMeta(type):
    # Metaclass that attaches @validate to controller post handlers at
    # instantiation time, because the validator needs the concrete
    # sender/app pair which is only known when the controller is built.
    def __call__(cls, sender, app, *args, **kw):
        """Decorate post handlers with a validator that references
        the appropriate webhook sender for this controller.
        """
        if hasattr(cls, 'create'):
            # Errors re-render the create form via index().
            cls.create = validate(
                cls.create_form(),
                error_handler=getattr(cls.index, '__func__', cls.index),
            )(cls.create)
        if hasattr(cls, 'edit'):
            # Errors re-render the edit form via _default().
            cls.edit = validate(
                cls.edit_form(sender, app),
                error_handler=getattr(cls._default, '__func__', cls._default),
            )(cls.edit)
        return type.__call__(cls, sender, app, *args, **kw)
class WebhookController(six.with_metaclass(WebhookControllerMeta, BaseController, AdminControllerMixin)):
create_form = WebhookCreateForm
edit_form = WebhookEditForm
def __init__(self, sender, app):
super(WebhookController, self).__init__()
self.sender = sender()
self.app = app
def gen_secret(self):
return h.cryptographic_nonce(20)
def update_webhook(self, wh, url, secret=None):
if not secret:
secret = self.gen_secret()
wh.hook_url = url
wh.secret = secret
try:
session(wh).flush(wh)
except DuplicateKeyError:
session(wh).expunge(wh)
msg = '_the_form: "{}" webhook already exists for {} {}'.format(
wh.type, self.app.config.options.mount_label, url)
raise Invalid(msg, None, None)
@with_trailing_slash
@expose('jinja:allura:templates/webhooks/create_form.html')
def index(self, **kw):
if not c.form_values and kw:
# Executes if update_webhook raises an error
c.form_values = {'url': kw.get('url'),
'secret': kw.get('secret')}
return {'sender': self.sender,
'action': 'create',
'form': self.create_form()}
@expose('jinja:allura:templates/webhooks/create_form.html') # needed when we "return self.index(...)"
@require_post()
# @validate set dynamically in WebhookControllerMeta
def create(self, url, secret):
if self.sender.enforce_limit(self.app):
webhook = M.Webhook(
type=self.sender.type,
app_config_id=self.app.config._id)
try:
self.update_webhook(webhook, url, secret)
except Invalid as e:
# trigger error_handler directly
c.form_errors['_the_form'] = e
return self.index(url=url, secret=secret)
M.AuditLog.log('add webhook %s %s %s',
webhook.type, webhook.hook_url,
webhook.app_config.url())
flash('Created successfully', 'ok')
else:
flash('You have exceeded the maximum number of webhooks '
'you are allowed to create for this project/app', 'error')
redirect(self.app.admin_url + 'webhooks')
@expose('jinja:allura:templates/webhooks/create_form.html') # needed when we "return self._default(...)"
@require_post()
# @validate set dynamically in WebhookControllerMeta
def edit(self, webhook, url, secret):
old_url = webhook.hook_url
old_secret = webhook.secret
try:
self.update_webhook(webhook, url, secret)
except Invalid as e:
# trigger error_handler directly
c.form_errors['_the_form'] = e
return self._default(webhook=webhook, url=url, secret=secret)
M.AuditLog.log('edit webhook %s\n%s => %s\n%s',
webhook.type, old_url, url,
'secret changed' if old_secret != secret else '')
flash('Edited successfully', 'ok')
redirect(self.app.admin_url + 'webhooks')
@expose('json:')
@require_post()
def delete(self, webhook, **kw):
form = self.edit_form(self.sender, self.app)
try:
wh = form.fields['webhook'].to_python(webhook)
except Invalid:
raise exc.HTTPNotFound()
wh.delete()
M.AuditLog.log('delete webhook %s %s %s',
wh.type, wh.hook_url, wh.app_config.url())
return {'status': 'ok'}
    @without_trailing_slash
    @expose('jinja:allura:templates/webhooks/create_form.html')
    def _default(self, webhook, **kw):
        """Render the edit form for a single webhook (id taken from the URL).

        Form fields are pre-filled from ``kw`` (present after a failed
        submit round-trips through :meth:`edit`) and fall back to the
        stored webhook's values.  Unknown ids raise 404.
        """
        form = self.edit_form(self.sender, self.app)
        try:
            wh = form.fields['webhook'].to_python(webhook)
        except Invalid:
            raise exc.HTTPNotFound()
        c.form_values = {'url': kw.get('url') or wh.hook_url,
                         'secret': kw.get('secret') or wh.secret,
                         'webhook': six.text_type(wh._id)}
        return {'sender': self.sender,
                'action': 'edit',
                'form': form}
class WebhookRestController(BaseController):
    """JSON/REST counterpart of the HTML webhook admin controller.

    Reuses WebhookController's forms and validation; every endpoint
    returns JSON and signals failure via HTTP status codes (400/404)
    instead of flash messages.
    """

    def __init__(self, sender, app):
        super(WebhookRestController, self).__init__()
        # ``sender`` is a WebhookSender *class*; instantiate it here.
        self.sender = sender()
        self.app = app
        self.create_form = WebhookController.create_form
        self.edit_form = WebhookController.edit_form

    def _error(self, e):
        """Convert a validation exception into a JSON-serializable error.

        Prefers a per-field dict (``error_dict``), then the exception's
        ``msg``, then ``message``, finally an empty string.
        """
        error = getattr(e, 'error_dict', None)
        if error:
            _error = {}
            for k, val in six.iteritems(error):
                _error[k] = six.text_type(val)
            return _error
        error = getattr(e, 'msg', None)
        if not error:
            error = getattr(e, 'message', '')
        return error

    def update_webhook(self, wh, url, secret=None):
        # Delegate to the HTML controller so validation/update logic
        # lives in exactly one place.
        controller = WebhookController(self.sender.__class__, self.app)
        controller.update_webhook(wh, url, secret)

    @expose('json:')
    @require_post()
    def index(self, **kw):
        """POST: create a webhook.  201 + webhook JSON on success,
        400 + error details on validation failure or when the
        per-project/app limit is exceeded.
        """
        response.content_type = str('application/json')
        try:
            params = {'secret': kw.pop('secret', ''),
                      'url': kw.pop('url', None)}
            valid = self.create_form().to_python(params)
        except Exception as e:
            response.status_int = 400
            return {'result': 'error', 'error': self._error(e)}
        if self.sender.enforce_limit(self.app):
            webhook = M.Webhook(
                type=self.sender.type,
                app_config_id=self.app.config._id)
            try:
                self.update_webhook(webhook, valid['url'], valid['secret'])
            except Invalid as e:
                response.status_int = 400
                return {'result': 'error', 'error': self._error(e)}
            M.AuditLog.log('add webhook %s %s %s',
                           webhook.type, webhook.hook_url,
                           webhook.app_config.url())
            response.status_int = 201
            # refetch updated values (e.g. mod_date)
            session(webhook).expunge(webhook)
            webhook = M.Webhook.query.get(_id=webhook._id)
            return webhook.__json__()
        else:
            # Over the limit: report both the maximum and current usage.
            limits = {
                'max': M.Webhook.max_hooks(
                    self.sender.type,
                    self.app.config.tool_name),
                'used': M.Webhook.query.find({
                    'type': self.sender.type,
                    'app_config_id': self.app.config._id,
                }).count(),
            }
            resp = {
                'result': 'error',
                'error': 'You have exceeded the maximum number of webhooks '
                         'you are allowed to create for this project/app',
                'limits': limits,
            }
            response.status_int = 400
            return resp

    @expose('json:')
    def _default(self, webhook, **kw):
        """Dispatch /<webhook-id> by HTTP method: POST edits, DELETE
        deletes, anything else returns the webhook as JSON.  Unknown
        ids raise 404.
        """
        form = self.edit_form(self.sender, self.app)
        try:
            wh = form.fields['webhook'].to_python(webhook)
        except Invalid:
            raise exc.HTTPNotFound()
        if request.method == 'POST':
            return self._edit(wh, form, **kw)
        elif request.method == 'DELETE':
            return self._delete(wh)
        else:
            return wh.__json__()

    def _edit(self, webhook, form, **kw):
        """Update url/secret of an existing webhook; omitted fields keep
        their current values.  Returns updated webhook JSON, or 400 with
        error details on validation failure.
        """
        old_secret = webhook.secret
        old_url = webhook.hook_url
        try:
            params = {'secret': kw.pop('secret', old_secret),
                      'url': kw.pop('url', old_url),
                      'webhook': six.text_type(webhook._id)}
            valid = form.to_python(params)
        except Exception as e:
            response.status_int = 400
            return {'result': 'error', 'error': self._error(e)}
        try:
            self.update_webhook(webhook, valid['url'], valid['secret'])
        except Invalid as e:
            response.status_int = 400
            return {'result': 'error', 'error': self._error(e)}
        # Audit-log the change without recording the secret's value.
        M.AuditLog.log(
            'edit webhook %s\n%s => %s\n%s',
            webhook.type, old_url, valid['url'],
            'secret changed' if old_secret != valid['secret'] else '')
        # refetch updated values (e.g. mod_date)
        session(webhook).expunge(webhook)
        webhook = M.Webhook.query.get(_id=webhook._id)
        return webhook.__json__()

    def _delete(self, webhook):
        """Delete the (already validated) webhook and audit-log it."""
        webhook.delete()
        M.AuditLog.log(
            'delete webhook %s %s %s',
            webhook.type,
            webhook.hook_url,
            webhook.app_config.url())
        return {'result': 'ok'}
class SendWebhookHelper(object):
    """Delivers a single webhook payload over HTTP: serializes it,
    signs it with HMAC-SHA1, POSTs it, and retries on failure using
    configurable back-off delays.
    """

    def __init__(self, webhook, payload):
        self.webhook = webhook
        self.payload = payload

    @property
    def timeout(self):
        """Per-request timeout in seconds (config key ``webhook.timeout``)."""
        return asint(config.get('webhook.timeout', 30))

    @property
    def retries(self):
        """Retry delays in seconds (config key ``webhook.retry``)."""
        delays = aslist(config.get('webhook.retry', [60, 120, 240]))
        return [int(d) for d in delays]

    def sign(self, json_payload):
        """Return the ``X-Allura-Signature`` header value for the payload."""
        mac = hmac.new(
            self.webhook.secret.encode('utf-8'),
            json_payload.encode('utf-8'),
            hashlib.sha1)
        return 'sha1=' + mac.hexdigest()

    def log_msg(self, msg, response=None):
        """Format *msg* with the webhook's identity and, when given, the
        HTTP *response* details."""
        message = '{}: {} {} {}'.format(
            msg, self.webhook.type, self.webhook.hook_url,
            self.webhook.app_config.url())
        if response is None:
            return message
        return '{} {} {} {}'.format(
            message, response.status_code, response.text, response.headers)

    def send(self):
        """Serialize, sign and POST the payload; on failure retry after
        each configured delay, stopping at the first success."""
        body = json.dumps(self.payload, cls=DateJSONEncoder)
        headers = {'content-type': 'application/json',
                   'User-Agent': 'Allura Webhook (https://allura.apache.org/)',
                   'X-Allura-Signature': self.sign(body)}
        if self._send(self.webhook.hook_url, body, headers):
            return
        log.info('Retrying webhook in: %s', self.retries)
        for delay in self.retries:
            log.info('Retrying webhook in %s seconds', delay)
            time.sleep(delay)
            if self._send(self.webhook.hook_url, body, headers):
                return

    def _send(self, url, data, headers):
        """POST once; return True on a 2xx response, False otherwise.
        Network/SSL errors are logged and treated as failure."""
        try:
            resp = requests.post(
                url,
                data=data,
                headers=headers,
                timeout=self.timeout)
        except (requests.exceptions.RequestException,
                socket.timeout,
                ssl.SSLError):
            log.exception(self.log_msg('Webhook send error'))
            return False
        if 200 <= resp.status_code < 300:
            log.info(self.log_msg('Webhook successfully sent'))
            return True
        log.error(self.log_msg('Webhook send error', response=resp))
        return False
@task()
def send_webhook(webhook_id, payload):
    """Background task: deliver ``payload`` to the webhook with ``webhook_id``."""
    hook = M.Webhook.query.get(_id=webhook_id)
    SendWebhookHelper(hook, payload).send()
class WebhookSender(object):
    """Base class for webhook senders.

    Subclasses are required to implement :meth:`get_payload()` and set
    :attr:`type` and :attr:`triggered_by`.
    """
    type = None
    triggered_by = []
    controller = WebhookController
    api_controller = WebhookRestController

    def get_payload(self, **kw):
        """Return a dict with webhook payload"""
        raise NotImplementedError('get_payload')

    def send(self, params_or_list):
        """Post a task that will send webhook payload

        :param params_or_list: dict with keyword parameters to be passed to
        :meth:`get_payload` or a list of such dicts. If it's a list for each
        element appropriate payload will be submitted, but limit will be
        enforced only once for each webhook.
        """
        if not isinstance(params_or_list, list):
            params_or_list = [params_or_list]
        webhooks = M.Webhook.query.find(dict(
            app_config_id=c.app.config._id,
            type=self.type,
        )).all()
        if webhooks:
            payloads = [self.get_payload(**params)
                        for params in params_or_list]
            for webhook in webhooks:
                if webhook.enforce_limit():
                    webhook.update_limit()
                    for payload in payloads:
                        send_webhook.post(webhook._id, payload)
                else:
                    # log.warn() is a deprecated alias of warning() (removed
                    # in Python 3.13), so use the canonical name.
                    log.warning('Webhook fires too often: %s. Skipping', webhook)

    def enforce_limit(self, app):
        '''
        Checks if limit of webhooks created for given project/app is reached.
        Returns False if limit is reached, True otherwise.
        '''
        count = M.Webhook.query.find(dict(
            app_config_id=app.config._id,
            type=self.type,
        )).count()
        limit = M.Webhook.max_hooks(self.type, app.config.tool_name)
        return count < limit
class RepoPushWebhookSender(WebhookSender):
    """Webhook sender fired on repository pushes (git, hg, svn)."""
    type = 'repo-push'
    triggered_by = ['git', 'hg', 'svn']

    def _before(self, repo, commit_ids):
        # Branch state before the push: a parent of the last pushed commit.
        if not commit_ids:
            return ''
        parents = repo.commit(commit_ids[-1]).parent_ids
        if not parents:
            return ''
        # Merge commit will have multiple parents. As far as I can tell
        # the last one will be the branch head before merge
        return self._convert_id(parents[-1])

    def _after(self, commit_ids):
        # Branch state after the push: the newest pushed commit.
        return self._convert_id(commit_ids[0]) if commit_ids else ''

    def _convert_id(self, _id):
        # Ids of the form 'prefix:rev' are shortened to 'r<rev>'.
        if ':' not in _id:
            return _id
        return 'r' + _id.rsplit(':', 1)[1]

    def get_payload(self, commit_ids, **kw):
        """Build the JSON-able payload describing the pushed commits."""
        app = kw.get('app') or c.app
        commits = []
        for ci in commit_ids:
            info = app.repo.commit(ci).webhook_info
            info['id'] = self._convert_id(info['id'])
            commits.append(info)
        payload = {
            'size': len(commits),
            'commits': commits,
            'before': self._before(app.repo, commit_ids),
            'after': self._after(commit_ids),
            'repository': {
                'name': app.config.options.mount_label,
                'full_name': app.url,
                'url': h.absurl(app.url),
            },
        }
        if kw.get('ref'):
            payload['ref'] = kw['ref']
        return payload
| [
"logging.getLogger",
"requests.post",
"formencode.validators.URL",
"tg.config.get",
"time.sleep",
"allura.model.AuditLog.log",
"six.text_type",
"webob.exc.HTTPNotFound",
"allura.lib.helpers.cryptographic_nonce",
"json.dumps",
"tg.redirect",
"allura.lib.decorators.task",
"six.with_metaclass",... | [((1819, 1846), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1836, 1846), False, 'import logging\n'), ((3635, 3714), 'six.with_metaclass', 'six.with_metaclass', (['WebhookControllerMeta', 'BaseController', 'AdminControllerMixin'], {}), '(WebhookControllerMeta, BaseController, AdminControllerMixin)\n', (3653, 3714), False, 'import six\n'), ((14805, 14811), 'allura.lib.decorators.task', 'task', ([], {}), '()\n', (14809, 14811), False, 'from allura.lib.decorators import require_post, task\n'), ((2618, 2641), 'formencode.validators.URL', 'fev.URL', ([], {'not_empty': '(True)'}), '(not_empty=True)\n', (2625, 2641), True, 'from formencode import validators as fev, schema, Invalid\n'), ((2655, 2672), 'allura.lib.validators.UnicodeString', 'v.UnicodeString', ([], {}), '()\n', (2670, 2672), True, 'from allura.lib import validators as v\n'), ((4498, 4556), 'tg.expose', 'expose', (['"""jinja:allura:templates/webhooks/create_form.html"""'], {}), "('jinja:allura:templates/webhooks/create_form.html')\n", (4504, 4556), False, 'from tg import expose, validate, redirect, flash, config\n'), ((4911, 4969), 'tg.expose', 'expose', (['"""jinja:allura:templates/webhooks/create_form.html"""'], {}), "('jinja:allura:templates/webhooks/create_form.html')\n", (4917, 4969), False, 'from tg import expose, validate, redirect, flash, config\n'), ((5018, 5032), 'allura.lib.decorators.require_post', 'require_post', ([], {}), '()\n', (5030, 5032), False, 'from allura.lib.decorators import require_post, task\n'), ((5987, 6045), 'tg.expose', 'expose', (['"""jinja:allura:templates/webhooks/create_form.html"""'], {}), "('jinja:allura:templates/webhooks/create_form.html')\n", (5993, 6045), False, 'from tg import expose, validate, redirect, flash, config\n'), ((6097, 6111), 'allura.lib.decorators.require_post', 'require_post', ([], {}), '()\n', (6109, 6111), False, 'from allura.lib.decorators import require_post, task\n'), ((6819, 6834), 
'tg.expose', 'expose', (['"""json:"""'], {}), "('json:')\n", (6825, 6834), False, 'from tg import expose, validate, redirect, flash, config\n'), ((6840, 6854), 'allura.lib.decorators.require_post', 'require_post', ([], {}), '()\n', (6852, 6854), False, 'from allura.lib.decorators import require_post, task\n'), ((7280, 7338), 'tg.expose', 'expose', (['"""jinja:allura:templates/webhooks/create_form.html"""'], {}), "('jinja:allura:templates/webhooks/create_form.html')\n", (7286, 7338), False, 'from tg import expose, validate, redirect, flash, config\n'), ((8703, 8718), 'tg.expose', 'expose', (['"""json:"""'], {}), "('json:')\n", (8709, 8718), False, 'from tg import expose, validate, redirect, flash, config\n'), ((8724, 8738), 'allura.lib.decorators.require_post', 'require_post', ([], {}), '()\n', (8736, 8738), False, 'from allura.lib.decorators import require_post, task\n'), ((10617, 10632), 'tg.expose', 'expose', (['"""json:"""'], {}), "('json:')\n", (10623, 10632), False, 'from tg import expose, validate, redirect, flash, config\n'), ((14865, 14900), 'allura.model.Webhook.query.get', 'M.Webhook.query.get', ([], {'_id': 'webhook_id'}), '(_id=webhook_id)\n', (14884, 14900), True, 'from allura import model as M\n'), ((2525, 2565), 'formencode.Invalid', 'Invalid', (['"""Invalid webhook"""', 'value', 'state'], {}), "('Invalid webhook', value, state)\n", (2532, 2565), False, 'from formencode import validators as fev, schema, Invalid\n'), ((3969, 3994), 'allura.lib.helpers.cryptographic_nonce', 'h.cryptographic_nonce', (['(20)'], {}), '(20)\n', (3990, 3994), True, 'from allura.lib import helpers as h\n'), ((5939, 5980), 'tg.redirect', 'redirect', (["(self.app.admin_url + 'webhooks')"], {}), "(self.app.admin_url + 'webhooks')\n", (5947, 5980), False, 'from tg import expose, validate, redirect, flash, config\n'), ((6548, 6679), 'allura.model.AuditLog.log', 'M.AuditLog.log', (['"""edit webhook %s\n%s => %s\n%s"""', 'webhook.type', 'old_url', 'url', "('secret changed' if 
old_secret != secret else '')"], {}), '("""edit webhook %s\n%s => %s\n%s""", webhook.type, old_url,\n url, \'secret changed\' if old_secret != secret else \'\')\n', (6562, 6679), True, 'from allura import model as M\n'), ((6728, 6762), 'tg.flash', 'flash', (['"""Edited successfully"""', '"""ok"""'], {}), "('Edited successfully', 'ok')\n", (6733, 6762), False, 'from tg import expose, validate, redirect, flash, config\n'), ((6771, 6812), 'tg.redirect', 'redirect', (["(self.app.admin_url + 'webhooks')"], {}), "(self.app.admin_url + 'webhooks')\n", (6779, 6812), False, 'from tg import expose, validate, redirect, flash, config\n'), ((11768, 11917), 'allura.model.AuditLog.log', 'M.AuditLog.log', (['"""edit webhook %s\n%s => %s\n%s"""', 'webhook.type', 'old_url', "valid['url']", "('secret changed' if old_secret != valid['secret'] else '')"], {}), '("""edit webhook %s\n%s => %s\n%s""", webhook.type, old_url,\n valid[\'url\'], \'secret changed\' if old_secret != valid[\'secret\'] else \'\')\n', (11782, 11917), True, 'from allura import model as M\n'), ((12058, 12094), 'allura.model.Webhook.query.get', 'M.Webhook.query.get', ([], {'_id': 'webhook._id'}), '(_id=webhook._id)\n', (12077, 12094), True, 'from allura import model as M\n'), ((13456, 13501), 'json.dumps', 'json.dumps', (['self.payload'], {'cls': 'DateJSONEncoder'}), '(self.payload, cls=DateJSONEncoder)\n', (13466, 13501), False, 'import json\n'), ((16811, 16863), 'allura.model.Webhook.max_hooks', 'M.Webhook.max_hooks', (['self.type', 'app.config.tool_name'], {}), '(self.type, app.config.tool_name)\n', (16830, 16863), True, 'from allura import model as M\n'), ((5195, 5262), 'allura.model.Webhook', 'M.Webhook', ([], {'type': 'self.sender.type', 'app_config_id': 'self.app.config._id'}), '(type=self.sender.type, app_config_id=self.app.config._id)\n', (5204, 5262), True, 'from allura import model as M\n'), ((5734, 5769), 'tg.flash', 'flash', (['"""Created successfully"""', '"""ok"""'], {}), "('Created successfully', 
'ok')\n", (5739, 5769), False, 'from tg import expose, validate, redirect, flash, config\n'), ((5796, 5919), 'tg.flash', 'flash', (['"""You have exceeded the maximum number of webhooks you are allowed to create for this project/app"""', '"""error"""'], {}), "(\n 'You have exceeded the maximum number of webhooks you are allowed to create for this project/app'\n , 'error')\n", (5801, 5919), False, 'from tg import expose, validate, redirect, flash, config\n'), ((7728, 7749), 'six.text_type', 'six.text_type', (['wh._id'], {}), '(wh._id)\n', (7741, 7749), False, 'import six\n'), ((8297, 8317), 'six.iteritems', 'six.iteritems', (['error'], {}), '(error)\n', (8310, 8317), False, 'import six\n'), ((9199, 9266), 'allura.model.Webhook', 'M.Webhook', ([], {'type': 'self.sender.type', 'app_config_id': 'self.app.config._id'}), '(type=self.sender.type, app_config_id=self.app.config._id)\n', (9208, 9266), True, 'from allura import model as M\n'), ((9858, 9894), 'allura.model.Webhook.query.get', 'M.Webhook.query.get', ([], {'_id': 'webhook._id'}), '(_id=webhook._id)\n', (9877, 9894), True, 'from allura import model as M\n'), ((12574, 12607), 'tg.config.get', 'config.get', (['"""webhook.timeout"""', '(30)'], {}), "('webhook.timeout', 30)\n", (12584, 12607), False, 'from tg import expose, validate, redirect, flash, config\n'), ((12666, 12709), 'tg.config.get', 'config.get', (['"""webhook.retry"""', '[60, 120, 240]'], {}), "('webhook.retry', [60, 120, 240])\n", (12676, 12709), False, 'from tg import expose, validate, redirect, flash, config\n'), ((12731, 12742), 'six.moves.map', 'map', (['int', 't'], {}), '(int, t)\n', (12734, 12742), False, 'from six.moves import map\n'), ((14210, 14278), 'requests.post', 'requests.post', (['url'], {'data': 'data', 'headers': 'headers', 'timeout': 'self.timeout'}), '(url, data=data, headers=headers, timeout=self.timeout)\n', (14223, 14278), False, 'import requests\n'), ((2223, 2253), 'allura.model.Webhook.query.get', 'M.Webhook.query.get', ([], 
{'_id': 'value'}), '(_id=value)\n', (2242, 2253), True, 'from allura import model as M\n'), ((4442, 4466), 'formencode.Invalid', 'Invalid', (['msg', 'None', 'None'], {}), '(msg, None, None)\n', (4449, 4466), False, 'from formencode import validators as fev, schema, Invalid\n'), ((7059, 7077), 'webob.exc.HTTPNotFound', 'exc.HTTPNotFound', ([], {}), '()\n', (7075, 7077), False, 'from webob import exc\n'), ((7545, 7563), 'webob.exc.HTTPNotFound', 'exc.HTTPNotFound', ([], {}), '()\n', (7561, 7563), False, 'from webob import exc\n'), ((8347, 8365), 'six.text_type', 'six.text_type', (['val'], {}), '(val)\n', (8360, 8365), False, 'import six\n'), ((9993, 10057), 'allura.model.Webhook.max_hooks', 'M.Webhook.max_hooks', (['self.sender.type', 'self.app.config.tool_name'], {}), '(self.sender.type, self.app.config.tool_name)\n', (10012, 10057), True, 'from allura import model as M\n'), ((10839, 10857), 'webob.exc.HTTPNotFound', 'exc.HTTPNotFound', ([], {}), '()\n', (10855, 10857), False, 'from webob import exc\n'), ((11340, 11366), 'six.text_type', 'six.text_type', (['webhook._id'], {}), '(webhook._id)\n', (11353, 11366), False, 'import six\n'), ((12006, 12022), 'ming.odm.session', 'session', (['webhook'], {}), '(webhook)\n', (12013, 12022), False, 'from ming.odm import session\n'), ((13997, 14010), 'time.sleep', 'time.sleep', (['t'], {}), '(t)\n', (14007, 14010), False, 'import time\n'), ((18281, 18298), 'allura.lib.helpers.absurl', 'h.absurl', (['app.url'], {}), '(app.url)\n', (18289, 18298), True, 'from allura.lib import helpers as h\n'), ((4188, 4199), 'ming.odm.session', 'session', (['wh'], {}), '(wh)\n', (4195, 4199), False, 'from ming.odm import session\n'), ((9802, 9818), 'ming.odm.session', 'session', (['webhook'], {}), '(webhook)\n', (9809, 9818), False, 'from ming.odm import session\n'), ((4256, 4267), 'ming.odm.session', 'session', (['wh'], {}), '(wh)\n', (4263, 4267), False, 'from ming.odm import session\n'), ((10124, 10215), 'allura.model.Webhook.query.find', 
'M.Webhook.query.find', (["{'type': self.sender.type, 'app_config_id': self.app.config._id}"], {}), "({'type': self.sender.type, 'app_config_id': self.app.\n config._id})\n", (10144, 10215), True, 'from allura import model as M\n'), ((2330, 2345), 'bson.ObjectId', 'ObjectId', (['value'], {}), '(value)\n', (2338, 2345), False, 'from bson import ObjectId\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 7 08:38:28 2020
pyqt realtime plot tutorial
source: https://www.learnpyqt.com/courses/graphics-plotting/plotting-pyqtgraph/
@author: nlourie
"""
from PyQt5 import QtWidgets, QtCore,uic
from pyqtgraph import PlotWidget, plot,QtGui
import pyqtgraph as pg
import sys # We need sys so that we can pass argv to QApplication
import os
from datetime import datetime
import numpy as np
from scipy import signal
import board
import busio
import adafruit_lps35hw
import time
from scipy import interpolate
#import monitor_utils as mu
# Initialize the i2c bus
i2c = busio.I2C(board.SCL, board.SDA)
# Using the adafruit_lps35hw class to read in the pressure sensor
# note the address must be in decimal.
# allowed addresses are:
# 92 (0x5c - if you put jumper from SDO to Gnd)
# 93 (0x5d - default)
p2 = adafruit_lps35hw.LPS35HW(i2c, address = 92)
p1 = adafruit_lps35hw.LPS35HW(i2c, address = 93)
# Run both sensors at a 75 Hz output data rate.
p1.data_rate = adafruit_lps35hw.DataRate.RATE_75_HZ
p2.data_rate = adafruit_lps35hw.DataRate.RATE_75_HZ
# Unit conversion factor: 1 mbar = 1.01972 cm of water column.
mbar2cmh20 = 1.01972
# Now read out the pressure difference between the sensors
print('p1_0 = ',p1.pressure,' mbar')
print('p1_0 = ',p1.pressure*mbar2cmh20,' cmH20')
print('p2_0 = ',p2.pressure,' mbar')
print('p2_0 = ',p2.pressure*mbar2cmh20,' cmH20')
print('')
print('Now zero the pressure:')
# Zero each sensor's reading at ambient pressure so subsequent readings
# are relative.  NOTE(review): the original author found a single call
# unreliable ("Not sure why sometimes I have to do this twice??"), so
# each sensor is zeroed twice with a settle delay — TODO confirm cause.
p1.zero_pressure()
p1.zero_pressure()
time.sleep(1)
p2.zero_pressure()
p2.zero_pressure()
time.sleep(1)
print('p1_0 = ',p1.pressure,' mbar')
print('p1_0 = ',p1.pressure*mbar2cmh20,' cmH20')
print('p2_0 = ',p2.pressure,' mbar')
print('p2_0 = ',p2.pressure*mbar2cmh20,' cmH20')
print()
def breath_detect_coarse(flow, fs, plotflag=False):
    """Coarsely detect breath peaks in a flow signal.

    Parameters
    ----------
    flow : array_like
        Flow signal (L/s).
    fs : float
        Sampling frequency (Hz).
    plotflag : bool, optional
        Unused; kept for backward compatibility with the original
        MATLAB interface, which plotted the detected peaks.

    Returns
    -------
    numpy.ndarray
        Indices of the detected peaks in ``flow``.

    Notes
    -----
    Originally written in MATLAB (ver 1.0, 12 Nov 2015) and converted
    to Python in April 2020.  The per-call debug ``print`` of the peak
    indices was removed: this function runs inside a ~100 Hz GUI timer
    loop, where printing on every call floods the console.
    """
    # A breath peak must be at least 0.3 s wide, at least 1.5 s away
    # from its neighbors, and rise at least 0.05 L/s above both the
    # height threshold and its surroundings.
    minpeakwidth = fs * 0.3
    peakdistance = fs * 1.5
    minPeak = 0.05  # flow threshold = 0.05 (L/s)
    minpeakprominence = 0.05
    peak_index, _ = signal.find_peaks(flow,
                                      height=minPeak,
                                      distance=peakdistance,
                                      prominence=minpeakprominence,
                                      width=minpeakwidth)
    return peak_index
class MainWindow(QtWidgets.QMainWindow):
    """Realtime respiratory monitor window.

    Polls two LPS35HW pressure sensors (module-level ``p1``/``p2``) on a
    10 ms Qt timer and shows four stacked plots: raw pressures, flow
    (pressure difference), detrended cumulative volume, and
    drift-corrected volume.
    """

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setWindowTitle("Standalone Respiratory Monitor")
        # Four stacked plots: pressures, flow, raw volume, corrected volume.
        self.graph0 = pg.PlotWidget()
        self.graph1 = pg.PlotWidget()
        self.graph2 = pg.PlotWidget()
        self.graph3 = pg.PlotWidget()
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self.graph0)
        layout.addWidget(self.graph1)
        layout.addWidget(self.graph2)
        layout.addWidget(self.graph3)
        widget = QtWidgets.QWidget()
        widget.setLayout(layout)
        # make the window with a graph widget
        #self.graph1 = pg.PlotWidget()
        self.setCentralWidget(widget)
        # set the plot properties
        self.graph1.setBackground('k')
        self.graph0.showGrid(x = True, y = True)
        self.graph1.showGrid(x=True,y=True)
        self.graph2.showGrid(x = True, y = True)
        self.graph3.showGrid(x = True, y = True)
        # Set the label properties with valid CSS commands -- https://groups.google.com/forum/#!topic/pyqtgraph/jS1Ju8R6PXk
        labelStyle = {'color': '#FFF', 'font-size': '12pt'}
        self.graph0.setLabel('left','P','cmH20',**labelStyle)
        self.graph1.setLabel('left','Flow','L/s',**labelStyle)
        self.graph3.setLabel('bottom', 'Time', 's', **labelStyle)
        #self.graph2.setLabel('left', 'V raw','L',**labelStyle)
        self.graph3.setLabel('left','V corr','L',**labelStyle)
        # change the plot range
        #self.graph0.setYRange(-30,30,padding = 0.1)
        #self.graph1.setYRange(-2,2,padding = 0.1)
        #self.graph3.setYRange(-0.5,1.5,padding = 0.1)
        #self.graph3.setYRange(200,200,padding = 0.1)
        # Rolling data buffers (plain lists, trimmed in update_plot_data):
        # x = sample counter, t = wall-clock timestamps, dt = seconds since
        # first buffered sample, dp/flow = pressure difference in cmH2O,
        # p1/p2 = individual sensor pressures, vol = integrated flow.
        self.x = [0]
        self.t = [datetime.utcnow().timestamp()]
        self.dt = [0]
        self.x = [0]
        self.dt = [0]
        #self.y = [honeywell_v2f(chan.voltage)]
        self.dp = [(p1.pressure - p2.pressure)*mbar2cmh20]
        self.p1 = [(p1.pressure)*mbar2cmh20]
        self.p2 = [(p2.pressure)*mbar2cmh20]
        self.flow = [0]
        self.vol = [0]
        print('P1 = ',p1.pressure,' cmH20')
        print('P2 = ',p2.pressure,' cmH20')
        # plot data: x, y values
        # make a QPen object to hold the marker properties
        pen = pg.mkPen(color = 'y',width = 1)
        pen2 = pg.mkPen(color = 'b',width = 2)
        self.data_line01 = self.graph0.plot(self.dt,self.p1,pen = pen)
        self.data_line02 = self.graph0.plot(self.dt,self.p2,pen = pen2)
        self.data_line1 = self.graph1.plot(self.dt, self.flow,pen = pen)
        # graph2
        self.data_line21 = self.graph2.plot(self.dt,self.flow,pen = pen)
        self.data_line22 = self.graph2.plot(self.dt,self.flow,pen = pen)
        # graph3
        self.data_line3 = self.graph3.plot(self.dt,self.vol,pen = pen)
        self.calibrating = False
        """
        # Slower timer
        self.t_cal = 100
        self.cal_timer = QtCore.QTimer()
        self.cal_timer.setInterval(self.t_cal)
        self.cal_timer.timeout.connect(self.update_cal)
        self.cal_timer.start()
        """
        # Stuff with the timer
        self.t_update = 10 #update time of timer in ms
        self.timer = QtCore.QTimer()
        self.timer.setInterval(self.t_update)
        self.timer.timeout.connect(self.update_plot_data)
        self.timer.start()
        self.drift_model = [0,datetime.utcnow().timestamp()/1000*self.t_update]
        self.i_valleys = []
        self.time_to_show = 30 #s

    def update_plot_data(self):
        # This is what happens every timer loop
        # Keep only the last ``time_to_show`` seconds of samples: once the
        # window is full, drop the oldest sample before appending a new one.
        if self.dt[-1] >= self.time_to_show:
            self.x = self.x[1:]  # Remove the first element
            #self.y = self.y[1:] # remove the first element
            self.dp = self.dp[1:]
            self.t = self.t[1:] # remove the first element
            self.dt= self.dt[1:]
            self.p1 = self.p1[1:]
            self.p2 = self.p2[1:]
            self.vol = self.vol[1:]
            self.flow = self.flow[1:]
        self.x.append(self.x[-1] + 1)  # add a new value 1 higher than the last
        self.t.append(datetime.utcnow().timestamp())
        # Recompute dt relative to the (possibly shifted) first sample.
        self.dt = [(ti - self.t[0]) for ti in self.t]
        # Differential pressure across the sensors, in cmH2O; used directly
        # as the "flow" trace (no flow-element calibration applied here).
        dp_cmh20 = ((p1.pressure - p2.pressure))*mbar2cmh20
        self.dp.append(dp_cmh20)
        self.flow.append(dp_cmh20)
        self.p1.append(p1.pressure*mbar2cmh20)
        self.p2.append(p2.pressure*mbar2cmh20)
        # remove any linear trend in the volume data since it's just nonsense.
        # THis should zero it out okay if there's no noticeable "dips"
        self.vol = signal.detrend(np.cumsum(self.flow))
        # Effective sample rate from the last two timestamps; the timer is
        # nominally 100 Hz but actual spacing varies with load.
        self.fs = 1/(self.t[-1] - self.t[-2])
        print('Sample Freq = ',self.fs)
        # Breath-start detection: invert the mean-subtracted volume so its
        # valleys (end-exhalation points) appear as peaks to the detector.
        negative_mean_subtracted_volume = [-1*(v-np.mean(self.vol)) for v in self.vol]
        i_valleys = breath_detect_coarse(negative_mean_subtracted_volume,fs = self.fs,plotflag = False)
        self.i_valleys = i_valleys
        #print('i_valleys = ',self.i_valleys)
        #print('datatype of i_valleys = ',type(self.i_valleys))
        # With at least two valleys, build a piecewise drift baseline:
        # linear interpolation between valleys, extrapolated linearly
        # before the first and after the last valley.
        if len(self.i_valleys) >= 2:
            t = np.array(self.t)
            vol = np.array(self.vol)
            dt = np.array(self.dt)
            print('found peaks at dt = ',dt[self.i_valleys])
            #self.drift_model = np.polyfit(t[self.i_valleys],vol[self.i_valleys],1)
            #self.v_drift = np.polyval(self.drift_model,t)
            #self.vol_corr = vol - self.v_drift
            #self.data_line22.setData(self.dt,self.v_drift)
            self.drift_model = interpolate.interp1d(t[i_valleys],vol[i_valleys],kind = 'linear')
            v_drift_within_spline = self.drift_model(t[i_valleys[0]:i_valleys[-1]])
            v_drift = np.zeros(len(t))
            v_drift[0:self.i_valleys[1]] = np.polyval(np.polyfit(t[i_valleys[0:1]],vol[self.i_valleys[0:1]],1),t[0:self.i_valleys[1]],)
            v_drift[self.i_valleys[0]:self.i_valleys[-1]] = v_drift_within_spline
            v_drift[self.i_valleys[-1]:] = np.polyval(np.polyfit(t[self.i_valleys[-2:]],vol[self.i_valleys[-2:]],1),t[self.i_valleys[-1]:])
            self.v_drift = v_drift
            self.vol_corr = vol - v_drift
            self.data_line22.setData(self.dt,self.v_drift)
        else:
            # Too few valleys to model drift; show the raw volume.
            self.vol_corr = self.vol
        # Push the new buffers to all plot curves.
        self.data_line01.setData(self.dt,self.p1)
        self.data_line02.setData(self.dt,self.p2)
        self.data_line1.setData(self.dt,self.flow) #update the data
        self.data_line21.setData(self.dt,self.vol)
        self.data_line3.setData(self.dt,self.vol_corr)
    """
    def update_cal(self) :
        print ('len dt = ',len(self.dt))
        if len(self.dt) > 50:
            # try to run the monitor utils functions
            fs = 1000/self.t_update
            i_peaks,i_valleys,i_infl_points,vol_last_peak,flow,self.vol_corr,self.vol_offset,time,vol,drift_model = mu.get_processed_flow(np.array(self.t),np.array(self.y),fs,SmoothingParam = 0,smoothflag=True,plotflag = False)
            if len(i_peaks) > 2:
                self.drift_model = drift_model
                print('updating calibration')
                self.calibrating = True
                self.data_line2.setData(self.dt,vol)
                self.data_line5.setData(self.dt,np.polyval(self.drift_model,time))
                self.data_line3.setData(self.dt,vol - np.polyval(self.drift_model,time))
                print('drift model = ',self.drift_model)
    """
def main():
    """Create the Qt application, show the monitor window, and run the
    event loop until the window is closed."""
    app = QtWidgets.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
| [
"PyQt5.QtWidgets.QWidget",
"numpy.mean",
"adafruit_lps35hw.LPS35HW",
"numpy.polyfit",
"datetime.datetime.utcnow",
"busio.I2C",
"PyQt5.QtCore.QTimer",
"time.sleep",
"scipy.interpolate.interp1d",
"numpy.array",
"pyqtgraph.PlotWidget",
"numpy.cumsum",
"PyQt5.QtWidgets.QApplication",
"scipy.si... | [((632, 663), 'busio.I2C', 'busio.I2C', (['board.SCL', 'board.SDA'], {}), '(board.SCL, board.SDA)\n', (641, 663), False, 'import busio\n'), ((904, 945), 'adafruit_lps35hw.LPS35HW', 'adafruit_lps35hw.LPS35HW', (['i2c'], {'address': '(92)'}), '(i2c, address=92)\n', (928, 945), False, 'import adafruit_lps35hw\n'), ((953, 994), 'adafruit_lps35hw.LPS35HW', 'adafruit_lps35hw.LPS35HW', (['i2c'], {'address': '(93)'}), '(i2c, address=93)\n', (977, 994), False, 'import adafruit_lps35hw\n'), ((1488, 1501), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1498, 1501), False, 'import time\n'), ((1540, 1553), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1550, 1553), False, 'import time\n'), ((2510, 2627), 'scipy.signal.find_peaks', 'signal.find_peaks', (['flow'], {'height': 'minPeak', 'distance': 'peakdistance', 'prominence': 'minpeakprominence', 'width': 'minpeakwidth'}), '(flow, height=minPeak, distance=peakdistance, prominence=\n minpeakprominence, width=minpeakwidth)\n', (2527, 2627), False, 'from scipy import signal\n'), ((10966, 10998), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (10988, 10998), False, 'from PyQt5 import QtWidgets, QtCore, uic\n'), ((3390, 3405), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {}), '()\n', (3403, 3405), True, 'import pyqtgraph as pg\n'), ((3428, 3443), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {}), '()\n', (3441, 3443), True, 'import pyqtgraph as pg\n'), ((3466, 3481), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {}), '()\n', (3479, 3481), True, 'import pyqtgraph as pg\n'), ((3504, 3519), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {}), '()\n', (3517, 3519), True, 'import pyqtgraph as pg\n'), ((3546, 3569), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (3567, 3569), False, 'from PyQt5 import QtWidgets, QtCore, uic\n'), ((3748, 3767), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (3765, 3767), False, 'from PyQt5 
import QtWidgets, QtCore, uic\n'), ((5597, 5625), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'color': '"""y"""', 'width': '(1)'}), "(color='y', width=1)\n", (5605, 5625), True, 'import pyqtgraph as pg\n'), ((5644, 5672), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'color': '"""b"""', 'width': '(2)'}), "(color='b', width=2)\n", (5652, 5672), True, 'import pyqtgraph as pg\n'), ((6595, 6610), 'PyQt5.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (6608, 6610), False, 'from PyQt5 import QtWidgets, QtCore, uic\n'), ((8054, 8074), 'numpy.cumsum', 'np.cumsum', (['self.flow'], {}), '(self.flow)\n', (8063, 8074), True, 'import numpy as np\n'), ((8592, 8608), 'numpy.array', 'np.array', (['self.t'], {}), '(self.t)\n', (8600, 8608), True, 'import numpy as np\n'), ((8627, 8645), 'numpy.array', 'np.array', (['self.vol'], {}), '(self.vol)\n', (8635, 8645), True, 'import numpy as np\n'), ((8663, 8680), 'numpy.array', 'np.array', (['self.dt'], {}), '(self.dt)\n', (8671, 8680), True, 'import numpy as np\n'), ((9037, 9102), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['t[i_valleys]', 'vol[i_valleys]'], {'kind': '"""linear"""'}), "(t[i_valleys], vol[i_valleys], kind='linear')\n", (9057, 9102), False, 'from scipy import interpolate\n'), ((9280, 9338), 'numpy.polyfit', 'np.polyfit', (['t[i_valleys[0:1]]', 'vol[self.i_valleys[0:1]]', '(1)'], {}), '(t[i_valleys[0:1]], vol[self.i_valleys[0:1]], 1)\n', (9290, 9338), True, 'import numpy as np\n'), ((9498, 9561), 'numpy.polyfit', 'np.polyfit', (['t[self.i_valleys[-2:]]', 'vol[self.i_valleys[-2:]]', '(1)'], {}), '(t[self.i_valleys[-2:]], vol[self.i_valleys[-2:]], 1)\n', (9508, 9561), True, 'import numpy as np\n'), ((5050, 5067), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5065, 5067), False, 'from datetime import datetime\n'), ((7554, 7571), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (7569, 7571), False, 'from datetime import datetime\n'), ((8221, 8238), 'numpy.mean', 'np.mean', (['self.vol'], {}), 
'(self.vol)\n', (8228, 8238), True, 'import numpy as np\n'), ((6790, 6807), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (6805, 6807), False, 'from datetime import datetime\n')] |
import pandas as pd
import numpy as np
# Demonstration of DataFrame.agg / Series.agg (alias of .aggregate):
# single vs. list vs. dict arguments, axis=1, extra keyword pass-through,
# NaN padding for per-column function dicts, and mixed-dtype behaviour.
# Expected outputs (pandas 1.0.0) are shown in the inline comments.
print(pd.__version__)
# 1.0.0
# agg and aggregate are the same bound function object.
print(pd.DataFrame.agg is pd.DataFrame.aggregate)
# True
df = pd.DataFrame({'A': [0, 1, 2], 'B': [3, 4, 5]})
print(df)
# A B
# 0 0 3
# 1 1 4
# 2 2 5
# A list of function names yields a DataFrame: one row per function.
print(df.agg(['sum', 'mean', 'min', 'max']))
# A B
# sum 3.0 12.0
# mean 1.0 4.0
# min 0.0 3.0
# max 2.0 5.0
print(type(df.agg(['sum', 'mean', 'min', 'max'])))
# <class 'pandas.core.frame.DataFrame'>
# Even a one-element list still returns a DataFrame ...
print(df.agg(['sum']))
# A B
# sum 3 12
print(type(df.agg(['sum'])))
# <class 'pandas.core.frame.DataFrame'>
# ... whereas a bare string returns a Series.
print(df.agg('sum'))
# A 3
# B 12
# dtype: int64
print(type(df.agg('sum')))
# <class 'pandas.core.series.Series'>
# A dict selects different functions per column; missing combinations
# are padded with NaN.
print(df.agg({'A': ['sum', 'min', 'max'],
              'B': ['mean', 'min', 'max']}))
# A B
# max 2.0 5.0
# mean NaN 4.0
# min 0.0 3.0
# sum 3.0 NaN
# Dict with scalar (string) values -> Series result.
print(df.agg({'A': 'sum', 'B': 'mean'}))
# A 3.0
# B 4.0
# dtype: float64
print(df.agg({'A': ['sum'], 'B': ['mean']}))
# A B
# mean NaN 4.0
# sum 3.0 NaN
# Mixing list and scalar values in the dict also yields a DataFrame.
print(df.agg({'A': ['min', 'max'], 'B': 'mean'}))
# A B
# max 2.0 NaN
# mean NaN 4.0
# min 0.0 NaN
# axis=1 aggregates across columns, i.e. one result column per function.
print(df.agg(['sum', 'mean', 'min', 'max'], axis=1))
# sum mean min max
# 0 3.0 1.5 0.0 3.0
# 1 5.0 2.5 1.0 4.0
# 2 7.0 3.5 2.0 5.0
# The same API exists on Series.
s = df['A']
print(s)
# 0 0
# 1 1
# 2 2
# Name: A, dtype: int64
print(s.agg(['sum', 'mean', 'min', 'max']))
# sum 3.0
# mean 1.0
# min 0.0
# max 2.0
# Name: A, dtype: float64
print(type(s.agg(['sum', 'mean', 'min', 'max'])))
# <class 'pandas.core.series.Series'>
print(s.agg(['sum']))
# sum 3
# Name: A, dtype: int64
print(type(s.agg(['sum'])))
# <class 'pandas.core.series.Series'>
# A bare string on a Series returns a scalar.
print(s.agg('sum'))
# 3
print(type(s.agg('sum')))
# <class 'numpy.int64'>
# On a Series, dict KEYS become the labels of the result.
print(s.agg({'Total': 'sum', 'Average': 'mean', 'Min': 'min', 'Max': 'max'}))
# Total 3.0
# Average 1.0
# Min 0.0
# Max 2.0
# Name: A, dtype: float64
# Nested dict-of-lists renaming was removed from pandas:
# print(s.agg({'NewLabel_1': ['sum', 'max'], 'NewLabel_2': ['mean', 'min']}))
# SpecificationError: nested renamer is not supported
# Strings may name Series methods, numpy functions, or attributes.
print(df.agg(['mad', 'amax', 'dtype']))
# A B
# mad 0.666667 0.666667
# amax 2 5
# dtype int64 int64
print(df['A'].mad())
# 0.6666666666666666
print(np.amax(df['A']))
# 2
print(df['A'].dtype)
# int64
# Unknown names raise AttributeError:
# print(df.agg(['xxx']))
# AttributeError: 'xxx' is not a valid function for 'Series' object
# print(df.agg('xxx'))
# AttributeError: 'xxx' is not a valid function for 'DataFrame' object
print(hasattr(pd.DataFrame, '__array__'))
# True
print(hasattr(pd.core.groupby.GroupBy, '__array__'))
# False
# Callables work too, including numpy ufuncs and builtins.
print(df.agg([np.sum, max]))
# A B
# sum 3 12
# max 2 5
print(np.sum(df['A']))
# 3
print(max(df['A']))
# 2
# np.abs is a transform (element-wise), not an aggregation:
print(np.abs(df['A']))
# 0 0
# 1 1
# 2 2
# Name: A, dtype: int64
print(df.agg([np.abs]))
# A B
# absolute absolute
# 0 0 3
# 1 1 4
# 2 2 5
# Transforms and aggregations cannot be mixed in one call:
# print(df.agg([np.abs, max]))
# ValueError: cannot combine transform and aggregation operations
# User-defined functions and lambdas are labelled by __name__.
def my_func(x):
    return min(x) / max(x)
print(df.agg([my_func, lambda x: min(x) / max(x)]))
# A B
# my_func 0.0 0.6
# <lambda> 0.0 0.6
print(df['A'].std())
# 1.0
print(df['A'].std(ddof=0))
# 0.816496580927726
print(df.agg(['std', lambda x: x.std(ddof=0)]))
# A B
# std 1.000000 1.000000
# <lambda> 0.816497 0.816497
# Extra keyword arguments are forwarded to the aggregation function.
print(df.agg('std', ddof=0))
# A 0.816497
# B 0.816497
# dtype: float64
# NOTE: with a *list* of functions, ddof=0 is NOT forwarded here (1.0.0).
print(df.agg(['std'], ddof=0))
# A B
# std 1.0 1.0
# Mixed-dtype frames: functions that fail on a column drop that column.
df_str = df.assign(C=['X', 'Y', 'Z'])
print(df_str)
# A B C
# 0 0 3 X
# 1 1 4 Y
# 2 2 5 Z
# df_str['C'].mean()
# TypeError: Could not convert XYZ to numeric
print(df_str.agg(['sum', 'mean']))
# A B C
# sum 3.0 12.0 XYZ
# mean 1.0 4.0 NaN
print(df_str.agg(['mean', 'std']))
# A B
# mean 1.0 4.0
# std 1.0 1.0
print(df_str.agg(['sum', 'min', 'max']))
# A B C
# sum 3 12 XYZ
# min 0 3 X
# max 2 5 Z
# Restrict to numeric columns explicitly to avoid surprises.
print(df_str.select_dtypes(include='number').agg(['sum', 'mean']))
# A B
# sum 3.0 12.0
# mean 1.0 4.0
| [
"pandas.DataFrame",
"numpy.sum",
"numpy.amax",
"numpy.abs"
] | [((134, 180), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [0, 1, 2], 'B': [3, 4, 5]}"], {}), "({'A': [0, 1, 2], 'B': [3, 4, 5]})\n", (146, 180), True, 'import pandas as pd\n'), ((2325, 2341), 'numpy.amax', 'np.amax', (["df['A']"], {}), "(df['A'])\n", (2332, 2341), True, 'import numpy as np\n'), ((2754, 2769), 'numpy.sum', 'np.sum', (["df['A']"], {}), "(df['A'])\n", (2760, 2769), True, 'import numpy as np\n'), ((2807, 2822), 'numpy.abs', 'np.abs', (["df['A']"], {}), "(df['A'])\n", (2813, 2822), True, 'import numpy as np\n')] |
from flask_sqlalchemy import SQLAlchemy
from flask_user import UserMixin
from itsdangerous import (TimedJSONWebSignatureSerializer as
Serializer, BadSignature, SignatureExpired)
# Shared SQLAlchemy handle; presumably bound to the Flask app elsewhere
# via db.init_app(app) — confirm against the application factory.
db = SQLAlchemy()
class User(db.Model, UserMixin):
    """Application user account, with roles, datasets and groups
    attached through the association tables below."""
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)
    # Flask-User expects an is_active flag; stored column name differs
    # from the attribute name on purpose.
    active = db.Column('is_active', db.Boolean(), nullable=False, server_default='1')
    username = db.Column(db.String(100), nullable=False, unique=True)
    # Stores the (hashed) password; empty server default for legacy rows.
    password = db.Column(db.String(255), nullable=False, server_default='')
    email = db.Column(db.String(255))
    email_confirmed_at = db.Column(db.DateTime())
    # User information
    first_name = db.Column(db.String(100), nullable=False, server_default='')
    last_name = db.Column(db.String(100), nullable=False, server_default='')
    # Define the relationships (many-to-many via the association tables)
    roles = db.relationship('Role', secondary='user_role')
    datasets = db.relationship('Dataset', secondary='user_dataset')
    groups = db.relationship('Group', secondary='user_group')
    def generate_auth_token(self, expiration = 600):
        # Issue a signed, time-limited token carrying this user's id.
        s = Serializer(db.app.config['SECRET_KEY'], expires_in=expiration)
        return s.dumps({ 'id': self.id })
    @staticmethod
    def verify_auth_token(token):
        # Return the User encoded in `token`, or None when the token is
        # expired or has been tampered with.
        s = Serializer(db.app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except SignatureExpired:
            return None # valid token, but expired
        except BadSignature:
            return None # invalid token
        user = User.query.get(data['id'])
        return user
class Group(db.Model):
    """A named user group; membership lives in the 'user_group' table."""
    __tablename__ = 'group'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), unique=True, nullable=False)
    description = db.Column(db.String(50), nullable=False)
class Role(db.Model):
    """An authorization role, linked to users through UserRole."""
    __tablename__ = 'role'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), unique=True)
    description = db.Column(db.String(50))
class Dataset(db.Model):
    """A dataset's metadata; its raw bytes are a File row (file_id).

    NOTE(review): project_id and user_id are plain integers without
    foreign-key constraints, unlike file_id — confirm that is intended.
    """
    __tablename__ = 'dataset'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50))
    description = db.Column(db.String(1000))
    train = db.Column(db.Boolean)
    test = db.Column(db.Boolean)
    features_type = db.Column(db.String(20))
    labels_type = db.Column(db.String(20))
    label = db.Column(db.Boolean)
    project_id = db.Column(db.Integer)
    user_id = db.Column(db.Integer)
    # Deleting the backing File row deletes this dataset as well.
    file_id = db.Column(db.Integer, db.ForeignKey('file.id', ondelete='CASCADE'))
class UserRole(db.Model):
    """Association table: many-to-many between users and roles."""
    __tablename__ = 'user_role'
    id = db.Column(db.Integer(), primary_key=True)
    user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))
    role_id = db.Column(db.Integer(), db.ForeignKey('role.id', ondelete='CASCADE'))
class UserDataset(db.Model):
    """Association table: many-to-many between users and datasets."""
    __tablename__ = 'user_dataset'
    id = db.Column(db.Integer(), primary_key=True)
    user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))
    dataset_id = db.Column(db.Integer(), db.ForeignKey('dataset.id', ondelete='CASCADE'))
class UserDatasets(db.Model):
    """Association table: many-to-many between users and groups.

    NOTE(review): despite the class name, this maps the 'user_group'
    table (it holds group_id, not dataset_id). A rename to UserGroup
    would be clearer, but the name may be referenced elsewhere, so it
    is left unchanged here.
    """
    __tablename__ = 'user_group'
    id = db.Column(db.Integer(), primary_key=True)
    user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))
    group_id = db.Column(db.Integer(), db.ForeignKey('group.id', ondelete='CASCADE'))
class TrainedModel(db.Model):
    """Metadata for a model trained on a dataset.

    NOTE(review): unlike dataset_id, the project/user/algorithm/file ids
    here carry no foreign-key constraints — confirm that is intended.
    """
    __tablename__ = 'trained_model'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50))
    description = db.Column(db.String(1000))
    project_id = db.Column(db.Integer)
    user_id = db.Column(db.Integer)
    algorithm_id = db.Column(db.Integer)
    file_id = db.Column(db.Integer)
    # Deleting the source dataset deletes the trained model row too.
    dataset_id = db.Column(
        db.Integer(),
        db.ForeignKey('dataset.id', ondelete='CASCADE')
    )
class File(db.Model):
    """Raw binary file contents, referenced by Dataset.file_id."""
    __tablename__ = 'file'
    id = db.Column(db.Integer, primary_key=True)
    content= db.Column(db.LargeBinary, nullable=False) | [
"flask_sqlalchemy.SQLAlchemy",
"itsdangerous.TimedJSONWebSignatureSerializer"
] | [((209, 221), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (219, 221), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((1121, 1183), 'itsdangerous.TimedJSONWebSignatureSerializer', 'Serializer', (["db.app.config['SECRET_KEY']"], {'expires_in': 'expiration'}), "(db.app.config['SECRET_KEY'], expires_in=expiration)\n", (1131, 1183), True, 'from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired\n'), ((1291, 1330), 'itsdangerous.TimedJSONWebSignatureSerializer', 'Serializer', (["db.app.config['SECRET_KEY']"], {}), "(db.app.config['SECRET_KEY'])\n", (1301, 1330), True, 'from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired\n')] |
#!/usr/bin/env python3
# (c) 2021 <NAME>
from abc import ABC, abstractmethod
from enum import Enum
from os import linesep
from twisted.internet import reactor
from twisted.internet.error import ConnectionDone
from twisted.internet.protocol import Factory, Protocol
from twisted.logger import Logger
from twisted.protocols import basic
from twisted.python.failure import Failure
from models import Version, Message, VerAck, MessageError, GetAddr, NetworkAddress, Addr, Ping, Pong, ChatMessage
# Silence Twisted's per-factory "Starting/Stopping factory" log lines.
Factory.noisy = False
class States(Enum):
    """
    Enum class representing possible states of the PeerProtocol.
    """
    INIT = 0              # freshly constructed; no handshake traffic yet
    WAIT_FOR_VERSION = 1  # expecting the peer's Version message
    WAIT_FOR_VERACK = 2   # our Version was sent; awaiting acknowledgement
    CON_ESTABLISHED = 3   # handshake complete; normal message exchange
class PeerProtocol(Protocol, ABC):
    """Shared message handling for one peer connection.

    Decodes incoming bytes into Message objects and dispatches them to
    the matching handle_* method. Subclasses implement the direction-
    specific parts of the handshake (incoming vs. outgoing connection).
    """
    log = Logger()

    def __init__(self, client: 'P2PClient'):
        self.client = client
        self.state = States.INIT
        self._peer = None

    @property
    def peer(self) -> NetworkAddress:
        """The peer's address: self-announced if known, else the
        transport's view of the remote endpoint."""
        if self._peer is None:
            peer = self.transport.getPeer()
            return NetworkAddress(peer.host, peer.port)
        else:
            return self._peer

    def connectionLost(self, reason: Failure = ConnectionDone):
        self.log.debug(f'connection to Peer {self.peer} lost')
        self.log.debug('reason:' + str(reason))
        self.client.remove_connection(self)

    def dataReceived(self, data: bytes):
        """Parse the received bytes and dispatch by message type.
        Unparseable data drops the connection."""
        try:
            message = Message.from_bytes(data)
        except MessageError:
            self.log.failure(f'Invalid message received from {self.peer}.')
            self.transport.loseConnection()
            return
        self.log.debug(f'Message received from {self.peer}')
        if isinstance(message, Version):
            self.handle_version(message)
        elif isinstance(message, VerAck):
            self.handle_verack(message)
        elif isinstance(message, GetAddr):
            self.handle_getadr(message)
        elif isinstance(message, Addr):
            self.handle_addr(message)
        elif isinstance(message, Ping):
            self.handle_ping(message)
        elif isinstance(message, Pong):
            self.handle_pong(message)
        elif isinstance(message, ChatMessage):
            self.handle_chat_message(message)

    def handle_chat_message(self, chat_message: ChatMessage):
        # Flood chat onward, excluding the peer we got it from.
        self.client.broadcast(chat_message, self.peer)

    @abstractmethod
    def connectionMade(self):
        '''
        What has to be done, when a new connection has been made depends on who initiated it.
        Subclasses must implement this.
        '''
        self.log.debug(f'Connected to {self.peer}.')

    def handle_getadr(self, getadr: GetAddr):
        """Answer an address request with all participants we know."""
        self.log.debug(f'Address request received from {self.peer}.')
        addr_msg = Addr(list(self.client.known_participants.values()))
        # BUG FIX: the Addr reply was built but never written to the
        # transport, so GetAddr requests were silently ignored.
        self.transport.write(bytes(addr_msg))

    def handle_addr(self, addr: Addr):
        """Record the announced addresses and gossip them onward."""
        self.log.debug(f'Address information received from {self.peer}.')
        # BUG FIX: the original used map(), which is lazy in Python 3 and
        # was never consumed — no participant was ever added. Iterate
        # explicitly so the side effect actually happens.
        for address in addr.addresses:
            self.client.add_participant(address)
        self.client.broadcast(addr, self.peer)

    def handle_ping(self, ping: Ping):
        self.log.debug(f'Ping message received from {self.peer}.')

    def handle_pong(self, pong: Pong):
        self.log.debug(f'Pong message received from {self.peer}.')

    def forward_message(self, message: Message):
        self.log.debug(f'Forwarding message to {self.peer}')
        self.transport.write(bytes(message))

    @abstractmethod
    def handle_version(self, version: Version):
        """Acknowledge a compatible Version; otherwise drop the link.

        The nonce check rejects accidental self-connections.
        """
        if self.state == States.WAIT_FOR_VERSION:
            if self.client.version_compatible(version.version) and self.client.nonce != version.nonce:
                self.transport.write(bytes(VerAck()))
                self.client.add_participant(version.addr_from)
                self._peer = version.addr_from
                self.client.add_connection(self)
                return
        self.transport.loseConnection()

    @abstractmethod
    def handle_verack(self, verack: VerAck):
        """Accept a VerAck only while one is expected; otherwise drop."""
        if self.state == States.WAIT_FOR_VERACK:
            self.log.debug(f'Version acknowledged by {self.peer}.')
            return
        self.transport.loseConnection()
class IncomingPeerProtocol(PeerProtocol):
    """Handshake for connections the remote peer initiated:
    wait for their Version, then send ours and await the VerAck."""

    def connectionMade(self):
        super().connectionMade()
        self.state = States.WAIT_FOR_VERSION

    def handle_version(self, version: Version):
        # Base class sends the VerAck; follow up shortly with our own
        # Version so the peer can acknowledge us in turn.
        super().handle_version(version)
        reactor.callLater(0.1, self.transport.write,
                          bytes(Version(self.client.version, version.addr_from, self.client.address, self.client.nonce)))
        self.state = States.WAIT_FOR_VERACK

    def handle_verack(self, verack: VerAck):
        super().handle_verack(verack)
        self.log.debug(f'Connection to {self.peer} established.')
        self.state = States.CON_ESTABLISHED
class OutgoingPeerProtocol(PeerProtocol):
    """Handshake for connections we initiated: send our Version first,
    await the VerAck, then the peer's Version."""

    # NOTE(review): shadows PeerProtocol.log with an equivalent Logger;
    # harmless but redundant.
    log = Logger()

    def connectionMade(self):
        super().connectionMade()
        self.transport.write(bytes(Version(self.client.version, self.peer, self.client.address, self.client.nonce)))
        self.state = States.WAIT_FOR_VERACK

    def handle_version(self, version: Version):
        super().handle_version(version)
        self.log.debug(f'Connection to {self.peer} established.')
        self.state = States.CON_ESTABLISHED
        # Ask the freshly connected peer for other known addresses.
        reactor.callLater(0.1, self.transport.write, bytes(GetAddr()))

    def handle_verack(self, verack: VerAck):
        super().handle_verack(verack)
        self.state = States.WAIT_FOR_VERSION
class UserInput(basic.LineReceiver):
    """Reads lines typed by the user: a '!' prefix marks a client
    command, everything else is sent as chat."""

    delimiter = linesep.encode('utf-8')

    def __init__(self, client):
        self.client = client

    def lineReceived(self, line):
        if not line.startswith(b'!'):
            self.client.send_chat(line)
        else:
            # Strip the '!' marker before handing the command over.
            self.client.handle_command(line[1:])
class PeerFactory(Factory):
    """Base factory that injects the shared P2PClient into each new
    protocol instance; subclasses choose the protocol class."""

    protocol = NotImplemented  # set by subclasses

    def __init__(self, client: 'P2PClient'):
        self.client = client

    def buildProtocol(self, addr):
        return self.protocol(self.client)
class IncomingPeerFactory(PeerFactory):
    """Builds IncomingPeerProtocol instances for inbound connections."""
    protocol = IncomingPeerProtocol
class OutgoingPeerFactory(PeerFactory):
    """Builds OutgoingPeerProtocol instances for outbound connections."""
    protocol = OutgoingPeerProtocol
| [
"os.linesep.encode",
"models.Version",
"models.NetworkAddress",
"models.Message.from_bytes",
"twisted.logger.Logger",
"models.VerAck",
"models.GetAddr"
] | [((755, 763), 'twisted.logger.Logger', 'Logger', ([], {}), '()\n', (761, 763), False, 'from twisted.logger import Logger\n'), ((4863, 4871), 'twisted.logger.Logger', 'Logger', ([], {}), '()\n', (4869, 4871), False, 'from twisted.logger import Logger\n'), ((5555, 5578), 'os.linesep.encode', 'linesep.encode', (['"""utf-8"""'], {}), "('utf-8')\n", (5569, 5578), False, 'from os import linesep\n'), ((1045, 1081), 'models.NetworkAddress', 'NetworkAddress', (['peer.host', 'peer.port'], {}), '(peer.host, peer.port)\n', (1059, 1081), False, 'from models import Version, Message, VerAck, MessageError, GetAddr, NetworkAddress, Addr, Ping, Pong, ChatMessage\n'), ((1424, 1448), 'models.Message.from_bytes', 'Message.from_bytes', (['data'], {}), '(data)\n', (1442, 1448), False, 'from models import Version, Message, VerAck, MessageError, GetAddr, NetworkAddress, Addr, Ping, Pong, ChatMessage\n'), ((4480, 4572), 'models.Version', 'Version', (['self.client.version', 'version.addr_from', 'self.client.address', 'self.client.nonce'], {}), '(self.client.version, version.addr_from, self.client.address, self.\n client.nonce)\n', (4487, 4572), False, 'from models import Version, Message, VerAck, MessageError, GetAddr, NetworkAddress, Addr, Ping, Pong, ChatMessage\n'), ((4972, 5051), 'models.Version', 'Version', (['self.client.version', 'self.peer', 'self.client.address', 'self.client.nonce'], {}), '(self.client.version, self.peer, self.client.address, self.client.nonce)\n', (4979, 5051), False, 'from models import Version, Message, VerAck, MessageError, GetAddr, NetworkAddress, Addr, Ping, Pong, ChatMessage\n'), ((5358, 5367), 'models.GetAddr', 'GetAddr', ([], {}), '()\n', (5365, 5367), False, 'from models import Version, Message, VerAck, MessageError, GetAddr, NetworkAddress, Addr, Ping, Pong, ChatMessage\n'), ((3675, 3683), 'models.VerAck', 'VerAck', ([], {}), '()\n', (3681, 3683), False, 'from models import Version, Message, VerAck, MessageError, GetAddr, NetworkAddress, Addr, Ping, 
Pong, ChatMessage\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
from math import atan2, degrees
import board
import busio
import adafruit_mpu6050
class GY521_MPU6050():
    """Reads acceleration, inclination angles and temperature from an
    MPU-6050 on a GY-521 breakout over I2C (via adafruit_mpu6050)."""

    def __init__(self, id=None, address=0x68) -> None:
        self.id_sensor = id
        self.i2c = busio.I2C(board.SCL, board.SDA)
        self.mpu = adafruit_mpu6050.MPU6050(self.i2c, address=address)
        # Zero offsets per axis, captured by reset_gyro().
        self.fix_gyro = {'X': 0, 'Y': 0, 'Z': 0}

    def _read_acceleration(self) -> dict:
        """Return the current acceleration as {'X','Y','Z'} floats.

        Formats then re-parses the raw tuple to keep the original
        2-decimal rounding behaviour.
        """
        parts = ("%.2f,%.2f,%.2f" % (self.mpu.acceleration)).split(',')
        return {
            'X': float(parts[0]),
            'Y': float(parts[1]),
            'Z': float(parts[2])
        }

    def reset_gyro(self):
        """Capture the current orientation as the zero offsets.

        FIX: the original also computed inclination angles here and
        discarded them (a dead extra sensor read); the duplicated
        format/parse logic now lives in _read_acceleration().
        """
        self.fix_gyro = self._read_acceleration()

    def vector_2_degrees(self, x, y):
        """Angle of vector (x, y) in degrees, normalized to [0, 360)."""
        angle = degrees(atan2(y, x))
        if angle < 0:
            angle += 360
        return angle

    # Given an accelerometer sensor object return the inclination angles of X/Z and Y/Z
    # Returns: tuple containing the two angles in degrees
    def get_inclination(self, _sensor):
        x, y, z = _sensor.acceleration
        return self.vector_2_degrees(x, z), self.vector_2_degrees(y, z)

    def measure(self) -> dict:
        """One reading: inclination angles, offset-corrected gyroscope
        values and the chip temperature, as a nested dict."""
        angle_xz, angle_yz = self.get_inclination(self.mpu)
        raw = self._read_acceleration()
        inclinaison = {
            'X': raw['X'] - self.fix_gyro['X'],
            'Y': raw['Y'] - self.fix_gyro['Y'],
            'Z': raw['Z'] - self.fix_gyro['Z']
        }
        return {'value':{'acceleration': "{:6.2f},{:6.2f}".format(angle_xz, angle_yz), 'gyroscope': inclinaison, 'temperature': "%.2f"%self.mpu.temperature}}

    def stop(self) -> None:
        # No resources to release; kept for interface symmetry.
        pass

    def __str__(self):
        return f'Sensor:{self.id_sensor}'

    def __repr__(self):
        return str(self)
if __name__ == '__main__':
    # Smoke test: print one sensor reading per second until interrupted.
    sensor = GY521_MPU6050("GY521_MPU6050")
    try:
        while True:
            print(sensor.measure())
            time.sleep(1)
    except Exception as e:
        print(e)
| [
"busio.I2C",
"adafruit_mpu6050.MPU6050",
"math.atan2",
"time.sleep"
] | [((259, 290), 'busio.I2C', 'busio.I2C', (['board.SCL', 'board.SDA'], {}), '(board.SCL, board.SDA)\n', (268, 290), False, 'import busio\n'), ((306, 357), 'adafruit_mpu6050.MPU6050', 'adafruit_mpu6050.MPU6050', (['self.i2c'], {'address': 'address'}), '(self.i2c, address=address)\n', (330, 357), False, 'import adafruit_mpu6050\n'), ((863, 874), 'math.atan2', 'atan2', (['y', 'x'], {}), '(y, x)\n', (868, 874), False, 'from math import atan2, degrees\n'), ((2019, 2032), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2029, 2032), False, 'import time\n')] |
""" generates lists of SARS-CoV-2 samples which occurred before a particular date
Also generates a dictionary of reference compressed sequences
And a subset of these
Together, these can be passed to a ram_persistence object which
can be used instead of an fn3persistence object to test the performance of PCA, or for other
unit testing purposes.
Also useful for investigating how PCA detected the ingress of new strains over time.
Uses public cog metadata downloaded from COG-UK 7/4/2021, saved in
testdata/pca/cog_metadata.csv.gz, and requires access to an fn3persistence object containing the same data.
To run:
pipenv run python3 utils/make_temporal_subsets.py
"""
import os
import pandas as pd
import datetime
import gzip
import pickle
import progressbar
import random
from findn.mongoStore import fn3persistence
from findn.common_utils import ConfigManager
# open connection to existing covid datastore
config_file = os.path.join("demos", "covid", "covid_config_v3.json")
cfm = ConfigManager(config_file)
CONFIG = cfm.read_config()
PERSIST = fn3persistence(dbname=CONFIG["SERVERNAME"], connString=CONFIG["FNPERSISTENCE_CONNSTRING"], debug=CONFIG["DEBUGMODE"])
inputfile = "/data/software/fn4dev/testdata/pca/cog_metadata.csv.gz"
outputdir = "/data/data/pca/subsets" # or wherever
# get samples which are in server
extant_sample_ids = PERSIST.guids()
print("There are {0} samples in the server".format(len(extant_sample_ids)))
# read metadata file into pandas
with gzip.open(inputfile, "rt") as f:
    df = pd.read_csv(f)
# we are using the middle part of the cog_id as the sample name as the sample_id; extract this.
sample_ids = df["sequence_name"].to_list()
df["sample_id"] = [x.split("/")[1] for x in sample_ids]
print("There are {0} samples in the COG-UK list".format(len(df.index)))
# what is in the server & not in the list?
server_sample_ids = set(extant_sample_ids)
inputfile_sample_ids = set(df['sample_id'])
missing = server_sample_ids - inputfile_sample_ids
print("Missing samples: n=", len(missing))
missing_df = pd.DataFrame({'missing': list(missing)})
print(missing_df)
missing_df.to_csv("/data/data/inputfasta/missing_meta.csv")
# load a small subset of the reference compressed sequences, for testing purposes
# NOTE(review): random.sample is unseeded, so this 5,000-sample subset
# is not reproducible across runs — seed if reproducibility matters.
print("Dumping 5,000 sample test set")
storage_dict = {}
sampled = random.sample(df["sample_id"].to_list(), 5000)
bar = progressbar.ProgressBar(max_value=len(sampled))
# NOTE(review): misleading message — this loop dumps only the 5,000
# sampled ids; the full dump happens further below.
print("Dumping all samples")
for i, sample_id in enumerate(sampled):
    res = PERSIST.refcompressedsequence_read(sample_id)
    bar.update(i)
    storage_dict[sample_id] = res
bar.finish()
# write out the dictionary (5,000-sample test set and its id list)
outputfile = "/data/software/fn4dev/testdata/pca/seqs_5000test.pickle"
with open(outputfile, "wb") as f:
    pickle.dump(storage_dict, f)
outputfile = "/data/software/fn4dev/testdata/pca/seqs_5000test_ids.pickle"
with open(outputfile, "wb") as f:
    pickle.dump(sampled, f)
# load ALL reference compressed sequences from the datastore
storage_dict = {}
bar = progressbar.ProgressBar(max_value=len(df.index))
for i, sample_id in enumerate(df["sample_id"]):
    res = PERSIST.refcompressedsequence_read(sample_id)
    bar.update(i)
    storage_dict[sample_id] = res
bar.finish()
# write out the full dictionary
outputfile = os.path.join(outputdir, "seqs_20210421.pickle")
with open(outputfile, "wb") as f:
    pickle.dump(storage_dict, f)
# construct per-day sample counts between 1 June 2020 and end March 2021
# (ISO-format date strings compare correctly as plain strings)
cnts = df.groupby(["sample_date"]).size()
cnts = cnts[cnts.index >= "2020-06-01"]
cnts = cnts[cnts.index < "2021-04-01"]
cnts = pd.DataFrame(cnts)
cnts.columns = ["count"]
cnts["dow"] = [datetime.date.fromisoformat(item).weekday() for item in cnts.index]
cnts["isodate"] = [datetime.date.fromisoformat(item) for item in cnts.index]
# write samples to consider in the PCA into a series of pickle files,
# one per cutoff date, named "<weekday>-<date>.pickle"
for cutoff_date in cnts.index:
    dow = cnts.loc[cutoff_date, "dow"]
    df_subset = df[df["sample_date"] < cutoff_date]
    sample_ids = df_subset["sample_id"].to_list()
    outputfile = os.path.join(outputdir, "{0}-{1}.pickle".format(dow, cutoff_date))
    with open(outputfile, "wb") as f:
        pickle.dump(sample_ids, f)
    print(outputfile)
| [
"findn.common_utils.ConfigManager",
"pickle.dump",
"pandas.read_csv",
"gzip.open",
"os.path.join",
"findn.mongoStore.fn3persistence",
"pandas.DataFrame",
"datetime.date.fromisoformat"
] | [((931, 985), 'os.path.join', 'os.path.join', (['"""demos"""', '"""covid"""', '"""covid_config_v3.json"""'], {}), "('demos', 'covid', 'covid_config_v3.json')\n", (943, 985), False, 'import os\n'), ((992, 1018), 'findn.common_utils.ConfigManager', 'ConfigManager', (['config_file'], {}), '(config_file)\n', (1005, 1018), False, 'from findn.common_utils import ConfigManager\n'), ((1056, 1178), 'findn.mongoStore.fn3persistence', 'fn3persistence', ([], {'dbname': "CONFIG['SERVERNAME']", 'connString': "CONFIG['FNPERSISTENCE_CONNSTRING']", 'debug': "CONFIG['DEBUGMODE']"}), "(dbname=CONFIG['SERVERNAME'], connString=CONFIG[\n 'FNPERSISTENCE_CONNSTRING'], debug=CONFIG['DEBUGMODE'])\n", (1070, 1178), False, 'from findn.mongoStore import fn3persistence\n'), ((3278, 3325), 'os.path.join', 'os.path.join', (['outputdir', '"""seqs_20210421.pickle"""'], {}), "(outputdir, 'seqs_20210421.pickle')\n", (3290, 3325), False, 'import os\n'), ((3580, 3598), 'pandas.DataFrame', 'pd.DataFrame', (['cnts'], {}), '(cnts)\n', (3592, 3598), True, 'import pandas as pd\n'), ((1481, 1507), 'gzip.open', 'gzip.open', (['inputfile', '"""rt"""'], {}), "(inputfile, 'rt')\n", (1490, 1507), False, 'import gzip\n'), ((1523, 1537), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (1534, 1537), True, 'import pandas as pd\n'), ((2785, 2813), 'pickle.dump', 'pickle.dump', (['storage_dict', 'f'], {}), '(storage_dict, f)\n', (2796, 2813), False, 'import pickle\n'), ((2927, 2950), 'pickle.dump', 'pickle.dump', (['sampled', 'f'], {}), '(sampled, f)\n', (2938, 2950), False, 'import pickle\n'), ((3364, 3392), 'pickle.dump', 'pickle.dump', (['storage_dict', 'f'], {}), '(storage_dict, f)\n', (3375, 3392), False, 'import pickle\n'), ((3726, 3759), 'datetime.date.fromisoformat', 'datetime.date.fromisoformat', (['item'], {}), '(item)\n', (3753, 3759), False, 'import datetime\n'), ((4179, 4205), 'pickle.dump', 'pickle.dump', (['sample_ids', 'f'], {}), '(sample_ids, f)\n', (4190, 4205), False, 'import pickle\n'), 
((3639, 3672), 'datetime.date.fromisoformat', 'datetime.date.fromisoformat', (['item'], {}), '(item)\n', (3666, 3672), False, 'import datetime\n')] |
import os
import re
import pickle
import numpy as np
import pandas as pd
from tqdm import tqdm
# Assign labels used in eep conversion
# Maps the DSEP track column names (values) to the labels kiauhoku's
# EEP machinery expects (keys), plus the metric scales and the number
# of secondary EEPs per primary-EEP interval.
eep_params = dict(
    age = 'Age (yrs)',
    hydrogen_lum = 'L_H',
    lum = 'Log L',
    logg = 'Log g',
    log_teff = 'Log T',
    core_hydrogen_frac = 'X_core', # must be added (derived as 1 - Y - Z)
    core_helium_frac = 'Y_core',
    teff_scale = 20, # used in metric function
    lum_scale = 1, # used in metric function
    # `intervals` is a list containing the number of secondary Equivalent
    # Evolutionary Phases (EEPs) between each pair of primary EEPs.
    intervals = [200, # Between PreMS and ZAMS
                 50, # Between ZAMS and EAMS
                 100, # Between EAMS and IAMS
                 100, # IAMS-TAMS
                 150], # TAMS-RGBump
)
def my_PreMS(track, eep_params, i0=None):
    """Return the index of the pre-main-sequence (PreMS) point.

    The Dartmouth (DSEP) tracks lack the central-temperature column
    required by the default PreMS locator, so the very first point of
    every track is taken as the PreMS EEP.
    """
    return 0
def my_TAMS(track, eep_params, i0, Xmin=1e-5):
    """Locate the terminal-age main sequence (TAMS).

    Returns the first index at/after `i0` where the central hydrogen
    fraction has dropped to `Xmin` or below, or -1 if it never does.
    The default EEP code uses Xcen < 1e-12, but not all DSEP tracks
    reach that, so Xmin = 1e-5 is used as the critical value here.
    """
    xcen = track.loc[i0:, eep_params['core_hydrogen_frac']]
    depleted = (xcen <= Xmin)
    return depleted.idxmax() if depleted.any() else -1
def my_RGBump(track, eep_params, i0=None):
    """Locate the red-giant-branch bump (logarithmic-luminosity version).

    Starting at the first point after `i0` where log L exceeds 1, walk
    forward while Teff keeps falling; the point where it turns around is
    the RGB bump. Returns -1 when log L never exceeds 1 or when no
    turnaround is found before the end of the track.
    """
    n_pts = len(track)
    lum_tr = track.loc[i0:, eep_params['lum']]
    logT_tr = track.loc[i0:, eep_params['log_teff']]

    bright = (lum_tr > 1)
    if not bright.any():
        return -1

    idx = bright.idxmax() + 1
    # Advance while Teff is still decreasing (condition order matches
    # the original: the temperature test runs before the bounds test).
    while logT_tr[idx] < logT_tr[idx - 1] and idx < n_pts - 1:
        idx += 1

    # Reaching the final index means no extremum was found.
    return -1 if idx >= n_pts - 1 else idx - 1
def my_HRD(track, eep_params):
    """Cumulative path length along the track in the scaled H-R diagram.

    Adapted from eep._HRD_distance with log L used directly (no extra
    logarithm). Teff and luminosity differences are scaled so that
    changes in the two axes are comparable; element i of the returned
    array is the distance accumulated up to point i (element 0 is 0).
    """
    t_scale = eep_params['teff_scale']
    l_scale = eep_params['lum_scale']
    log_teff = track[eep_params['log_teff']].to_numpy()
    log_lum = track[eep_params['lum']].to_numpy()

    # Per-segment Euclidean lengths in the scaled plane.
    d_teff = np.diff(log_teff) * t_scale
    d_lum = np.diff(log_lum) * l_scale
    segments = np.sqrt(d_teff ** 2 + d_lum ** 2)

    # Sequential accumulation (cumsum) matches the original loop.
    dist = np.zeros(len(track))
    dist[1:] = np.cumsum(segments)
    return dist
def from_dartmouth(path):
    """Read one DSEP '.trk' file into a multi-indexed DataFrame.

    Initial mass and metallicity are decoded from fixed-width fields of
    the file name (e.g. 'm100fehp05afep2.trk' -> mass 1.00, [Fe/H] +0.5)
    and become the first two index levels; the track step is the third.
    """
    stem = path.split('/')[-1].replace('.trk', '')

    # Decode stellar parameters from the file-name fields.
    mass = int(stem[1:4]) / 100
    met = int(stem[8:10]) / 10
    if stem[7] == 'm':          # 'm'/'p' encode the sign of [Fe/H]
        met *= -1
    alpha = int(stem[14:]) / 10
    if stem[13] == 'm':         # sign of the alpha enhancement
        alpha *= -1
    # (alpha is parsed but, as in the original, not stored in the index)

    with open(path, 'r') as f:
        f.readline()            # discard the header line
        col_line = f.readline()
        data_lines = f.readlines()

    # Column names are separated by runs of 2+ spaces, so names with a
    # single internal space (e.g. 'Log T') survive the split.
    columns = re.split(r'\s{2,}', col_line.strip('# \n'))
    data = np.genfromtxt(data_lines)

    index = pd.MultiIndex.from_tuples(
        [(mass, met, step) for step in range(len(data))],
        names=['initial_mass', 'initial_met', 'step'])
    return pd.DataFrame(data, index=index, columns=columns)
def all_from_dartmouth(raw_grids_path, progress=True):
    """Read every '.trk' file in `raw_grids_path` into one DataFrame.

    Tracks are concatenated, sorted by their (mass, met, step) index,
    and the central hydrogen fraction X_core — required downstream by
    the EEP computation — is derived from Y_core and Z_core.
    """
    track_files = [f for f in os.listdir(raw_grids_path) if '.trk' in f]
    iterator = tqdm(track_files) if progress else track_files

    frames = [from_dartmouth(os.path.join(raw_grids_path, name)) for name in iterator]
    dfs = pd.concat(frames).sort_index()

    # Need X_core for EEP computation
    dfs['X_core'] = 1 - dfs['Y_core'] - dfs['Z_core']
    return dfs
def install(
    raw_grids_path,
    name=None,
    eep_params=eep_params,
    eep_functions={'prems': my_PreMS, 'tams': my_TAMS, 'rgbump': my_RGBump},
    metric_function=my_HRD,
):
    '''
    The main method to install grids from raw Dartmouth (DSEP) '.trk'
    model-grid files: reads them, caches the full grid, converts it to
    EEP-based tracks, and builds and caches an interpolator.

    NOTE(review): eep_functions uses a mutable dict as a default
    argument; it is never mutated here, but callers should not modify
    the returned default in place.

    Parameters
    ----------
    raw_grids_path (str): the path to the folder containing the raw model grids.

    name (str, optional): the name of the grid you're installing. By default,
        the basename of the `raw_grids_path` will be used.

    eep_params (dict, optional): contains a mapping from your grid's specific
        column names to the names used by kiauhoku's default EEP functions.
        It also contains 'eep_intervals', the number of secondary EEPs
        between each consecutive pair of primary EEPs. By default, the params
        defined at the top of this script will be used, but users may specify
        their own.

    eep_functions (dict, optional): if the default EEP functions won't do the
        job, you can specify your own and supply them in a dictionary.
        EEP functions must have the call signature
        function(track, eep_params), where `track` is a single track.
        If none are supplied, the default functions will be used.

    metric_function (callable, None): the metric function is how the EEP
        interpolator spaces the secondary EEPs. By default, the path
        length along the evolution track on the H-R diagram (luminosity vs.
        Teff) is used, but you can specify your own if desired.
        metric_function must have the call signature
        function(track, eep_params), where `track` is a single track.
        If no function is supplied, defaults to dartmouth.my_HRD.

    Returns None
    '''
    # Imported lazily to avoid a circular import with the stargrid module.
    from .stargrid import from_pandas
    from .stargrid import grids_path as install_path

    if name is None:
        name = os.path.basename(raw_grids_path)

    # Create cache directories
    path = os.path.join(install_path, name)
    if not os.path.exists(path):
        os.makedirs(path)

    # Cache eep parameters so later runs can reuse them
    with open(os.path.join(path, 'eep_params.pkl'), 'wb') as f:
        pickle.dump(eep_params, f)

    print('Reading and combining grid files')
    grids = all_from_dartmouth(raw_grids_path)
    grids = from_pandas(grids, name=name)

    # Save full grid to file
    full_save_path = os.path.join(path, 'full_grid.pqt')
    print(f'Saving to {full_save_path}')
    grids.to_parquet(full_save_path)

    print(f'Converting to eep-based tracks')
    eeps = grids.to_eep(eep_params, eep_functions, metric_function)

    # Save EEP grid to file
    eep_save_path = os.path.join(path, 'eep_grid.pqt')
    print(f'Saving to {eep_save_path}')
    eeps.to_parquet(eep_save_path)

    # Create and save interpolator to file
    interp = eeps.to_interpolator()
    interp_save_path = os.path.join(path, 'interpolator.pkl')
    print(f'Saving interpolator to {interp_save_path}')
    interp.to_pickle(path=interp_save_path)

    print(f'Model grid "{name}" installed.') | [
"os.path.exists",
"os.listdir",
"pickle.dump",
"numpy.sqrt",
"os.makedirs",
"tqdm.tqdm",
"os.path.join",
"numpy.zeros",
"pandas.concat",
"os.path.basename",
"pandas.DataFrame",
"numpy.genfromtxt"
] | [((2750, 2761), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2758, 2761), True, 'import numpy as np\n'), ((3554, 3579), 'numpy.genfromtxt', 'np.genfromtxt', (['data_lines'], {}), '(data_lines)\n', (3567, 3579), True, 'import numpy as np\n'), ((3815, 3869), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'multi_index', 'columns': 'columns'}), '(data, index=multi_index, columns=columns)\n', (3827, 3869), True, 'import pandas as pd\n'), ((6396, 6428), 'os.path.join', 'os.path.join', (['install_path', 'name'], {}), '(install_path, name)\n', (6408, 6428), False, 'import os\n'), ((6802, 6837), 'os.path.join', 'os.path.join', (['path', '"""full_grid.pqt"""'], {}), "(path, 'full_grid.pqt')\n", (6814, 6837), False, 'import os\n'), ((7079, 7113), 'os.path.join', 'os.path.join', (['path', '"""eep_grid.pqt"""'], {}), "(path, 'eep_grid.pqt')\n", (7091, 7113), False, 'import os\n'), ((7292, 7330), 'os.path.join', 'os.path.join', (['path', '"""interpolator.pkl"""'], {}), "(path, 'interpolator.pkl')\n", (7304, 7330), False, 'import os\n'), ((4066, 4080), 'tqdm.tqdm', 'tqdm', (['filelist'], {}), '(filelist)\n', (4070, 4080), False, 'from tqdm import tqdm\n'), ((4165, 4200), 'os.path.join', 'os.path.join', (['raw_grids_path', 'fname'], {}), '(raw_grids_path, fname)\n', (4177, 4200), False, 'import os\n'), ((6320, 6352), 'os.path.basename', 'os.path.basename', (['raw_grids_path'], {}), '(raw_grids_path)\n', (6336, 6352), False, 'import os\n'), ((6440, 6460), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6454, 6460), False, 'import os\n'), ((6470, 6487), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (6481, 6487), False, 'import os\n'), ((6588, 6614), 'pickle.dump', 'pickle.dump', (['eep_params', 'f'], {}), '(eep_params, f)\n', (6599, 6614), False, 'import pickle\n'), ((2960, 2978), 'numpy.sqrt', 'np.sqrt', (['temp_dist'], {}), '(temp_dist)\n', (2967, 2978), True, 'import numpy as np\n'), ((3985, 4011), 'os.listdir', 'os.listdir', 
(['raw_grids_path'], {}), '(raw_grids_path)\n', (3995, 4011), False, 'import os\n'), ((4258, 4276), 'pandas.concat', 'pd.concat', (['df_list'], {}), '(df_list)\n', (4267, 4276), True, 'import pandas as pd\n'), ((6530, 6566), 'os.path.join', 'os.path.join', (['path', '"""eep_params.pkl"""'], {}), "(path, 'eep_params.pkl')\n", (6542, 6566), False, 'import os\n')] |
from com.vividsolutions.jts.io import WKTReader, WKTWriter
from geoscript.util import deprecated
def readWKT(wkt):
"""
Constructs a geometry from Well Known Text.
*wkt* is the Well Known Text string representing the geometry as described by http://en.wikipedia.org/wiki/Well-known_text.
>>> readWKT('POINT (1 2)')
POINT (1 2)
"""
return WKTReader().read(wkt)
@deprecated
def fromWKT(wkt):
"""Use :func:`readWKT`"""
return readWKT(wkt)
def writeWKT(g):
"""
Writes a geometry as Well Known Text.
*g* is the geometry to serialize.
>>> from geoscript.geom import Point
>>> str(writeWKT(Point(1,2)))
'POINT (1 2)'
"""
return WKTWriter().write(g)
| [
"com.vividsolutions.jts.io.WKTReader",
"com.vividsolutions.jts.io.WKTWriter"
] | [((355, 366), 'com.vividsolutions.jts.io.WKTReader', 'WKTReader', ([], {}), '()\n', (364, 366), False, 'from com.vividsolutions.jts.io import WKTReader, WKTWriter\n'), ((664, 675), 'com.vividsolutions.jts.io.WKTWriter', 'WKTWriter', ([], {}), '()\n', (673, 675), False, 'from com.vividsolutions.jts.io import WKTReader, WKTWriter\n')] |
import os
import dj_database_url
import dotenv
from .base import BASE_DIR
env = BASE_DIR / '.env'
dotenv.read_dotenv(env)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ["multiple-vendor-e-commerce.herokuapp.com"]
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
db_from_env = dj_database_url.config()
DATABASES['default'].update(db_from_env)
# Stripe
STRIPE_PUBLIC_KEY = os.environ['STRIPE_PUBLIC_KEY']
STRIPE_SECRET_KEY = os.environ['STRIPE_SECRET_KEY']
# Handling Emails
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '<EMAIL>'
EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
EMAIL_PORT = 587
EMAIL_USE_LTS = True
DEFAULT_EMAIL_FROM = 'E-commerce <<EMAIL>>'
CONTACT_EMAIL = '<EMAIL>'
| [
"dj_database_url.config",
"dotenv.read_dotenv"
] | [((102, 125), 'dotenv.read_dotenv', 'dotenv.read_dotenv', (['env'], {}), '(env)\n', (120, 125), False, 'import dotenv\n'), ((490, 514), 'dj_database_url.config', 'dj_database_url.config', ([], {}), '()\n', (512, 514), False, 'import dj_database_url\n')] |
from precise.covariance.movingaverage import ema_scov
from precise.covariance.matrixfunctions import grand_mean, grand_shrink
from sklearn.covariance._shrunk_covariance import ledoit_wolf_shrinkage
import numpy as np
# Experimental estimator inspired by Ledoit-Wolf
# Keeps a buffer of last n_buffer observations
# Tracks quantities akin to a^2, d^2 in LW
def lw_ema_scov(s:dict, x=None, r=0.025)->dict:
if s.get('s_c') is None:
if isinstance(x,int):
return _lw_ema_scov_init(n_dim=x, r=r)
else:
s = _lw_ema_scov_init(n_dim=len(x), r=r)
if x is not None:
s = _lw_ema_scov_update(s=s, x=x, r=r)
return s
def _lw_ema_scov_init(n_dim, r):
sc = ema_scov({}, n_dim, r=r)
return {'s_c':sc,
'bn_bar':None,
'a2':0,
'mn':0,
'n_new':0,
'buffer':[]}
def _lw_ema_scov_update(s, x, r):
"""
Attempts to track quantities similar to those used to estimate LD shrinkage
"""
x = np.asarray(x)
s['s_c'] = ema_scov(s=s['s_c'], x=x, r=r)
s['buffer'].append(x)
if len(s['buffer'])>s['s_c']['n_emp']:
# Update running estimate of the LD shrinkage parameter
s['n_new'] = s['n_new']+1
xl = s['buffer'].pop(0)
xc = np.atleast_2d(xl-s['s_c']['mean']) # <--- Do we need this?
scov = s['s_c']['scov']
# Compute d^2
mn = grand_mean(scov)
s['mn'] = mn
n_dim = np.shape(scov)[0]
s['dn'] = np.linalg.norm(scov - mn * np.eye(n_dim))**2
# Update b^2
xc2 = xc
xl2 = np.dot(xc2.T,xc2) - scov
if s.get('bn_bar') is None:
s['bn_bar'] = s['lmbd']*s['dn']
s['lmbd_lw'] = 1.0 * s['lmbd']
r_shrink = r/2 # <--- Heuristic
bk = np.linalg.norm( xl2 )
s['bn_bar'] = (1-r_shrink)*s['bn_bar'] + r_shrink*bk # b^2
ratio = bk/s['dn']
# Imply new shrinkage
bn = min( s['bn_bar'], s['dn'] )
lmbd = bn/s['dn']
s['lmbd'] = lmbd
if 2< s['s_c']['n_samples']<2*s['s_c']['n_emp']:
# Override with traditional Ledoit-Shrinkage
X = np.asarray(s['buffer'])
s['lmbd'] = ledoit_wolf_shrinkage(X=X)
if s['s_c']['n_samples']>2:
scov = s['s_c']['scov']
s['scov'] = grand_shrink(a=scov, lmbd=s['lmbd'], copy=True)
return s
| [
"numpy.atleast_2d",
"sklearn.covariance._shrunk_covariance.ledoit_wolf_shrinkage",
"precise.covariance.movingaverage.ema_scov",
"numpy.eye",
"precise.covariance.matrixfunctions.grand_mean",
"numpy.asarray",
"numpy.dot",
"precise.covariance.matrixfunctions.grand_shrink",
"numpy.linalg.norm",
"numpy... | [((711, 735), 'precise.covariance.movingaverage.ema_scov', 'ema_scov', (['{}', 'n_dim'], {'r': 'r'}), '({}, n_dim, r=r)\n', (719, 735), False, 'from precise.covariance.movingaverage import ema_scov\n'), ((1017, 1030), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1027, 1030), True, 'import numpy as np\n'), ((1047, 1077), 'precise.covariance.movingaverage.ema_scov', 'ema_scov', ([], {'s': "s['s_c']", 'x': 'x', 'r': 'r'}), "(s=s['s_c'], x=x, r=r)\n", (1055, 1077), False, 'from precise.covariance.movingaverage import ema_scov\n'), ((1290, 1326), 'numpy.atleast_2d', 'np.atleast_2d', (["(xl - s['s_c']['mean'])"], {}), "(xl - s['s_c']['mean'])\n", (1303, 1326), True, 'import numpy as np\n'), ((1418, 1434), 'precise.covariance.matrixfunctions.grand_mean', 'grand_mean', (['scov'], {}), '(scov)\n', (1428, 1434), False, 'from precise.covariance.matrixfunctions import grand_mean, grand_shrink\n'), ((1807, 1826), 'numpy.linalg.norm', 'np.linalg.norm', (['xl2'], {}), '(xl2)\n', (1821, 1826), True, 'import numpy as np\n'), ((2167, 2190), 'numpy.asarray', 'np.asarray', (["s['buffer']"], {}), "(s['buffer'])\n", (2177, 2190), True, 'import numpy as np\n'), ((2211, 2237), 'sklearn.covariance._shrunk_covariance.ledoit_wolf_shrinkage', 'ledoit_wolf_shrinkage', ([], {'X': 'X'}), '(X=X)\n', (2232, 2237), False, 'from sklearn.covariance._shrunk_covariance import ledoit_wolf_shrinkage\n'), ((2322, 2369), 'precise.covariance.matrixfunctions.grand_shrink', 'grand_shrink', ([], {'a': 'scov', 'lmbd': "s['lmbd']", 'copy': '(True)'}), "(a=scov, lmbd=s['lmbd'], copy=True)\n", (2334, 2369), False, 'from precise.covariance.matrixfunctions import grand_mean, grand_shrink\n'), ((1472, 1486), 'numpy.shape', 'np.shape', (['scov'], {}), '(scov)\n', (1480, 1486), True, 'import numpy as np\n'), ((1606, 1624), 'numpy.dot', 'np.dot', (['xc2.T', 'xc2'], {}), '(xc2.T, xc2)\n', (1612, 1624), True, 'import numpy as np\n'), ((1535, 1548), 'numpy.eye', 'np.eye', (['n_dim'], {}), '(n_dim)\n', 
(1541, 1548), True, 'import numpy as np\n')] |
import json
import os.path
import operator
import time
from multiprocessing import Pool
import markovify
from tqdm import tqdm
removeWords = ['c', 'tsp', 'qt', 'lb', 'pkg', 'oz', 'med', 'tbsp', 'sm']
removeWords2 = [" recipes "," recipe "," mashed "," fat ",' c. ',' c ','grams','gram','chopped','tbsps','tbsp','cups','cup','tsps','tsp','ozs','oz','qts','qt','lbs','lb']
ingredientsJson = {}
if not os.path.exists("ingredients.json"):
print("Generating ingredient list...")
ingredientList = open("../finished/ingredientList.txt",
"r").read().split("\n")
for ingredient in ingredientList:
ingredient = " "+ingredient.lower()+" "
for removeWord in removeWords:
ingredient = ingredient.replace(removeWord + '. ', '')
for removeWord in removeWords2:
ingredient = ingredient.replace(removeWord, '')
ingredient = ingredient.replace(' *', '')
try:
num = int(ingredient[0])
ingredient = ' '.join(ingredient.split()[1:])
except:
pass
try:
num = int(ingredient[0])
ingredient = ' '.join(ingredient.split()[1:])
except:
pass
ingredient = ' '.join(ingredient.split())
if ingredient not in ingredientsJson:
ingredientsJson[ingredient] = 0
ingredientsJson[ingredient] += 1
with open("ingredients.json", "w") as f:
f.write(json.dumps(ingredientsJson, indent=2))
else:
print("Loading ingredient list...")
ingredientsJson = json.load(open("ingredients.json", "r"))
ingredientsPriority = []
for ingredient in ingredientsJson.keys():
if ingredientsJson[ingredient] > 1000 and len(ingredient) > 2:
ingredientsPriority.append(ingredient)
ingredientsPriority2 = []
for ingredient in ingredientsJson.keys():
if ingredientsJson[ingredient] > 50 and ingredientsJson[ingredient] <= 5000 and len(ingredient) > 2:
ingredientsPriority2.append(ingredient)
ingredientsPriority.sort(key=len, reverse=True) # sorts by descending length
ingredientsPriority2.sort(key=len, reverse=True) # sorts by descending length
ingredients = ingredientsPriority #+ ingredientsPriority2
print(ingredients[:100])
print(ingredients[-10:])
def hasIngredients(sentence):
sentence = " "+sentence.replace('.', '').replace(':', '').replace(',', '')+" "
recipeIngredients = []
sentenceSize = len(sentence.split())
for ingredient in ingredients:
if " "+ingredient+" " in sentence:
recipeIngredients.append(ingredient)
sentence = sentence.replace(ingredient,'')
sentenceSize = len(sentence.split())
if sentenceSize < 2:
break
return recipeIngredients
# sortedIngredients = sorted(
# ingredients.items(), key=operator.itemgetter(1), reverse=True)
# for i in range(1000):
# print(sortedIngredients[i])
if os.path.exists("instructions_model.json"):
print("Loading instructions model...")
chain_json = json.load(open("instructions_model.json", "r"))
stored_chain = markovify.Chain.from_json(chain_json)
instructions_model = markovify.Text.from_chain(chain_json)
else:
print("Generating instructions model...")
with open("../finished/instructions.txt") as f:
text = f.read()
instructions_model = markovify.NewlineText(text, state_size=3)
with open("instructions_model.json", "w") as f:
f.write(json.dumps(instructions_model.chain.to_json()))
# if os.path.exists("title_model.json"):
# print("Loading title model...")
# chain_json = json.load(open("title_model.json", "r"))
# stored_chain = markovify.Chain.from_json(chain_json)
# title_model = markovify.Text.from_chain(chain_json)
# else:
# print("Generaring title model...")
# with open("../finished/titles.txt") as f:
# text = f.read()
# title_model = markovify.NewlineText(text)
# with open("title_model.json", "w") as f:
# f.write(json.dumps(title_model.chain.to_json()))
# if os.path.exists("ingredients_model.json"):
# print("Loading ingredients model...")
# chain_json = json.load(open("ingredients_model.json", "r"))
# stored_chain = markovify.Chain.from_json(chain_json)
# ingredients_model = markovify.Text.from_chain(chain_json)
# else:
# print("Generaring ingredients model...")
# with open("../finished/ingredients.txt") as f:
# text = f.read()
# ingredients_model = markovify.NewlineText(text)
# with open("ingredients_model.json", "w") as f:
# f.write(json.dumps(ingredients_model.chain.to_json()))
def makeFiles(i):
with open("markov_instructions.%d.txt" % i,"w") as f:
while True:
try:
ing = getInstruction()
foods = hasIngredients(ing)
f.write(json.dumps({'text':ing,'ingredients':foods}) + "\n")
except:
pass
def getIngredient(ing=""):
sentence = ""
if ing == "":
sentence = ingredients_model.make_sentence(tries=1).lower()
else:
tries = 0
while ing not in sentence:
sentence = ingredients_model.make_sentence(tries=1).lower()
tries += 1
if tries > 100:
break
return sentence
def getInstruction(ing=""):
sentence = ""
if ing == "":
sentence = instructions_model.make_sentence(tries=1).lower()
else:
tries = 0
while ing not in sentence:
sentence = instructions_model.make_sentence(tries=1).lower()
tries += 1
if tries > 100:
break
return sentence
def getTitle(num):
sentence = title_model.make_sentence(tries=1).lower()
return sentence
# print("Generating titles...")
# t = time.time()
# with open("markov_titles.txt", "w") as f:
# for i in tqdm(range(100)):
# try:
# f.write(getTitle(i) + "\n")
# except:
# pass
# print((time.time() - t) / 100.0)
print("Making ingredients...")
makeFiles(0)
# p = Pool(8)
# p.map(makeFiles, range(8))
# print("Generating ingredients...")
# t = time.time()
# with open("markov_titles.txt", "w") as f:
# for i in tqdm(range(100)):
# f.write(getIngredient() + "\n")
# print((time.time() - t) / 100.0)
# print("Generating instrutions...")
# with open("markov_instructions.txt", "w") as f:
# for i in tqdm(range(1000000)):
# f.write(getInstruction() + "\n")
def generateRecipe():
recipe = {}
print("Generating recipe...")
recipe['ingredients'] = []
recipe['title'] = getTitle(1)
recipe['title_ingredients'] = hasIngredients(recipe['title'])
recipe['ingredients'] += recipe['title_ingredients']
recipe['directions'] = []
recipe['direction_ingredients'] = []
print("Getting directions")
for ingredient in recipe['title_ingredients']:
print(ingredient)
instruct = getInstruction(ing=ingredient)
ings = hasIngredients(instruct)
recipe['ingredients'] += ings
recipe['direction_ingredients'].append(ings)
recipe['directions'].append(instruct)
recipe['ingredients'] = list(set(recipe['ingredients']))
recipe['ingredientList'] = []
print("Getting ingredients")
for ingredient in recipe['ingredients']:
print(ingredient)
recipe['ingredientList'].append(getIngredient(ingredient))
print(json.dumps(recipe, indent=2))
print("\n\n" + recipe['title'] + "\n\n" + "\n".join(recipe['ingredientList']
) + "\n\n" + "\n".join(recipe['directions']))
# generateRecipe()
| [
"json.dumps",
"markovify.Text.from_chain",
"markovify.Chain.from_json",
"markovify.NewlineText"
] | [((3093, 3130), 'markovify.Chain.from_json', 'markovify.Chain.from_json', (['chain_json'], {}), '(chain_json)\n', (3118, 3130), False, 'import markovify\n'), ((3156, 3193), 'markovify.Text.from_chain', 'markovify.Text.from_chain', (['chain_json'], {}), '(chain_json)\n', (3181, 3193), False, 'import markovify\n'), ((3351, 3392), 'markovify.NewlineText', 'markovify.NewlineText', (['text'], {'state_size': '(3)'}), '(text, state_size=3)\n', (3372, 3392), False, 'import markovify\n'), ((7475, 7503), 'json.dumps', 'json.dumps', (['recipe'], {'indent': '(2)'}), '(recipe, indent=2)\n', (7485, 7503), False, 'import json\n'), ((1456, 1493), 'json.dumps', 'json.dumps', (['ingredientsJson'], {'indent': '(2)'}), '(ingredientsJson, indent=2)\n', (1466, 1493), False, 'import json\n'), ((4885, 4932), 'json.dumps', 'json.dumps', (["{'text': ing, 'ingredients': foods}"], {}), "({'text': ing, 'ingredients': foods})\n", (4895, 4932), False, 'import json\n')] |
from django.db import models
from django.contrib.postgres.fields import ArrayField
from django.urls import reverse
# Create your models here.
class Neighbourhood(models.Model):
image = models.ImageField(upload_to='neighbourhood_avatars', default='dummy_neighbourhood.jpg')
name = models.CharField(max_length=200)
location = models.CharField(max_length=200)
police_hotline= ArrayField(models.CharField(max_length=13, blank=True),size=3, blank=True, null=True)
hospital_hotline= ArrayField(models.CharField(max_length=13, blank=True),size=3, blank=True, null=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('home')
class Business(models.Model):
FOOD = 1
BEAUTY = 2
SOCIAL = 3
ENTERTAINMENT = 4
HOUSING = 5
BUSINESS_CATEGORIES = [
(FOOD, 'Food and Beverages'),
(BEAUTY, 'Beauty shops'),
(SOCIAL,'Social Amentity'),
(ENTERTAINMENT, 'Entertainment'),
(HOUSING, 'Housing'),
]
image = models.ImageField(upload_to='business_avatars', default='business.jpg')
name = models.CharField(max_length=200)
location = models.CharField(max_length=200)
description = models.TextField(blank=True, null=True)
category = models.PositiveSmallIntegerField(choices=BUSINESS_CATEGORIES)
neighbourhood = models.ForeignKey(Neighbourhood, on_delete=models.CASCADE, related_name='businesses')
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('neighbourhood', args=[self.neighbourhood.id])
@classmethod
def search_business(cls,search_term,hood):
return cls.objects.get(
models.Q(name__icontains = search_term),
models.Q(description__icontains =search_term),
models.Q(neighbourhood = hood)
) | [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.urls.reverse",
"django.db.models.ImageField",
"django.db.models.Q",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.CharField"
] | [((192, 284), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""neighbourhood_avatars"""', 'default': '"""dummy_neighbourhood.jpg"""'}), "(upload_to='neighbourhood_avatars', default=\n 'dummy_neighbourhood.jpg')\n", (209, 284), False, 'from django.db import models\n'), ((291, 323), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (307, 323), False, 'from django.db import models\n'), ((339, 371), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (355, 371), False, 'from django.db import models\n'), ((1039, 1110), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""business_avatars"""', 'default': '"""business.jpg"""'}), "(upload_to='business_avatars', default='business.jpg')\n", (1056, 1110), False, 'from django.db import models\n'), ((1122, 1154), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1138, 1154), False, 'from django.db import models\n'), ((1170, 1202), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1186, 1202), False, 'from django.db import models\n'), ((1221, 1260), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1237, 1260), False, 'from django.db import models\n'), ((1276, 1337), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'choices': 'BUSINESS_CATEGORIES'}), '(choices=BUSINESS_CATEGORIES)\n', (1308, 1337), False, 'from django.db import models\n'), ((1358, 1448), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Neighbourhood'], {'on_delete': 'models.CASCADE', 'related_name': '"""businesses"""'}), "(Neighbourhood, on_delete=models.CASCADE, related_name=\n 'businesses')\n", (1375, 1448), False, 'from django.db import models\n'), ((403, 446), 
'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(13)', 'blank': '(True)'}), '(max_length=13, blank=True)\n', (419, 446), False, 'from django.db import models\n'), ((511, 554), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(13)', 'blank': '(True)'}), '(max_length=13, blank=True)\n', (527, 554), False, 'from django.db import models\n'), ((683, 698), 'django.urls.reverse', 'reverse', (['"""home"""'], {}), "('home')\n", (690, 698), False, 'from django.urls import reverse\n'), ((1542, 1596), 'django.urls.reverse', 'reverse', (['"""neighbourhood"""'], {'args': '[self.neighbourhood.id]'}), "('neighbourhood', args=[self.neighbourhood.id])\n", (1549, 1596), False, 'from django.urls import reverse\n'), ((1706, 1743), 'django.db.models.Q', 'models.Q', ([], {'name__icontains': 'search_term'}), '(name__icontains=search_term)\n', (1714, 1743), False, 'from django.db import models\n'), ((1759, 1803), 'django.db.models.Q', 'models.Q', ([], {'description__icontains': 'search_term'}), '(description__icontains=search_term)\n', (1767, 1803), False, 'from django.db import models\n'), ((1818, 1846), 'django.db.models.Q', 'models.Q', ([], {'neighbourhood': 'hood'}), '(neighbourhood=hood)\n', (1826, 1846), False, 'from django.db import models\n')] |
from doubly_linked_list import DoublyLinkedList
class TextBuffer:
def __init__(self):
self.storage = DoublyLinkedList()
# return a string to the print function
def __str__(self):
# build a string
s = ""
current_node = self.storage.head
while current_node:
s += current_node.value
current_node = current_node.next
return s
# Add a character to the back of the text buffer
def append(self, string_to_add):
for char in string_to_add:
self.storage.add_to_tail(char)
# Add char to the front of the text buffer
def prepend(self, string_to_add):
for char in reversed(string_to_add):
self.storage.add_to_head(char)
# Remove a char from the front of the text buffer
def delete_front(self, chars_to_remove=1):
for _ in range(chars_to_remove):
self.storage.remove_from_head()
# Remove a char from the back of the text buffer
def delete_back(self, chars_to_remove=1):
for _ in range(chars_to_remove):
self.storage.remove_from_tail()
# concatenate another text buffer on to the end of this buffer
def join(self, other_buffer):
# join in the middle
# set the self storage tails next node to be the head of the other buffer
self.storage.tail.next = other_buffer.storage.head
# set the other buffers head to be the tail of this buffer
other_buffer.storage.head.prev = self.storage.tail
# join the ends to the correct refs
other_buffer.storage.head = self.storage.head
self.storage.tail = other_buffer.storage.tail
t = TextBuffer()
t.append("ook")
t.prepend("B")
t.append("'s are readable")
print(t)
t.delete_back(2)
print(t)
t.delete_front(6)
t.delete_front()
print(t)
t.append("le")
t.delete_front(4)
t2 = TextBuffer()
t2.append(" Hello")
print(t)
t.join(t2)
print(t)
| [
"doubly_linked_list.DoublyLinkedList"
] | [((115, 133), 'doubly_linked_list.DoublyLinkedList', 'DoublyLinkedList', ([], {}), '()\n', (131, 133), False, 'from doubly_linked_list import DoublyLinkedList\n')] |
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import threading
_json_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'constructors.json')
_class_mapping = None
_class_mapping_lock = threading.Lock()
def get_constructor_mapping():
global _class_mapping
if _class_mapping is not None:
return _class_mapping.copy()
with _class_mapping_lock:
if _class_mapping is not None:
return _class_mapping.copy()
tmp_class_mapping = {}
with open(_json_path, 'r') as json_file:
tmp_class_mapping.update(json.load(json_file))
_class_mapping = tmp_class_mapping
return tmp_class_mapping.copy()
| [
"os.path.realpath",
"threading.Lock",
"json.load"
] | [((792, 808), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (806, 808), False, 'import threading\n'), ((698, 724), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (714, 724), False, 'import os\n'), ((1167, 1187), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1176, 1187), False, 'import json\n')] |
from adjutant.api.v1.models import register_taskview_class
from mfa_views import views
register_taskview_class(r'^openstack/edit-mfa/?$', views.EditMFA)
register_taskview_class(r'^openstack/users/?$', views.UserListMFA)
| [
"adjutant.api.v1.models.register_taskview_class"
] | [((90, 154), 'adjutant.api.v1.models.register_taskview_class', 'register_taskview_class', (['"""^openstack/edit-mfa/?$"""', 'views.EditMFA'], {}), "('^openstack/edit-mfa/?$', views.EditMFA)\n", (113, 154), False, 'from adjutant.api.v1.models import register_taskview_class\n'), ((156, 221), 'adjutant.api.v1.models.register_taskview_class', 'register_taskview_class', (['"""^openstack/users/?$"""', 'views.UserListMFA'], {}), "('^openstack/users/?$', views.UserListMFA)\n", (179, 221), False, 'from adjutant.api.v1.models import register_taskview_class\n')] |
# Copyright (c) 2015-2019 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import numpy as np
from astropy.io import fits
from ..op import Operator
from ..timing import function_timer
from .tod_math import calibrate
from ..utils import Logger
def write_calibration_file(filename, gain):
"""Write gains to a FITS file in the standard TOAST format
Args:
filename (string): output filename, overwritten by default
gain (dict): Dictionary, key "TIME" has the common timestamps,
other keys are channel names their values are the gains
"""
log = Logger.get()
detectors = list(gain.keys())
detectors.remove("TIME")
hdus = [
fits.PrimaryHDU(),
fits.BinTableHDU.from_columns(
[
fits.Column(
name="DETECTORS",
array=detectors,
unit="",
format="{0}A".format(max([len(x) for x in detectors])),
)
]
),
]
hdus[1].header["EXTNAME"] = "DETECTORS"
cur_hdu = fits.BinTableHDU.from_columns(
[fits.Column(name="TIME", array=gain["TIME"], unit="s", format="1D")]
)
cur_hdu.header["EXTNAME"] = "TIME"
hdus.append(cur_hdu)
gain_table = np.zeros(
(len(detectors), len(gain["TIME"])), dtype=gain[detectors[0]].dtype
)
for i_det, det in enumerate(detectors):
gain_table[i_det, :] = gain[det]
gainhdu = fits.ImageHDU(gain_table)
gainhdu.header["EXTNAME"] = "GAINS"
hdus.append(gainhdu)
fits.HDUList(hdus).writeto(filename, overwrite=True)
log.info("Gains written to file {}".format(filename))
return
class OpApplyGain(Operator):
"""Operator which applies gains to timelines.
Args:
gain (dict): Dictionary, key "TIME" has the common timestamps,
other keys are channel names their values are the gains
name (str): Name of the output signal cache object will be
<name_in>_<detector>. If the object exists, it is used as
input. Otherwise signal is read using the tod read method.
"""
def __init__(self, gain, name=None):
self._gain = gain
self._name = name
# Call the parent class constructor
super().__init__()
@function_timer
def exec(self, data):
"""Apply the gains.
Args:
data (toast.Data): The distributed data.
"""
for obs in data.obs:
tod = obs["tod"]
for det in tod.local_dets:
# Cache the output signal
ref = tod.local_signal(det, self._name)
obs_times = tod.read_times()
calibrate(
obs_times,
ref,
self._gain["TIME"],
self._gain[det],
order=0,
inplace=True,
)
assert np.isnan(ref).sum() == 0, "The signal timestream includes NaN"
del ref
return
| [
"astropy.io.fits.HDUList",
"astropy.io.fits.PrimaryHDU",
"astropy.io.fits.ImageHDU",
"astropy.io.fits.Column",
"numpy.isnan"
] | [((1589, 1614), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', (['gain_table'], {}), '(gain_table)\n', (1602, 1614), False, 'from astropy.io import fits\n'), ((811, 828), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (826, 828), False, 'from astropy.io import fits\n'), ((1240, 1307), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""TIME"""', 'array': "gain['TIME']", 'unit': '"""s"""', 'format': '"""1D"""'}), "(name='TIME', array=gain['TIME'], unit='s', format='1D')\n", (1251, 1307), False, 'from astropy.io import fits\n'), ((1685, 1703), 'astropy.io.fits.HDUList', 'fits.HDUList', (['hdus'], {}), '(hdus)\n', (1697, 1703), False, 'from astropy.io import fits\n'), ((3090, 3103), 'numpy.isnan', 'np.isnan', (['ref'], {}), '(ref)\n', (3098, 3103), True, 'import numpy as np\n')] |
from django.db import models
# classification value objects
CHOICES_CLASSIFICATION = [
(0, 'unrelated'),
(1, 'simple'),
(2, 'complex'),
(3, 'substring'),
]
class Abbreviation(models.Model):
long_form = models.TextField(blank=False)
abbreviation = models.CharField(max_length=100, blank=False)
classification = models.IntegerField(choices=CHOICES_CLASSIFICATION, null=True)
| [
"django.db.models.IntegerField",
"django.db.models.TextField",
"django.db.models.CharField"
] | [((211, 240), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(False)'}), '(blank=False)\n', (227, 240), False, 'from django.db import models\n'), ((257, 302), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(False)'}), '(max_length=100, blank=False)\n', (273, 302), False, 'from django.db import models\n'), ((321, 383), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'CHOICES_CLASSIFICATION', 'null': '(True)'}), '(choices=CHOICES_CLASSIFICATION, null=True)\n', (340, 383), False, 'from django.db import models\n')] |
import re
from datetime import datetime
from PyInquirer import Validator, ValidationError
from prompt_toolkit.document import Document
# This file contains functions used for validation and Validator classes that use them.
# Validators are used by questions.
def non_empty(document: Document) -> bool:
if not document.text:
raise ValidationError(
message="Please enter a non-empty value.",
cursor_position=len(document.text),
)
return True
def valid_date(document: Document) -> bool:
try:
datetime.strptime(document.text, "%Y-%m-%d")
return True
except ValueError:
raise ValidationError(
message="Please enter a valid yyyy-mm-dd date.",
cursor_position=len(document.text),
)
email_regex = r"^(\w|\d|\.|\_|\-)+$"
def valid_email_prefix(document: Document) -> bool:
try:
assert re.match(email_regex, document.text)
return True
except AssertionError:
raise ValidationError(
message="Please enter a valid email prefix (e.g. james.f).",
cursor_position=len(document.text),
)
def valid_email_prefix_list(document: Document) -> bool:
try:
for prefix in document.text.split(","):
assert re.match(email_regex, prefix.strip())
return True
except AssertionError:
raise ValidationError(
message="Please enter a comma seperated list of valid email prefixes (e.g. james.f).",
cursor_position=len(document.text),
)
url_regex = (
r"^(?:http(s)?:\/\/)?[\w.-]+(?:\.[\w\.-]+)+[\w\-\._~:/?#[\]@!\$&'\(\)\*\+,;=.]+$"
)
def valid_url(document: Document) -> bool:
try:
assert re.match(url_regex, document.text.strip())
return True
except AssertionError:
raise ValidationError(
message="Please enter a valid url.", cursor_position=len(document.text)
)
def valid_url_list(document: Document) -> bool:
try:
for url in document.text.split(","):
assert re.match(url_regex, url.strip())
return True
except AssertionError:
raise ValidationError(
message="Please enter a comma seperated list of valid urls.",
cursor_position=len(document.text),
)
def valid_cron(document: Document) -> bool:
# Cron supports lots of advanced features such as ranges, so the regex is very long.
cron_regex = r"^(\*|([0-9]|0[0-9]|1[0-9]|2[0-9]|3[0-9]|4[0-9]|5[0-9])|\*\/([0-9]|0[0-9]|1[0-9]|2[0-9]|3[0-9]|4[0-9]|5[0-9])) (\*|([0-9]|0[0-9]|1[0-9]|2[0-3])|\*\/([0-9]|0[0-9]|1[0-9]|2[0-3])) (\*|([1-9]|0[0-9]|1[0-9]|2[0-9]|3[0-1])|\*\/([1-9]|0[0-9]|1[0-9]|2[0-9]|3[0-1])) (\*|([1-9]|0[0-9]|1[0-2])|\*\/([1-9]|0[0-9]|1[0-2])) (\*|([0-6])|\*\/([0-6]))$"
try:
if document.text.strip() != "null":
assert re.match(cron_regex, document.text.strip())
return True
except AssertionError:
raise ValidationError(
message="Please enter a valid cron or null.",
cursor_position=len(document.text),
)
def valid_directory(document: Document) -> bool:
directory_regex = r"^(/)?([^/\0]+(/)?)+$"
try:
assert re.match(directory_regex, document.text.strip())
return True
except AssertionError:
raise ValidationError(
message="Please enter a valid unix directory.",
cursor_position=len(document.text),
)
class ValidNonEmpty(Validator):
def validate(self, document: Document) -> bool:
"""Return True with no errors for a non-empty value."""
return non_empty(document)
class ValidEmailPrefix(Validator):
def validate(self, document: Document) -> bool:
"""Return True with no errors for a valid email prefix."""
return non_empty(document) and valid_email_prefix(document)
class ValidEmailPrefixList(Validator):
def validate(self, document: Document) -> bool:
return non_empty(document) and valid_email_prefix_list(document)
class ValidClientIds(Validator):
def validate(self, document: Document) -> bool:
"""Return True with no errors for a syntaxtically valid client id list."""
# Checkboxes don't support validation yet.
# https://github.com/CITGuru/PyInquirer/issues/46
return True
class ValidDate(Validator):
def validate(self, document: Document) -> bool:
"""Return True with no errors for a valid yyyy-mm-dd date."""
return non_empty(document) and valid_date(document)
class ValidOptionalUrl(Validator):
def validate(self, document: Document) -> bool:
"""Return True with no errors for a syntaxtically valid url."""
return document.text == "" or valid_url(document)
class ValidUrl(Validator):
def validate(self, document: Document) -> bool:
"""Return True with no errors for a syntaxtically valid url."""
return non_empty(document) and valid_url(document)
class ValidUrlList(Validator):
def validate(self, document: Document) -> bool:
"""Return True with no errors for a syntaxtically valid url list."""
return non_empty(document) and valid_url_list(document)
class ValidOptionalUrlList(Validator):
def validate(self, document: Document) -> bool:
"""Return True with no errors for a syntaxtically valid url list."""
return document.text == "" or valid_url_list(document)
class ValidCron(Validator):
def validate(self, document: Document) -> bool:
"""Return True with no errors for a syntaxtically valid crontab style string."""
return non_empty(document) and valid_cron(document)
class ValidDirectory(Validator):
def validate(self, document: Document) -> bool:
"""Return True with no errors for a syntaxtically valid unix path."""
return non_empty(document) and valid_directory(document)
class ValidOptionalDirectory(Validator):
def validate(self, document: Document) -> bool:
"""Return True with no errors for a syntaxtically valid unix path."""
return document.text == "" and valid_directory(document)
| [
"datetime.datetime.strptime",
"re.match"
] | [((555, 599), 'datetime.datetime.strptime', 'datetime.strptime', (['document.text', '"""%Y-%m-%d"""'], {}), "(document.text, '%Y-%m-%d')\n", (572, 599), False, 'from datetime import datetime\n'), ((910, 946), 're.match', 're.match', (['email_regex', 'document.text'], {}), '(email_regex, document.text)\n', (918, 946), False, 'import re\n')] |
import filecmp
import os
import sys
import shutil
import subprocess
import time
import unittest
if (sys.version_info > (3, 0)):
import urllib.request, urllib.parse, urllib.error
else:
import urllib
from optparse import OptionParser
from PyQt4 import QtCore,QtGui
parser = OptionParser()
parser.add_option("-r", "--root", dest="web_root",
default="http://portal.nersc.gov/project/visit/",
help="Root of web URL where baselines are")
parser.add_option("-d", "--date", dest="web_date",
help="Date of last good run, in YYMonDD form")
parser.add_option("-m", "--mode", dest="mode",
help="Mode to run in: serial, parallel, sr")
parser.add_option("-w", "--web-url", dest="web_url",
help="Manual URL specification; normally generated "
"automatically based on (-r, -d, -m)")
parser.add_option("-g", "--git", dest="git", action="store_true",
help="Use git to ignore images with local modifications")
parser.add_option("-s", "--svn", dest="svn", action="store_true",
help="Use svn to ignore images with local modifications")
(options, args) = parser.parse_args()
if options.web_url is not None:
uri = options.web_url
else:
uri = options.web_root + options.web_date + "/"
mode = ""
if options.mode == "sr" or options.mode == "scalable,parallel" or \
options.mode == "scalable_parallel":
mode="davinci_scalable_parallel_icet"
else:
mode="".join([ s for s in ("davinci_", options.mode) ])
uri += mode + "/"
parser.destroy()
print("uri:", uri)
class MW(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
def real_dirname(path):
"""Python's os.path.dirname is not dirname."""
return path.rsplit('/', 1)[0]
def real_basename(path):
"""Python's os.path.basename is not basename."""
if path.rsplit('/', 1)[1] is '': return None
return path.rsplit('/', 1)[1]
def baseline_current(serial_baseline):
"""Given the path to the serial baseline image, determine if there is a mode
specific baseline. Return a 2-tuple of the baseline image and the path to
the 'current' image."""
dname = real_dirname(serial_baseline)
bname = real_basename(serial_baseline)
baseline = serial_baseline
if options.mode is not None:
# Check for a mode specific baseline.
mode_spec = os.path.join(dname + "/", options.mode + "/", bname)
if os.path.exists(mode_spec):
baseline = mode_spec
# `Current' image never has a mode-specific path; filename/dir is always
# based on the serial baseline's directory.
no_baseline = serial_baseline.split('/', 1) # path without "baseline/"
current = os.path.join("current/", no_baseline[1])
return (baseline, current)
def mode_specific(baseline):
"""Given a baseline image path, return a path to the mode specific baseline,
even if said baseline does not exist (yet)."""
if options.mode is None or options.mode == "serial":
return baseline
dname = real_dirname(baseline)
bname = real_basename(baseline)
if options.mode == "parallel":
if baseline.find("/parallel") != -1:
# It's already got parallel in the path; this IS a mode specific
# baseline.
return baseline
return os.path.join(dname, options.mode, bname)
if options.mode.find("scalable") != -1:
if baseline.find("scalable_parallel") != -1:
# Already is mode-specific.
return baseline
return os.path.join(dname, "scalable_parallel", bname)
# Ruh roh. options.mode must be garbage.
raise NotImplementedError("Unknown mode '%s'" % options.mode)
def local_modifications_git(file):
vcs_diff = subprocess.call(["git", "diff", "--quiet", file])
if vcs_diff == 1:
return True
return False
def local_modifications_svn(file):
svnstat = subprocess.Popen("svn stat %s" % file, shell=True,
stdout=subprocess.PIPE)
diff = svnstat.communicate()[0]
if diff != '':
return True
return False
def local_modifications(filepath):
"""Returns true if the file has local modifications. Always false if the
user did not supply the appropriate VCS option."""
if options.git: return local_modifications_git(filepath)
if options.svn: return local_modifications_svn(filepath)
return False
def equivalent(baseline, image):
"""True if the files are the same."""
if not os.path.exists(image): return False
# Note this is `shallow' by default, but that's fine for our usage.
return filecmp.cmp(baseline, image)
def trivial_pass(baseline, image):
"""True if we can determine that this image is OK without querying the
network."""
return equivalent(baseline, image) or local_modifications(baseline)
class RebaselinePTests(unittest.TestCase):
def test_dirname(self):
input_and_results = [
("baseline/category/test/a.png", "baseline/category/test"),
("b/c/t/q.png", "b/c/t"),
("b/c/t/longfn.png", "b/c/t"),
("b/c/t/", "b/c/t")
]
for tst in input_and_results:
self.assertEqual(real_dirname(tst[0]), tst[1])
def test_basename(self):
input_and_results = [
("baseline/category/test/a.png", "a.png"),
("b/c/t/q.png", "q.png"),
("b/c/t/longfn.png", "longfn.png"),
("b/c/t/", None)
]
for tst in input_and_results:
self.assertEqual(real_basename(tst[0]), tst[1])
class Image(QtGui.QWidget):
def __init__(self, path, parent=None):
self._filename = path
self._parent = parent
self._display = QtGui.QLabel(self._parent)
self._load()
def _load(self):
pixmap = QtGui.QPixmap(300,300)
pixmap.load(self._filename)
self._display.resize(pixmap.size())
self._display.setPixmap(pixmap)
def widget(self): return self._display
def width(self): return self._display.width()
def height(self): return self._display.height()
def update(self, path):
self._filename = path
self._load()
class Layout(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self._mainwin = parent
self._mainwin.statusBar().insertPermanentWidget(0,QtGui.QLabel())
self.status("Initializing...")
quit = QtGui.QPushButton('Quit', self)
quit.setMaximumWidth(80)
if parent is None: parent = self
parent.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp,
QtCore.SLOT('quit()'))
parent.connect(self, QtCore.SIGNAL('closeApp()'), self._die)
self._init_signals()
self._bugs = [] # list which keeps track of which images we think are bugs.
# guess an initial size; we don't know a real size until we've downloaded
# images.
self.resize_this_and_mainwin(600, 600)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.setFocus()
self._baseline = None
self._current = None
self._diff = None
self._images = [None, None, None]
self._next_set_of_images()
self._images[0] = Image(self._baseline, self)
self._images[1] = Image(self._current, self)
self._images[2] = Image(self._diff, self)
grid = QtGui.QGridLayout()
label_baseline = QtGui.QLabel(grid.widget())
label_current = QtGui.QLabel(grid.widget())
label_diff = QtGui.QLabel(grid.widget())
label_baseline.setText("Baseline image:")
label_current.setText("Davinci's current:")
label_diff.setText("difference between them:")
label_baseline.setMaximumSize(QtCore.QSize(160,35))
label_current.setMaximumSize(QtCore.QSize(160,35))
label_diff.setMaximumSize(QtCore.QSize(200,35))
label_directions = QtGui.QLabel(grid.widget())
label_directions.setText("Keyboard shorcuts:\n\n"
"y: yes, rebaseline\n"
"n: no, current image is wrong\n"
"u: unknown, I can't/don't want to decide now\n"
"q: quit")
label_directions.setMaximumSize(QtCore.QSize(300,300))
grid.addWidget(label_baseline, 0,0)
grid.addWidget(label_current, 0,1)
grid.addWidget(self._images[0].widget(), 1,0)
grid.addWidget(self._images[1].widget(), 1,1)
grid.addWidget(label_diff, 2,0)
grid.addWidget(quit, 2,1)
grid.addWidget(self._images[2].widget(), 3,0)
grid.addWidget(label_directions, 3,1)
rows = (
(0, (label_baseline, label_current)),
(1, (self._images[0], self._images[1])),
(2, (label_diff, quit)),
(3, (self._images[2], label_directions))
)
cols = (
(0, (label_baseline, self._images[0], label_diff, self._images[2])),
(1, (label_current, self._images[1], quit, label_directions))
)
for r in rows:
grid.setRowMinimumHeight(r[0], max([x.height() for x in r[1]]))
for c in cols:
grid.setColumnMinimumWidth(c[0], max([x.height() for x in c[1]]))
self.setLayout(grid)
self.resize_this_and_mainwin(self.calc_width(), self.calc_height())
self.show()
self.setFocus()
def resize_this_and_mainwin(self, w, h):
self.resize(w,h)
# make sure it can't shrink too much
self._mainwin.setMinimumWidth(w)
self._mainwin.setMinimumHeight(h+30) # +30: for the status bar
# try not to resize the mainwin if we don't need to; it's annoying.
cur_w = self._mainwin.width()
cur_h = self._mainwin.height()
self._mainwin.resize(max(w,cur_w), max(h,cur_h))
self._mainwin.update()
def _die(self):
print("You thought these test results were bugs:")
for f in self._bugs:
print("\t", f)
self._mainwin.close()
def calc_width(self):
w = 0
for col in range(0,self.layout().columnCount()):
w += self.layout().columnMinimumWidth(col)
return w
def calc_height(self):
h = 0
for row in range(0,self.layout().rowCount()):
h += self.layout().rowMinimumHeight(row)
return h
def _update_images(self):
self._images[0].update(self._baseline)
self._images[1].update(self._current)
self._images[2].update(self._diff)
self.resize_this_and_mainwin(self.calc_width(), self.calc_height())
self.update()
def _rebaseline(self):
self.status("".join(["rebaselining ", self._current, "..."]))
baseline = mode_specific(self._baseline)
print("moving", self._current, "on top of", baseline)
# We might be creating the first mode specific baseline for that test. If
# so, it'll be missing the baseline specific dir.
if not os.path.exists(real_dirname(baseline)):
print(real_dirname(baseline), "does not exist, creating...")
os.mkdir(real_dirname(baseline))
shutil.move(self._current, baseline) # do the rebaseline!
self._next_set_of_images()
self._update_images()
def _ignore(self):
self.status("".join(["ignoring ", self._baseline, "..."]))
self._bugs.append(self._baseline)
self._next_set_of_images()
self._update_images()
def _unknown(self):
self.status("".join(["unknown ", self._baseline, "..."]))
self._next_set_of_images()
self._update_images()
def status(self, msg):
self._mainwin.statusBar().showMessage(msg)
self._mainwin.statusBar().update()
QtCore.QCoreApplication.processEvents() # we're single threaded
def _next_set_of_images(self):
"""Figures out the next set of images to display. Downloads 'current' and
'diff' results from davinci. Sets filenames corresponding to baseline,
current and diff images."""
if self._baseline is None: # first call, build list.
self._imagelist = []
print("Building initial file list... please wait.")
self.status("Building initial file list... please wait.")
for root, dirs, files in os.walk("baseline"):
for f in files:
fn, ext = os.path.splitext(f)
if ext == ".png":
# In some cases, we can trivially reject a file. Don't bother
# adding it to our list in that case.
serial_baseline_fn = os.path.join(root, f)
# Does this path contain "parallel" or "scalable_parallel"? Then
# we've got a mode specific baseline. We'll handle those based on
# the serial filenames, so ignore them for now.
if serial_baseline_fn.find("parallel") != -1: continue
baseline_fn, current_fn = baseline_current(serial_baseline_fn)
assert os.path.exists(baseline_fn)
if not trivial_pass(baseline_fn, current_fn):
self._imagelist.append(baseline_fn)
try:
while len(self._imagelist) > 0:
self._baseline = self._imagelist.pop()
# now derive other filenames based on that one.
filename = None
# os.path.split fails if there's no /
try:
filename = os.path.split(self._baseline)
filename = filename[1]
except AttributeError as e:
self.status("No slash!")
break
current_url = uri + "/c_" + filename
if (sys.version_info > (3, 0)):
f,info = urllib.request.urlretrieve(current_url, "local_current.png")
else:
f,info = urllib.urlretrieve(current_url, "local_current.png")
self.status("".join(["Checking ", current_url, "..."]))
if info.getheader("Content-Type").startswith("text/html"):
# then it's a 404 or other error; skip this image.
continue
else:
# We found the next image.
self._current = "local_current.png"
diff_url = uri + "/d_" + filename
if (sys.version_info > (3, 0)):
f,info = urllib.request.urlretrieve(diff_url, "local_diff.png")
else:
f,info = urllib.urlretrieve(diff_url, "local_diff.png")
if info.getheader("Content-Type").startswith("text/html"):
raise Exception("Could not download diff image.")
self._diff = "local_diff.png"
self.status("Waiting for input on " + filename)
break
except KeyError as e:
print(e)
print("No more images!")
self.emit(QtCore.SIGNAL('closeApp()'))
def _init_signals(self):
self.connect(self, QtCore.SIGNAL('rebaseline()'), self._rebaseline)
self.connect(self, QtCore.SIGNAL('ignore()'), self._ignore)
self.connect(self, QtCore.SIGNAL('unknown()'), self._unknown)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Q:
self.emit(QtCore.SIGNAL('closeApp()'))
if event.key() == QtCore.Qt.Key_Y:
self.emit(QtCore.SIGNAL('rebaseline()'))
if event.key() == QtCore.Qt.Key_N:
self.emit(QtCore.SIGNAL('ignore()'))
if event.key() == QtCore.Qt.Key_U:
self.emit(QtCore.SIGNAL('unknown()'))
QtCore.QCoreApplication.processEvents()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(RebaselinePTests)
results = unittest.TextTestRunner(verbosity=2).run(suite)
if not results.wasSuccessful():
print("Tests failed, bailing.")
sys.exit(1)
app = QtGui.QApplication(sys.argv)
mw = MW()
mw.show()
mw.setWindowTitle("visit rebaseline -p")
layout = Layout(mw)
layout.show()
sys.exit(app.exec_())
| [
"urllib.urlretrieve",
"PyQt4.QtGui.QLabel",
"PyQt4.QtGui.QPushButton",
"PyQt4.QtCore.SLOT",
"PyQt4.QtGui.QWidget.__init__",
"sys.exit",
"PyQt4.QtCore.QSize",
"unittest.TextTestRunner",
"os.walk",
"os.path.exists",
"shutil.move",
"urllib.request.urlretrieve",
"subprocess.Popen",
"os.path.sp... | [((282, 296), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (294, 296), False, 'from optparse import OptionParser\n'), ((2737, 2777), 'os.path.join', 'os.path.join', (['"""current/"""', 'no_baseline[1]'], {}), "('current/', no_baseline[1])\n", (2749, 2777), False, 'import os\n'), ((3713, 3762), 'subprocess.call', 'subprocess.call', (["['git', 'diff', '--quiet', file]"], {}), "(['git', 'diff', '--quiet', file])\n", (3728, 3762), False, 'import subprocess\n'), ((3862, 3936), 'subprocess.Popen', 'subprocess.Popen', (["('svn stat %s' % file)"], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), "('svn stat %s' % file, shell=True, stdout=subprocess.PIPE)\n", (3878, 3936), False, 'import subprocess\n'), ((4547, 4575), 'filecmp.cmp', 'filecmp.cmp', (['baseline', 'image'], {}), '(baseline, image)\n', (4558, 4575), False, 'import filecmp\n'), ((15047, 15075), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (15065, 15075), False, 'from PyQt4 import QtCore, QtGui\n'), ((1685, 1725), 'PyQt4.QtGui.QMainWindow.__init__', 'QtGui.QMainWindow.__init__', (['self', 'parent'], {}), '(self, parent)\n', (1711, 1725), False, 'from PyQt4 import QtCore, QtGui\n'), ((2416, 2468), 'os.path.join', 'os.path.join', (["(dname + '/')", "(options.mode + '/')", 'bname'], {}), "(dname + '/', options.mode + '/', bname)\n", (2428, 2468), False, 'import os\n'), ((2476, 2501), 'os.path.exists', 'os.path.exists', (['mode_spec'], {}), '(mode_spec)\n', (2490, 2501), False, 'import os\n'), ((3308, 3348), 'os.path.join', 'os.path.join', (['dname', 'options.mode', 'bname'], {}), '(dname, options.mode, bname)\n', (3320, 3348), False, 'import os\n'), ((3507, 3554), 'os.path.join', 'os.path.join', (['dname', '"""scalable_parallel"""', 'bname'], {}), "(dname, 'scalable_parallel', bname)\n", (3519, 3554), False, 'import os\n'), ((4432, 4453), 'os.path.exists', 'os.path.exists', (['image'], {}), '(image)\n', (4446, 4453), False, 'import os\n'), 
((5558, 5584), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['self._parent'], {}), '(self._parent)\n', (5570, 5584), False, 'from PyQt4 import QtCore, QtGui\n'), ((5635, 5658), 'PyQt4.QtGui.QPixmap', 'QtGui.QPixmap', (['(300)', '(300)'], {}), '(300, 300)\n', (5648, 5658), False, 'from PyQt4 import QtCore, QtGui\n'), ((6045, 6081), 'PyQt4.QtGui.QWidget.__init__', 'QtGui.QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (6067, 6081), False, 'from PyQt4 import QtCore, QtGui\n'), ((6226, 6257), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Quit"""', 'self'], {}), "('Quit', self)\n", (6243, 6257), False, 'from PyQt4 import QtCore, QtGui\n'), ((7104, 7123), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (7121, 7123), False, 'from PyQt4 import QtCore, QtGui\n'), ((10644, 10680), 'shutil.move', 'shutil.move', (['self._current', 'baseline'], {}), '(self._current, baseline)\n', (10655, 10680), False, 'import shutil\n'), ((11197, 11236), 'PyQt4.QtCore.QCoreApplication.processEvents', 'QtCore.QCoreApplication.processEvents', ([], {}), '()\n', (11234, 11236), False, 'from PyQt4 import QtCore, QtGui\n'), ((14752, 14791), 'PyQt4.QtCore.QCoreApplication.processEvents', 'QtCore.QCoreApplication.processEvents', ([], {}), '()\n', (14789, 14791), False, 'from PyQt4 import QtCore, QtGui\n'), ((15026, 15037), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (15034, 15037), False, 'import sys\n'), ((6163, 6177), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', ([], {}), '()\n', (6175, 6177), False, 'from PyQt4 import QtCore, QtGui\n'), ((6349, 6375), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""clicked()"""'], {}), "('clicked()')\n", (6362, 6375), False, 'from PyQt4 import QtCore, QtGui\n'), ((6408, 6429), 'PyQt4.QtCore.SLOT', 'QtCore.SLOT', (['"""quit()"""'], {}), "('quit()')\n", (6419, 6429), False, 'from PyQt4 import QtCore, QtGui\n'), ((6456, 6483), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""closeApp()"""'], {}), "('closeApp()')\n", (6469, 6483), False, 
'from PyQt4 import QtCore, QtGui\n'), ((7446, 7467), 'PyQt4.QtCore.QSize', 'QtCore.QSize', (['(160)', '(35)'], {}), '(160, 35)\n', (7458, 7467), False, 'from PyQt4 import QtCore, QtGui\n'), ((7501, 7522), 'PyQt4.QtCore.QSize', 'QtCore.QSize', (['(160)', '(35)'], {}), '(160, 35)\n', (7513, 7522), False, 'from PyQt4 import QtCore, QtGui\n'), ((7553, 7574), 'PyQt4.QtCore.QSize', 'QtCore.QSize', (['(200)', '(35)'], {}), '(200, 35)\n', (7565, 7574), False, 'from PyQt4 import QtCore, QtGui\n'), ((7950, 7972), 'PyQt4.QtCore.QSize', 'QtCore.QSize', (['(300)', '(300)'], {}), '(300, 300)\n', (7962, 7972), False, 'from PyQt4 import QtCore, QtGui\n'), ((11727, 11746), 'os.walk', 'os.walk', (['"""baseline"""'], {}), "('baseline')\n", (11734, 11746), False, 'import os\n'), ((14199, 14228), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""rebaseline()"""'], {}), "('rebaseline()')\n", (14212, 14228), False, 'from PyQt4 import QtCore, QtGui\n'), ((14271, 14296), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""ignore()"""'], {}), "('ignore()')\n", (14284, 14296), False, 'from PyQt4 import QtCore, QtGui\n'), ((14335, 14361), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""unknown()"""'], {}), "('unknown()')\n", (14348, 14361), False, 'from PyQt4 import QtCore, QtGui\n'), ((14830, 14851), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (14849, 14851), False, 'import unittest\n'), ((14904, 14940), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (14927, 14940), False, 'import unittest\n'), ((14468, 14495), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""closeApp()"""'], {}), "('closeApp()')\n", (14481, 14495), False, 'from PyQt4 import QtCore, QtGui\n'), ((14552, 14581), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""rebaseline()"""'], {}), "('rebaseline()')\n", (14565, 14581), False, 'from PyQt4 import QtCore, QtGui\n'), ((14638, 14663), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""ignore()"""'], {}), "('ignore()')\n", (14651, 
14663), False, 'from PyQt4 import QtCore, QtGui\n'), ((14720, 14746), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""unknown()"""'], {}), "('unknown()')\n", (14733, 14746), False, 'from PyQt4 import QtCore, QtGui\n'), ((11792, 11811), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (11808, 11811), False, 'import os\n'), ((12792, 12821), 'os.path.split', 'os.path.split', (['self._baseline'], {}), '(self._baseline)\n', (12805, 12821), False, 'import os\n'), ((13048, 13108), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['current_url', '"""local_current.png"""'], {}), "(current_url, 'local_current.png')\n", (13074, 13108), False, 'import urllib\n'), ((13144, 13196), 'urllib.urlretrieve', 'urllib.urlretrieve', (['current_url', '"""local_current.png"""'], {}), "(current_url, 'local_current.png')\n", (13162, 13196), False, 'import urllib\n'), ((14119, 14146), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""closeApp()"""'], {}), "('closeApp()')\n", (14132, 14146), False, 'from PyQt4 import QtCore, QtGui\n'), ((11998, 12019), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (12010, 12019), False, 'import os\n'), ((12400, 12427), 'os.path.exists', 'os.path.exists', (['baseline_fn'], {}), '(baseline_fn)\n', (12414, 12427), False, 'import os\n'), ((13629, 13683), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['diff_url', '"""local_diff.png"""'], {}), "(diff_url, 'local_diff.png')\n", (13655, 13683), False, 'import urllib\n'), ((13727, 13773), 'urllib.urlretrieve', 'urllib.urlretrieve', (['diff_url', '"""local_diff.png"""'], {}), "(diff_url, 'local_diff.png')\n", (13745, 13773), False, 'import urllib\n')] |
import datetime
import logging
from typing import Optional
from .types import CheckerTaskMessage, EnoLogMessage
LOGGING_PREFIX = "##ENOLOGMESSAGE "
class ELKFormatter(logging.Formatter):
def format(self, record: logging.LogRecord) -> str:
if type(record.args) is tuple and len(record.args) > 0:
record.msg = record.msg % record.args
return LOGGING_PREFIX + self.create_message(record).json(by_alias=True)
def to_level(self, levelname: str) -> int:
if levelname == "CRITICAL":
return 4
if levelname == "ERROR":
return 3
if levelname == "WARNING":
return 2
if levelname == "INFO":
return 1
if levelname == "DEBUG":
return 0
return 0
def create_message(self, record: logging.LogRecord) -> EnoLogMessage:
checker_task: Optional[CheckerTaskMessage] = getattr(
record, "checker_task", None
)
checker_name: Optional[str] = getattr(record, "checker_name", None)
return EnoLogMessage(
tool="enochecker3",
type="infrastructure",
severity=record.levelname,
severity_level=self.to_level(record.levelname),
timestamp=datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
message=record.msg,
module=record.module,
function=record.funcName,
service_name=checker_name,
task_id=getattr(checker_task, "task_id", None),
method=getattr(checker_task, "method", None),
team_id=getattr(checker_task, "team_id", None),
team_name=getattr(checker_task, "team_name", None),
current_round_id=getattr(checker_task, "current_round_id", None),
related_round_id=getattr(checker_task, "related_round_id", None),
flag=getattr(checker_task, "flag", None),
variant_id=getattr(checker_task, "variant_id", None),
task_chain_id=getattr(checker_task, "task_chain_id", None),
)
| [
"datetime.datetime.utcnow"
] | [((1263, 1289), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1287, 1289), False, 'import datetime\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 30 15:28:09 2018
@author: dataquanty
"""
import numpy as np
from math import sqrt, pi, acos,cos
from matplotlib import pyplot as plt
from scipy.misc import imsave
from bisect import bisect_left
h , w = 1000, 1000
img = np.ones((h,w))
center = (500,500)
r = [20,80,200,300,400,500,600]
r = np.exp(range(1,8)).astype(int)
lagval = [0,pi,0,pi,0,pi,0]
maxd = 810
r = range(10,maxd,20)
lagval = [0,pi]*int(len(r)/2)
lagval = np.random.rand(len(r))*pi
lagval = [-pi/4,pi/3]*int(len(r)/2)
lagval = [0,0.05]*int(len(r)/2)
def dist(a,b):
return sqrt((a[0]-b[0])**2+(a[1]-b[1])**2)
for i in range(h):
for j in range(w):
if (i,j) == center:
img[i][j]=0
else:
d = dist((i,j),center)
k = bisect_left(list(r),d)
#dist((i,j),center)<= r1:
val = (j-center[1])/d
img[i][j] = cos(acos(val)-lagval[k])
"""
angle = acos((j-center[1])/dist((i,j),center))
if i > center[0]:
angle = 2*pi - angle
val = ((angle - lagrad)%(2*pi))/2*pi
img[i][j] = val
"""
#imsave('figLag_pi_s2.png',img)
plt.figure(figsize=(10,10))
plt.imshow(img,cmap='gray')
#interpolation='nearest'
plt.show() | [
"matplotlib.pyplot.imshow",
"numpy.ones",
"math.acos",
"math.sqrt",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((293, 308), 'numpy.ones', 'np.ones', (['(h, w)'], {}), '((h, w))\n', (300, 308), True, 'import numpy as np\n'), ((1265, 1293), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1275, 1293), True, 'from matplotlib import pyplot as plt\n'), ((1300, 1328), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': '"""gray"""'}), "(img, cmap='gray')\n", (1310, 1328), True, 'from matplotlib import pyplot as plt\n'), ((1354, 1364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1362, 1364), True, 'from matplotlib import pyplot as plt\n'), ((618, 663), 'math.sqrt', 'sqrt', (['((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)'], {}), '((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)\n', (622, 663), False, 'from math import sqrt, pi, acos, cos\n'), ((975, 984), 'math.acos', 'acos', (['val'], {}), '(val)\n', (979, 984), False, 'from math import sqrt, pi, acos, cos\n')] |
import os
import sys
sys.path.append('..')
import mitogen
VERSION = '%s.%s.%s' % mitogen.__version__
author = u'<NAME>'
copyright = u'2018, <NAME>'
exclude_patterns = ['_build']
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinxcontrib.programoutput']
html_show_sourcelink = False
html_show_sphinx = False
html_sidebars = {'**': ['globaltoc.html', 'github.html']}
html_static_path = ['_static']
html_theme = 'alabaster'
html_theme_options = {
'font_family': "Georgia, serif",
'head_font_family': "Georgia, serif",
}
htmlhelp_basename = 'mitogendoc'
intersphinx_mapping = {'python': ('https://docs.python.org/2', None)}
language = None
master_doc = 'toc'
project = u'Mitogen'
pygments_style = 'sphinx'
release = VERSION
source_suffix = '.rst'
templates_path = ['_templates']
todo_include_todos = False
version = VERSION
rst_epilog = """
.. |mitogen_version| replace:: %(VERSION)s
.. |mitogen_url| replace:: `mitogen-%(VERSION)s.tar.gz <https://files.pythonhosted.org/packages/source/m/mitogen/mitogen-%(VERSION)s.tar.gz>`__
""" % locals()
| [
"sys.path.append"
] | [((22, 43), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (37, 43), False, 'import sys\n')] |
import pytest
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from core.models import Identity
from organizations.models import Organization
@pytest.fixture
def valid_identity():
id_string = '<EMAIL>'
user = get_user_model().objects.create(username='test', email=id_string)
Identity.objects.create(user=user, identity=id_string)
yield id_string
@pytest.fixture
def invalid_identity():
yield '<EMAIL>'
@pytest.fixture
def master_identity():
id_string = '<EMAIL>'
user = get_user_model().objects.create(username='master')
Identity.objects.create(user=user, identity=id_string)
user.organizations.add(
Organization.objects.get_or_create(
internal_id=settings.MASTER_ORGANIZATIONS[0],
defaults=dict(
ext_id=1235711,
parent=None,
ico='12345',
name_cs='šéf',
name_en='boss',
short_name='master',
),
)[0]
)
yield id_string
@pytest.fixture
def admin_identity():
id_string = '<EMAIL>'
user = get_user_model().objects.create(username='admin', is_superuser=True)
Identity.objects.create(user=user, identity=id_string)
yield id_string
@pytest.fixture
def authenticated_client(client, valid_identity):
client.defaults[settings.EDUID_IDENTITY_HEADER] = valid_identity
client.user = Identity.objects.get(identity=valid_identity).user
yield client
@pytest.fixture
def master_client(client, master_identity):
client.defaults[settings.EDUID_IDENTITY_HEADER] = master_identity
yield client
@pytest.fixture
def unauthenticated_client(client, invalid_identity):
client.defaults[settings.EDUID_IDENTITY_HEADER] = invalid_identity
yield client
@pytest.fixture
def authentication_headers():
def fn(identity):
return {settings.EDUID_IDENTITY_HEADER: identity}
return fn
@pytest.fixture
def site():
return Site.objects.get_or_create(
id=settings.SITE_ID, defaults={'name': 'Celus test', 'domain': 'test.celus.net'}
)[0]
__all__ = [
'admin_identity',
'master_client',
'master_identity',
'authentication_headers',
'authentication_headers',
'authenticated_client',
'unauthenticated_client',
'valid_identity',
'invalid_identity',
]
| [
"django.contrib.sites.models.Site.objects.get_or_create",
"django.contrib.auth.get_user_model",
"core.models.Identity.objects.get",
"core.models.Identity.objects.create"
] | [((366, 420), 'core.models.Identity.objects.create', 'Identity.objects.create', ([], {'user': 'user', 'identity': 'id_string'}), '(user=user, identity=id_string)\n', (389, 420), False, 'from core.models import Identity\n'), ((636, 690), 'core.models.Identity.objects.create', 'Identity.objects.create', ([], {'user': 'user', 'identity': 'id_string'}), '(user=user, identity=id_string)\n', (659, 690), False, 'from core.models import Identity\n'), ((1242, 1296), 'core.models.Identity.objects.create', 'Identity.objects.create', ([], {'user': 'user', 'identity': 'id_string'}), '(user=user, identity=id_string)\n', (1265, 1296), False, 'from core.models import Identity\n'), ((1472, 1517), 'core.models.Identity.objects.get', 'Identity.objects.get', ([], {'identity': 'valid_identity'}), '(identity=valid_identity)\n', (1492, 1517), False, 'from core.models import Identity\n'), ((2033, 2145), 'django.contrib.sites.models.Site.objects.get_or_create', 'Site.objects.get_or_create', ([], {'id': 'settings.SITE_ID', 'defaults': "{'name': 'Celus test', 'domain': 'test.celus.net'}"}), "(id=settings.SITE_ID, defaults={'name':\n 'Celus test', 'domain': 'test.celus.net'})\n", (2059, 2145), False, 'from django.contrib.sites.models import Site\n'), ((296, 312), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (310, 312), False, 'from django.contrib.auth import get_user_model\n'), ((581, 597), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (595, 597), False, 'from django.contrib.auth import get_user_model\n'), ((1169, 1185), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1183, 1185), False, 'from django.contrib.auth import get_user_model\n')] |
#! /usr/bin/env python3
'''
Examine a CherryTree SQLite database and print out the tree in proper heirarchical form and sequence.
'''
import argparse
import colorama
from colorama import Fore, Back, Style
import sqlite3
from ct2ad import *
def print_xc_node(xc_node, level):
'''
Print the node information to the console in a nice format
'''
indent = '--' * level
s = get_expanded_child_seq(xc_node)
n = get_expanded_child_node(xc_node)
print(f'{Style.DIM}|{indent} {Style.NORMAL}{s:03}: {Style.BRIGHT+Fore.YELLOW}\'{get_node_name(n)}\' {Fore.RESET}{Style.DIM}: [node_id = {get_node_id(n)}]')
# setup argument parsing...
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('sqlite3_db', action='store')
args = parser.parse_args()
colorama.init(autoreset=True)
# load the database and party on!
con = sqlite3.connect(args.sqlite3_db)
sql_get_tables(con)
# all_nodes are a dict with each key being the unique node_id
all_nodes = sql_get_all_nodes(con)
# all_children are a list of tuples
all_children = sql_get_all_children(con)
xc_roots = []
for child in all_children:
xc_root = expand_child(child, all_nodes)
if get_expanded_child_father(xc_root) == None: xc_roots.append(xc_root)
print()
count = 0
for xc_root in sorted(xc_roots, key=sequence_order):
count = count + 1
print_xc_node(xc_root, 0)
for xc, level in dig(xc_root, all_children, all_nodes, 1):
print_xc_node(xc, level)
count = count + 1
print(f'\n{count} nodes iterated over')
| [
"sqlite3.connect",
"argparse.ArgumentParser",
"colorama.init"
] | [((663, 707), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (686, 707), False, 'import argparse\n'), ((788, 817), 'colorama.init', 'colorama.init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (801, 817), False, 'import colorama\n'), ((859, 891), 'sqlite3.connect', 'sqlite3.connect', (['args.sqlite3_db'], {}), '(args.sqlite3_db)\n', (874, 891), False, 'import sqlite3\n')] |
#!/usr/bin/env python
import sys
sys.path.append('/opt/lib/python2.7/site-packages/')
import math
import numpy as np
import pylab
import nest
import nest.raster_plot
import nest.voltage_trace
import nest.topology as tp
import ggplot
t_sim = 500
populations = [1, 100]
no_recurrent = True
neuron_model = 'iaf_psc_exp'
model_params = {
'tau_m': 10., # membrane time constant (ms)
'tau_syn_ex': 0.5, # excitatory synaptic time constant (ms)
'tau_syn_in': 0.5, # inhibitory synaptic time constant (ms)
't_ref': 2., # absolute refractory period (ms)
'E_L': -65., # resting membrane potential (mV)
'V_th': -50., # spike threshold (mV)
'C_m': 250., # membrane capacitance (pF)
'V_reset': -65. # reset potential (mV)
}
wt_e = 1.
extent = 1.
delay_max = 2.
delay_min = 1.
ac_amp = 3000.0
ac_freq = 4.0
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 8})
nest.CopyModel('iaf_psc_exp', 'exp_nrn', model_params)
layers = []
spike_detector = tp.CreateLayer({
'rows': 1,
'columns': 1,
'elements': 'spike_detector',
'extent': [extent, extent]
})
spike_detector_nrn = (spike_detector[0]+1,)
voltmeter = tp.CreateLayer({
'rows': 1,
'columns': 1,
'elements': 'voltmeter',
'extent': [extent, extent]
})
voltmeter_nrn = (voltmeter[0]+1,)
nest.CopyModel('ac_generator', 'ac', {'amplitude': ac_amp, 'frequency': ac_freq})
input = tp.CreateLayer({
'rows': 1,
'columns': 1,
'elements': 'ac',
'extent': [extent, extent]
})
for pop in populations:
rows = columns = int(math.sqrt(pop))
l = tp.CreateLayer({
'rows': rows,
'columns': columns,
'extent': [extent, extent],
'elements': 'exp_nrn',
# 'edge_wrap': False
})
layers.append(l)
conn = {
'connection_type': 'divergent',
'synapse_model': 'static_synapse',
'weights': {
'gaussian': {'p_center': wt_e, 'sigma': extent}
},
'delays': {
'uniform': { 'min': delay_min, 'max': delay_max }
}
}
for source in layers:
for target in layers:
if no_recurrent:
if source != target:
tp.ConnectLayers(source, target, conn)
else:
tp.ConnectLayers(source, target, conn)
for layer in layers:
tp.ConnectLayers(layer, spike_detector, {'connection_type': 'convergent'})
tp.ConnectLayers(voltmeter, layer, {'connection_type': 'divergent'})
tp.ConnectLayers(input, layers[0], {'connection_type': 'convergent'})
nest.Simulate(t_sim)
nest.raster_plot.from_device(spike_detector_nrn)
# nest.voltage_trace.from_device(voltmeter_nrn)
pylab.show()
| [
"nest.SetKernelStatus",
"nest.topology.ConnectLayers",
"nest.ResetKernel",
"math.sqrt",
"nest.raster_plot.from_device",
"nest.topology.CreateLayer",
"nest.CopyModel",
"nest.Simulate",
"sys.path.append",
"pylab.show"
] | [((34, 86), 'sys.path.append', 'sys.path.append', (['"""/opt/lib/python2.7/site-packages/"""'], {}), "('/opt/lib/python2.7/site-packages/')\n", (49, 86), False, 'import sys\n'), ((869, 887), 'nest.ResetKernel', 'nest.ResetKernel', ([], {}), '()\n', (885, 887), False, 'import nest\n'), ((888, 934), 'nest.SetKernelStatus', 'nest.SetKernelStatus', (["{'local_num_threads': 8}"], {}), "({'local_num_threads': 8})\n", (908, 934), False, 'import nest\n'), ((936, 990), 'nest.CopyModel', 'nest.CopyModel', (['"""iaf_psc_exp"""', '"""exp_nrn"""', 'model_params'], {}), "('iaf_psc_exp', 'exp_nrn', model_params)\n", (950, 990), False, 'import nest\n'), ((1022, 1125), 'nest.topology.CreateLayer', 'tp.CreateLayer', (["{'rows': 1, 'columns': 1, 'elements': 'spike_detector', 'extent': [extent,\n extent]}"], {}), "({'rows': 1, 'columns': 1, 'elements': 'spike_detector',\n 'extent': [extent, extent]})\n", (1036, 1125), True, 'import nest.topology as tp\n'), ((1189, 1287), 'nest.topology.CreateLayer', 'tp.CreateLayer', (["{'rows': 1, 'columns': 1, 'elements': 'voltmeter', 'extent': [extent, extent]}"], {}), "({'rows': 1, 'columns': 1, 'elements': 'voltmeter', 'extent':\n [extent, extent]})\n", (1203, 1287), True, 'import nest.topology as tp\n'), ((1329, 1414), 'nest.CopyModel', 'nest.CopyModel', (['"""ac_generator"""', '"""ac"""', "{'amplitude': ac_amp, 'frequency': ac_freq}"], {}), "('ac_generator', 'ac', {'amplitude': ac_amp, 'frequency':\n ac_freq})\n", (1343, 1414), False, 'import nest\n'), ((1420, 1512), 'nest.topology.CreateLayer', 'tp.CreateLayer', (["{'rows': 1, 'columns': 1, 'elements': 'ac', 'extent': [extent, extent]}"], {}), "({'rows': 1, 'columns': 1, 'elements': 'ac', 'extent': [\n extent, extent]})\n", (1434, 1512), True, 'import nest.topology as tp\n'), ((2354, 2423), 'nest.topology.ConnectLayers', 'tp.ConnectLayers', (['input', 'layers[0]', "{'connection_type': 'convergent'}"], {}), "(input, layers[0], {'connection_type': 'convergent'})\n", (2370, 2423), True, 
'import nest.topology as tp\n'), ((2425, 2445), 'nest.Simulate', 'nest.Simulate', (['t_sim'], {}), '(t_sim)\n', (2438, 2445), False, 'import nest\n'), ((2447, 2495), 'nest.raster_plot.from_device', 'nest.raster_plot.from_device', (['spike_detector_nrn'], {}), '(spike_detector_nrn)\n', (2475, 2495), False, 'import nest\n'), ((2544, 2556), 'pylab.show', 'pylab.show', ([], {}), '()\n', (2554, 2556), False, 'import pylab\n'), ((1589, 1695), 'nest.topology.CreateLayer', 'tp.CreateLayer', (["{'rows': rows, 'columns': columns, 'extent': [extent, extent], 'elements':\n 'exp_nrn'}"], {}), "({'rows': rows, 'columns': columns, 'extent': [extent, extent\n ], 'elements': 'exp_nrn'})\n", (1603, 1695), True, 'import nest.topology as tp\n'), ((2207, 2281), 'nest.topology.ConnectLayers', 'tp.ConnectLayers', (['layer', 'spike_detector', "{'connection_type': 'convergent'}"], {}), "(layer, spike_detector, {'connection_type': 'convergent'})\n", (2223, 2281), True, 'import nest.topology as tp\n'), ((2284, 2352), 'nest.topology.ConnectLayers', 'tp.ConnectLayers', (['voltmeter', 'layer', "{'connection_type': 'divergent'}"], {}), "(voltmeter, layer, {'connection_type': 'divergent'})\n", (2300, 2352), True, 'import nest.topology as tp\n'), ((1567, 1581), 'math.sqrt', 'math.sqrt', (['pop'], {}), '(pop)\n', (1576, 1581), False, 'import math\n'), ((2143, 2181), 'nest.topology.ConnectLayers', 'tp.ConnectLayers', (['source', 'target', 'conn'], {}), '(source, target, conn)\n', (2159, 2181), True, 'import nest.topology as tp\n'), ((2088, 2126), 'nest.topology.ConnectLayers', 'tp.ConnectLayers', (['source', 'target', 'conn'], {}), '(source, target, conn)\n', (2104, 2126), True, 'import nest.topology as tp\n')] |