input stringlengths 2.65k 237k | output stringclasses 1 value |
|---|---|
import numpy as np
from tqdm.autonotebook import tqdm
import gc
import warnings
import sklearn.utils
_remove_cache = {}
def remove_retrain(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is retrained for each test sample with the important features set to a constant.

    If you want to know how important a set of features is you can ask how the model would be
    different if those features had never existed. To determine this we can mask those features
    across the entire training and test datasets, then retrain the model. If we then compare the
    output of this retrained model to the original model we can see the effect produced by knowing
    the features we masked. Since for individualized explanation methods each test sample has a
    different set of most important features we need to retrain the model for every test sample
    to get the change in model performance when a specified fraction of the most important features
    are withheld.
    """
    warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")

    # see if we match the last cached call
    global _remove_cache
    args = (X_train, y_train, X_test, y_test, model_generator, metric)
    cache_match = False
    if "args" in _remove_cache:
        if all(a is b for a, b in zip(_remove_cache["args"], args)) and np.all(_remove_cache["attr_test"] == attr_test):
            cache_match = True

    X_train, X_test = to_array(X_train, X_test)

    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]

    # this is the model we will retrain many times
    model_masked = model_generator()

    # mask nmask top features and re-train the model for each test explanation
    X_train_tmp = np.zeros(X_train.shape)
    X_test_tmp = np.zeros(X_test.shape)
    yp_masked_test = np.zeros(y_test.shape)
    # bugfix: pass random_state so tie breaking is reproducible and consistent
    # with remove_mask/remove_impute (random_state was previously ignored here)
    tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
    last_nmask = _remove_cache.get("nmask", None)
    last_yp_masked_test = _remove_cache.get("yp_masked_test", None)
    for i in tqdm(range(len(y_test)), "Retraining for the 'remove' metric"):
        if cache_match and last_nmask[i] == nmask[i]:
            # same data and same mask count as the cached call: reuse the prediction
            yp_masked_test[i] = last_yp_masked_test[i]
        elif nmask[i] == 0:
            # nothing masked: the already-trained model's prediction is unchanged
            yp_masked_test[i] = trained_model.predict(X_test[i:i+1])[0]
        else:
            # mask out the most important features for this test instance
            X_train_tmp[:] = X_train
            X_test_tmp[:] = X_test
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            X_train_tmp[:,ordering[:nmask[i]]] = X_train[:,ordering[:nmask[i]]].mean()
            X_test_tmp[i,ordering[:nmask[i]]] = X_train[:,ordering[:nmask[i]]].mean()

            # retrain the model and make a prediction
            model_masked.fit(X_train_tmp, y_train)
            yp_masked_test[i] = model_masked.predict(X_test_tmp[i:i+1])[0]

    # save our results so the next call to us can be faster when there is redundancy
    _remove_cache["nmask"] = nmask
    _remove_cache["yp_masked_test"] = yp_masked_test
    _remove_cache["attr_test"] = attr_test
    _remove_cache["args"] = args
    return metric(y_test, yp_masked_test)
def remove_mask(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ Each test sample is masked by setting the important features to a constant.
    """
    X_train, X_test = to_array(X_train, X_test)

    # the train and test sets must share a feature space
    assert X_train.shape[1] == X_test.shape[1]

    # for each test row, overwrite its nmask[i] most important features with the training mean
    masked = X_test.copy()
    noise = const_rand(X_train.shape[1], random_state) * 1e-6
    feature_means = X_train.mean(0)
    for row in range(len(y_test)):
        count = nmask[row]
        if count > 0:
            top = np.argsort(-attr_test[row, :] + noise)[:count]
            masked[row, top] = feature_means[top]
    preds = trained_model.predict(masked)
    return metric(y_test, preds)
def remove_impute(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is re-evaluated for each test sample with the important features set to an imputed value.

    Note that the imputation is done using a multivariate normality assumption on the dataset. This depends on
    being able to estimate the full data covariance matrix (and its inverse) accurately. So X_train.shape[0] should
    be significantly bigger than X_train.shape[1].
    """
    X_train, X_test = to_array(X_train, X_test)

    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]

    # estimate the data covariance, regularized slightly so the observed block is invertible
    C = np.cov(X_train.T)
    C += np.eye(C.shape[0]) * 1e-6

    X_test_tmp = X_test.copy()
    tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
    mean_vals = X_train.mean(0)
    for i in range(len(y_test)):
        if nmask[i] > 0:
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            observe_inds = ordering[nmask[i]:]
            impute_inds = ordering[:nmask[i]]

            # impute the masked block as its conditional mean given the observed
            # block under a multivariate normal assumption
            Coo = C[observe_inds,:][:,observe_inds]
            Cio = C[impute_inds,:][:,observe_inds]
            # np.linalg.solve is more numerically stable (and cheaper) than
            # forming an explicit inverse with np.linalg.inv
            impute = mean_vals[impute_inds] + Cio @ np.linalg.solve(Coo, X_test[i, observe_inds] - mean_vals[observe_inds])
            X_test_tmp[i, impute_inds] = impute
    yp_masked_test = trained_model.predict(X_test_tmp)
    return metric(y_test, yp_masked_test)
def remove_resample(nmask, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    """ The model is re-evaluated for each test sample with the important features set to resampled background values.

    For every test row we draw nsamples rows from the training set, overwrite the masked
    (most important) features with those background values, and average the model output
    over the samples.
    """
    X_train, X_test = to_array(X_train, X_test)

    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]

    # how many background samples to average over
    nsamples = 100

    # repeat each test row nsamples times: rows i*nsamples:(i+1)*nsamples are copies of row i
    N,M = X_test.shape
    X_test_tmp = np.tile(X_test, [1, nsamples]).reshape(nsamples * N, M)
    # bugfix: pass random_state so the tie-breaking noise is reproducible, matching
    # the seeded resampling below (random_state was previously ignored here)
    tie_breaking_noise = const_rand(M, random_state) * 1e-6
    inds = sklearn.utils.resample(np.arange(N), n_samples=nsamples, random_state=random_state)
    for i in range(N):
        if nmask[i] > 0:
            ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
            # overwrite the top nmask[i] features of every copy of row i with background rows
            X_test_tmp[i*nsamples:(i+1)*nsamples, ordering[:nmask[i]]] = X_train[inds, :][:, ordering[:nmask[i]]]
    yp_masked_test = trained_model.predict(X_test_tmp)
    yp_masked_test = np.reshape(yp_masked_test, (N, nsamples)).mean(1) # take the mean output over all samples
    return metric(y_test, yp_masked_test)
def batch_remove_retrain(nmask_train, nmask_test, X_train, y_train, X_test, y_test, attr_train, attr_test, model_generator, metric):
    """ An approximation of holdout that only retrains the model once.

    This is also called ROAR (RemOve And Retrain) in work by Google. It is much more computationally
    efficient than the holdout method because it masks the most important features in every sample
    and then retrains the model once, instead of retraining the model for every test sample like
    the holdout metric.
    """
    warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # mask nmask top features for each explanation
    X_train_tmp = X_train.copy()
    X_train_mean = X_train.mean(0)
    # tiny deterministic noise makes argsort stable when attribution values tie
    tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
    for i in range(len(y_train)):
        if nmask_train[i] > 0:
            # feature indices sorted from most to least important for sample i
            ordering = np.argsort(-attr_train[i, :] + tie_breaking_noise)
            # replace the top nmask_train[i] features with the training mean
            X_train_tmp[i, ordering[:nmask_train[i]]] = X_train_mean[ordering[:nmask_train[i]]]
    X_test_tmp = X_test.copy()
    for i in range(len(y_test)):
        if nmask_test[i] > 0:
            ordering = np.argsort(-attr_test[i, :] + tie_breaking_noise)
            X_test_tmp[i, ordering[:nmask_test[i]]] = X_train_mean[ordering[:nmask_test[i]]]
    # train the model with all the given features masked
    model_masked = model_generator()
    model_masked.fit(X_train_tmp, y_train)
    yp_test_masked = model_masked.predict(X_test_tmp)
    return metric(y_test, yp_test_masked)
_keep_cache = {}
def keep_retrain(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
""" The model is retrained for each test sample with the non-important features set to a constant.
If you want to know how important a set of features is you can ask how the model would be
different if only those features had existed. To determine this we can mask the other features
across the entire training and test datasets, then retrain the model. If we apply compare the
output of this retrained model to the original model we can see the effect produced by only
knowning the important features. Since for individualized explanation methods each test sample
has a different set of most important features we need to retrain the model for every test sample
to get the change in model performance when a specified fraction of the most important features
are retained.
"""
warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
# see if we match the last cached call
global _keep_cache
args = (X_train, y_train, X_test, y_test, model_generator, metric)
cache_match = False
if "args" in _keep_cache:
if all(a is b for a,b in zip(_keep_cache["args"], args)) and np.all(_keep_cache["attr_test"] == attr_test):
cache_match = True
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# this is the model we will retrain many times
model_masked = model_generator()
# keep nkeep top features and re-train the model for each test explanation
X_train_tmp = np.zeros(X_train.shape)
X_test_tmp = np.zeros(X_test.shape)
yp_masked_test = np.zeros(y_test.shape)
tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
last_nkeep = _keep_cache.get("nkeep", None)
last_yp_masked_test = _keep_cache.get("yp_masked_test", None)
for i in tqdm(range(len(y_test)), "Retraining for the 'keep' metric"):
if cache_match and last_nkeep[i] == nkeep[i]:
yp_masked_test[i] = last_yp_masked_test[i]
elif nkeep[i] == attr_test.shape[1]:
yp_masked_test[i] = trained_model.predict(X_test[i:i+1])[0]
else:
# mask out the most important features for this test instance
X_train_tmp[:] = X_train
X_test_tmp[:] = X_test
ordering = np.argsort(-attr_test[i,:] + tie_breaking_noise)
X_train_tmp[:,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean()
X_test_tmp[i,ordering[nkeep[i]:]] = X_train[:,ordering[nkeep[i]:]].mean()
# retrain the model and make a prediction
model_masked.fit(X_train_tmp, y_train)
yp_masked_test[i] = model_masked.predict(X_test_tmp[i:i+1])[0]
# save our results so the next call to us can be faster when there is redundancy
_keep_cache["nkeep"] = nkeep
_keep_cache["yp_masked_test"] = yp_masked_test
| |
import numpy as np
import torch
import torch.nn.functional as F
import os
from skimage.io import imsave
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
def dict2obj(d):
    """Recursively convert nested dicts (and lists containing dicts) into
    anonymous objects whose attributes mirror the dict keys."""
    if isinstance(d, list):
        d = [dict2obj(item) for item in d]
    if not isinstance(d, dict):
        # scalars, lists, and anything non-dict pass through unchanged
        return d
    class C(object):
        pass
    obj = C()
    for key, value in d.items():
        obj.__dict__[key] = dict2obj(value)
    return obj
def check_mkdir(path):
    """Create *path* (including any missing parent directories) if it does not exist."""
    if not os.path.exists(path):
        print('making %s' % path)
    # exist_ok avoids a race (and crash) if the directory appears between
    # the existence check and the creation call
    os.makedirs(path, exist_ok=True)
def l2_distance(verts1, verts2):
    """Mean (over batch and points) Euclidean distance between two vertex sets
    of shape [batch, num_points, 3]."""
    diff = verts1 - verts2
    per_point = diff.pow(2).sum(dim=2).sqrt()
    return per_point.mean(dim=1).mean()
def quat2mat(quat):
    """Convert quaternion coefficients to rotation matrices.

    Args:
        quat: tensor of shape [B, 4], ordered as (w, x, y, z).

    Returns:
        Rotation matrices of shape [B, 3, 3].
    """
    # normalize so the quaternion represents a pure rotation
    q = quat / quat.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = q.unbind(dim=1)

    ww, xx, yy, zz = w * w, x * x, y * y, z * z
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z

    entries = [ww + xx - yy - zz, 2 * (xy - wz), 2 * (wy + xz),
               2 * (wz + xy), ww - xx + yy - zz, 2 * (yz - wx),
               2 * (xz - wy), 2 * (wx + yz), ww - xx - yy + zz]
    return torch.stack(entries, dim=1).view(quat.size(0), 3, 3)
def batch_rodrigues(theta):
    """Convert axis-angle vectors (N x 3) to rotation matrices (N x 3 x 3)
    by way of an intermediate quaternion."""
    # 1e-8 keeps the norm (and the division below) away from zero
    magnitude = torch.unsqueeze(torch.norm(theta + 1e-8, p=2, dim=1), -1)
    axis = theta / magnitude
    half_angle = magnitude * 0.5
    quat = torch.cat([torch.cos(half_angle), torch.sin(half_angle) * axis], dim=1)
    return quat2mat(quat)
def batch_orth_proj(X, camera):
    '''
    Scaled orthographic projection.
    X is N x num_points x 3; camera holds per-batch (scale, tx, ty).
    '''
    cam = camera.clone().view(-1, 1, 3)
    scale = cam[:, :, 0:1]
    trans = cam[:, :, 1:]
    # translate x/y, keep z unchanged, then apply the uniform scale
    shifted_xy = X[:, :, :2] + trans
    shifted = torch.cat([shifted_xy, X[:, :, 2:]], 2)
    return scale * shifted
def batch_persp_proj(vertices, cam, f, t, orig_size=256, eps=1e-9):
    '''
    Calculate projective transformation of vertices given a projection matrix

    Input parameters:
        vertices: batch_size * num_points * 3 vertex positions
        cam: dict with entries 'c' (principal point), 'r' (axis-angle rotation)
             and 'k' (lens distortion coefficients)
        f: torch tensor of focal length
        t: batch_size * 1 * 3 xyz translation in world coordinate
        orig_size: original size of image captured by the camera
        eps: small constant guarding the divide by depth

    Returns: For each point [X,Y,Z] in world coordinates [u,v,z] where u,v are the coordinates of the projection in
    pixels (remapped to [-1, 1] for the renderer) and z is the depth
    '''
    device = vertices.device
    # intrinsic matrix K = [[f, 0, cx], [0, f, cy], [0, 0, 1]]
    # NOTE(review): .repeat(B, 1) on the [1, 3, 3] tensor looks wrong -- repeat()
    # needs one factor per dimension, so this likely should be .repeat(B, 1, 1);
    # confirm this code path is actually exercised
    K = torch.tensor([f, 0., cam['c'][0], 0., f, cam['c'][1], 0., 0., 1.]).view(3, 3)[None, ...].repeat(
        vertices.shape[0], 1).to(device)
    R = batch_rodrigues(cam['r'][None, ...].repeat(vertices.shape[0], 1)).to(device)
    dist_coeffs = cam['k'][None, ...].repeat(vertices.shape[0], 1).to(device)
    # rigid transform into camera coordinates
    vertices = torch.matmul(vertices, R.transpose(2, 1)) + t
    x, y, z = vertices[:, :, 0], vertices[:, :, 1], vertices[:, :, 2]
    # perspective divide; eps avoids division by zero depth
    x_ = x / (z + eps)
    y_ = y / (z + eps)
    # Get distortion coefficients from vector
    k1 = dist_coeffs[:, None, 0]
    k2 = dist_coeffs[:, None, 1]
    p1 = dist_coeffs[:, None, 2]
    p2 = dist_coeffs[:, None, 3]
    k3 = dist_coeffs[:, None, 4]
    # we use x_ for x' and x__ for x'' etc.
    # apply radial (k1..k3) and tangential (p1, p2) lens distortion
    r = torch.sqrt(x_ ** 2 + y_ ** 2)
    x__ = x_ * (1 + k1 * (r ** 2) + k2 * (r ** 4) + k3 * (r ** 6)) + 2 * p1 * x_ * y_ + p2 * (r ** 2 + 2 * x_ ** 2)
    y__ = y_ * (1 + k1 * (r ** 2) + k2 * (r ** 4) + k3 * (r ** 6)) + p1 * (r ** 2 + 2 * y_ ** 2) + 2 * p2 * x_ * y_
    vertices = torch.stack([x__, y__, torch.ones_like(z)], dim=-1)
    # apply intrinsics to get pixel coordinates
    vertices = torch.matmul(vertices, K.transpose(1, 2))
    u, v = vertices[:, :, 0], vertices[:, :, 1]
    # flip v so the vertical axis matches the renderer's convention
    v = orig_size - v
    # map u,v from [0, img_size] to [-1, 1] to be compatible with the renderer
    u = 2 * (u - orig_size / 2.) / orig_size
    v = 2 * (v - orig_size / 2.) / orig_size
    vertices = torch.stack([u, v, z], dim=-1)
    return vertices
def face_vertices(vertices, faces):
    """
    Gather the three vertex positions of every face.

    :param vertices: [batch size, number of vertices, 3]
    :param faces: [batch size, number of faces, 3] vertex indices
    :return: [batch size, number of faces, 3, 3]
    """
    assert vertices.ndimension() == 3
    assert faces.ndimension() == 3
    assert vertices.shape[0] == faces.shape[0]
    assert vertices.shape[2] == 3
    assert faces.shape[2] == 3

    batch, n_verts = vertices.shape[:2]
    device = vertices.device
    # shift each batch element's indices into its slice of the flattened array
    offsets = (torch.arange(batch, dtype=torch.int32).to(device) * n_verts)[:, None, None]
    flat_faces = faces + offsets
    flat_verts = vertices.reshape((batch * n_verts, 3))
    # pytorch only supports long and byte tensors for indexing
    return flat_verts[flat_faces.long()]
def vertex_normals(vertices, faces):
    """
    Compute per-vertex normals by accumulating (area-weighted) face cross
    products at each vertex, then normalizing.

    :param vertices: [batch size, number of vertices, 3]
    :param faces: [batch size, number of faces, 3]
    :return: [batch size, number of vertices, 3] unit normals
    """
    assert (vertices.ndimension() == 3)
    assert (faces.ndimension() == 3)
    assert (vertices.shape[0] == faces.shape[0])
    assert (vertices.shape[2] == 3)
    assert (faces.shape[2] == 3)
    bs, nv = vertices.shape[:2]
    bs, nf = faces.shape[:2]
    device = vertices.device
    normals = torch.zeros(bs * nv, 3).to(device)
    # offset indices so each batch element addresses its own slice of the
    # flattened vertex array
    faces = faces + (torch.arange(bs, dtype=torch.int32).to(device) * nv)[:, None, None] # expanded faces
    vertices_faces = vertices.reshape((bs * nv, 3))[faces.long()]
    faces = faces.view(-1, 3)
    vertices_faces = vertices_faces.view(-1, 3, 3)
    # accumulate the face cross product at each of the three corners; the three
    # calls use edge pairs anchored at corner 1, 2 and 0 respectively, all
    # oriented consistently with the face winding
    normals.index_add_(0, faces[:, 1].long(),
                       torch.cross(vertices_faces[:, 2] - vertices_faces[:, 1], vertices_faces[:, 0] - vertices_faces[:, 1]))
    normals.index_add_(0, faces[:, 2].long(),
                       torch.cross(vertices_faces[:, 0] - vertices_faces[:, 2], vertices_faces[:, 1] - vertices_faces[:, 2]))
    normals.index_add_(0, faces[:, 0].long(),
                       torch.cross(vertices_faces[:, 1] - vertices_faces[:, 0], vertices_faces[:, 2] - vertices_faces[:, 0]))
    normals = F.normalize(normals, eps=1e-6, dim=1)
    normals = normals.reshape((bs, nv, 3))
    # pytorch only supports long and byte tensors for indexing
    return normals
def tensor_vis_landmarks(images, landmarks, gt_landmarks=None, color='g', isScale=True):
    """Draw predicted (and optionally ground-truth) landmarks onto a batch of images.

    :param images: [B, C, H, W] image tensor in [0, 1] -- assumed RGB, TODO confirm
    :param landmarks: [B, N, 2+] predicted landmark coordinates
    :param gt_landmarks: optional ground-truth landmarks, drawn in red
    :param color: base color code forwarded to the plotting helpers
    :param isScale: if True, landmarks are treated as normalized to [-1, 1]
        and mapped to pixel coordinates (assumes square images)
    :return: [B, C, H, W] float tensor of visualizations in [0, 1]
    """
    # visualize landmarks
    vis_landmarks = []
    images = images.cpu().numpy()
    predicted_landmarks = landmarks.detach().cpu().numpy()
    if gt_landmarks is not None:
        gt_landmarks_np = gt_landmarks.detach().cpu().numpy()
    for i in range(images.shape[0]):
        image = images[i]
        # CHW -> HWC and channel reversal (RGB <-> BGR) for OpenCV drawing
        image = image.transpose(1, 2, 0)[:, :, [2, 1, 0]].copy();
        image = (image * 255)
        if isScale:
            # map from [-1, 1] to pixel coordinates using the image height
            predicted_landmark = predicted_landmarks[i] * image.shape[0] / 2 + image.shape[0] / 2
        else:
            predicted_landmark = predicted_landmarks[i]
        if predicted_landmark.shape[0] == 68:
            # 68 points: contour-aware drawing with connecting lines
            image_landmarks = plot_kpts(image, predicted_landmark, color)
            if gt_landmarks is not None:
                # NOTE(review): plot_verts is not defined in this module -- confirm
                # it is provided by an import elsewhere in the package
                image_landmarks = plot_verts(image_landmarks,
                                             gt_landmarks_np[i] * image.shape[0] / 2 + image.shape[0] / 2, 'r')
        else:
            image_landmarks = plot_verts(image, predicted_landmark, color)
            if gt_landmarks is not None:
                image_landmarks = plot_verts(image_landmarks,
                                             gt_landmarks_np[i] * image.shape[0] / 2 + image.shape[0] / 2, 'r')
        vis_landmarks.append(image_landmarks)
    vis_landmarks = np.stack(vis_landmarks)
    # reverse channels back, NHWC -> NCHW, rescale to [0, 1]
    vis_landmarks = torch.from_numpy(
        vis_landmarks[:, :, :, [2, 1, 0]].transpose(0, 3, 1, 2)) / 255. # , dtype=torch.float32)
    return vis_landmarks
# 0-based indices of the last keypoint of each landmark contour segment --
# presumably the segment boundaries of the standard 68-point facial landmark
# layout (TODO confirm); plot_kpts skips drawing a connecting line at these
# indices so separate contours are not joined.
end_list = np.array([17, 22, 27, 42, 48, 31, 36, 68], dtype = np.int32) - 1
def plot_kpts(image, kpts, color = 'r'):
    ''' Draw 68 key points and connect them within each contour segment.

    Args:
        image: the input image (a modified copy is returned)
        kpts: (68, 2|3|4) array; when 4 columns are present, column 3 is a
            per-point confidence score used to color the point
        color: 'r', 'g' or 'b' base color code for the points
    '''
    if color == 'r':
        c = (255, 0, 0)
    elif color == 'g':
        c = (0, 255, 0)
    elif color == 'b':
        # bugfix: was (255, 0, 0), which duplicated 'r' instead of being blue
        c = (0, 0, 255)
    else:
        # bugfix: previously c was left undefined for unknown color codes,
        # raising NameError unless the confidence branch below assigned it
        c = (255, 0, 0)
    image = image.copy()
    kpts = kpts.copy()
    for i in range(kpts.shape[0]):
        st = kpts[i, :2]
        if kpts.shape[1]==4:
            # color by confidence: high confidence vs low confidence
            if kpts[i, 3] > 0.5:
                c = (0, 255, 0)
            else:
                c = (0, 0, 255)
        image = cv2.circle(image,(int(st[0]), int(st[1])), 1, c, 2)
        if i in end_list:
            # last point of a contour segment: don't connect across segments
            continue
        ed = kpts[i + 1, :2]
        image = cv2.line(image, (int(st[0]), int(st[1])), (int(ed[0]), int(ed[1])), (255, 255, 255), 1)
    return image
def save_obj(filename, vertices, faces, textures=None, uvcoords=None, uvfaces=None, texture_type='surface'):
assert vertices.ndimension() == 2
assert faces.ndimension() == 2
assert texture_type in ['surface', 'vertex']
# assert texture_res >= 2
if textures is not None and texture_type == 'surface':
textures =textures.detach().cpu().numpy().transpose(1,2,0)
filename_mtl = filename[:-4] + '.mtl'
filename_texture = filename[:-4] + '.png'
material_name = 'material_1'
# texture_image, vertices_textures = create_texture_image(textures, texture_res)
texture_image = textures
texture_image = texture_image.clip(0, 1)
texture_image = (texture_image * 255).astype('uint8')
imsave(filename_texture, texture_image)
faces = faces.detach().cpu().numpy()
with open(filename, 'w') as f:
f.write('# %s\n' % os.path.basename(filename))
f.write('#\n')
| |
# Copyright (c) 2014, <NAME>
#
# See the LICENSE file for legal information regarding use of this file.
"""Implementation of the TLS Record Layer protocol"""
import socket
import errno
from .utils import tlshashlib as hashlib
from .constants import ContentType, CipherSuite
from .messages import RecordHeader3, RecordHeader2, Message
from .utils.cipherfactory import createAESGCM, createAES, createRC4, \
createTripleDES, createCHACHA20
from .utils.codec import Parser, Writer
from .utils.compat import compatHMAC
from .utils.cryptomath import getRandomBytes, MD5
from .utils.constanttime import ct_compare_digest, ct_check_cbc_mac_and_pad
from .errors import TLSRecordOverflow, TLSIllegalParameterException,\
TLSAbruptCloseError, TLSDecryptionFailed, TLSBadRecordMAC
from .mathtls import createMAC_SSL, createHMAC, PRF_SSL, PRF, PRF_1_2, \
PRF_1_2_SHA384
class RecordSocket(object):
    """Socket wrapper for reading and writing TLS Records.

    All I/O methods are generators: when the underlying socket is
    non-blocking and the operation would block, they yield 0 (reads) or 1
    (writes) so the caller can retry later; the final result (parsed header,
    buffer, or nothing for sends) is yielded/returned once the operation
    completes.
    """

    def __init__(self, sock):
        """
        Assign socket to wrapper

        :type sock: socket.socket
        """
        self.sock = sock
        # protocol version as a (major, minor) tuple; (0, 0) = not yet set
        self.version = (0, 0)

    def _sockSendAll(self, data):
        """
        Send all data through socket

        :type data: bytearray
        :param data: data to send
        :raises socket.error: when write to socket failed
        """
        while 1:
            try:
                bytesSent = self.sock.send(data)
            except socket.error as why:
                # non-blocking socket would block: yield control to the caller
                if why.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                    yield 1
                    continue
                raise
            if bytesSent == len(data):
                return
            # partial write: drop the sent prefix and resume on next iteration
            data = data[bytesSent:]
            yield 1

    def send(self, msg, padding=0):
        """
        Send the message through socket.

        :type msg: bytearray
        :param msg: TLS message to send
        :type padding: int
        :param padding: amount of padding to specify for SSLv2
        :raises socket.error: when write to socket failed
        """
        data = msg.write()
        if self.version in ((2, 0), (0, 2)):
            # SSLv2 header (length + padding, no content type field)
            header = RecordHeader2().create(len(data),
                                            padding)
        else:
            # SSLv3/TLS header: version, content type and length
            header = RecordHeader3().create(self.version,
                                            msg.contentType,
                                            len(data))
        data = header.write() + data
        for result in self._sockSendAll(data):
            yield result

    def _sockRecvAll(self, length):
        """
        Read exactly the amount of bytes specified in L{length} from raw socket.

        :rtype: generator
        :returns: generator that will return 0 or 1 in case the socket is non
            blocking and would block and bytearray in case the read finished
        :raises TLSAbruptCloseError: when the socket closed
        """
        buf = bytearray(0)
        if length == 0:
            # nothing to read; callers stop iterating once they see a bytearray
            yield buf
        while True:
            try:
                socketBytes = self.sock.recv(length - len(buf))
            except socket.error as why:
                if why.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                    yield 0
                    continue
                else:
                    raise
            #if the connection closed, raise socket error
            if len(socketBytes) == 0:
                raise TLSAbruptCloseError()
            buf += bytearray(socketBytes)
            if len(buf) == length:
                yield buf

    def _recvHeader(self):
        """Read a single record header from socket.

        Distinguishes SSLv2 from SSLv3/TLS by the first byte and yields the
        parsed RecordHeader2 or RecordHeader3 (or 0/1 while blocked).
        """
        #Read the next record header
        buf = bytearray(0)
        ssl2 = False
        result = None
        for result in self._sockRecvAll(1):
            if result in (0, 1):
                yield result
            else: break
        assert result is not None
        buf += result
        if buf[0] in ContentType.all:
            # first byte is a valid SSLv3/TLS content type
            ssl2 = False
            # SSLv3 record layer header is 5 bytes long, we already read 1
            result = None
            for result in self._sockRecvAll(4):
                if result in (0, 1):
                    yield result
                else: break
            assert result is not None
            buf += result
        else:
            # if header has no padding the header is 2 bytes long, 3 otherwise
            # at the same time we already read 1 byte
            ssl2 = True
            if buf[0] & 0x80:
                readLen = 1
            else:
                readLen = 2
            result = None
            for result in self._sockRecvAll(readLen):
                if result in (0, 1):
                    yield result
                else: break
            assert result is not None
            buf += result
        #Parse the record header
        if ssl2:
            record = RecordHeader2().parse(Parser(buf))
            # padding can't be longer than overall length and if it is present
            # the overall size must be a multiple of cipher block size
            if ((record.padding > record.length) or
                    (record.padding and record.length % 8)):
                raise TLSIllegalParameterException(\
                        "Malformed record layer header")
        else:
            record = RecordHeader3().parse(Parser(buf))
        yield record

    def recv(self):
        """
        Read a single record from socket, handle SSLv2 and SSLv3 record layer

        :rtype: generator
        :returns: generator that returns 0 or 1 in case the read would be
            blocking or a tuple containing record header (object) and record
            data (bytearray) read from socket
        :raises socket.error: In case of network error
        :raises TLSAbruptCloseError: When the socket was closed on the other
            side in middle of record receiving
        :raises TLSRecordOverflow: When the received record was longer than
            allowed by TLS
        :raises TLSIllegalParameterException: When the record header was
            malformed
        """
        record = None
        for record in self._recvHeader():
            if record in (0, 1):
                yield record
            else: break
        assert record is not None
        #Check the record header fields
        # 18432 = 2**14 (basic record size limit) + 1024 (maximum compression
        # overhead) + 1024 (maximum encryption overhead)
        if record.length > 18432:
            raise TLSRecordOverflow()
        #Read the record contents
        buf = bytearray(0)
        result = None
        for result in self._sockRecvAll(record.length):
            if result in (0, 1):
                yield result
            else: break
        assert result is not None
        buf += result
        yield (record, buf)
class ConnectionState(object):
    """Preserve the connection state for reading and writing data to records"""

    def __init__(self):
        """Create an instance with empty encryption and MACing contexts"""
        self.macContext = None   # MAC template, copied for every record
        self.encContext = None   # symmetric cipher context
        self.fixedNonce = None   # implicit part of an AEAD nonce
        self.seqnum = 0          # per-direction record sequence number

    def getSeqNumBytes(self):
        """Return the current sequence number encoded on 8 bytes, then bump it."""
        encoder = Writer()
        encoder.add(self.seqnum, 8)
        self.seqnum += 1
        return encoder.bytes
class RecordLayer(object):
"""
Implementation of TLS record layer protocol
:ivar version: the TLS version to use (tuple encoded as on the wire)
:ivar sock: underlying socket
:ivar client: whether the connection should use encryption
:ivar encryptThenMAC: use the encrypt-then-MAC mechanism for record
integrity
:ivar handshake_finished: used in SSL2, True if handshake protocol is over
"""
def __init__(self, sock):
    self.sock = sock
    # generator-based wrapper that performs the actual record I/O
    self._recordSocket = RecordSocket(sock)
    # negotiated protocol version, (0, 0) until set via the version property
    self._version = (0, 0)
    # True when this side plays the client role
    self.client = True
    # active cipher/MAC states for the current epoch
    self._writeState = ConnectionState()
    self._readState = ConnectionState()
    # states staged for activation on the next cipher-state change
    self._pendingWriteState = ConnectionState()
    self._pendingReadState = ConnectionState()
    # per-record IV block prepended to CBC records on TLS 1.1+ (see _macThenEncrypt)
    self.fixedIVBlock = None
    # use encrypt-then-MAC record protection instead of MAC-then-encrypt
    self.encryptThenMAC = False
    # used in SSL2, True if handshake protocol is over
    self.handshake_finished = False
@property
def blockSize(self):
    """Return the size of block used by current symmetric cipher (R/O)"""
    # raises AttributeError when no cipher has been set up yet (encContext is None)
    return self._writeState.encContext.block_size
@property
def version(self):
    """Return the TLS version used by record layer"""
    return self._version
@version.setter
def version(self, val):
    """Set the TLS version used by record layer"""
    self._version = val
    # keep the low-level record socket in sync so headers carry the right version
    self._recordSocket.version = val
def getCipherName(self):
    """
    Return the name of the bulk cipher used by this connection

    :rtype: str
    :returns: The name of the cipher, like 'aes128', 'rc4', etc.
    """
    ctx = self._writeState.encContext
    return None if ctx is None else ctx.name
def getCipherImplementation(self):
    """
    Return the name of the implementation used for the connection

    'python' for tlslite internal implementation, 'openssl' for M2crypto
    and 'pycrypto' for pycrypto

    :rtype: str
    :returns: Name of cipher implementation used, None if not initialised
    """
    ctx = self._writeState.encContext
    return None if ctx is None else ctx.implementation
def shutdown(self):
    """Clear read and write states (active and pending)."""
    for attr in ('_writeState', '_readState',
                 '_pendingWriteState', '_pendingReadState'):
        setattr(self, attr, ConnectionState())
def isCBCMode(self):
    """Return True if the current write cipher is a block cipher (CBC mode)."""
    # bool() collapses the old if/else-with-literal-returns; a missing state
    # or missing cipher context maps to False just as before
    return bool(self._writeState and
                self._writeState.encContext and
                self._writeState.encContext.isBlockCipher)
#
# sending messages
#
def addPadding(self, data):
    """Add padding to data so that it is multiple of block size"""
    block = self.blockSize
    padLen = block - 1 - (len(data) % block)
    # SSL/TLS CBC padding: padLen+1 bytes, each byte holding the value padLen
    data += bytearray([padLen] * (padLen + 1))
    return data
def calculateMAC(self, mac, seqnumBytes, contentType, data):
    """Calculate the SSL/TLS version of a MAC"""
    # MAC input layout: seq_num || type || (version ||) length || fragment
    mac.update(compatHMAC(seqnumBytes))
    mac.update(compatHMAC(bytearray([contentType])))
    assert self.version in ((3, 0), (3, 1), (3, 2), (3, 3))
    if self.version != (3, 0):
        # TLS (unlike SSLv3) includes the protocol version in the MAC
        mac.update(compatHMAC(bytearray([self.version[0]])))
        mac.update(compatHMAC(bytearray([self.version[1]])))
    # two-byte big-endian data length
    mac.update(compatHMAC(bytearray([len(data)//256])))
    mac.update(compatHMAC(bytearray([len(data)%256])))
    mac.update(compatHMAC(data))
    return bytearray(mac.digest())
def _macThenEncrypt(self, data, contentType):
    """MAC, pad then encrypt data"""
    if self._writeState.macContext:
        seqnumBytes = self._writeState.getSeqNumBytes()
        # copy the MAC template so it stays reusable for later records
        mac = self._writeState.macContext.copy()
        macBytes = self.calculateMAC(mac, seqnumBytes, contentType, data)
        data += macBytes
    #Encrypt for Block or Stream Cipher
    if self._writeState.encContext:
        #Add padding (for Block Cipher):
        if self._writeState.encContext.isBlockCipher:
            #Add TLS 1.1 fixed block (explicit per-record IV)
            if self.version >= (3, 2):
                data = self.fixedIVBlock + data
            data = self.addPadding(data)
        #Encrypt
        data = self._writeState.encContext.encrypt(data)
    return data
def _encryptThenMAC(self, buf, contentType):
    """Pad, encrypt and then MAC the data"""
    if self._writeState.encContext:
        # add IV for TLS1.1+ (explicit per-record IV for CBC records)
        if self.version >= (3, 2):
            buf = self.fixedIVBlock + buf
        buf = self.addPadding(buf)
        buf = self._writeState.encContext.encrypt(buf)
    # add MAC -- computed over the already-encrypted data, unlike _macThenEncrypt
    if self._writeState.macContext:
        seqnumBytes = self._writeState.getSeqNumBytes()
        # copy the MAC template so it stays reusable for later records
        mac = self._writeState.macContext.copy()
        # append MAC
        macBytes = self.calculateMAC(mac, seqnumBytes, contentType, buf)
        buf += macBytes
    return buf
@staticmethod
def _getNonce(state, seqnum):
"""Calculate a nonce for a given enc/dec context"""
# ChaCha is using the draft-TLS1.3-like nonce derivation
if state.encContext.name == "chacha20-poly1305" and \
len(state.fixedNonce) == 12:
# 4 byte nonce is used by the draft cipher
nonce = bytearray(i ^ j for i, | |
<filename>rig/rigbase.py
'''Rig base classes.
These classes outline a build process that consists of a preparation phase
(begin), the building of the rig (build), and a cleanup (end) phase. They also
implement all the helper methods required to make that process consistent,
while allowing you to customize rig setup to suit your facility.
Ideally, after extending these or the other base classes to conform to your pipeline,
your character TDs need only write a subclass that implements the 'build' method.
This script is their 'character rig', and should be run any time a rig change is needed.
To build a rig, their subclass should be instantiated in Maya and a call made to
the .create() method.
'''
import os
import getpass
import logging
import maya.cmds as cmds
import mpyr.lib.dag as mpDag
import mpyr.lib.ctrl as mpCtrl
import mpyr.lib.attr as mpAttr
import mpyr.lib.name as mpName
import mpyr.lib.rig as mpRig
import mpyr.lib.cache as mpCache
import mpyr.rig.limb.generic as limbGen
RIGLOG = logging.getLogger('rig')
class Rig(object):
'''This is a virtual base class for all other rigs, and is where most of the generic
rigging code is implemented. Actual rigs should inherit from a more specific subclass
such as AnimRig
Attrs:
- rigScale: Used by various functions to scale cosmetic appearances. Defaults to 1.
- limbs: a list populated during build of all limbs in the rig. The first limb
is always the world offset.
- rigNode: the top group node of the rig, created during build.
- rigName: the default name of the rig, set either by the TD or by setRigNameDefault()
- rootJoint: the rootJoint of the rig's skeleton, set automatically when importSkeleton
is called.
- geoPath: An optional path used to import a geometry.ma file during build. Set by a
TD before build.
- weightPath: an optional path to import deformer weights. Set by a TD before build.
- skeletonPath: a path to the skeleton. Set by a TD before build.
- rigVersion: an optional attribute that will be stored on the rig for bookkeeping.
Set by a TD before build.
'''
def __init__(self):
    object.__init__(self)
    RIGLOG.debug('init')
    #This is a cosmetic setting for ctrl sizes and whatnot.
    self.rigScale = 1
    #this list gets appended to when limbs are added
    self.limbs = []
    #These properties are filled in as the rig builds
    self.rigNode = None #the top transform for the rig, all things parented under this
    self.rigName = None #the name of the rig. Default set in setRigNameDefault
    self.rootJoint = '' #the root joint of the skeleton
    self.limbNode = None #The transform that limbs are parented under
    self.geoNode = None #The transform that geometry is parented under
    self.skeletonNode = None #The transform that the skeleton is parented under
    self.masterSet = None #The top level object set
    self.ctrlSet = None #The object set that will hold all the ctrls that are built
    self.cacheSet = None #The object set that will hold all cacheable nodes
    self.loadSet = None #The object set that will hold all nodes that can receive cache
    #Attrs set before build, used to import files/weights/etc. Can be set automatically based
    #on pipeline standards or a database.
    self.geoPath = ''
    self.weightPath = ''
    self.skeletonPath = ''
    self.rigVersion = '' #bookkeeping for pipeline, stored as an attr on the rigNode. Set by a TD before build if desired.
def __repr__(self):
return '%s %s' % (self.__class__.__name__, self.rigName)
def create(self):
'''Builds the rig by calling the creation methods in the correct
order.
'''
RIGLOG.info('beginning rig build')
self.begin()
RIGLOG.info('building rig')
self.build()
RIGLOG.info('ending build')
self.end()
RIGLOG.info('rig complete')
    def begin(self):
        '''Pre build actions.

        Starts a new scene, creates the top level rig node and master set,
        makes the limbs/geo/skel group transforms, wires up the visibility
        attributes, creates the world offset limb, and stamps bookkeeping
        attrs (version, date, user) on the rig node.
        '''
        cmds.file(new=True, f=True)
        RIGLOG.debug('making rig nodes')
        self.setRigNameDefault()
        self.addRigNode()
        self.addMasterSet()
        #Make some top level nodes
        self.limbNode = cmds.createNode('transform', n='limbs', p=self.rigNode)
        self.geoNode = cmds.createNode('transform', n='geo', p=self.rigNode)
        self.skeletonNode = cmds.createNode('transform', n='skel', p=self.rigNode)
        # Setup visibility attrs
        plumin = cmds.createNode('plusMinusAverage', n=self.rigNode+'Vis_plumin')
        #This kind of geo/skeleton/mesh hiding or showing mechanism is optional.
        #Some animators like it, some prefer ctrl+h, some prefer layers, etc.
        #geo
        # geoVis enum: 0=off drives overrideVisibility; values >=2 drive
        # overrideDisplayType via (geoVis - 1) computed by the plusMinusAverage
        # node (operation 2 is subtract), so 2->template, 3->reference, etc.
        geoattr = self.rigNode+'.geoVis'
        cmds.addAttr(self.rigNode, ln='geoVis', at='enum', en='off:on:template:reference:seg', k=True,dv=1 )
        cmds.setAttr( self.geoNode+'.overrideEnabled', 1 )
        cmds.connectAttr( geoattr, self.geoNode+'.overrideVisibility' )
        cmds.setAttr( plumin+'.operation', 2 )
        cmds.setAttr( plumin+'.input1D[1]', 1 )
        cmds.connectAttr( geoattr, plumin+'.input1D[0]' )
        cmds.connectAttr( plumin+'.output1D', self.geoNode+'.overrideDisplayType' )
        cmds.setAttr(geoattr, 1 )
        # rig
        # rigVis: simple 0/1 toggle for all limb ctrls
        rigattr = self.rigNode+'.rigVis'
        cmds.addAttr(self.rigNode, ln='rigVis', at='long', max=1, min=0, k=True,dv=1)
        cmds.setAttr(rigattr, 1)
        cmds.setAttr(self.limbNode+'.overrideEnabled', 1)
        cmds.connectAttr(rigattr, self.limbNode+'.overrideVisibility')
        # skel
        # skelVis: simple 0/1 toggle for the skeleton group
        skelattr = self.rigNode+'.skelVis'
        cmds.addAttr(self.rigNode, ln='skelVis', at='long', max=1, min=0, k=True,dv=1)
        cmds.setAttr(self.skeletonNode+'.overrideEnabled', 1)
        cmds.connectAttr(skelattr, self.skeletonNode+'.overrideVisibility')
        #create the world offset
        # The first limb in self.limbs is always the world offset (see class docs)
        offset = limbGen.WorldOffset()
        self.addLimb(offset)
        # rig book keeping attrs.
        # these can be useful later when debugging scenes, to know
        # where things came from and when.
        cmds.addAttr(self.rigNode,
            ln='rigVersion',
            dt='string',
            keyable=False
        )
        cmds.setAttr(self.rigNode+'.rigVersion', self.rigVersion, type='string')
        cmds.addAttr(self.rigNode,
            ln='buildDate',
            dt='string',
            keyable=False
        )
        cmds.setAttr(self.rigNode+'.buildDate', cmds.date(), type='string')
        cmds.addAttr(self.rigNode,
            ln='builtBy',
            dt='string',
            keyable=False
        )
        cmds.setAttr(self.rigNode+'.builtBy', getpass.getuser(), type='string')
def build(self):
'''All build actions. This is a virtual method, it should be implemented by
rig build classes that inherit from this object'''
raise NotImplementedError("You must implement a 'build' method in your Rig class")
def end(self):
'''Post build actions'''
#Add object sets for caching
self.addCacheSet()
self.addLoadSet()
#Add object sets used for animation tools
self.addLimbSets()
self.addAllCtrlSet()
#lock and cleanup
self.lock()
self.cleanupDanglingLimbs()
#add mirror info to every ctrl since we are in root pose now
for limbObj in self.limbs:
for ctrlNode in limbObj.ctrls:
mpRig.addMirrorInfo(ctrlNode)
def setRigNameDefault(self):
'''If nothing has set rigName this sets it to the class name. Override this function if
you want the default name for your studio's rigs to be different.
'''
if not self.rigName:
default = self.__class__.__name__
RIGLOG.warning('rig name not set, using default class name %s', default)
self.rigName = default
def addRigNode(self):
'''Creates the top level group node for a rig. All other rig DAG nodes will be
parented underneath this node. When this node is made it is set to self.rigNode.
This node is named after the rig.rigName attribute.
'''
if cmds.objExists(self.rigName):
raise RuntimeError('Could not create main rig node. Node of name %s already exists.', self.rigName)
self.rigNode = cmds.createNode('transform',n=self.rigName)
mpAttr.addAttrSwitch(self.rigNode+'.isRig',keyable=False,type='bool',value=1)
RIGLOG.debug('created root node %s', self.rigNode)
return self.rigNode
    def importSkeleton(self):
        '''Import the file specified with .skeletonPath.

        The imported nodes are grouped under self.skeletonNode. If
        self.rootJoint has not been set yet, the first direct joint child of
        the skeleton group whose name contains mpName.ROOTJOINT
        (case-insensitive) is used; a warning is logged when none is found.
        '''
        self.getFile(self.skeletonPath,underGroup=True,underGroupName=self.skeletonNode)
        #set root joint attr if not yet set
        if not self.rootJoint:
            # listRelatives returns None when there are no joint children
            children = cmds.listRelatives(self.skeletonNode,type='joint') or []
            for child in children:
                if mpName.ROOTJOINT in child.lower():
                    self.rootJoint = child
                    break
            if not self.rootJoint:
                RIGLOG.warning('root joint not found in imported skeleton %s', self.skeletonPath)
def importGeo(self):
'''Import the file specified with .geoPath'''
if self.geoPath:
self.getFile(self.geoPath,underGroup=True,underGroupName=self.geoNode)
def getFile(self,fileName,underGroup=True,underGroupName=None):
'''Manage file imports in Maya. Given a path, import that file. This is also the
place to implement resource types. For example, if you want users to be able to call
getFile('skeleton'), implement the code here that finds skeleton files in your
production database.
By default this method groups the imported nodes, which it puts under the rig group.
The underGroupName flag can be used to override the name of this new group,
otherwise the file name is used. If it is set to False then no group is made. The
new nodes are still parented under the main rig node.
If no group is made the root nodes are returned. If a group is made the group is
returned.
'''
RIGLOG.info('importing file %s', fileName)
if not os.path.exists(fileName):
raise IOError('File path %s not found, cannot getFile', fileName)
fullPath = fileName
filePath,fileName = os.path.split(fullPath)
splitFileName = fileName.split('.')
fileName = '.'.join(splitFileName[:-1]) #some people use dots in fileNames a lot
#Import file
nodeList = self.fileImport(fullPath)
rootNodes = mpDag.getRootNodes(nodeList)
RIGLOG.debug('file import done, %s new root nodes', len(rootNodes))
#if underGroup=False simply return a list of new nodes that are root nodes
if not underGroup:
for node in rootNodes:
cmds.parent(node,self.rigNode)
RIGLOG.debug('file imported under rigNode %s', self.rigNode)
return rootNodes
#If underGroup=True we must make the group. First, determine what name it will be.
if not underGroupName:
underGroupName = fileName
#Create group and parent nodes
if not cmds.objExists(underGroupName):
cmds.group(n=underGroupName,em=True,p=self.rigNode)
for node in rootNodes:
| |
depth4 else tmp687)
tmp724 = (tmp725 if depth3 else tmp699)
tmp723 = (tmp724 if depth2 else tmp699)
tmp722 = (tmp723 if depth1 else tmp719)
tmp717 = (tmp718 if s0 else tmp722)
tmp730 = (tmp700 if depth4 else tmp694)
tmp729 = (tmp730 if depth3 else tmp699)
tmp728 = (tmp729 if depth2 else tmp720)
tmp727 = (tmp728 if depth1 else tmp723)
tmp733 = (tmp700 if depth3 else tmp699)
tmp732 = (tmp733 if depth2 else tmp724)
tmp731 = (tmp732 if depth1 else tmp728)
tmp726 = (tmp727 if s0 else tmp731)
tmp716 = (tmp717 if s1 else tmp726)
tmp701 = (tmp702 if s2 else tmp716)
tmp659 = (tmp660 if s3 else tmp701)
tmp596 = (tmp597 if s4 else tmp659)
tmp742 = (tmp700 if depth3 else tmp721)
tmp741 = (tmp742 if depth2 else tmp729)
tmp740 = (tmp741 if depth1 else tmp732)
tmp745 = (tmp700 if depth3 else tmp725)
tmp744 = (tmp745 if depth2 else tmp733)
tmp743 = (tmp744 if depth1 else tmp741)
tmp739 = (tmp740 if s0 else tmp743)
tmp749 = (tmp700 if depth3 else tmp730)
tmp748 = (tmp749 if depth2 else tmp742)
tmp747 = (tmp748 if depth1 else tmp744)
tmp751 = (tmp700 if depth2 else tmp745)
tmp750 = (tmp751 if depth1 else tmp748)
tmp746 = (tmp747 if s0 else tmp750)
tmp738 = (tmp739 if s1 else tmp746)
tmp758 = (tmp635 if depth5 else tmp682)
tmp757 = (tmp758 if depth4 else tmp700)
tmp756 = (tmp757 if depth3 else tmp700)
tmp755 = (tmp756 if depth2 else tmp749)
tmp754 = (tmp755 if depth1 else tmp751)
tmp763 = (tmp635 if depth5 else tmp688)
tmp762 = (tmp763 if depth4 else tmp700)
tmp761 = (tmp762 if depth3 else tmp700)
tmp760 = (tmp761 if depth2 else tmp700)
tmp759 = (tmp760 if depth1 else tmp755)
tmp753 = (tmp754 if s0 else tmp759)
tmp769 = (tmp635 if depth5 else tmp695)
tmp768 = (tmp769 if depth4 else tmp700)
tmp767 = (tmp768 if depth3 else tmp700)
tmp766 = (tmp767 if depth2 else tmp756)
tmp765 = (tmp766 if depth1 else tmp760)
tmp773 = (tmp635 if depth4 else tmp700)
tmp772 = (tmp773 if depth3 else tmp700)
tmp771 = (tmp772 if depth2 else tmp761)
tmp770 = (tmp771 if depth1 else tmp766)
tmp764 = (tmp765 if s0 else tmp770)
tmp752 = (tmp753 if s1 else tmp764)
tmp737 = (tmp738 if s2 else tmp752)
tmp779 = (tmp773 if depth3 else tmp757)
tmp778 = (tmp779 if depth2 else tmp767)
tmp777 = (tmp778 if depth1 else tmp771)
tmp782 = (tmp773 if depth3 else tmp762)
tmp781 = (tmp782 if depth2 else tmp772)
tmp780 = (tmp781 if depth1 else tmp778)
tmp776 = (tmp777 if s0 else tmp780)
tmp786 = (tmp773 if depth3 else tmp768)
tmp785 = (tmp786 if depth2 else tmp779)
tmp784 = (tmp785 if depth1 else tmp781)
tmp788 = (tmp773 if depth2 else tmp782)
tmp787 = (tmp788 if depth1 else tmp785)
tmp783 = (tmp784 if s0 else tmp787)
tmp775 = (tmp776 if s1 else tmp783)
tmp794 = (tmp635 if depth4 else tmp758)
tmp793 = (tmp794 if depth3 else tmp773)
tmp792 = (tmp793 if depth2 else tmp786)
tmp791 = (tmp792 if depth1 else tmp788)
tmp798 = (tmp635 if depth4 else tmp763)
tmp797 = (tmp798 if depth3 else tmp773)
tmp796 = (tmp797 if depth2 else tmp773)
tmp795 = (tmp796 if depth1 else tmp792)
tmp790 = (tmp791 if s0 else tmp795)
tmp803 = (tmp635 if depth4 else tmp769)
tmp802 = (tmp803 if depth3 else tmp773)
tmp801 = (tmp802 if depth2 else tmp793)
tmp800 = (tmp801 if depth1 else tmp796)
tmp806 = (tmp635 if depth3 else tmp773)
tmp805 = (tmp806 if depth2 else tmp797)
tmp804 = (tmp805 if depth1 else tmp801)
tmp799 = (tmp800 if s0 else tmp804)
tmp789 = (tmp790 if s1 else tmp799)
tmp774 = (tmp775 if s2 else tmp789)
tmp736 = (tmp737 if s3 else tmp774)
tmp813 = (tmp635 if depth3 else tmp794)
tmp812 = (tmp813 if depth2 else tmp802)
tmp811 = (tmp812 if depth1 else tmp805)
tmp816 = (tmp635 if depth3 else tmp798)
tmp815 = (tmp816 if depth2 else tmp806)
tmp814 = (tmp815 if depth1 else tmp812)
tmp810 = (tmp811 if s0 else tmp814)
tmp820 = (tmp635 if depth3 else tmp803)
tmp819 = (tmp820 if depth2 else tmp813)
tmp818 = (tmp819 if depth1 else tmp815)
tmp822 = (tmp635 if depth2 else tmp816)
tmp821 = (tmp822 if depth1 else tmp819)
tmp817 = (tmp818 if s0 else tmp821)
tmp809 = (tmp810 if s1 else tmp817)
tmp831 = (tmp636 if depth7 else tmp649)
tmp830 = (tmp831 if depth6 else tmp635)
tmp829 = (tmp830 if depth5 else tmp635)
tmp828 = (tmp829 if depth4 else tmp635)
tmp827 = (tmp828 if depth3 else tmp635)
tmp826 = (tmp827 if depth2 else tmp820)
tmp825 = (tmp826 if depth1 else tmp822)
tmp838 = (tmp636 if depth7 else tmp641)
tmp837 = (tmp838 if depth6 else tmp635)
tmp836 = (tmp837 if depth5 else tmp635)
tmp835 = (tmp836 if depth4 else tmp635)
tmp834 = (tmp835 if depth3 else tmp635)
tmp833 = (tmp834 if depth2 else tmp635)
tmp832 = (tmp833 if depth1 else tmp826)
tmp824 = (tmp825 if s0 else tmp832)
tmp846 = (tmp636 if depth7 else tmp640)
tmp845 = (tmp846 if depth6 else tmp635)
tmp844 = (tmp845 if depth5 else tmp635)
tmp843 = (tmp844 if depth4 else tmp635)
tmp842 = (tmp843 if depth3 else tmp635)
tmp841 = (tmp842 if depth2 else tmp827)
tmp840 = (tmp841 if depth1 else tmp833)
tmp852 = (tmp636 if depth6 else tmp635)
tmp851 = (tmp852 if depth5 else tmp635)
tmp850 = (tmp851 if depth4 else tmp635)
tmp849 = (tmp850 if depth3 else tmp635)
tmp848 = (tmp849 if depth2 else tmp834)
tmp847 = (tmp848 if depth1 else tmp841)
tmp839 = (tmp840 if s0 else tmp847)
tmp823 = (tmp824 if s1 else tmp839)
tmp808 = (tmp809 if s2 else tmp823)
tmp858 = (tmp850 if depth3 else tmp828)
tmp857 = (tmp858 if depth2 else tmp842)
tmp856 = (tmp857 if depth1 else tmp848)
tmp861 = (tmp850 if depth3 else tmp835)
tmp860 = (tmp861 if depth2 else tmp849)
tmp859 = (tmp860 if depth1 else tmp857)
tmp855 = (tmp856 if s0 else tmp859)
tmp865 = (tmp850 if depth3 else tmp843)
tmp864 = (tmp865 if depth2 else tmp858)
tmp863 = (tmp864 if depth1 else tmp860)
tmp867 = (tmp850 if depth2 else tmp861)
tmp866 = (tmp867 if depth1 else tmp864)
tmp862 = (tmp863 if s0 else tmp866)
tmp854 = (tmp855 if s1 else tmp862)
tmp873 = (tmp851 if depth4 else tmp829)
tmp872 = (tmp873 if depth3 else tmp850)
tmp871 = (tmp872 if depth2 else tmp865)
tmp870 = (tmp871 if depth1 else tmp867)
tmp877 = (tmp851 if depth4 else tmp836)
tmp876 = (tmp877 if depth3 else tmp850)
tmp875 = (tmp876 if depth2 else tmp850)
tmp874 = (tmp875 if depth1 else tmp871)
tmp869 = (tmp870 if s0 else tmp874)
tmp882 = (tmp851 if depth4 else tmp844)
tmp881 = (tmp882 if depth3 else tmp850)
tmp880 = (tmp881 if depth2 else tmp872)
tmp879 = (tmp880 if depth1 else tmp875)
tmp885 = (tmp851 if depth3 else tmp850)
tmp884 = (tmp885 if depth2 else tmp876)
tmp883 = (tmp884 if depth1 else tmp880)
tmp878 = (tmp879 if s0 else tmp883)
tmp868 = (tmp869 if s1 else tmp878)
tmp853 = (tmp854 if s2 else tmp868)
tmp807 = (tmp808 if s3 else tmp853)
tmp735 = (tmp736 if s4 else tmp807)
tmp893 = (tmp851 if depth3 else tmp873)
tmp892 = (tmp893 if depth2 else tmp881)
tmp891 = (tmp892 if depth1 else tmp884)
tmp896 = (tmp851 if depth3 else tmp877)
tmp895 = (tmp896 if depth2 else tmp885)
tmp894 = (tmp895 if depth1 else tmp892)
tmp890 = (tmp891 if s0 else tmp894)
tmp900 = (tmp851 if depth3 else tmp882)
tmp899 = (tmp900 if depth2 else tmp893)
tmp898 = (tmp899 if depth1 else tmp895)
tmp902 = (tmp851 if depth2 else tmp896)
tmp901 = (tmp902 if depth1 else tmp899)
tmp897 = (tmp898 if s0 else tmp901)
tmp889 = (tmp890 if s1 else tmp897)
tmp909 = (tmp852 if depth5 else tmp830)
tmp908 = (tmp909 | |
* x: (L, 14, D)
* mask: boolean mask of (L, 14)
Returns unit vectors and norm.
"""
x = x - x[:, 1].unsqueeze(1)
norm = torch.norm(x, dim=-1, keepdim=True)
x_norm = x / (norm+eps)
if mask:
return x_norm[mask], norm[mask]
return x_norm, norm
def orient_aa(x, mask=None, eps=1e-7):
    """ Calculates unit vectors and norms of features for backbone.
    Inputs:
    * x: (L, 14, D). Coordinates in Sidechainnet format.
    Returns unit vectors (5) and norms (3).
    """
    device = x.device
    # feature containers: 5 direction features, 3 distinct norms
    vecs = torch.zeros(5, x.shape[0], 3, device=device)
    norms = torch.zeros(3, x.shape[0], device=device)
    # feat 0: CB - CA
    vecs[0] = x[:, 4] - x[:, 1]
    norms[0] = vecs[0].norm(dim=-1)
    vecs[0] /= norms[0].unsqueeze(dim=-1) + eps
    # feat 1: next CA direction (last residue has no successor, left as zeros)
    vecs[1, :-1] = x[:-1, 1] - x[1:, 1]
    norms[1, :-1] = vecs[1, :-1].norm(dim=-1)
    vecs[1, :-1] /= norms[1, :-1].unsqueeze(dim=-1) + eps
    # feat 2: feat 1 reversed
    vecs[2] = -vecs[1]
    # feat 3: previous CA direction (first residue has no predecessor)
    # NOTE(review): this uses the same difference expression as feat 1 but
    # shifted one residue - confirm the sign is intended.
    vecs[3, 1:] = x[:-1, 1] - x[1:, 1]
    norms[2, 1:] = vecs[3, 1:].norm(dim=-1)
    vecs[3, 1:] /= norms[2, 1:].unsqueeze(dim=-1) + eps
    # feat 4: feat 3 reversed
    vecs[4] = -vecs[3]
    return vecs, norms
def chain2atoms(x, mask=None):
    """ Broadcast per-residue features from (L, other) to per-atom (L, C, other),
    with C = 14 atoms per residue. Optionally select entries with a boolean mask.
    """
    device, precise = x.device, x.type()
    # multiply a ones tensor by the unsqueezed input so every atom slot
    # carries its residue's feature vector
    expanded = torch.ones(x.shape[0], 14, *x.shape[1:]).type(precise).to(device) * x.unsqueeze(1)
    return expanded[mask] if mask is not None else expanded
def from_encode_to_pred(whole_point_enc, use_fourier=False, embedd_info=None, needed_info=None, vec_dim=3):
    """ Turns the encoding from the above func into a label / prediction format.
    Containing only the essential for position recovery (radial unit vec + norm)
    Inputs: input_tuple containing:
    * whole_point_enc: (atoms, vector_dims+scalar_dims)
        Same shape from the function above.
        Radial unit vector must be the first vector dims
    * embedd_info: dict. contains the number of scalar and vector feats.
    """
    n_vec_feats = vec_dim * embedd_info["point_n_vectors"]
    # position where the plain (non-fourier) norm column lives
    norm_col = 2 * len(needed_info["atom_pos_scales"]) + n_vec_feats
    if use_fourier:
        norm = decode_dist(whole_point_enc[:, n_vec_feats:norm_col+1],
                           scales=needed_info["atom_pos_scales"],
                           include_self=False)
    else:
        norm = whole_point_enc[:, norm_col:norm_col+1]
    # unit radial vector followed by its norm
    return torch.cat([whole_point_enc[:, :3], norm], dim=-1)
def encode_whole_bonds(x, x_format="coords", embedd_info=None,
                       needed_info=None, free_mem=False, eps=1e-7):
    """ Given some coordinates, and the needed info,
    encodes the bonds from point information.
    * x: (N, 3) or prediction format
    * x_format: one of ["coords" or "prediction"]
    * embedd_info: dict. contains the needed embedding info
    * needed_info: dict. contains additional needed info
        { cutoffs: list. cutoff distances for bonds.
                   can be a string for the k closest (ex: "30_closest"),
                   empty list for just covalent.
          bond_scales: list. fourier encodings
          adj_degree: int. degree of adj (2 means adj of adj is my adj)
                      0 for no adjacency
        }
    * free_mem: whether to delete variables
    * eps: constant for numerical stability
    Returns: (bond indices (2, B), bond encodings (B, feats), embedd_info dict).
    """
    # Fix: dict defaults were previously mutable default arguments
    # (shared across calls); None sentinels give identical values per call.
    if embedd_info is None:
        embedd_info = {}
    if needed_info is None:
        needed_info = {"cutoffs": [2, 5, 10],
                       "bond_scales": [.5, 1, 2],
                       "adj_degree": 1}
    device, precise = x.device, x.type()
    # convert to 3d coords if passed as preds
    if x_format == "encode":
        pred_x = from_encode_to_pred(x, embedd_info=embedd_info, needed_info=needed_info)
        x = pred_x[:, :3] * pred_x[:, 3:4]
    # encode bonds
    # 1. BONDS: find the covalent bond_indices - allow arg -> DRY
    native = None
    if "prot_covalent_bond" in needed_info.keys():
        native = True
        # Fix: previously read needed_info["covalent_bond"], which cannot
        # exist given the key tested just above - TODO confirm intended key.
        native_bonds = needed_info["prot_covalent_bond"]
    elif needed_info["adj_degree"]:
        native = True
        native_bonds = prot_covalent_bond(needed_info["seq"], needed_info["adj_degree"])
    if native:
        native_idxs, native_attrs = native_bonds[0].to(device), native_bonds[1].to(device)
    # determine kind of cutoff (hard distance threshold or closest points)
    closest = None
    if len(needed_info["cutoffs"]) > 0:
        cutoffs = needed_info["cutoffs"].copy()
        if sum( isinstance(ci, str) for ci in cutoffs ) > 0:
            cutoffs = [-1e-3] # negative so no bond is taken by bucketizing
            closest = int( needed_info["cutoffs"][0].split("_")[0] )
        # points under cutoff = d(i - j) < X
        cutoffs = torch.tensor(cutoffs, device=device).type(precise)
        dist_mat = torch.cdist(x, x, p=2)
    # normal buckets
    bond_buckets = torch.zeros(*x.shape[:-1], x.shape[-2], device=device).type(precise)
    if len(needed_info["cutoffs"]) > 0 and not closest:
        # count from latest degree of adjacency given
        bond_buckets = torch.bucketize(dist_mat, cutoffs)
        if native:
            bond_buckets[native_idxs[0], native_idxs[1]] = cutoffs.shape[0]
        # find the indexes - symmetric and we dont want the diag
        bond_buckets += cutoffs.shape[0] * torch.eye(bond_buckets.shape[0], device=device).long()
        close_bond_idxs = ( bond_buckets < cutoffs.shape[0] ).nonzero().t()
        # move away from poses reserved for native
        bond_buckets[close_bond_idxs[0], close_bond_idxs[1]] += needed_info["adj_degree"]+1
    # the K closest (covalent bonds excluded) are considered bonds
    elif closest:
        k = closest
        # copy dist_mat and mask the covalent bonds out
        masked_dist_mat = dist_mat.clone()
        masked_dist_mat += torch.eye(masked_dist_mat.shape[0], device=device) * torch.amax(masked_dist_mat)
        masked_dist_mat[native_idxs[0], native_idxs[1]] = masked_dist_mat[0,0].clone()
        # argsort by distance || *(-1) so min is first
        _, sorted_col_idxs = torch.topk(-masked_dist_mat, k=k, dim=-1)
        # cat idxs and repeat row idx to match number of column idx
        sorted_col_idxs = rearrange(sorted_col_idxs, '... n k -> ... (n k)')
        sorted_row_idxs = torch.repeat_interleave( torch.arange(dist_mat.shape[0]).long(), repeats=k ).to(device)
        close_bond_idxs = torch.stack([ sorted_row_idxs, sorted_col_idxs ], dim=0)
        # move away from poses reserved for native
        bond_buckets = torch.ones_like(dist_mat) * (needed_info["adj_degree"]+1)
    # merge all bonds
    # Fix: previously whole_bond_idxs could be left undefined when cutoffs
    # were given but no distance bond was found; fall back to native bonds.
    if len(needed_info["cutoffs"]) > 0 and close_bond_idxs.shape[0] > 0:
        whole_bond_idxs = torch.cat([native_idxs, close_bond_idxs], dim=-1)
    else:
        whole_bond_idxs = native_idxs
    # 2. ATTRS: encode bond -> attrs
    bond_vecs = x[ whole_bond_idxs[0] ] - x[ whole_bond_idxs[1] ]
    bond_norms = torch.norm(bond_vecs, dim=-1)
    bond_vecs /= (bond_norms + eps).unsqueeze(-1)
    bond_norms_enc = encode_dist(bond_norms, scales=needed_info["bond_scales"]).squeeze()
    if native:
        bond_buckets[native_idxs[0], native_idxs[1]] = native_attrs
    bond_attrs = bond_buckets[whole_bond_idxs[0] , whole_bond_idxs[1]]
    # pack scalars and vectors - extra token for covalent bonds
    bond_n_vectors = 1
    bond_n_scalars = (2 * len(needed_info["bond_scales"]) + 1) + 1 # last one is an embedd of size 1+len(cutoffs)
    whole_bond_enc = torch.cat([bond_vecs, # 1 vector - no need of reverse - we do 2x bonds (symmetry)
                                # scalars
                                bond_norms_enc, # 2 * len(scales)
                                (bond_attrs-1).unsqueeze(-1) # 1
                               ], dim=-1)
    # free gpu mem
    # Fix: the original `del` referenced `native_bond_idxs`, a name that
    # never exists (NameError whenever free_mem=True), and deleted names
    # that are only bound on some paths. Guard each group instead.
    if free_mem:
        del bond_buckets, bond_norms_enc, bond_vecs
        if native:
            del native_bonds
        if len(needed_info["cutoffs"]) > 0:
            del dist_mat, close_bond_idxs
        if closest:
            del masked_dist_mat, sorted_col_idxs, sorted_row_idxs
    embedd_info = {"bond_n_vectors": bond_n_vectors,
                   "bond_n_scalars": bond_n_scalars,
                   "bond_embedding_nums": [ len(needed_info["cutoffs"]) + needed_info["adj_degree"] ]} # extra one for covalent (default)
    return whole_bond_idxs, whole_bond_enc, embedd_info
def encode_whole_protein(seq, true_coords, angles, padding_seq,
needed_info = { "cutoffs": [2, 5, 10],
"bond_scales": [0.5, 1, 2]}, free_mem=False):
""" Encodes a whole protein. In points + vectors. """
device, precise = true_coords.device, true_coords.type()
#################
# encode points #
#################
cloud_mask = torch.tensor(scn_cloud_mask(seq[:-padding_seq or None])).bool().to(device)
flat_mask = rearrange(cloud_mask, 'l c -> (l c)')
# embedd everything
# general position embedding
center_coords = true_coords - true_coords.mean(dim=0)
pos_unit_norms = torch.norm(center_coords, dim=-1, keepdim=True)
pos_unit_vecs = center_coords / pos_unit_norms
pos_unit_norms_enc = encode_dist(pos_unit_norms, scales=needed_info["atom_pos_scales"]).squeeze()
# reformat coordinates to scn (L, 14, 3) - TODO: solve if padding=0
coords_wrap = rearrange(center_coords, '(l c) d -> l c d', c=14)[:-padding_seq or None]
# position in backbone embedding
aa_pos = encode_dist( torch.arange(len(seq[:-padding_seq or None]), device=device).float(), scales=needed_info["aa_pos_scales"])
atom_pos = chain2atoms(aa_pos)[cloud_mask]
# atom identity embedding
atom_id_embedds = torch.stack([SUPREME_INFO[k]["atom_id_embedd"] for k in seq[:-padding_seq or None]],
dim=0)[cloud_mask].to(device)
# aa embedding
seq_int = torch.tensor([AAS2NUM[aa] for aa in seq[:-padding_seq or None]], device=device).long()
aa_id_embedds = chain2atoms(seq_int, mask=cloud_mask)
# CA - SC distance
dist2ca_vec, dist2ca_norm = dist2ca(coords_wrap)
dist2ca_norm_enc = encode_dist(dist2ca_norm, scales=needed_info["dist2ca_norm_scales"]).squeeze()
# BACKBONE feats
vecs, norms = orient_aa(coords_wrap)
bb_vecs_atoms = chain2atoms(torch.transpose(vecs, 0, 1), mask=cloud_mask)
bb_norms_atoms = chain2atoms(torch.transpose(norms, 0, 1), mask=cloud_mask)
bb_norms_atoms_enc = encode_dist(bb_norms_atoms, scales=[0.5])
################
# encode bonds #
################
bond_info = encode_whole_bonds(x = coords_wrap[cloud_mask],
x_format = "coords",
embedd_info = {},
needed_info = needed_info )
whole_bond_idxs, whole_bond_enc, bond_embedd_info = bond_info
#########
# merge #
#########
# concat so that final is [vector_dims, scalar_dims]
point_n_vectors = 1 + 1 + 5
point_n_scalars = 2*len(needed_info["atom_pos_scales"]) + 1 +\
2*len(needed_info["aa_pos_scales"]) + 1 +\
2*len(needed_info["dist2ca_norm_scales"]) + 1+\
rearrange(bb_norms_atoms_enc, 'atoms feats encs -> atoms (feats encs)').shape[1] +\
2 # the last 2 are to be embedded yet
whole_point_enc = torch.cat([ pos_unit_vecs[ :-padding_seq*14 or None ][ flat_mask ], # 1
dist2ca_vec[cloud_mask], # 1
rearrange(bb_vecs_atoms, 'atoms n d -> atoms (n d)'), # 5
# scalars
pos_unit_norms_enc[ :-padding_seq*14 or None ][ flat_mask ], # 2n+1
atom_pos, # 2n+1
dist2ca_norm_enc[cloud_mask], # 2n+1
rearrange(bb_norms_atoms_enc, 'atoms feats encs -> atoms (feats encs)'), # 2n+1
atom_id_embedds.unsqueeze(-1),
aa_id_embedds.unsqueeze(-1) ], dim=-1) # the last 2 are yet to be embedded
if free_mem:
del pos_unit_vecs, dist2ca_vec, bb_vecs_atoms, pos_unit_norms_enc, cloud_mask,\
atom_pos, dist2ca_norm_enc, bb_norms_atoms_enc, | |
<filename>vermouth/tests/test_edge_tuning.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 University of Groningen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`vermouth.edge_tuning` module.
"""
# The redefined-outer-name check from pylint wrongly catches the use of pytest
# fixtures.
# pylint: disable=redefined-outer-name
import copy
import pytest
import numpy as np
import networkx as nx
import vermouth
from vermouth import edge_tuning
from vermouth.molecule import Choice
from vermouth.utils import distance
@pytest.fixture
def molecule_for_pruning():
    """
    Build arbitrary graph to be pruned.

    Every node carries a 'name' attribute equal to its key.
    """
    edges = [
        ('A', 'B'),
        ('C', 'A'),
        ('A', 'D'),
        ('B', 'D'),
        ('C', 'D'),
        ('E', 'B'),
        ('D', 'F'),
        ('E', 'F'),
        ('G', 'A'),
        ('G', 'C'),
    ]
    graph = nx.Graph(edges)
    for node_key, attributes in graph.nodes.items():
        attributes['name'] = node_key
    return graph
@pytest.fixture
def simple_protein():
    """
    Build a protein-like molecule graph with possible cystein bridges.

    The molecule does not have coordinates. Each residue contributes a
    backbone (BB) node and a side chain (SG) node; some residues are CYS,
    some are OTHER, so selections can distinguish them.
    """
    graph = nx.Graph()
    graph.add_nodes_from((
        (0, {'atomname': 'BB', 'resname': 'CYS'}),
        (1, {'atomname': 'SG', 'resname': 'CYS'}),
        (2, {'atomname': 'BB', 'resname': 'OTHER'}),
        (3, {'atomname': 'SG', 'resname': 'OTHER'}),
        (4, {'atomname': 'BB', 'resname': 'CYS'}),
        (5, {'atomname': 'SG', 'resname': 'CYS'}),
        (6, {'atomname': 'BB', 'resname': 'CYS'}),
        (7, {'atomname': 'SG', 'resname': 'CYS'}),
        (8, {'atomname': 'BB', 'resname': 'CYS'}),
        (9, {'atomname': 'SG', 'resname': 'CYS'}),
        (10, {'atomname': 'BB', 'resname': 'CYS'}),
        (11, {'atomname': 'SG', 'resname': 'CYS'}),
        (12, {'atomname': 'BB', 'resname': 'CYS'}),
        (13, {'atomname': 'SG', 'resname': 'CYS'}),
        (14, {'atomname': 'BB', 'resname': 'OTHER'}),
        (15, {'atomname': 'SG', 'resname': 'OTHER'}),
        (16, {'atomname': 'BB', 'resname': 'OTHER'}),
        (17, {'atomname': 'SG', 'resname': 'OTHER'}),
    ))
    graph.add_edges_from((
        # Chain edges connecting the backbone and the side chains nodes
        (0, 1), (2, 3), (2, 4), (4, 5), (4, 6), (6, 7), (6, 8), (8, 9),
        (8, 10), (10, 11), (10, 12), (12, 13), (12, 14), (14, 15),
        (14, 16), (16, 17),
        # Bridges between side chains, including some that should stay
        # depending on which residues the test selects
        (1, 17), (3, 15), (5, 13), (7, 11),
    ))
    return graph
@pytest.fixture
def coordinate_array():
    """
    Build an array of coordinates for 36 points.

    The coordinates are random, but preset in the sense that they were rolled
    once and the same array is always returned.

    Note: the literals below are float32 values printed as doubles; the
    returned ``np.array`` itself uses the default float64 dtype.
    """
    # This array of coordinates was generated using:
    # coordinates = (
    #     np.random.uniform(low=-2, high=2, size=(36, 3))
    #     .astype(np.float32)
    #     .round(2)
    # )
    # coordinates.tolist()
    return np.array([
        [0.5400000214576721, 1.690000057220459, 1.590000033378601],
        [1.7799999713897705, -0.10000000149011612, 0.41999998688697815],
        [1.9700000286102295, 1.409999966621399, 0.6000000238418579],
        [0.7300000190734863, 1.059999942779541, 1.1100000143051147],
        [1.3700000047683716, -2.0, 0.7799999713897705],
        [0.5899999737739563, 1.4299999475479126, 1.809999942779541],
        [-1.7400000095367432, -0.5400000214576721, 0.8500000238418579],
        [0.4300000071525574, -1.309999942779541, -1.7899999618530273],
        [-1.3300000429153442, 1.5399999618530273, -0.25],
        [0.8799999952316284, -1.9299999475479126, 1.2899999618530273],
        [1.7799999713897705, 1.5299999713897705, 1.690000057220459],
        [-1.7100000381469727, 0.20999999344348907, 0.8199999928474426],
        [0.07999999821186066, -1.1200000047683716, 1.399999976158142],
        [-0.3199999928474426, -0.49000000953674316, 0.9599999785423279],
        [0.7400000095367432, -1.8200000524520874, 1.1799999475479126],
        [1.0399999618530273, -1.7000000476837158, -1.6399999856948853],
        [0.4000000059604645, -1.0700000524520874, 1.25],
        [1.2200000286102295, 1.440000057220459, -0.6399999856948853],
        [-0.47999998927116394, -1.3799999952316284, 1.3200000524520874],
        [-0.46000000834465027, -0.07000000029802322, -0.5199999809265137],
        [0.05000000074505806, 0.5799999833106995, 1.659999966621399],
        [1.090000033378601, -0.17000000178813934, -0.8600000143051147],
        [1.159999966621399, 0.6499999761581421, 0.15000000596046448],
        [-0.3400000035762787, 0.9200000166893005, -0.07000000029802322],
        [0.17000000178813934, 1.2300000190734863, 0.23999999463558197],
        [1.559999942779541, 1.5800000429153442, -0.3100000023841858],
        [1.059999942779541, -0.6700000166893005, -0.4300000071525574],
        [-1.059999942779541, -0.3499999940395355, 1.9299999475479126],
        [-1.6299999952316284, 0.5299999713897705, -0.5299999713897705],
        [1.3799999952316284, -1.2699999809265137, 0.7400000095367432],
        [-1.4900000095367432, 1.6799999475479126, 0.41999998688697815],
        [1.159999966621399, -1.809999942779541, 1.2000000476837158],
        [-0.17000000178813934, -0.28999999165534973, 1.7899999618530273],
        [-1.9500000476837158, -1.25, 1.2200000286102295],
        [-0.05000000074505806, 0.8899999856948853, -1.350000023841858],
        [1.5099999904632568, 1.3300000429153442, 0.11999999731779099],
    ])
@pytest.fixture
def multi_molecules(coordinate_array):
    """
    Build 6 molecules with 6 atoms each. Coordinates are populated under the
    `coords` attribute key with the coordinates produced by
    :func:`coordinate_array`.

    Each node also receives a 'serial' attribute that increments globally
    across all molecules (0..35).
    """
    molecules = []
    for _ in range(6):
        molecule = vermouth.molecule.Molecule()
        molecule.add_nodes_from([(idx, {'atomid': idx, 'resid': 1})
                                 for idx in range(6)])
        molecules.append(molecule)
    # flat view over every node of every molecule, in molecule order
    iter_nodes = (
        node
        for molecule in molecules
        for node in molecule.nodes.values()
    )
    for idx, (node, coords) in enumerate(zip(iter_nodes, coordinate_array)):
        node['coords'] = coords
        node['serial'] = idx
    return molecules
class TestPruneEdgesBetweenSelection:
    """
    Tests for :func:`edge_tuning.prune_edges_between_selections`.
    """
    @staticmethod
    @pytest.fixture
    def molecule_pruned(molecule_for_pruning):
        """
        Graph with edges pruned by
        :func:`edge_tuning.prune_edges_between_selections`.

        Node 'G' appears in both selections.
        """
        graph = copy.deepcopy(molecule_for_pruning)
        selection_a = ['A', 'B', 'G']
        selection_b = ['C', 'D', 'G']
        edge_tuning.prune_edges_between_selections(
            graph, selection_a, selection_b
        )
        return graph
    @staticmethod
    @pytest.mark.parametrize('edge', [
        # each edge crosses from selection_a to selection_b
        ['A', 'C'], ['A', 'D'], ['B', 'D']
    ])
    def test_prune_edges_between_selections_removed(molecule_pruned, edge):
        """
        Make sure that the edges pruned by
        :func:`edge_tuning.prune_edges_between_selections` are not present
        in the final graph.
        """
        assert edge not in molecule_pruned.edges
    @staticmethod
    @pytest.mark.parametrize('edge', [
        ['A', 'B'], # Both in selection_a
        ['C', 'D'], # Both in selection_b
        ['B', 'E'], # E not in selections
        ['D', 'F'], # F not in selections
        ['E', 'F'], # None of E and F in selections
    ])
    def test_prune_edges_between_selections_kept(molecule_pruned, edge):
        """
        Make sure edges that should not be pruned by
        :func:`edge_tuning.prune_edges_between_selections` are still present in
        the final graph.
        """
        assert edge in molecule_pruned.edges
# NOTE(review): class name has a typo ("Prude" -> "Prune"); kept as-is since
# renaming would change pytest's reported test identifiers.
class TestPrudeEdgesWithSelectors:
    """
    Tests for :func:`edge_tuning.prune_edges_with_selectors`.

    Relies on ``dummy_selector_a`` / ``dummy_selector_b`` defined elsewhere
    in this module.
    """
    @staticmethod
    @pytest.fixture
    def molecule_pruned_one_selector(molecule_for_pruning):
        """
        Graph with edges pruned by :func:`edge_tuning.prune_edges_with_selectors`
        called with only :func:`dummy_selector_a`.
        """
        edge_tuning.prune_edges_with_selectors(molecule_for_pruning, dummy_selector_a)
        return molecule_for_pruning
    @staticmethod
    @pytest.fixture
    def molecule_pruned_two_selectors(molecule_for_pruning):
        """
        Graph with edges pruned by :func:`edge_tuning.prune_edges_with_selectors`
        called with both :func:`dummy_selector_a` and :func:`dummy_selector_b`.
        """
        edge_tuning.prune_edges_with_selectors(
            molecule_for_pruning, dummy_selector_a, dummy_selector_b
        )
        return molecule_for_pruning
    @staticmethod
    @pytest.mark.parametrize('edge', [
        ['A', 'B'], ['A', 'G'],
    ])
    def test_prune_edges_with_one_selector_removed(molecule_pruned_one_selector, edge):
        """
        Make sure that the edges pruned by
        :func:`edge_tuning.prune_edges_with_selectors` with a single selector
        provided are not present in the final graph.
        """
        assert edge not in molecule_pruned_one_selector.edges
    @staticmethod
    @pytest.mark.parametrize('edge', [
        ['A', 'C'], ['A', 'D'], ['B', 'D'],
    ])
    def test_prune_edges_with_two_selectors_removed(molecule_pruned_two_selectors, edge):
        """
        Make sure that the edges pruned by
        :func:`edge_tuning.prune_edges_with_selectors` with two selectors provided
        are not present in the final graph.
        """
        assert edge not in molecule_pruned_two_selectors.edges
    @staticmethod
    @pytest.mark.parametrize('edge', [
        ['A', 'B'], # Both in selection_a
        ['C', 'D'], # Both in selection_b
        ['B', 'E'], # E not in selections
        ['D', 'F'], # F not in selections
        ['E', 'F'], # None of E and F in selections
    ])
    def test_prune_edges_with_two_selectors_kept(molecule_pruned_two_selectors, edge):
        """
        Make sure edges that should not be pruned by
        :func:`edge_tuning.prune_edges_with_selectors` with two selectors are still
        present in the final graph.
        """
        assert edge in molecule_pruned_two_selectors.edges
class TestAddEdgesAtDistance:
    """
    Tests for :func:`edge_tuning.add_edges_at_distance`.
    """
    @staticmethod
    @pytest.fixture
    def protein_with_coords(coordinate_array):
        """
        Graph whose nodes carry coordinates, with edges created by
        :func:`edge_tuning.add_edges_at_distance` between the first six
        nodes and the remaining ones.
        """
        graph = nx.Graph()
        nodes = [
            (index, {'coord': position})
            for index, position in enumerate(coordinate_array)
        ]
        graph.add_nodes_from(nodes)
        edge_tuning.add_edges_at_distance(
            graph, 2.0, range(6), range(6, 37), attribute='coord'
        )
        return graph

    @staticmethod
    @pytest.mark.parametrize('edge', [
        (0, 10), (0, 20), (0, 22), (0, 24), (0, 35), (1, 16), (1, 17), (1, 21),
        (1, 22), (1, 25), (1, 26), (1, 29), (1, 31), (1, 35), (2, 10), (2, 17),
        (2, 22), (2, 24), (2, 25), (2, 35), (3, 10), (3, 13), (3, 17), (3, 20),
        (3, 22), (3, 23), (3, 24), (3, 25), (3, 32), (3, 35), (4, 9), (4, 12),
        (4, 14), (4, 16), (4, 26), (4, 29), (4, 31), (5, 10), (5, 20), (5, 22),
        (5, 24), (5, 32), (5, 35)
    ])
    def test_add_edges_at_distance(protein_with_coords, edge):
        """
        Every expected edge must have been added by
        :func:`edge_tuning.add_edges_at_distance`.
        """
        assert edge in protein_with_coords.edges

    @staticmethod
    def test_add_edges_at_distance_num(protein_with_coords):
        """
        :func:`edge_tuning.add_edges_at_distance` must add exactly the
        expected number of edges.

        Only meaningful when :func:`test_add_edges_at_distance` passes too.
        """
        assert len(protein_with_coords.edges) == 43
class TestAddInterMoleculeEdges:
    """
    Tests for :func:`add_inter_molecule_edges`.
    """
    @staticmethod
    @pytest.fixture
    def multi_molecules_linked(multi_molecules):
        """
        Merge the molecules from :func:`multi_molecules` using
        :func:`edge_tuning.add_inter_molecule_edges`.

        Each edge is a pair of ``(molecule_index, node_key)`` endpoints.
        """
        edges = [
            ((0, 1), (4, 2)),
            ((4, 3), (5, 0)),
            ((2, 0), (3, 1)),
            ((3, 2), (2, 4)),
            ((1, 1), (1, 2)),
        ]
        return edge_tuning.add_inter_molecule_edges(multi_molecules, edges)

    @staticmethod
    def test_add_inter_molecule_edges_nmols(multi_molecules_linked):
        """
        Test that :func:`edge_tuning.add_inter_molecule_edges` produces
        the expected number of molecules.
        """
        # Leftover debug print removed; pytest already shows the value on failure.
        assert len(multi_molecules_linked) == 3

    @staticmethod
    @pytest.mark.parametrize('mol, edge', (
        (0, (1, 8)),
        (0, (9, 12)),
        (2, (0, 7)),
        (2, (8, 4)),
        (1, (1, 2)),
    ))
    def test_add_inter_molecule_edges_edges(multi_molecules_linked, mol, edge):
        """
        Test that :func:`edge_tuning.add_inter_molecule_edges` creates the
        expected edges.
        """
        assert edge in multi_molecules_linked[mol].edges
class TestPairsUnderThreshold:
"""
Tests for :func:`edge_tuning.pairs_under_threshold`.
"""
@staticmethod
@pytest.fixture
def pair_selected(multi_molecules):
"""
Call :func:`edge_tuning.pairs_under_threshold` with twice the same
selection.
"""
selection = [
[0, 1],
[0, 2],
[1, 4],
[2, 5],
[3, 1],
[5, 4],
]
return edge_tuning.pairs_under_threshold(
multi_molecules, 2.0, selection, selection, attribute='coords'
)
@staticmethod
@pytest.fixture
def assymetric_pair_selected(multi_molecules):
"""
Call :func:`edge_tuning.pairs_under_threshold` with two different
selections.
"""
selection_a = [
[0, 1],
[1, 4],
[3, 1],
]
selection_b = [
[0, 2],
[2, 5],
[5, 4],
]
| |
# <gh_stars>0  -- repository-export metadata; commented out so the module parses
"""App views module."""
from datetime import datetime
from flask import (
Blueprint,
abort,
current_app,
jsonify,
make_response,
redirect,
render_template,
request,
)
from flask_jwt_extended import (
create_access_token,
get_jwt_identity,
jwt_optional,
jwt_required,
)
from werkzeug.datastructures import MultiDict
from aggrep import cache
from aggrep.api.email import send_email
from aggrep.api.forms import (
ConfirmEmailForm,
LoginForm,
RegisterForm,
RequestResetForm,
ResetPasswordForm,
UpdateEmailForm,
UpdatePasswordForm,
)
from aggrep.api.posts import (
filter_user_categories,
filter_user_sources,
get_all_posts,
get_posts_by_category,
get_posts_by_search,
get_posts_by_source,
get_similar_posts,
limit_posts,
sort_posts,
)
from aggrep.constants import LATEST, N_RECENT_POSTS, POPULAR, RELEVANT
from aggrep.models import (
Bookmark,
Category,
Feed,
Post,
PostAction,
PostView,
Source,
Status,
User,
)
from aggrep.utils import get_cache_key
app = Blueprint("app", __name__, template_folder="templates")
api = Blueprint("api", __name__, url_prefix="/v1", template_folder="templates")
@api.before_app_request
@jwt_optional
def before_request():
    """Perform tasks before processing a request."""
    # Touch the authenticated user's last-seen timestamp, if anyone is logged in.
    user = User.get_user_from_identity(get_jwt_identity())
    if user is not None:
        user.update(last_seen=datetime.utcnow())
@api.errorhandler(400)
def error_handler_400(e):
    """Jsonify an error message on 400."""
    body = jsonify(msg=e.description)
    return make_response(body, 400)
# === Route Helpers === #
def register_impression(post_id):
    """Increment the impression counter for a post.

    :param post_id: primary key of the post whose counter to bump.
    :returns: ``True`` if a counter row existed and was updated,
        ``False`` if the post has no ``PostAction`` row yet.
    """
    pa = PostAction.query.filter(PostAction.post_id == post_id).first()
    if pa is None:
        # .first() returns None when no row matches; previously this crashed
        # with AttributeError instead of degrading gracefully.
        return False
    pa.impressions += 1
    pa.save()
    return True
def register_click(post_id):
    """Increment the click counter for a post.

    :param post_id: primary key of the post whose counter to bump.
    :returns: ``True`` if a counter row existed and was updated,
        ``False`` if the post has no ``PostAction`` row yet.
    """
    pa = PostAction.query.filter(PostAction.post_id == post_id).first()
    if pa is None:
        # .first() returns None when no row matches; previously this crashed
        # with AttributeError instead of degrading gracefully.
        return False
    pa.clicks += 1
    pa.save()
    return True
# === Post Routes === #
@app.route("/<uid>")
def follow_redirect(uid):
    """Follow a post redirect, counting the click.

    :param uid: public post identifier embedded in the short URL.
    """
    p = Post.from_uid(uid)
    if p is None:
        # Unknown/garbage uid previously raised AttributeError (HTTP 500);
        # a missing resource should be a 404.
        abort(404)
    register_click(p.id)
    return redirect(p.link)
@api.route("/posts")
@jwt_optional
def all_posts():
    """Serve the (optionally personalised) front-page post listing."""
    page = request.args.get("page", 1, type=int)
    per_page = request.args.get("per_page", 20, type=int)
    sort = request.args.get("sort", POPULAR, type=str)
    identity = get_jwt_identity()
    user = User.get_user_from_identity(identity)
    key = get_cache_key("all_posts", identity, page, per_page, sort)
    payload = cache.get(key)
    if payload is None:
        posts = get_all_posts()
        if user:
            # Apply the user's source/category exclusions.
            posts = filter_user_categories(posts, user)
            posts = filter_user_sources(posts, user)
        posts = sort_posts(limit_posts(posts), sort)
        title = "Popular Posts" if sort == POPULAR else "Latest Posts"
        payload = dict(**Post.to_collection_dict(posts, page, per_page), title=title)
        cache.set(key, payload, timeout=180)
    for item in payload["items"]:
        register_impression(item["id"])
    return jsonify(**payload), 200
@api.route("/source/<source>")
@jwt_optional
def posts_by_source(source):
    """Serve the posts published by a single source."""
    page = request.args.get("page", 1, type=int)
    per_page = request.args.get("per_page", 20, type=int)
    sort = request.args.get("sort", POPULAR, type=str)
    identity = get_jwt_identity()
    user = User.get_user_from_identity(identity)
    key = get_cache_key(
        "posts_by_source", identity, page, per_page, sort, route_arg=source
    )
    payload = cache.get(key)
    if payload is None:
        src = Source.query.filter_by(slug=source).first()
        if src is None:
            abort(400, "Source '{}' does not exist.".format(source))
        posts = get_posts_by_source(src)
        if user:
            posts = filter_user_categories(posts, user)
        posts = sort_posts(limit_posts(posts), sort)
        if sort == POPULAR:
            title = "Popular Posts by {}".format(src.title)
        else:
            title = "Latest Posts by {}".format(src.title)
        payload = dict(**Post.to_collection_dict(posts, page, per_page), title=title)
        cache.set(key, payload, timeout=180)
    for item in payload["items"]:
        register_impression(item["id"])
    return jsonify(**payload), 200
@api.route("/category/<category>")
@jwt_optional
def posts_by_category(category):
    """Serve the posts filed under a single category."""
    page = request.args.get("page", 1, type=int)
    per_page = request.args.get("per_page", 20, type=int)
    sort = request.args.get("sort", POPULAR, type=str)
    identity = get_jwt_identity()
    user = User.get_user_from_identity(identity)
    key = get_cache_key(
        "posts_by_category", identity, page, per_page, sort, route_arg=category
    )
    payload = cache.get(key)
    if payload is None:
        cat = Category.query.filter_by(slug=category).first()
        if cat is None:
            abort(400, "Category '{}' does not exist.".format(category))
        posts = get_posts_by_category(cat)
        if user:
            posts = filter_user_sources(posts, user)
        posts = sort_posts(limit_posts(posts), sort)
        if sort == POPULAR:
            title = "Popular Posts in {}".format(cat.title)
        else:
            title = "Latest Posts in {}".format(cat.title)
        payload = dict(**Post.to_collection_dict(posts, page, per_page), title=title)
        cache.set(key, payload, timeout=180)
    for item in payload["items"]:
        register_impression(item["id"])
    return jsonify(**payload), 200
@api.route("/similar/<uid>")
@jwt_optional
def similar_posts(uid):
    """Serve posts covering the same story as the given post."""
    page = request.args.get("page", 1, type=int)
    per_page = request.args.get("per_page", 20, type=int)
    sort = LATEST  # similar posts are always shown newest-first
    identity = get_jwt_identity()
    key = get_cache_key(
        "similar_posts", identity, page, per_page, sort, route_arg=uid
    )
    payload = cache.get(key)
    if payload is None:
        related = sort_posts(get_similar_posts(uid), sort)
        title = "More Coverage"
        payload = dict(**Post.to_collection_dict(related, page, per_page), title=title)
        cache.set(key, payload, timeout=180)
    for item in payload["items"]:
        register_impression(item["id"])
    return jsonify(**payload), 200
@api.route("/search")
@jwt_optional
def search_posts():
    """Serve posts matching a free-text search query."""
    page = request.args.get("page", 1, type=int)
    per_page = request.args.get("per_page", 20, type=int)
    term = request.args.get("query", None, type=str)
    sort = RELEVANT  # search results are always relevance-ordered
    if term is None:
        return abort(400, "No search terms provided.")
    identity = get_jwt_identity()
    key = get_cache_key(
        "search_posts", identity, page, per_page, sort, route_arg=term
    )
    payload = cache.get(key)
    if payload is None:
        matches = get_posts_by_search(term)
        title = "Search Results"
        payload = dict(**Post.to_collection_dict(matches, page, per_page), title=title)
        cache.set(key, payload, timeout=180)
    for item in payload["items"]:
        register_impression(item["id"])
    return jsonify(**payload), 200
@api.route("/bookmarks")
@jwt_required
def bookmarked_posts():
    """Return the authenticated user's bookmarked posts, paginated.

    The route declares no ``methods=`` list, so Flask only routes GET (and
    HEAD/OPTIONS) here; the ``request.method`` check below can therefore
    never be false for a routed request.
    """
    current_user = User.get_user_from_identity(get_jwt_identity())
    if request.method == "GET":
        page = request.args.get("page", 1, type=int)
        per_page = request.args.get("per_page", 20, type=int)
        posts = current_user.bookmarks
        # NOTE(review): `p` iterates `current_user.bookmarks`, so `p.id` is
        # presumably the bookmark row's id, not the post id expected by
        # register_impression — confirm against the Bookmark model.
        for p in posts:
            register_impression(p.id)
        title = "Bookmarked Posts"
        return (
            jsonify(**Post.to_collection_dict(posts, page, per_page), title=title),
            200,
        )
@api.route("/bookmarks/ids", methods=["GET", "POST", "DELETE"])
@jwt_required
def bookmarked_post_ids():
    """Manage a user's bookmarked post IDs.

    GET lists the user's bookmarked post UIDs; POST adds a bookmark for the
    UID in the JSON body; DELETE removes it. POST and DELETE are idempotent.
    """
    current_user = User.get_user_from_identity(get_jwt_identity())
    if request.method == "GET":
        return jsonify(bookmarks=[b.uid for b in current_user.bookmarks]), 200
    # POST and DELETE share the same payload validation and bookmark lookup;
    # previously this was duplicated in both branches.
    payload = request.get_json() or {}
    uid = payload.get("uid")
    if uid is None:
        return abort(400, "No post UID provided.")
    post = Post.from_uid(uid)
    if post is None:
        return abort(400, "Post UID is invalid.")
    bookmark = Bookmark.query.filter_by(
        user_id=current_user.id, post_id=post.id
    ).first()
    if request.method == "POST":
        if not bookmark:
            Bookmark.create(user_id=current_user.id, post_id=post.id)
        msg = "Bookmark saved!"
    else:  # DELETE
        # Guard: deleting a bookmark that does not exist previously crashed
        # with AttributeError on None; now it is a no-op.
        if bookmark is not None:
            bookmark.delete()
        msg = "Bookmark removed!"
    return (
        jsonify(
            dict(
                bookmarks=[b.uid for b in current_user.bookmarks],
                msg=msg,
            )
        ),
        200,
    )
@api.route("/views", methods=["GET", "POST"])
@jwt_required
def viewed_posts():
    """List (GET) or record (POST) the authenticated user's viewed posts."""
    current_user = User.get_user_from_identity(get_jwt_identity())
    if request.method == "GET":
        # Most recent views, capped at N_RECENT_POSTS.
        posts = current_user.post_views.limit(N_RECENT_POSTS).from_self()
        # NOTE(review): `p` is a PostView row, so `p.id` is presumably the
        # view's own id rather than the post id expected by
        # register_impression — confirm against the PostView model.
        for p in posts:
            register_impression(p.id)
        title = "Recently Viewed Posts"
        return (
            jsonify(**Post.to_collection_dict(posts, 1, N_RECENT_POSTS), title=title),
            200,
        )
    elif request.method == "POST":
        payload = request.get_json() or {}
        uid = payload.get("uid")
        if uid is None:
            return jsonify(msg="No post UID provided."), 400
        post = Post.from_uid(uid)
        if post is None:
            return jsonify(msg="Post UID is invalid."), 400
        # Record the view only once per (user, post) pair.
        is_viewed = PostView.query.filter_by(
            user_id=current_user.id, post_id=post.id
        ).first()
        if not is_viewed:
            PostView.create(user_id=current_user.id, post_id=post.id)
        return (jsonify(dict(msg="View saved.")), 200)
# === Taxonomy Routes === #
@api.route("/sources")
def sources():
    """Get all sources with at least one active feed."""
    active_sources = []
    for src in Source.query.order_by(Source.title.asc()).all():
        # A source is listed only when it has at least one active feed.
        active_feed_count = (
            Feed.query.filter(Feed.source == src)
            .filter(Feed.status.has(Status.active == True))  # noqa
            .count()
        )
        if active_feed_count > 0:
            active_sources.append(src.to_dict())
    return jsonify(sources=active_sources), 200
@api.route("/categories")
def categories():
    """Get all categories."""
    ordered = Category.query.order_by(Category.id.asc()).all()
    return jsonify(categories=[c.to_dict() for c in ordered]), 200
@api.route("/manage/sources", methods=["GET", "POST"])
@jwt_required
def manage_sources():
    """Manage a user's excluded sources."""
    current_user = User.get_user_from_identity(get_jwt_identity())
    sources = []  # sources that have at least one active feed (collected, not returned)
    all_source_ids = []
    for src in Source.query.order_by(Source.title.asc()).all():
        all_source_ids.append(src.id)
        active_feed_count = (
            Feed.query.filter(Feed.source == src)
            .filter(Feed.status.has(Status.active == True))  # noqa
            .count()
        )
        if active_feed_count > 0:
            sources.append(src)
    if request.method == "POST":
        # Replace the user's exclusion list with the ids from the payload.
        data = request.get_json()
        excluded_ids = data["excluded_sources"]
        excluded_objects = Source.query.filter(Source.id.in_(excluded_ids)).all()
        current_user.excluded_sources = excluded_objects
        current_user.save()
    user_excludes = [s.id for s in current_user.excluded_sources]
    user_includes = list(set(all_source_ids).difference(set(user_excludes)))
    return (
        jsonify(
            msg="Your preferred sources have been updated.",
            excluded_sources=user_excludes,
            included_sources=user_includes,
        ),
        200,
    )
@api.route("/manage/categories", methods=["GET", "POST"])
@jwt_required
def manage_categories():
    """Manage a user's excluded categories."""
    current_user = User.get_user_from_identity(get_jwt_identity())
    all_categories = Category.query.order_by(Category.title.asc()).all()
    all_category_ids = [category.id for category in all_categories]
    if request.method == "POST":
        # Replace the user's exclusion list with the ids from the payload.
        data = request.get_json()
        excluded_ids = data["excluded_categories"]
        excluded_objects = Category.query.filter(
            Category.id.in_(excluded_ids)
        ).all()
        current_user.excluded_categories = excluded_objects
        current_user.save()
    user_excludes = [category.id for category in current_user.excluded_categories]
    user_includes = list(set(all_category_ids).difference(set(user_excludes)))
    return (
        jsonify(
            msg="Your preferred categories have been updated.",
            excluded_categories=user_excludes,
            included_categories=user_includes,
        ),
        200,
    )
# === Auth Routes === #
@api.route("auth/token/confirm")
def auth_token_confirm():
    """Confirm a user's token.

    NOTE(review): the rule string lacks a leading slash, unlike sibling
    routes; Flask joins it with the blueprint prefix either way, so the
    resulting URL is unchanged.
    """
    current_user = User.get_user_from_identity(get_jwt_identity())
    response = dict(
        msg="Token verification successful!",
        user=current_user.to_dict(),
        access_token=create_access_token(identity=current_user.email),
    )
    return jsonify(response), 200

auth_token_confirm = api.route("auth/token/confirm")
@api.route("/auth/login", methods=["POST"])
def auth_login():
    """Log a user into the application."""
    # Guard clauses: already authenticated or malformed request.
    if get_jwt_identity():
        return abort(400, "You are already logged in.")
    if not request.is_json:
        return abort(400, "Invalid request.")
    form = LoginForm(MultiDict(request.get_json()))
    if not form.validate():
        return abort(400, "Unable to complete login.")
    user = User.query.filter_by(email=form.email.data).first()
    if user is None or not user.check_password(form.password.data):
        return abort(400, "Invalid email address or password")
    payload = dict(
        msg="Login Successful",
        user=user.to_dict(),
        access_token=create_access_token(identity=user.email),
    )
    return jsonify(payload), 200
@api.route("/auth/register", methods=["POST"])
def auth_register():
    """Register a new user."""
    # Guard clauses: already authenticated or malformed request.
    if get_jwt_identity():
        return abort(400, "You are already registered.")
    if not request.is_json:
        return abort(400, "Invalid request.")
    form = RegisterForm(MultiDict(request.get_json()))
    if not form.validate():
        return abort(400, "Unable to complete registration.")
    user = User.create(email=form.email.data)
    user.set_password(form.password.data)
    token = user.get_email_confirm_token()
    # Welcome email carrying the email-confirmation token.
    template_context = dict(
        user=user,
        token=token,
        ui_url=current_app.config["UI_URL"],
    )
    email_data = dict(
        subject="[Aggregate Report] Welcome!",
        recipients=[user.email],
        text_body=render_template("email/welcome.txt", **template_context),
        html_body=render_template("email/welcome.html", **template_context),
    )
    send_email(email_data)
    payload = dict(
        msg="Registration Successful!",
        user=user.to_dict(),
        access_token=create_access_token(identity=form.email.data),
    )
    return jsonify(payload), 200
@api.route("/auth/email/update", methods=["POST"])
@jwt_required
def auth_email_update():
"""Update an email address."""
current_user = User.get_user_from_identity(get_jwt_identity())
if not request.is_json:
return abort(400, "Invalid request.")
form = UpdateEmailForm(MultiDict(request.get_json()))
if form.validate():
current_user.update(email=form.email.data, confirmed=False)
token = current_user.get_email_confirm_token()
email_data = dict(
subject="[Aggregate Report] Confirm your email!",
recipients=[current_user.email],
text_body=render_template(
"email/confirm_email.txt",
user=current_user,
token=token,
ui_url=current_app.config["UI_URL"],
| |
{
if( isnan(fy) ) {
IKFAST_ASSERT(!isnan(fx)); // if both are nan, probably wrong value will be returned
return IKPI_2;
}
else if( isnan(fx) ) {
return 0;
}
return atan2(fy,fx);
}
inline float IKsign(float f) {
if( f > 0 ) {
return float(1);
}
else if( f < 0 ) {
return float(-1);
}
return 0;
}
inline double IKsign(double f) {
if( f > 0 ) {
return 1.0;
}
else if( f < 0 ) {
return -1.0;
}
return 0;
}
"""%(self.version,str(datetime.datetime.now()),self.version)
code += solvertree.generate(self)
code += solvertree.end(self)
code += """
/// solves the inverse kinematics equations.
/// \param pfree is an array specifying the free joints of the chain.
IKFAST_API bool ComputeIk(const IkReal* eetrans, const IkReal* eerot, const IkReal* pfree, IkSolutionListBase<IkReal>& solutions) {
IKSolver solver;
return solver.ComputeIk(eetrans,eerot,pfree,solutions);
}
IKFAST_API const char* GetKinematicsHash() { return "%s"; }
IKFAST_API const char* GetIkFastVersion() { return IKFAST_STRINGIZE(IKFAST_VERSION); }
#ifdef IKFAST_NAMESPACE
} // end namespace
#endif
"""%(self.kinematicshash)
code += """
#ifndef IKFAST_NO_MAIN
#include <stdio.h>
#include <stdlib.h>
#ifdef IKFAST_NAMESPACE
using namespace IKFAST_NAMESPACE;
#endif
int main(int argc, char** argv)
{
if( argc != 12+GetNumFreeParameters()+1 ) {
printf("\\nUsage: ./ik r00 r01 r02 t0 r10 r11 r12 t1 r20 r21 r22 t2 free0 ...\\n\\n"
"Returns the ik solutions given the transformation of the end effector specified by\\n"
"a 3x3 rotation R (rXX), and a 3x1 translation (tX).\\n"
"There are %d free parameters that have to be specified.\\n\\n",GetNumFreeParameters());
return 1;
}
IkSolutionList<IkReal> solutions;
std::vector<IkReal> vfree(GetNumFreeParameters());
IkReal eerot[9],eetrans[3];
eerot[0] = atof(argv[1]); eerot[1] = atof(argv[2]); eerot[2] = atof(argv[3]); eetrans[0] = atof(argv[4]);
eerot[3] = atof(argv[5]); eerot[4] = atof(argv[6]); eerot[5] = atof(argv[7]); eetrans[1] = atof(argv[8]);
eerot[6] = atof(argv[9]); eerot[7] = atof(argv[10]); eerot[8] = atof(argv[11]); eetrans[2] = atof(argv[12]);
for(std::size_t i = 0; i < vfree.size(); ++i)
vfree[i] = atof(argv[13+i]);
bool bSuccess = ComputeIk(eetrans, eerot, vfree.size() > 0 ? &vfree[0] : NULL, solutions);
if( !bSuccess ) {
fprintf(stderr,"Failed to get ik solution\\n");
return -1;
}
printf("Found %d ik solutions:\\n", (int)solutions.GetNumSolutions());
std::vector<IkReal> solvalues(GetNumJoints());
for(std::size_t i = 0; i < solutions.GetNumSolutions(); ++i) {
const IkSolutionBase<IkReal>& sol = solutions.GetSolution(i);
printf("sol%d (free=%d): ", (int)i, (int)sol.GetFree().size());
std::vector<IkReal> vsolfree(sol.GetFree().size());
sol.GetSolution(&solvalues[0],vsolfree.size()>0?&vsolfree[0]:NULL);
for( std::size_t j = 0; j < solvalues.size(); ++j)
printf("%.15f, ", solvalues[j]);
printf("\\n");
}
return 0;
}
#endif
"""
return code
def getClassInit(self,node,iktype,userotation=7,usetranslation=7):
    """Emit the C++ free-function API (GetNumFreeParameters, GetFreeParameters,
    GetNumJoints, GetIkRealSize, GetIkType) and open the IKSolver class,
    declaring every scratch variable the generated solver code will use.

    :param node: solver chain node providing ``freejointvars``/``solvejointvars``
    :param iktype: numeric IK type id written into the generated GetIkType()
    :param userotation: bitmask selecting rotation-matrix rows to declare (bit i -> row i)
    :param usetranslation: bitmask selecting translation components (1=x, 2=y, 4=z)
    :returns: the generated C++ source as a string
    """
    code = "IKFAST_API int GetNumFreeParameters() { return %d; }\n"%len(node.freejointvars)
    if len(node.freejointvars) == 0:
        code += "IKFAST_API int* GetFreeParameters() { return NULL; }\n"
    else:
        # Emit a static array holding the joint indices of the free parameters.
        code += "IKFAST_API int* GetFreeParameters() { static int freeparams[] = {"
        for i,freejointvar in enumerate(node.freejointvars):
            code += "%d"%(freejointvar[1])
            if i < len(node.freejointvars)-1:
                code += ", "
        code += "}; return freeparams; }\n"
    code += "IKFAST_API int GetNumJoints() { return %d; }\n\n"%(len(node.freejointvars)+len(node.solvejointvars))
    code += "IKFAST_API int GetIkRealSize() { return sizeof(IkReal); }\n\n"
    code += 'IKFAST_API int GetIkType() { return 0x%x; }\n\n'%iktype
    code += "class IKSolver {\npublic:\n"
    usedvars = []
    # For every joint, declare the angle plus its cosine, sine and half-tangent.
    for var in node.solvejointvars:
        usedvars += [var[0].name,'c'+var[0].name,'s'+var[0].name,'ht'+var[0].name]
    for i in range(len(node.freejointvars)):
        name = node.freejointvars[i][0].name
        usedvars += [name,'c'+name,'s'+name,'ht'+name]
    for i in range(3):
        if userotation & (1<<i):
            for j in range(3):
                # new_rIJ: transformed rotation, rIJ: input rotation, rxpI_J: extra scratch
                usedvars += ['new_r%d%d'%(i,j), 'r%d%d'%(i,j), 'rxp%d_%d'%(i,j)]
    if usetranslation & 1:
        usedvars += ['new_px', 'px', 'npx']
    if usetranslation & 2:
        usedvars += ['new_py', 'py', 'npy']
    if usetranslation & 4:
        usedvars += ['new_pz', 'pz', 'npz']
    if usetranslation ==7:
        # pp is only declared when all three translation components are present.
        usedvars.append('pp')
    code += 'IkReal ' + ','.join(usedvars) + ';\n'
    # Per-joint index/counter bookkeeping used by the generated solution code.
    code += 'unsigned char ' + ','.join('_i%s[2], _n%s'%(var[0].name,var[0].name) for var in node.solvejointvars+node.freejointvars) + ';\n\n'
    return code
def GetIkFunctionPreamble(self, node):
    """Emit the opening of the generated ComputeIk member function: reset all
    joint values to NaN / sentinel indices and clear the solution list.

    :param node: solver chain node providing ``solvejointvars``/``freejointvars``
    :returns: the generated C++ source as a string
    """
    code = "bool ComputeIk(const IkReal* eetrans, const IkReal* eerot, const IkReal* pfree, IkSolutionListBase<IkReal>& solutions) {\n"
    for var in node.solvejointvars:
        # Solved joints start as NaN with invalid (-1) branch indices/counters.
        code += '%s=numeric_limits<IkReal>::quiet_NaN(); _i%s[0] = -1; _i%s[1] = -1; _n%s = -1; '%(var[0].name,var[0].name,var[0].name,var[0].name)
    for i in range(len(node.freejointvars)):
        name = node.freejointvars[i][0].name
        code += ' _i%s[0] = -1; _i%s[1] = -1; _n%s = 0; '%(name,name,name)
    # Single-iteration loop lets the generated solver bail out via 'continue'.
    code += "\nfor(int dummyiter = 0; dummyiter < 1; ++dummyiter) {\n"
    code += " solutions.Clear();\n"
    return code
def getFKFunctionPreamble(self):
    """Return the doc comment and signature that open the generated ComputeFk."""
    preamble_lines = [
        "/// solves the forward kinematics equations.\n",
        "/// \\param pfree is an array specifying the free joints of the chain.\n",
        "IKFAST_API void ComputeFk(const IkReal* j, IkReal* eetrans, IkReal* eerot) {\n",
    ]
    return ''.join(preamble_lines)
def generateChain(self, node):
    """Generate the full C++ body for a 6D transform IK chain: the optional
    forward-kinematics function, the API/class prologue, and ComputeIk driven
    by the node's joint tree.

    :param node: chain node with ``Tfk`` (optional FK transform), ``Tee``
        (end-effector transform), joint variables and the solution tree.
    :returns: the generated C++ source as a string
    """
    self.freevars = []
    self.freevardependencies = []
    self.resetequations()
    # Fresh symbol generator so CSE temporaries are named x0, x1, ...
    self.symbolgen = cse_main.numbered_symbols('x')
    code = ''
    if node.Tfk:
        # --- forward kinematics: CSE the 3x4 transform and fill eerot/eetrans ---
        code += self.getFKFunctionPreamble()
        allvars = node.solvejointvars + node.freejointvars
        subexprs,reduced_exprs=customcse (node.Tfk[0:3,0:4].subs([(v[0],Symbol('j[%d]'%v[1])) for v in allvars]),self.symbolgen)
        outputnames = ['eerot[0]','eerot[1]','eerot[2]','eetrans[0]','eerot[3]','eerot[4]','eerot[5]','eetrans[1]','eerot[6]','eerot[7]','eerot[8]','eetrans[2]']
        fcode = ''
        if len(subexprs) > 0:
            # Declare and assign the common subexpression temporaries first.
            vars = [var for var,expr in subexprs]
            fcode = 'IkReal ' + ','.join(str(var) for var,expr in subexprs) + ';\n'
            for var,expr in subexprs:
                fcode += self.writeEquations(lambda k: str(var),collect(expr,vars))
        for i in range(len(outputnames)):
            fcode += self.writeEquations(lambda k: outputnames[i],reduced_exprs[i])
        code += self.indentCode(fcode,4)
        code += '}\n\n'
    code += self.getClassInit(node,IkType.Transform6D)
    code += self.GetIkFunctionPreamble(node)
    fcode = ''
    # Seed the free-joint values (and their cos/sin) from the pfree array.
    for i in range(len(node.freejointvars)):
        name = node.freejointvars[i][0].name
        fcode += '%s=pfree[%d]; c%s=cos(pfree[%d]); s%s=sin(pfree[%d]);\n'%(name,i,name,i,name,i)
    # Load the raw end-effector rotation and translation from the inputs.
    for i in range(3):
        for j in range(3):
            fcode += "r%d%d = eerot[%d*3+%d];\n"%(i,j,i,j)
    fcode += "px = eetrans[0]; py = eetrans[1]; pz = eetrans[2];\n\n"
    # Transform the target pose into the solver's frame via node.Tee.
    psymbols = ["new_px","new_py","new_pz"]
    for i in range(3):
        for j in range(3):
            fcode += self.writeEquations(lambda k: "new_r%d%d"%(i,j),node.Tee[4*i+j].evalf())
        fcode += self.writeEquations(lambda k: psymbols[i],node.Tee[4*i+3].evalf())
    for i in range(3):
        for j in range(3):
            fcode += "r%d%d = new_r%d%d; "%(i,j,i,j)
    fcode += "px = new_px; py = new_py; pz = new_pz;\n"
    if node.dictequations is not None:
        for var,value in node.dictequations:
            fcode += self.writeEquations(lambda k: var,value)
    fcode += self.generateTree(node.jointtree)
    code += self.indentCode(fcode,4) + "}\nreturn solutions.GetNumSolutions()>0;\n}\n"
    # write other functions
    for name,functioncode in self.functions.iteritems():  # Python 2 dict API
        code += self.indentCode(functioncode,4)
    code += "};\n"
    return code
def endChain(self, node):
    """A chain node needs no epilogue; emit nothing."""
    epilogue = ""
    return epilogue
def generateIKChainRotation3D(self, node):
    """Generate C++ for a rotation-only IK chain: optional FK over the 3x3
    rotation, the API/class prologue (no translation variables), and the
    ComputeIk body driven by the node's joint tree.

    :param node: chain node with ``Rfk`` (optional FK rotation), ``Ree``
        (target rotation), joint variables and the solution tree.
    :returns: the generated C++ source as a string
    """
    self.freevars = []
    self.freevardependencies = []
    self.resetequations()
    # Fresh symbol generator so CSE temporaries are named x0, x1, ...
    self.symbolgen = cse_main.numbered_symbols('x')
    code = ''
    if node.Rfk:
        # --- forward kinematics: CSE the 3x3 rotation and fill eerot ---
        code += self.getFKFunctionPreamble()
        allvars = node.solvejointvars + node.freejointvars
        subexprs,reduced_exprs=customcse (node.Rfk[0:3,0:3].subs([(v[0],Symbol('j[%d]'%v[1])) for v in allvars]),self.symbolgen)
        outputnames = ['eerot[0]','eerot[1]','eerot[2]','eerot[3]','eerot[4]','eerot[5]','eerot[6]','eerot[7]','eerot[8]']
        fcode = ''
        if len(subexprs) > 0:
            # Declare and assign the common subexpression temporaries first.
            vars = [var for var,expr in subexprs]
            fcode = 'IkReal ' + ','.join(str(var) for var,expr in subexprs) + ';\n'
            for var,expr in subexprs:
                fcode += self.writeEquations(lambda k: str(var),collect(expr,vars))
        for i in range(len(outputnames)):
            fcode += self.writeEquations(lambda k: outputnames[i],reduced_exprs[i])
        code += self.indentCode(fcode,4)
        code += '}\n\n'
    code += self.getClassInit(node,IkType.Rotation3D,usetranslation=0)
    code += self.GetIkFunctionPreamble(node)
    fcode = ''
    # Seed the free-joint values (and their cos/sin) from the pfree array.
    for i in range(len(node.freejointvars)):
        name = node.freejointvars[i][0].name
        fcode += '%s=pfree[%d]; c%s=cos(pfree[%d]); s%s=sin(pfree[%d]);\n'%(name,i,name,i,name,i)
    # Load the raw target rotation from the input array.
    for i in range(3):
        for j in range(3):
            fcode += "r%d%d = eerot[%d*3+%d];\n"%(i,j,i,j)
    # Transform the target orientation into the solver's frame via node.Ree.
    for i in range(3):
        for j in range(3):
            fcode += self.writeEquations(lambda k: "new_r%d%d"%(i,j),node.Ree[i,j].evalf())
    for i in range(3):
        for j in range(3):
            fcode += "r%d%d = new_r%d%d; "%(i,j,i,j)
    fcode += '\n'
    if node.dictequations is not None:
        for var,value in node.dictequations:
            fcode += self.writeEquations(lambda k: var,value)
    fcode += self.generateTree(node.jointtree)
    code += self.indentCode(fcode,4) + "}\nreturn solutions.GetNumSolutions()>0;\n}\n"
    # write other functions
    for name,functioncode in self.functions.iteritems():  # Python 2 dict API
        code += self.indentCode(functioncode,4)
    code += "};\n"
    return code
def endIKChainRotation3D(self, node):
    """A rotation-3D chain node needs no epilogue; emit nothing."""
    epilogue = ""
    return epilogue
def generateIKChainTranslation3D(self, node):
    """Generate C++ for a translation-only IK chain (or its local/global
    variant when ``node.uselocaltrans`` is set): optional FK over the
    position, the API/class prologue, and the ComputeIk body.

    :param node: chain node with ``Pfk`` (optional FK position), ``Pee``
        (target position), ``uselocaltrans`` flag, joint variables and the
        solution tree.
    :returns: the generated C++ source as a string
    """
    self.freevars = []
    self.freevardependencies = []
    self.resetequations()
    # Fresh symbol generator so CSE temporaries are named x0, x1, ...
    self.symbolgen = cse_main.numbered_symbols('x')
    code = ''
    if node.Pfk:
        # --- forward kinematics: CSE the position equations and fill eetrans ---
        code += self.getFKFunctionPreamble()
        allvars = node.solvejointvars + node.freejointvars
        allsubs = [(v[0],Symbol('j[%d]'%v[1])) for v in allvars]
        eqs = []
        for eq in node.Pfk[0:3]:
            eqs.append(eq.subs(allsubs))
        subexprs,reduced_exprs=customcse (eqs,self.symbolgen)
        outputnames = ['eetrans[0]','eetrans[1]','eetrans[2]']
        if node.uselocaltrans:
            # Local/global variant zeroes the diagonal rotation slots it reuses.
            fcode = """
// necessary for local/global translation3d
eerot[0] = eerot[4] = eerot[8] = 0;
IkReal r00 = 0, r11 = 0, r22 = 0;
"""
        else:
            fcode = ''
        if len(subexprs) > 0:
            # Declare and assign the common subexpression temporaries first.
            vars = [var for var,expr in subexprs]
            fcode += 'IkReal ' + ','.join(str(var) for var,expr in subexprs) + ';\n'
            for var,expr in subexprs:
                fcode += self.writeEquations(lambda k: str(var),collect(expr,vars))
        for i in range(len(outputnames)):
            fcode += self.writeEquations(lambda k: outputnames[i],reduced_exprs[i])
        code += self.indentCode(fcode,4)
        code += '}\n\n'
    if node.uselocaltrans:
        code += self.getClassInit(node,IkType.TranslationLocalGlobal6D,userotation=7)
    else:
        code += self.getClassInit(node,IkType.Translation3D,userotation=0)
    code += self.GetIkFunctionPreamble(node)
    fcode = ''
    # Seed the free-joint values (and their cos/sin) from the pfree array.
    for i in range(len(node.freejointvars)):
        name = node.freejointvars[i][0].name
        fcode += '%s=pfree[%d]; c%s=cos(pfree[%d]); s%s=sin(pfree[%d]);\n'%(name,i,name,i,name,i)
    if node.uselocaltrans:
        # Read the diagonal entries eerot[0], eerot[4], eerot[8] into r00/r11/r22.
        for i in range(3):
            fcode += "r%d%d = eerot[%d];\n"%(i,i,4*i)
    fcode += "px = eetrans[0]; py = eetrans[1]; pz = eetrans[2];\n\n"
    # Transform the target position into the solver's frame via node.Pee.
    psymbols = ["new_px","new_py","new_pz"]
    for i in range(3):
        fcode += self.writeEquations(lambda k: psymbols[i],node.Pee[i].evalf())
    fcode += "px = new_px; py = new_py; pz = new_pz;\n"
    if node.dictequations is not None:
        for var,value in node.dictequations:
            fcode += self.writeEquations(lambda k: var,value)
    fcode += self.generateTree(node.jointtree)
    code += self.indentCode(fcode,4) + "}\nreturn solutions.GetNumSolutions()>0;\n}\n"
    # write other functions
    for name,functioncode in self.functions.iteritems():  # Python 2 dict API
        code += self.indentCode(functioncode,4)
    code += "};\n"
    return code
def endIKChainTranslation3D(self, node):
    """A translation-3D chain node needs no epilogue; emit nothing."""
    epilogue = ""
    return epilogue
| |
# <reponame>moule3053/mck8s <gh_stars>10-100  -- repository-export metadata; commented out so the module parses
import kopf
import yaml, pandas as pd
from utils import findPossibleClusters, getFogAppLocations, getCloudCluster, \
createDeployment, createService, deleteDeployment, deleteService, patchDeployment, patchService, createJob, \
deleteJob, patchJob, getMaximumReplicas, findNearestClusters, getAllocatableCapacity, getFogAppClusters, getServiceClusters
import json
import time
# Create multi-cluster deployment
@kopf.on.create('fogguru.eu', 'v1', 'multiclusterdeployments')
def create_fn(body, spec, patch, **kwargs):
    """Handle creation of a MultiClusterDeployment custom resource.

    Decides which clusters (fog and/or cloud) should run the app, how many
    replicas each receives, and creates one Deployment per selected cluster.
    Replicas that do not fit on a fog cluster ("overflow") are redistributed
    to nearby clusters and finally to a cloud cluster.

    Raises
    ------
    kopf.HandlerFatalError
        If the image or the replica count is missing from the spec.
    kopf.TemporaryError
        If the fog level cannot host the app and a cloud cluster must be
        provisioned first; kopf retries the handler after ``delay`` seconds.
        The requested cloud capacity is published via ``patch.status``.
    """
    # Extract the relevant fields from the custom resource.
    fogapp_name = body['metadata']['name']
    fogapp_image = spec['template']['spec']['containers'][0]['image']
    fogapp_replicas = spec['replicas']
    # CPU request like "500m" -> 500; memory request like "128Mi" -> 128.
    fogapp_cpu_request = int(spec['template']['spec']['containers'][0]['resources']['requests']['cpu'][:-1])
    fogapp_memory_request = int(spec['template']['spec']['containers'][0]['resources']['requests']['memory'][:-2])
    spec_text = str(spec)

    # Make sure image and replica count are provided.
    if not fogapp_image:
        raise kopf.HandlerFatalError(f"Image must be set. Got {fogapp_image}.")
    if not fogapp_replicas:
        raise kopf.HandlerFatalError(f"Number of replicas must be set. Got {fogapp_replicas}.")

    # Target namespace (defaults to "default").
    if 'namespace' in body['metadata']:
        fogpapp_namespace = body['metadata']['namespace']
    else:
        fogpapp_namespace = "default"

    # Placement policy specified by user; default is most_traffic.
    if 'placementPolicy' in spec:
        placement_policy = spec['placementPolicy']
    else:
        placement_policy = 'most_traffic'
    # An explicit cluster list overrides any other policy.
    if 'locations' in spec:
        placement_policy = 'cluster_affinity'
    print("The provided placement policy is ..........", placement_policy)

    if 'numberOfLocations' in spec:
        clusters_qty = spec['numberOfLocations']
    else:
        clusters_qty = 1

    eligible_clusters = []
    if 'locations' not in spec:
        # No explicit clusters given: let the placement policy choose them.
        mode = 'create'
        fogapp_locations = getFogAppLocations(fogapp_name, fogpapp_namespace, fogapp_cpu_request, fogapp_memory_request, fogapp_replicas, clusters_qty, placement_policy, mode)
        total_replicas = clusters_qty * fogapp_replicas
        if len(fogapp_locations) != 0:
            eligible_clusters = []
            # Cap each candidate at its capacity and record its overflow.
            for cluster in fogapp_locations:
                if cluster['max_replicas'] > fogapp_replicas:
                    cluster['replicas'] = fogapp_replicas
                    cluster['overflow'] = 0
                else:
                    cluster['replicas'] = cluster['max_replicas']
                    cluster['overflow'] = fogapp_replicas - cluster['max_replicas']
            total_overflow = 0
            for cluster in fogapp_locations[:clusters_qty]:
                entry = {'name': cluster['name'], 'replicas': cluster['replicas']}
                eligible_clusters.append(entry)
                total_overflow += cluster['overflow']
            print("Total overflow ...........", total_overflow)
            # Spill the accumulated overflow onto the remaining candidates.
            if total_overflow > 0:
                for cluster in fogapp_locations[clusters_qty:]:
                    if cluster['max_replicas'] > total_overflow:
                        entry = {'name': cluster['name'], 'replicas': total_overflow}
                        total_overflow = 0
                        eligible_clusters.append(entry)
                        break
                    else:
                        entry = {'name': cluster['name'], 'replicas': cluster['max_replicas']}
                        total_overflow = total_overflow - entry['replicas']
                        eligible_clusters.append(entry)
            # Whatever is still left goes to a cloud cluster, if one was chosen.
            if total_overflow > 0:
                for cluster in eligible_clusters:
                    if 'cloud' in cluster['name']:
                        cluster['replicas'] += total_overflow
                        total_overflow = 0
            print("Final list of clusters .................", eligible_clusters)
            print("Final overflow .................", total_overflow)
            if total_overflow > 0:
                # Ask the surrounding controller to provision a cloud cluster
                # and retry this handler later.
                entry = {'message': 'to_cloud', 'replicas': total_overflow}
                patch.status['message'] = entry
                raise kopf.TemporaryError("Fog clusters not sufficient to run the app. Provisioning cloud cluster.....................",
                                          delay=30)
        else:
            entry = {'message': 'to_cloud', 'replicas': fogapp_replicas}
            patch.status['message'] = entry
            raise kopf.TemporaryError(
                "No clusters found at the fog level. Provisioning cloud cluster.....................",
                delay=30)
    else:
        # Explicit cluster list provided by the user.
        input_clusters = spec['locations'].split(",")
        fogapp_locations = []
        for location in input_clusters:
            fogapp_locations.append(location.strip())
        print("Input list of cluster ....", fogapp_locations)
        clusters_qty = len(fogapp_locations)
        # Per-cluster replica overrides (either a "1, 2" string or a list).
        if 'replicaOverrides' in spec:
            replicas_list = []
            override_replicas = {}
            if isinstance(spec['replicaOverrides'], str):
                replicas = spec['replicaOverrides'].split(",")
                for item in replicas:
                    replicas_list.append(item.strip())
            elif isinstance(spec['replicaOverrides'], list):
                replicas_list = spec['replicaOverrides']
            print("Replica overrides ............", spec['replicaOverrides'])
            for i in range(0, len(fogapp_locations)):
                override_replicas[fogapp_locations[i]] = replicas_list[i]
        else:
            override_replicas = {}
            for i in range(0, len(fogapp_locations)):
                override_replicas[fogapp_locations[i]] = fogapp_replicas
        total_replicas = 0
        for replica in list(override_replicas.values()):
            total_replicas += int(replica)
        print("Total number of replicas .....", total_replicas)
        fog_only_clusters = []
        for cluster in fogapp_locations:
            if 'cloud' not in cluster:
                fog_only_clusters.append(cluster)
        print("Fog only clusters ..............", fog_only_clusters)
        # Replicas the user explicitly targeted at cloud clusters.
        cloud_replicas = 0
        for cluster in fogapp_locations:
            if 'cloud' in cluster:
                cloud_replicas += int(override_replicas[cluster])
        if len(fog_only_clusters) > 0:
            possible_clusters = findPossibleClusters(fog_only_clusters, fogapp_cpu_request, fogapp_memory_request)
        else:
            possible_clusters = []
        print("Initial possible clusters list ............", possible_clusters)
        # If none of the fog clusters has suitably sized nodes, fall back to cloud.
        if len(possible_clusters) == 0:
            eligible_clusters = []
            eligible_replicas = []
            cloud_cluster = getCloudCluster()
            if 'cloud' in cloud_cluster:
                entry = {'name': cloud_cluster, 'replicas': total_replicas, 'overflow': 0}
                eligible_clusters.append(entry)
            else:
                entry = {'message': 'to_cloud', 'replicas': total_replicas}
                patch.status['message'] = entry
                raise kopf.TemporaryError("The application could not be scheduled on the Fog level. Need cloud cluster.",
                                          delay=30)
            print("Initial eligible clusters and replicas 1111", eligible_clusters)
        else:
            fogapp_locations.sort()
            possible_clusters.sort()
            override_replicas_new = {}
            # Re-map the requested replicas onto the possible clusters and
            # spread the cloud-targeted replicas evenly over them.
            for i in range(0, len(possible_clusters)):
                if possible_clusters[i] in fogapp_locations:
                    override_replicas_new[possible_clusters[i]] = int(override_replicas[possible_clusters[i]]) + int((cloud_replicas / len(possible_clusters)))
                else:
                    override_replicas_new[possible_clusters[i]] = int(list(override_replicas.values())[i]) + int((cloud_replicas / len(possible_clusters)))
            for cluster in possible_clusters:
                replicas = int(override_replicas_new[cluster])
                # The maximum number of replicas the cluster can host.
                maximum_replicas = getMaximumReplicas(cluster, fogapp_cpu_request, fogapp_memory_request)
                if maximum_replicas > replicas:
                    entry = {'name': cluster, 'max_replicas': maximum_replicas,
                             'replicas': replicas, 'overflow': 0}
                    eligible_clusters.append(entry)
                else:
                    entry = {'name': cluster, 'max_replicas': maximum_replicas,
                             'replicas': maximum_replicas, 'overflow': replicas - maximum_replicas}
                    eligible_clusters.append(entry)
            temp_list = []
            for cluster in eligible_clusters:
                temp_list.append(cluster)
            print("Possible list of clusters and oveflow ....", temp_list)
            temp_list_2 = []
            for cluster in temp_list:
                temp_list_2.append(cluster['name'])
            temp_list_3 = list(set(fogapp_locations + temp_list_2))
            total_overflow = 0
            for cluster in temp_list:
                total_overflow += cluster['overflow']
            # Pre-compute the free capacity of every cluster near an
            # overflowing one.
            maximum_replicas = {}
            for cluster in temp_list:
                nearest_clusters = []
                overflow = cluster['overflow']
                print("Overflow from ", cluster, overflow)
                if overflow > 0:
                    nearest_clusters = findNearestClusters(cluster, temp_list_3)
                    print("List of nearest clusters ....", nearest_clusters)
                    if len(nearest_clusters) > 0:
                        for c in nearest_clusters:
                            maximum_replicas[c] = getMaximumReplicas(c, fogapp_cpu_request, fogapp_memory_request)
            print("Maximum replicas .....", maximum_replicas)
            # Push each cluster's overflow onto its nearest clusters.
            for cluster in temp_list:
                nearest_clusters = []
                overflow = cluster['overflow']
                if overflow > 0:
                    nearest_clusters = findNearestClusters(cluster, temp_list_3)
                if len(nearest_clusters) > 0:
                    for c in nearest_clusters:
                        if cluster['overflow'] > 0:
                            if maximum_replicas[c] == 0:
                                # No capacity on this neighbour; try the next one.
                                cluster['overflow'] = cluster['overflow']
                            elif maximum_replicas[c] > cluster['overflow']:
                                entry = {'name': c, 'replicas': cluster['overflow'], 'overflow': 0}
                                eligible_clusters.append(entry)
                                maximum_replicas[c] = maximum_replicas[c] - cluster['overflow']
                                cluster['overflow'] = 0
                            else:
                                entry = {'name': c, 'replicas': maximum_replicas[c], 'overflow': 0}
                                cluster['overflow'] = cluster['overflow'] - maximum_replicas[c]
                                eligible_clusters.append(entry)
                                maximum_replicas[c] = 0
            # Merge duplicate cluster entries, summing replicas and overflow.
            # 'records' replaces the deprecated 'r' orient of DataFrame.to_dict.
            eligible_clusters = (pd.DataFrame(eligible_clusters)
                                 .groupby(['name'], as_index=False)
                                 .agg({'replicas': 'sum', 'overflow': 'sum'})
                                 .to_dict('records'))
            print("Preliminary list of eligible clusters ...", eligible_clusters)
            # Replicas that no fog cluster could absorb go to the cloud.
            leftover = 0
            for cluster in eligible_clusters:
                if cluster['overflow'] > 0:
                    leftover += cluster['overflow']
            if leftover > 0:
                for cluster in fogapp_locations:
                    if 'cloud' in cluster:
                        entry = {'name': cluster, 'replicas': leftover, 'overflow': 0}
                        eligible_clusters.append(entry)
                        leftover = 0
            print("Eligible clusters including cloud ...........", eligible_clusters)
            if len(eligible_clusters) == 0:
                entry = {'message': 'to_cloud', 'replicas': total_replicas}
                patch.status['message'] = entry
                raise kopf.TemporaryError(
                    "The application could not be scheduled on the Fog level. Need cloud cluster.",
                    delay=30)
            else:
                if leftover > 0:
                    cloud_cluster = getCloudCluster()
                    if 'cloud' in cloud_cluster:
                        entry = {'name': cloud_cluster, 'replicas': leftover, 'overflow': 0}
                        eligible_clusters.append(entry)
                        leftover = 0
                        print("Eligible clusters including cloud ...........", eligible_clusters)
                    else:
                        entry = {'message': 'to_cloud', 'replicas': leftover}
                        patch.status['message'] = entry
                        raise kopf.TemporaryError(
                            "The application could not be scheduled on the Fog level. Need cloud cluster.",
                            delay=30)
    # Drop clusters that ended up with zero replicas.  Build a new list
    # instead of calling list.remove() while iterating, which skips elements.
    eligible_clusters = [cluster for cluster in eligible_clusters if cluster['replicas'] != 0]
    print("Final list of eligible clusters ...", eligible_clusters)
    temp_list = []
    for cluster in eligible_clusters:
        temp_list.append(cluster)
    eligible_clusters = []
    eligible_replicas = []
    print("Deploy temp list ,,,,,,,,,,,,,,,,,,", temp_list)
    for cluster in temp_list:
        eligible_clusters.append(cluster['name'])
        eligible_replicas.append(cluster['replicas'])
    # Build the Deployment manifest from the spec of the custom resource.
    # NOTE(review): converting the spec via str() and quote replacement breaks
    # if any value contains a quote character; consider a proper serializer.
    deployment_template = "{'apiVersion': 'apps/v1', 'kind': 'Deployment', 'metadata': {'name': '" + fogapp_name + "', 'namespace': '" + fogpapp_namespace + "'}, 'spec': "
    deployment_json = deployment_template + spec_text + "}"
    deployment_text = deployment_json.replace("'", "\"")
    deployment_body = json.loads(deployment_text)
    # Create one Deployment per selected cluster with its share of replicas.
    for cluster, n_replicas in zip(eligible_clusters, eligible_replicas):
        deployment_body['spec']['replicas'] = n_replicas
        createDeployment(cluster, deployment_body, fogpapp_namespace)
    entry = {'message': 'provisioned', 'replicas': eligible_replicas}
    patch.status['message'] = entry
    # TO DO: per cluster overrides
    return {'fogapp_name': fogapp_name, 'fogapp_namespace': fogpapp_namespace, 'input_clusters': fogapp_locations, 'input_replicas': fogapp_replicas, 'fogapp_replicas': eligible_replicas, 'fogapp_locations': eligible_clusters, 'fogapp_status': 'provisioned'}
# Update or patch initial placement (e.g., change number of replicas, Docker image for the app, locations, etc.)
@kopf.on.update('fogguru.eu', 'v1', 'multiclusterdeployments')
def update_fn(spec, status, body, namespace, logger, patch, **kwargs):
# TO DO: the case of an multiclusterdeployment which failed initially
# Update doesn't work since the child objects could not be found
# In such a case may | |
<reponame>tcsvn/pyadlml<filename>pyadlml/model_selection.py
import numbers
import time
import warnings
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from contextlib import suppress
from functools import partial
from traceback import format_exc
import numpy as np
import pandas as pd
from joblib import Parallel, delayed, logger
from itertools import product
from scipy.stats.mstats_basic import rankdata
from sklearn.base import MetaEstimatorMixin, BaseEstimator, is_classifier, clone, _is_pairwise
from sklearn.exceptions import NotFittedError, FitFailedWarning
from sklearn.metrics import check_scoring
from sklearn.metrics._scorer import _check_multimetric_scoring, _MultimetricScorer
from sklearn.model_selection import check_cv
from sklearn.model_selection._search import _check_param_grid, ParameterGrid, _normalize_score_results
from sklearn.model_selection._validation import _aggregate_score_dicts, _score
from sklearn.utils import _deprecate_positional_args, _message_with_time, _safe_indexing
from sklearn.utils.fixes import MaskedArray
from sklearn.utils.metaestimators import if_delegate_has_method# ,_safe_split
from sklearn.utils.validation import check_is_fitted, indexable, _check_fit_params, _num_samples
from pyadlml.dataset import TIME, START_TIME, END_TIME
from pyadlml.pipeline import EvalOnlyWrapper, TrainOnlyWrapper, Pipeline
from pyadlml.preprocessing import TrainSubset, TestSubset, CVSubset
def train_test_split(df_devs, df_acts, split='leave_one_day_out', return_day=False):
    """Split device and activity data into train and test sets.

    One day is drawn at random and held out as the test set; all remaining
    data forms the training set.

    Parameters
    ----------
    df_devs : pd.DataFrame
        Device dataframe with a ``TIME`` column.
    df_acts : pd.DataFrame
        Activity dataframe with ``START_TIME``/``END_TIME`` columns.
    split : str of {'leave_one_day_out', 'default'}, default='leave_one_day_out'
        Split strategy.
    return_day : bool, default=False
        When True, additionally return the [start, end) interval of the
        held-out day as a fifth value.

    Returns
    -------
    X_train, X_test, y_train, y_test : all pd.DataFrames
    """
    held_out_day = _get_rnd_day(df_devs)
    train_dev_idx, test_dev_idx = _split_devs(df_devs, held_out_day)
    train_act_idx, test_act_idx = _split_acts(df_acts, held_out_day)

    X_train = df_devs.iloc[train_dev_idx, :]
    X_test = df_devs.iloc[test_dev_idx, :]
    y_train = df_acts.iloc[train_act_idx, :]
    y_test = df_acts.iloc[test_act_idx, :]

    if not return_day:
        return X_train, X_test, y_train, y_test
    day_interval = [held_out_day, held_out_day + pd.Timedelta('1D')]
    return X_train, X_test, y_train, y_test, day_interval
def _get_rnd_day(df_devs, retain_other_days=False):
    """Pick one day uniformly at random from the device dataframe.

    Parameters
    ----------
    df_devs : pd.DataFrame
        Device dataframe with a timestamp column named by ``TIME``.
    retain_other_days : bool, default=False
        If True, also return the list of all remaining days.

    Returns
    -------
    rnd_day : pd.Timestamp
        Midnight of the randomly selected day.
    other_days : list, optional
        All days except the selected one; only returned when
        ``retain_other_days`` is True.
    """
    # All distinct days present in the data.
    days = list(df_devs[TIME].dt.floor('d').value_counts().index)
    # np.random.randint's `high` bound is exclusive: use len(days) so that the
    # last day can be drawn too (len(days)-1 also raised for 1-day datasets).
    rnd_idx = np.random.randint(0, high=len(days))
    rnd_day = days[rnd_idx]
    if retain_other_days:
        # Return the remaining days, not the popped element (list.pop returns
        # the removed item, which made both return values the same day).
        other_days = days[:rnd_idx] + days[rnd_idx + 1:]
        return rnd_day, other_days
    else:
        return rnd_day
def _split_devs(df_devs, rnd_day):
    """Return (train, test) index arrays for the device dataframe.

    Test indices are the events falling strictly within the given day;
    everything else belongs to the training set.
    """
    day_end = rnd_day + pd.Timedelta('1D')
    in_day = (rnd_day < df_devs[TIME]) & (df_devs[TIME] < day_end)
    return df_devs[~in_day].index.values, df_devs[in_day].index.values
def _split_acts(df_acts, rnd_day):
    """Return (train, test) index arrays for the activity dataframe.

    Test activities overlap the given day; train activities start before
    the day or end after it (an activity spanning the day boundary can
    therefore appear in both sets).
    """
    next_day = rnd_day + pd.Timedelta('1D')
    overlaps_day = (rnd_day < df_acts[END_TIME]) & (df_acts[START_TIME] < next_day)
    outside_day = (df_acts[START_TIME] < rnd_day) | (next_day < df_acts[END_TIME])
    return df_acts[outside_day].index.values, df_acts[overlaps_day].index.values
from sklearn.model_selection import TimeSeriesSplit as SklearnTSSplit, KFold as SklearnKFold
class KFold(SklearnKFold):
    """Identical to sklearn's KFold except that the target labels are
    deliberately ignored when computing the splits."""
    def split(self, X, y=None, groups=None):
        # Drop y on purpose and materialize the generator into a list.
        return list(super().split(X, None, groups))
class TimeSeriesSplit(SklearnTSSplit):
    """
    Time-series cross-validator that can emit timestamp intervals instead of
    row indices, and can split on elapsed time rather than on sample counts.

    Parameters
    ----------
    n_splits : int, default=5
        number of splits. Must be at least 2.
    max_train_size : int, default=None
        Maximum size for a single training set.
    test_size : int, default=None
        Used to limit the size of the test set. Defaults to n_samples // (n_splits + 1), which is the maximum allowed value with gap=0.
    gap : int, default=0
        Number of samples to exclude from the end of each train set before the test set.
    return_timestamp : bool, default=False
        When true, timestamp intervals are returned rather than indices. This is
        useful whenever data is upscaled or downscaled, as the indices in the
        test set can not be known beforehand.
    epsilon : str, default='5ms'
        the offset that is used to pad before the first and after the last interval for
        the timestamps. Has only an effect if *return_timestamp* is set to *true*
    time_based_split : bool, default=False
        If set, the splits are made based on the time rather than on the datapoints. This
        allows for rescaling of the data and applying the split afterwards.
    window_type : str one of [sliding_window, expanding_window], default='expanding_window'
        NOTE(review): stored but never read by ``split``; only the
        expanding-window behaviour is implemented — confirm intent.
        https://eng.uber.com/forecasting-introduction/
    """
    def __init__(self, return_timestamp=False, epsilon='5ms', time_based_split=False, window_type='expanding_window', **kwargs):
        # All standard sklearn TimeSeriesSplit parameters (n_splits, gap, ...)
        # are forwarded untouched via **kwargs.
        SklearnTSSplit.__init__(self, **kwargs)
        self.return_timestamp = return_timestamp
        self.eps = pd.Timedelta(epsilon)
        self.time_based_split = time_based_split
        self.window_type = window_type
    def split(self, X, y=None, groups=None):
        """Generate train/validation folds.

        X must be a pd.DataFrame with a ``TIME`` datetime column whenever
        timestamps or time-based splitting are requested.
        """
        if not self.time_based_split:
            # Plain sklearn behaviour on row indices.
            ts_generator = list(SklearnTSSplit.split(self, X, y, groups))
            if not self.return_timestamp:
                return ts_generator
            else:
                # Convert each index fold into timestamp intervals, padded by
                # eps on both sides so boundary samples are included.
                lst = []
                for (train_idx, val_idx) in ts_generator:
                    val_st = X.iloc[val_idx[0]][TIME] - self.eps
                    val_et = X.iloc[val_idx[-1]][TIME] + self.eps
                    train_st = X.iloc[train_idx[0]][TIME] - self.eps
                    train_et = X.iloc[train_idx[-1]][TIME] + self.eps
                    lst.append(
                        ((train_st, train_et), (val_st, val_et))
                    )
                return lst
        else:
            # Time-based split: the total recording time is divided into
            # (n_splits + 1) equal windows; the train window grows by one test
            # window per fold (expanding-window scheme).
            # create time_range from first device to last device
            start = X[TIME].iloc[0]
            end = X[TIME].iloc[-1]
            rng = end - start # pd.Timedelta
            test_size = rng / (self.n_splits + 1)
            train_end = end - test_size * self.n_splits
            lst = []
            for i in range(0, self.n_splits):
                train_st = start - self.eps
                train_et = train_end
                val_st = train_end
                val_et = val_st + test_size + self.eps
                train_end += test_size
                if self.return_timestamp:
                    lst.append(((train_st, train_et), (val_st, val_et)))
                else:
                    # Map the time windows back to row indices.
                    train_idx = X[(train_st < X[TIME]) & (X[TIME] < train_et)].index.values
                    test_idx = X[(val_st < X[TIME]) & (X[TIME] < val_et)].index.values
                    lst.append((train_idx, test_idx))
            return lst
class LeaveKDayOutSplit():
    """ LeaveKDayOut cross-validator
    Provides train/test indices to split data in train/test sets. Split
    dataset into one day out folds.
    Read more in the :ref:`User Guide <leave_one_day_out>`
    Parameters
    ----------
    k : int, default=1
        The number of days to use for the test set.
        NOTE(review): stored but never read by ``split``; the test window
        length is currently taken from ``n_splits`` instead — confirm intent.
    n_splits : int, default=1
        The number of consecutive days used as the test window of each fold;
        ``split`` yields one fold per possible window position.
    return_timestamps : bool, default=False
        When true timestamp intervals are returned rather than indicies. This is
        useful whenever data is upscaled or downscaled as the indicies in the
        testset can not be known beforehand.
        NOTE(review): stored but never read by ``split``.
    epsilon : str, default='5ms'
        the offset that is used to pad before the first and after the last interval for
        the timestamps. Has only an effect if *return_timestamps* is set to *true*
    """
    def __init__(self, k=1, n_splits=1, return_timestamps=False, epsilon='5ms'):
        self.n_splits = n_splits
        self.k = k
        self.return_timestamp = return_timestamps
        self.eps = pd.Timedelta(epsilon)
    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator
        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
        y : object
            Always ignored, exists for compatibility.
        groups : array-like of shape (n_samples,)
            Always ignored, exists for compatibility.
        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return self.n_splits
    def split(self, X=None, y=None, groups=None):
        """ Generate train/test day splits.
        Parameters
        ----------
        X : pd.DataFrame
            device dataframe with a ``TIME`` column
        y : pd.Series
            activity dataframe (unused)
        Returns
        -------
        splits : list
            Tuples of (train_days, test_days).
            NOTE(review): each fold contains np.ndarrays of *day timestamps*,
            not row indices, despite the class summary — confirm with callers.
        """
        X = X.copy()
        # All distinct days present in the data, in chronological order.
        days = np.array(list(X[TIME].dt.floor('d').value_counts().sort_index().index))
        N = len(days)
        res = []
        # Slide a window of n_splits consecutive days over the data; the
        # window is the test set, every other day goes to the train set.
        for i in range(N-self.n_splits+1):
            idxs_test = list(range(i, self.n_splits+i))
            idxs_train =[i for i in range(N) if i not in idxs_test]
            test_days = days[idxs_test]
            train_days = days[idxs_train]
            res.append((train_days, test_days))
        return res
from sklearn.model_selection._search import BaseSearchCV as SklearnBaseSearchCV
class BaseSearchCV(SklearnBaseSearchCV):
"""Abstract base class for hyper parameter search with cross-validation.
"""
    @abstractmethod
    @_deprecate_positional_args
    def __init__(self, estimator, *, scoring=None, n_jobs=None,
                 online_train_val_split=False,
                 refit=True, cv=None, verbose=0,
                 pre_dispatch='2*n_jobs', error_score=np.nan,
                 return_train_score=True):
        # Forward every standard search parameter unchanged to sklearn's
        # BaseSearchCV; the only addition is the flag controlling whether the
        # train/validation split happens online (inside the pipeline).
        SklearnBaseSearchCV.__init__(self, estimator=estimator, scoring=scoring, n_jobs=n_jobs,
                                     refit=refit, cv=cv, verbose=verbose,
                                     pre_dispatch=pre_dispatch, error_score=error_score,
                                     return_train_score=return_train_score)
        self.online_train_val_split = online_train_val_split
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples, n_output) \
or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
"""
self._check_is_fitted('score')
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if isinstance(self.scorer_, dict):
if self.multimetric_:
scorer = self.scorer_[self.refit]
else:
scorer = self.scorer_
return scorer(self.best_estimator_, X, y)
# callable
score = self.scorer_(self.best_estimator_, X, | |
"""
Linux on Hyper-V and Azure Test Code, ver. 1.0.0
Copyright (c) Microsoft Corporation
All rights reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
See the Apache Version 2.0 License for specific language governing
permissions and limitations under the License.
"""
import os
import time
import logging
from utils import constants
from boto import ec2
from boto import vpc
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from utils.cmdshell import SSHClient
from dateutil import parser
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%y/%m/%d %H:%M:%S', level=logging.INFO)
log = logging.getLogger(__name__)
class AWSConnector:
"""
AWS EC2 connector that uses boto plugin.
"""
def __init__(self, keyid=None, secret=None, imageid=None, instancetype=None, user=None,
localpath=None, region=None, zone=None):
"""
Init AWS connector to create and configure AWS ec2 instances.
:param keyid: user key for executing remote connection
http://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html
:param secret: user secret for executing remote connection
:param imageid: AMI image id from EC2 repo
:param instancetype: instance flavor constituting resources
:param user: remote ssh user for the instance
:param localpath: localpath where the logs should be downloaded, and the
default path for other necessary tools
:param region: EC2 region to connect to
:param zone: EC2 zone where other resources should be available
"""
self.keyid = keyid
self.secret = secret
self.imageid = imageid
self.instancetype = instancetype
self.user = user
self.localpath = localpath
self.host_key_file = os.path.join(self.localpath, 'known_hosts')
if not region:
self.region = 'eu-west-1'
else:
self.region = region
if not zone:
self.zone = self.region + 'c'
else:
self.zone = zone
self.volume_type = {'ssd_gp2': 'gp2',
'ssd_io1': 'io1'}
self.key_name = 'test_ssh_key'
self.group_name = 'test_sec_group'
self.conn = None
self.security_group = None
self.vpc_conn = None
self.vpc_zone = None
self.subnet = None
self.elastic_ips = []
self.instances = []
self.ebs_vols = []
self.latestimage = None
self.device_map = BlockDeviceMapping()
def ec2_connect(self, region=None):
"""
Obtain the EC2 connector by authenticating. This also creates the
keypair and security group for the instance.
:param region: region to connect to (optional, defaults to eu-west1)
"""
self.conn = ec2.connect_to_region(region or self.region, aws_access_key_id=self.keyid,
aws_secret_access_key=self.secret)
self.create_key_pair(self.conn)
self.create_security_group(self.conn)
    def ec2_create_vm(self, user_data=None):
        """
        Create a classic (non-VPC) EC2 instance and wait until it is running.
        Requires ec2_connect() to have been called first.
        :param user_data: routines to be executed upon spawning the instance
        :return: EC2Instance object
        """
        reservation = self.conn.run_instances(self.imageid, key_name=self.key_name,
                                              instance_type=self.instancetype, placement=self.zone,
                                              security_groups=[self.group_name],
                                              user_data=user_data)
        instance = reservation.instances[0]
        # Give AWS a moment before polling the instance state.
        time.sleep(5)
        self.wait_for_state(instance, 'state', 'running')
        # artificial wait for public ip
        time.sleep(5)
        instance.update()
        log.info('Created instance: {}'.format(instance.id))
        # Track the instance for later cleanup.
        self.instances.append(instance)
        return instance
    def connect(self, region=None):
        """
        Obtain the VPC EC2 connector by authenticating. This also creates a
        VPC with DNS enabled, an internet gateway with a default route, a
        /24 subnet, the security group and key pair, and resolves the newest
        AMI for ``self.imageid``.
        :param region: region to connect to (optional, defaults to eu-west1)
        """
        self.vpc_conn = vpc.connect_to_region(region or self.region, aws_access_key_id=self.keyid,
                                              aws_secret_access_key=self.secret)
        # VPC with DNS support/hostnames so instances get resolvable names.
        self.vpc_zone = self.vpc_conn.create_vpc('10.10.0.0/16')
        self.vpc_conn.modify_vpc_attribute(self.vpc_zone.id, enable_dns_support=True)
        self.vpc_conn.modify_vpc_attribute(self.vpc_zone.id, enable_dns_hostnames=True)
        # Internet gateway plus default route so instances are reachable.
        gateway = self.vpc_conn.create_internet_gateway()
        self.vpc_conn.attach_internet_gateway(gateway.id, self.vpc_zone.id)
        route_table = self.vpc_conn.create_route_table(self.vpc_zone.id)
        self.subnet = self.vpc_conn.create_subnet(self.vpc_zone.id, '10.10.10.0/24',
                                                  availability_zone=self.zone)
        self.vpc_conn.associate_route_table(route_table.id, self.subnet.id)
        self.vpc_conn.create_route(route_table.id, '0.0.0.0/0', gateway.id)
        self.create_security_group(self.vpc_conn, vpc_id=self.vpc_zone.id)
        self.create_key_pair(self.vpc_conn)
        # NOTE(review): self.imageid is passed as an OS-type key here (see
        # newest_image), not as an AMI id — confirm against callers.
        self.latestimage = self.newest_image(self.vpc_conn, os_type = self.imageid)
def newest_image(self, conn, os_type = None):
filters = {}
if os_type == 'ubuntu_1604':
filters={'name':'ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server*', 'root_device_type':'ebs', 'owner-id':'099720109477'}
log.info("ubuntu_1604")
if os_type == 'ubuntu_1804':
if self.instancetype == "m6g.4xlarge" or self.instancetype == "a1.4xlarge" or self.instancetype == "a1.metal":
filters={'name':'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-arm64-server*', 'root_device_type':'ebs', 'owner-id':'099720109477'}
else:
filters={'name':'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server*', 'root_device_type':'ebs', 'owner-id':'099720109477'}
log.info("ubuntu_1804")
elif os_type == 'amazon_linux':
filters={'name':'amzn-ami-hvm-*-x86_64-gp2', 'architecture': 'x86_64','root_device_type':'ebs'}
log.info("amazon_linux")
elif os_type == 'amazon_linux_gpu':
filters={'name':'Deep Learning AMI (Amazon Linux) Version*', 'architecture': 'x86_64','root_device_type':'ebs'}
log.info("amazon_linux_gpu")
else:
log.info("os_type {} not support".format(os_type))
return
images = conn.get_all_images(filters=filters)
filters_images = []
for image in images:
if image.platform != 'windows' and "test" not in image.name:
filters_images.append(image)
latest = None
for image in filters_images:
if not latest:
latest = image
continue
if parser.parse(image.creationDate) > parser.parse(latest.creationDate):
latest = image
root_device_name = latest.root_device_name
if os_type == 'ubuntu_1604':
self.device_map[root_device_name] = BlockDeviceType(delete_on_termination = True, size = 30, volume_type = "gp2")
log.info("device_map ubuntu_1604")
if os_type == 'ubuntu_1804':
self.device_map[root_device_name] = BlockDeviceType(delete_on_termination = True, size = 30, volume_type = "gp2")
log.info("device_map ubuntu_1804")
elif os_type == 'amazon_linux':
self.device_map[root_device_name] = BlockDeviceType(delete_on_termination = True, size = 30, volume_type = "gp2")
log.info("device_map amazon_linux")
elif os_type == 'amazon_linux_gpu':
self.device_map[root_device_name] = BlockDeviceType(delete_on_termination = True, size = 75, volume_type = "gp2")
log.info("device_map amazon_linux_gpu")
else:
log.info("device_map {} not support".format(os_type))
return latest
    def create_vm(self, user_data=None):
        """
        Create a VPC EC2 instance from the newest resolved AMI, attach an
        elastic IP, and wait until the instance is running.
        Requires connect() to have been called first (it resolves
        ``self.latestimage`` and creates the VPC resources used here).
        :param user_data: routines to be executed upon spawning the instance
        :return: EC2Instance object
        """
        self.imageid = self.latestimage.id
        log.info("Used image id {}".format(self.imageid))
        log.info("Used image name {}".format(self.latestimage.name))
        log.info("Used image creationDate {}".format(self.latestimage.creationDate))
        reservation = self.vpc_conn.run_instances(self.imageid, key_name=self.key_name,
                                                  instance_type=self.instancetype,
                                                  block_device_map=self.device_map,
                                                  placement=self.zone,
                                                  security_group_ids=[self.security_group.id],
                                                  subnet_id=self.subnet.id, user_data=user_data)
        instance = reservation.instances[0]
        # Give AWS a moment before polling the instance state.
        time.sleep(5)
        self.wait_for_state(instance, 'state', 'running')
        # Allocate and bind an elastic IP for public reachability.
        elastic_ip = self.vpc_conn.allocate_address(domain='vpc')
        self.vpc_conn.associate_address(instance_id=instance.id,
                                        allocation_id=elastic_ip.allocation_id)
        # Track resources for later cleanup.
        self.elastic_ips.append(elastic_ip)
        self.instances.append(instance)
        # artificial wait for ip
        time.sleep(5)
        instance.update()
        log.info('Created instance id: {}'.format(instance.id))
        return instance
def create_key_pair(self, conn):
"""
Creates and saves a default key pair.
:param conn: EC2Connection
"""
try:
key_pair = conn.create_key_pair(self.key_name)
key_pair.save(self.localpath)
except conn.ResponseError as e:
if e.code == 'InvalidKeyPair.Duplicate':
log.info('Duplicate KeyPair {}'.format(self.key_name))
else:
raise
def create_security_group(self, conn, vpc_id=None):
"""
Creates a default security group without restrictions.
:param conn: EC2Connection
:param vpc_id: VPC id where to create the security group.
"""
cidr = '0.0.0.0/0'
try:
group = conn.create_security_group(self.group_name, 'All access', vpc_id=vpc_id)
self.security_group = group
group.authorize(ip_protocol='tcp', from_port=0, to_port=65535, cidr_ip=cidr)
group.authorize(ip_protocol='tcp', from_port=22, to_port=22, cidr_ip=cidr)
group.authorize(ip_protocol='udp', from_port=0, to_port=65535, cidr_ip=cidr)
group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1, cidr_ip=cidr)
except conn.ResponseError as e:
if e.code == 'InvalidGroup.Duplicate':
log.warning('Security Group: {} already exists.'.format(self.group_name))
elif e.code == 'InvalidPermission.Duplicate':
log.warning('Security Group: {} already authorized'.format(self.group_name))
else:
raise
def attach_disk(self, vm_instance, disk_size=10, volume_type=None, iops=None, device=None):
"""
Create and attach an EBS volume to a given instance.
:param vm_instance: Instance object to attach the volume to
:param disk_size: size in GB of the volume
:param volume_type: volume type: gp2 - SSD, st1 - HDD, sc1 - cold HDD;
defaults to magnetic disk
:param iops: IOPS to associate with this volume.
:param device: device mount location, defaults to '/dev/sdx'
:return: EBSVolume object
"""
conn = self.conn or self.vpc_conn
# Add EBS volume DONE
ebs_vol = conn.create_volume(disk_size, self.zone, volume_type=volume_type, iops=iops)
self.wait_for_state(ebs_vol, 'status', 'available')
if not device:
device = '/dev/sdx'
conn.attach_volume(ebs_vol.id, vm_instance.id, device=device)
self.ebs_vols.append(ebs_vol)
return ebs_vol
def enable_sr_iov(self, instance, ssh_client):
"""
Enable SR-IOV for a given instance.
:param instance: EC2Instance object
:param ssh_client: SSHClient
:return: SSHClient (needs to reconnect after reboot)
"""
conn = self.conn or self.vpc_conn
log.info('Enabling SR-IOV on {}'.format(instance.id))
if ssh_client:
util_path = os.path.dirname(os.path.realpath(__file__))
ssh_client.put_file(os.path.join(util_path, 'tests', 'enable_sr_iov.sh'),
'/tmp/enable_sr_iov.sh')
ssh_client.run('chmod +x /tmp/enable_sr_iov.sh')
ssh_client.run("sed -i 's/\r//' /tmp/enable_sr_iov.sh")
ssh_client.run('/tmp/enable_sr_iov.sh {}'.format(self.instancetype))
conn.stop_instances(instance_ids=[instance.id])
self.wait_for_state(instance, 'state', 'stopped')
if self.instancetype in [constants.AWS_P28XLARGE, constants.AWS_M416XLARGE]:
log.info('Enabling ENA for instance: {}'.format(self.instancetype))
import boto3
client = boto3.client('ec2', region_name=self.region, aws_access_key_id=self.keyid,
aws_secret_access_key=self.secret)
client.modify_instance_attribute(InstanceId=instance.id, Attribute='enaSupport',
Value='true')
try:
log.info(conn.get_instance_attribute(instance.id, 'enaSupport'))
except Exception as e:
log.info(e)
pass
# conn.modify_instance_attribute(instance.id, 'enaSupport', True)
# ena_status = conn.get_instance_attribute(instance.id, 'enaSupport')
# log.info('ENA status for {} instance: {}'.format(constants.AWS_P28XLARGE,
# ena_status))
elif self.instancetype == constants.AWS_D24XLARGE:
conn.modify_instance_attribute(instance.id, 'sriovNetSupport', 'simple')
sriov_status = conn.get_instance_attribute(instance.id, 'sriovNetSupport')
log.info("SR-IOV status is: {}".format(sriov_status))
else:
log.error('Instance type {} unhandled for SRIOV'.format(self.instancetype))
return None
conn.start_instances(instance_ids=[instance.id])
self.wait_for_state(instance, 'state', 'running')
return self.wait_for_ping(instance)
@staticmethod
def wait_for_state(obj, attr, state):
"""
Check when an AWS object attribute state is achieved.
:param obj: the AWS object to verify attribute status
:param attr: object attribute to be verified
:param state: attribute state to wait for
:return:
"""
log.info('Waiting for {} status {}'.format(str(obj), state))
while getattr(obj, attr) != state:
time.sleep(5)
obj.update()
def wait_for_ping(self, instance, user=None):
"""
To obtain the SSH client, we must wait for the instance to boot,
even the EC2 instance status is available.
:param instance: created ec2 instance to wait for
:param user: SSH user to use with the created key
:return: SSHClient or None on error
"""
ping_arg = '-n'
if os.name == 'posix':
ping_arg = '-c'
if not instance.public_dns_name:
log.error("Spawned instance was not allocated a public IP. Please try again.")
raise Exception("Spawned instance was not allocated a public IP. Please try again.")
ping_cmd = 'ping {} 1 {}'.format(ping_arg, instance.ip_address)
try:
timeout = 0
while os.system(ping_cmd) != 0 and timeout < 60:
time.sleep(10)
timeout += 10
# artificial wait for ssh service up status
time.sleep(60)
client = SSHClient(server=instance.ip_address, host_key_file=self.host_key_file,
user=user or self.user,
ssh_key_file=os.path.join(self.localpath, self.key_name + '.pem'))
except | |
stepper.finalize()
if i == 0 or i == 1 or i == 3 or i == 4:
self.assertAllClose(24.0, result[0])
self.assertAllClose(10.0, result[1][0])
self.assertAllClose(-4.0, result[1][1])
elif i == 2 or i == 5:
self.assertAllClose(24.0, result["e"])
self.assertAllClose(10.0, result["fz"]["f"])
self.assertAllClose(-4.0, result["fz"]["z"])
class StepperTestWithPlaceHolders(test_util.TensorFlowTestCase):
def setUp(self):
self.ph0 = array_ops.placeholder(dtypes.float32, shape=(2, 2), name="ph0")
self.ph1 = array_ops.placeholder(dtypes.float32, shape=(2, 1), name="ph1")
self.x = math_ops.matmul(self.ph0, self.ph1, name="x")
self.y = math_ops.add(self.x, self.ph1, name="y")
self.sess = session.Session()
def tearDown(self):
ops.reset_default_graph()
def testGetTensorValueWorksOnPlaceholder(self):
with NodeStepper(
self.sess,
self.y,
feed_dict={
self.ph0: [[1.0, 2.0], [-3.0, 5.0]],
self.ph1: [[-1.0], [0.5]]
}) as stepper:
self.assertAllClose([[1.0, 2.0], [-3.0, 5.0]],
stepper.get_tensor_value("ph0"))
self.assertAllClose([[1.0, 2.0], [-3.0, 5.0]],
stepper.get_tensor_value("ph0:0"))
with self.assertRaisesRegexp(
KeyError,
r"The name 'ph0:1' refers to a Tensor which does not exist"):
stepper.get_tensor_value("ph0:1")
def testIsPlaceholdersShouldGiveCorrectAnswers(self):
with NodeStepper(self.sess, self.y) as stepper:
self.assertTrue(stepper.is_placeholder(self.ph0.name))
self.assertTrue(stepper.is_placeholder(self.ph1.name))
self.assertFalse(stepper.is_placeholder(self.x.name))
self.assertFalse(stepper.is_placeholder(self.y.name))
with self.assertRaisesRegexp(ValueError,
"A is not in the transitive closure"):
self.assertFalse(stepper.is_placeholder("A"))
def testPlaceholdersShouldGiveCorrectAnswers(self):
with NodeStepper(self.sess, self.y) as stepper:
self.assertSetEqual({"ph0", "ph1"}, set(stepper.placeholders()))
def testContWithPlaceholders(self):
with NodeStepper(
self.sess,
self.y,
feed_dict={
self.ph0: [[1.0, 2.0], [-3.0, 5.0]],
self.ph1: [[-1.0], [0.5]]
}) as stepper:
self.assertEqual(4, len(stepper.sorted_nodes()))
self.assertSetEqual({"ph0:0", "ph1:0", "x:0", "y:0"},
set(stepper.closure_elements()))
result = stepper.cont(self.x)
self.assertAllClose([[0.0], [5.5]], result)
self.assertEqual({
"ph0:0": NodeStepper.FEED_TYPE_CLIENT,
"ph1:0": NodeStepper.FEED_TYPE_CLIENT,
}, stepper.last_feed_types())
self.assertEqual(["x:0"], stepper.handle_names())
self.assertSetEqual({"x"}, stepper.handle_node_names())
result = stepper.cont(self.y)
self.assertAllClose([[-1.0], [6.0]], result)
self.assertEqual({
"x:0": NodeStepper.FEED_TYPE_HANDLE,
"ph1:0": NodeStepper.FEED_TYPE_CLIENT,
}, stepper.last_feed_types())
def testAttemptToContToPlaceholderWithTensorFeedKeysShouldWork(self):
"""Continuing to a placeholder should be allowed, using client feed."""
ph0_feed = [[1.0, 2.0], [-3.0, 5.0]]
ph1_feed = [[-1.0], [0.5]]
with NodeStepper(
self.sess, self.y, feed_dict={
self.ph0: ph0_feed,
self.ph1: ph1_feed,
}) as stepper:
self.assertAllClose(ph0_feed, stepper.cont(self.ph0))
self.assertEqual({
self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
}, stepper.last_feed_types())
self.assertAllClose(ph1_feed, stepper.cont(self.ph1))
self.assertEqual({
self.ph1.name: NodeStepper.FEED_TYPE_CLIENT
}, stepper.last_feed_types())
ph0_node = self.sess.graph.as_graph_element("ph0")
self.assertAllClose(ph0_feed, stepper.cont(ph0_node))
self.assertEqual({
self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
}, stepper.last_feed_types())
self.assertAllClose([[-1.0], [6.0]], stepper.finalize())
def testAttemptToContToPlaceholderWithTensorNameFeedKeysShouldWork(self):
ph0_feed = [[1.0, 2.0], [-3.0, 5.0]]
ph1_feed = [[-1.0], [0.5]]
with NodeStepper(
self.sess,
self.y,
feed_dict={
self.ph0.name: ph0_feed,
self.ph1.name: ph1_feed,
}) as stepper:
self.assertAllClose(ph0_feed, stepper.cont(self.ph0))
self.assertEqual({
self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
}, stepper.last_feed_types())
self.assertAllClose(ph1_feed, stepper.cont(self.ph1))
self.assertEqual({
self.ph1.name: NodeStepper.FEED_TYPE_CLIENT
}, stepper.last_feed_types())
ph0_node = self.sess.graph.as_graph_element("ph0")
self.assertAllClose(ph0_feed, stepper.cont(ph0_node))
self.assertEqual({
self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
}, stepper.last_feed_types())
self.assertAllClose([[-1.0], [6.0]], stepper.finalize())
class StepperAssignAddTest(test_util.TensorFlowTestCase):
def setUp(self):
self.v = variables.Variable(10.0, name="v")
self.p = math_ops.add(self.v, self.v, name="p")
self.q = math_ops.multiply(self.p, self.p, name="q")
self.delta = constant_op.constant(2.0, name="delta")
self.v_add = state_ops.assign_add(self.v, self.delta, name="v_add")
self.v_add_plus_one = math_ops.add(self.v_add,
1.0,
name="v_add_plus_one")
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
constant_folding=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
config = config_pb2.ConfigProto(graph_options=graph_options)
self.sess = session.Session(config=config)
self.sess.run(self.v.initializer)
def tearDown(self):
ops.reset_default_graph()
def testLastUpdatedVariablesReturnsNoneBeforeAnyContCalls(self):
with NodeStepper(self.sess, [self.q, self.v_add]) as stepper:
self.assertIsNone(stepper.last_updated())
def testContToUpdateInvalidatesDumpedIntermediates(self):
with NodeStepper(self.sess, [self.q, self.v_add]) as stepper:
self.assertAllClose(400.0, stepper.cont("q:0"))
self.assertItemsEqual(["v/read:0", "p:0"],
stepper.intermediate_tensor_names())
self.assertAllClose(10.0, stepper.get_tensor_value("v/read:0"))
self.assertAllClose(20.0, stepper.get_tensor_value("p:0"))
self.assertAllClose(
12.0, stepper.cont(
self.v_add, invalidate_from_updated_variables=True))
self.assertAllClose(12.0, self.sess.run(self.v))
self.assertSetEqual({self.v.name}, stepper.last_updated())
self.assertItemsEqual(["v:0"], stepper.dirty_variables())
# Updating the value of v by calling v_add should have invalidated the
# dumped intermediate tensors for v/read:0 and p:0.
self.assertItemsEqual(["delta:0"], stepper.intermediate_tensor_names())
with self.assertRaisesRegexp(
ValueError,
r"This stepper instance does not have access to the value of tensor "
r"\"p:0\""):
stepper.get_tensor_value("p:0")
# The next cont to q should not have used any dumped intermediate tensors
# and its result should reflect the updated value.
self.assertAllClose(576.0, stepper.cont("q:0"))
self.assertSetEqual(set(), stepper.last_updated())
self.assertEqual({}, stepper.last_feed_types())
def testOverridingUpstreamTensorInvalidatesDumpedIntermediates(self):
with NodeStepper(self.sess, self.q) as stepper:
self.assertAllClose(400.0, stepper.cont("q:0"))
self.assertItemsEqual(["v/read:0", "p:0"],
stepper.intermediate_tensor_names())
self.assertAllClose(10.0, stepper.get_tensor_value("v/read:0"))
self.assertAllClose(20.0, stepper.get_tensor_value("p:0"))
stepper.override_tensor("v/read:0", 11.0)
self.assertItemsEqual(["v/read:0"], stepper.override_names())
# Overriding the upstream v/read:0 should have invalidated the dumped
# intermediate tensor for the downstream p:0.
self.assertItemsEqual([], stepper.intermediate_tensor_names())
# The next cont to q should not have used any dumped intermediate tensors
# and its result should reflect the overriding value.
self.assertAllClose(484.0, stepper.cont("q:0"))
self.assertEqual({
"v/read:0": NodeStepper.FEED_TYPE_OVERRIDE
}, stepper.last_feed_types())
def testRemovingOverrideToUpstreamTensorInvalidatesDumpedIntermediates(self):
with NodeStepper(self.sess, self.q) as stepper:
stepper.override_tensor("v/read:0", 9.0)
self.assertItemsEqual(["v/read:0"], stepper.override_names())
self.assertAllClose(324.0, stepper.cont(self.q))
self.assertItemsEqual(["p:0"], stepper.intermediate_tensor_names())
stepper.remove_override("v/read:0")
self.assertItemsEqual([], stepper.override_names())
# Removing the pre-existing override to v/read:0 should have invalidated
# the dumped intermediate tensor.
self.assertItemsEqual([], stepper.intermediate_tensor_names())
def testRepeatedCallsToAssignAddDoesNotUpdateVariableAgain(self):
with NodeStepper(self.sess, self.v_add) as stepper:
stepper.cont(self.v_add)
self.assertSetEqual({self.v.name}, stepper.last_updated())
self.assertAllClose(12.0, stepper.cont(self.v))
stepper.cont(self.v_add)
self.assertSetEqual(set(), stepper.last_updated())
self.assertEqual({"v_add:0": NodeStepper.FEED_TYPE_HANDLE},
stepper.last_feed_types())
self.assertAllClose(12.0, stepper.cont(self.v))
def testRepeatedCallsToAssignAddDownStreamDoesNotUpdateVariableAgain(self):
with NodeStepper(self.sess, self.v_add_plus_one) as stepper:
stepper.cont(self.v_add_plus_one)
self.assertSetEqual({self.v.name}, stepper.last_updated())
self.assertAllClose(12.0, stepper.cont(self.v))
stepper.cont(self.v_add_plus_one)
self.assertSetEqual(set(), stepper.last_updated())
self.assertEqual({"v_add_plus_one:0": NodeStepper.FEED_TYPE_HANDLE},
stepper.last_feed_types())
self.assertAllClose(12.0, stepper.cont(self.v))
class StepperBackwardRunTest(test_util.TensorFlowTestCase):
def setUp(self):
"""Test setup.
Structure of the forward graph:
f
| |
----- -----
| |
d e
| | | |
--- --------- ---
| | |
a b c
Construct a backward graph using the GradientDescentOptimizer.
"""
self.a = variables.Variable(1.0, name="a")
self.b = variables.Variable(2.0, name="b")
self.c = variables.Variable(4.0, name="c")
self.d = math_ops.multiply(self.a, self.b, name="d")
self.e = math_ops.multiply(self.b, self.c, name="e")
self.f = math_ops.multiply(self.d, self.e, name="f")
# Gradient descent optimizer that minimizes g.
gradient_descent.GradientDescentOptimizer(0.01).minimize(
self.f, name="optim")
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
constant_folding=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
config = config_pb2.ConfigProto(graph_options=graph_options)
self.sess = session.Session(config=config)
self.sess.run(variables.global_variables_initializer())
def tearDown(self):
ops.reset_default_graph()
def testContToUpdateA(self):
with NodeStepper(self.sess, "optim") as stepper:
result = stepper.cont("a:0")
self.assertAllClose(1.0, result)
self.assertEqual({}, stepper.last_feed_types())
result = stepper.cont("optim/learning_rate:0")
self.assertAllClose(0.01, result)
self.assertEqual({}, stepper.last_feed_types())
# Before any cont calls on ApplyGradientDescent, there should be no
# "dirty" variables.
self.assertEqual(set(), stepper.dirty_variables())
# First, all the two control inputs to optim.
result = stepper.cont("optim/update_a/ApplyGradientDescent",
invalidate_from_updated_variables=True)
# Now variable a should have been marked as dirty due to the update
# by optim/update_a/ApplyGradientDescent.
self.assertSetEqual({"a:0"}, stepper.last_updated())
self.assertEqual({"a:0"}, stepper.dirty_variables())
self.assertIsNone(result)
self.assertEqual({
"optim/learning_rate:0": NodeStepper.FEED_TYPE_HANDLE
}, stepper.last_feed_types())
# Check that Variable "a" has been updated properly, but "b", "c" and "d"
# remain the same.
# For backprop on Variable a:
# Because f = a * b * b * c, df / da = b * b * c.
# 1.0 - learning_rate * b * b * c
# = 1.0 - 0.01 * 2.0 * 2.0 * 4.0 = 0.84.
self.assertAllClose(0.84, self.sess.run(self.a))
self.assertAllClose(2.0, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
def testContToUpdateB(self):
with NodeStepper(self.sess, "optim") as stepper:
result = stepper.cont("optim/update_b/ApplyGradientDescent",
invalidate_from_updated_variables=True)
self.assertIsNone(result)
self.assertSetEqual({"b:0"}, stepper.last_updated())
self.assertEqual(set(["b:0"]), stepper.dirty_variables())
# For backprop on Variable b:
# Because f = a * b * b * c, df / da = 2 * a * b * c.
# 2.0 - learning_rate * 2 * a * b * c
# = 2.0 - 0.01 * 2 * 1.0 * 2.0 * 4.0 = 1.84
self.assertAllClose(1.0, self.sess.run(self.a))
self.assertAllClose(1.84, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
def testContAfterUpdateWithoutRestoringVariableValue(self):
with NodeStepper(self.sess, "optim") as stepper:
# First, update Variable a from 1.0 to 0.84.
result = stepper.cont(
"optim/update_a/ApplyGradientDescent",
invalidate_from_updated_variables=True,
restore_variable_values=True)
self.assertIsNone(result)
self.assertSetEqual({"a:0"}, stepper.last_updated())
self.assertEqual(set(["a:0"]), stepper.dirty_variables())
self.assertAllClose(0.84, self.sess.run(self.a))
self.assertAllClose(2.0, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
# Tracking of the updated variables should have invalidated all
# intermediate tensors downstream to a:0.
self.assertNotIn("a/read:0", stepper.intermediate_tensor_names())
self.assertNotIn("d:0", stepper.intermediate_tensor_names())
# Second, update Variable b without the default restore_variable_values.
result = stepper.cont(
"optim/update_b/ApplyGradientDescent", restore_variable_values=False)
self.assertIsNone(result)
# For the backprop on Variable b under the updated value of a:
# 2.0 - learning_rate * 2 * a' * b * c
# = 2.0 - 0.01 * 2 * 0.84 * 2.0 * 4.0 = 1.8656
self.assertAllClose(0.84, self.sess.run(self.a))
self.assertAllClose(1.8656, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
def testContNotInvalidatingFromVariableUpdatesWorksForNextUpdate(self):
with NodeStepper(self.sess, "optim") as stepper:
self.assertIsNone(stepper.cont(
"optim/update_a/ApplyGradientDescent",
invalidate_from_updated_variables=False))
# Even though invalidate_from_updated_variables is set to False, dirty
# variables should still have been tracked.
self.assertSetEqual({"a:0"}, stepper.last_updated())
self.assertEqual({"a:0"}, stepper.dirty_variables())
self.assertIn("a/read:0", stepper.intermediate_tensor_names())
self.assertIn("b/read:0", stepper.intermediate_tensor_names())
self.assertIn("c/read:0", stepper.intermediate_tensor_names())
self.assertIn("d:0", stepper.intermediate_tensor_names())
self.assertIn("e:0", stepper.intermediate_tensor_names())
self.assertIn("optim/learning_rate:0",
stepper.intermediate_tensor_names())
self.assertNotIn("a:0", stepper.intermediate_tensor_names())
self.assertNotIn("b:0", stepper.intermediate_tensor_names())
self.assertNotIn("c:0", stepper.intermediate_tensor_names())
self.assertAllClose(0.84, self.sess.run(self.a))
self.assertAllClose(2.0, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
# For the backprop on Variable b, the result should reflect the original
# value of Variable a, even though Variable a has actually been updated.
# 2.0 - learning_rate * 2 * a * b * c
# = 2.0 - 0.01 * 2 * 1.0 * 2.0 * 4.0 = 1.84
self.assertIsNone(stepper.cont(
"optim/update_b/ApplyGradientDescent",
invalidate_from_updated_variables=False,
restore_variable_values=False))
self.assertAllClose(0.84, self.sess.run(self.a))
self.assertAllClose(1.84, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
def testUpdateTwiceRestoreVariable(self):
with NodeStepper(self.sess, "optim") as stepper:
result = stepper.cont(
"optim/update_a/ApplyGradientDescent",
invalidate_from_updated_variables=True,
restore_variable_values=True)
self.assertIsNone(result)
self.assertSetEqual({"a:0"}, stepper.last_updated())
self.assertEqual({"a:0"}, stepper.dirty_variables())
result = stepper.cont(
"optim/update_b/ApplyGradientDescent",
invalidate_from_updated_variables=True,
restore_variable_values=True)
self.assertIsNone(result)
# Variables a and c should have been restored and hence no longer dirty.
# Variable b should have been marked as dirty.
self.assertSetEqual({"b:0"}, stepper.last_updated())
self.assertEqual({"b:0"}, stepper.dirty_variables())
# The result of the update should be identitcal to as if only update_b is
# run.
self.assertAllClose(1.0, self.sess.run(self.a))
self.assertAllClose(1.84, self.sess.run(self.b))
self.assertAllClose(4.0, self.sess.run(self.c))
def testSelectiveHandleUsageDependingOnTransitiveCleanliness(self):
"""Test tensor handlers are using only during clean transitive closure.
"clean" means no Variables have been updated by preceding cont() calls.
"""
with NodeStepper(self.sess, "optim") as stepper:
# First, call cont() on the two tensors on the intermediate level: e and
| |
not isinstance(mac_address_, (bytes, str)):
raise Exception("Expected mac_address_ to be a str, received: {}".format(type(mac_address_)))
if provider_id_ is not None and not isinstance(provider_id_, (bytes, str)):
raise Exception("Expected provider_id_ to be a str, received: {}".format(type(provider_id_)))
self.interface_name = interface_name_
self.mac_address = mac_address_
self.provider_id = provider_id_
self.unknown_fields = unknown_fields
class ProviderInterfaceInfoResult(Type):
_toSchema = {'error': 'error', 'interfaces': 'interfaces', 'machine_tag': 'machine-tag'}
_toPy = {'error': 'error', 'interfaces': 'interfaces', 'machine-tag': 'machine_tag'}
def __init__(self, error=None, interfaces=None, machine_tag=None, **unknown_fields):
'''
error : Error
interfaces : typing.Sequence[~ProviderInterfaceInfo]
machine_tag : str
'''
error_ = Error.from_json(error) if error else None
interfaces_ = [ProviderInterfaceInfo.from_json(o) for o in interfaces or []]
machine_tag_ = machine_tag
# Validate arguments against known Juju API types.
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if interfaces_ is not None and not isinstance(interfaces_, (bytes, str, list)):
raise Exception("Expected interfaces_ to be a Sequence, received: {}".format(type(interfaces_)))
if machine_tag_ is not None and not isinstance(machine_tag_, (bytes, str)):
raise Exception("Expected machine_tag_ to be a str, received: {}".format(type(machine_tag_)))
self.error = error_
self.interfaces = interfaces_
self.machine_tag = machine_tag_
self.unknown_fields = unknown_fields
class ProviderInterfaceInfoResults(Type):
_toSchema = {'results': 'results'}
_toPy = {'results': 'results'}
def __init__(self, results=None, **unknown_fields):
'''
results : typing.Sequence[~ProviderInterfaceInfoResult]
'''
results_ = [ProviderInterfaceInfoResult.from_json(o) for o in results or []]
# Validate arguments against known Juju API types.
if results_ is not None and not isinstance(results_, (bytes, str, list)):
raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
self.results = results_
self.unknown_fields = unknown_fields
class ProviderSpace(Type):
_toSchema = {'error': 'error', 'name': 'name', 'provider_id': 'provider-id', 'subnets': 'subnets'}
_toPy = {'error': 'error', 'name': 'name', 'provider-id': 'provider_id', 'subnets': 'subnets'}
def __init__(self, error=None, name=None, provider_id=None, subnets=None, **unknown_fields):
'''
error : Error
name : str
provider_id : str
subnets : typing.Sequence[~Subnet]
'''
error_ = Error.from_json(error) if error else None
name_ = name
provider_id_ = provider_id
subnets_ = [Subnet.from_json(o) for o in subnets or []]
# Validate arguments against known Juju API types.
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if name_ is not None and not isinstance(name_, (bytes, str)):
raise Exception("Expected name_ to be a str, received: {}".format(type(name_)))
if provider_id_ is not None and not isinstance(provider_id_, (bytes, str)):
raise Exception("Expected provider_id_ to be a str, received: {}".format(type(provider_id_)))
if subnets_ is not None and not isinstance(subnets_, (bytes, str, list)):
raise Exception("Expected subnets_ to be a Sequence, received: {}".format(type(subnets_)))
self.error = error_
self.name = name_
self.provider_id = provider_id_
self.subnets = subnets_
self.unknown_fields = unknown_fields
class ProvisioningInfo(Type):
_toSchema = {'charm_lxd_profiles': 'charm-lxd-profiles', 'cloudinit_userdata': 'cloudinit-userdata', 'constraints': 'constraints', 'controller_config': 'controller-config', 'endpoint_bindings': 'endpoint-bindings', 'image_metadata': 'image-metadata', 'jobs': 'jobs', 'placement': 'placement', 'series': 'series', 'subnets_to_zones': 'subnets-to-zones', 'tags': 'tags', 'volume_attachments': 'volume-attachments', 'volumes': 'volumes'}
_toPy = {'charm-lxd-profiles': 'charm_lxd_profiles', 'cloudinit-userdata': 'cloudinit_userdata', 'constraints': 'constraints', 'controller-config': 'controller_config', 'endpoint-bindings': 'endpoint_bindings', 'image-metadata': 'image_metadata', 'jobs': 'jobs', 'placement': 'placement', 'series': 'series', 'subnets-to-zones': 'subnets_to_zones', 'tags': 'tags', 'volume-attachments': 'volume_attachments', 'volumes': 'volumes'}
def __init__(self, charm_lxd_profiles=None, cloudinit_userdata=None, constraints=None, controller_config=None, endpoint_bindings=None, image_metadata=None, jobs=None, placement=None, series=None, subnets_to_zones=None, tags=None, volume_attachments=None, volumes=None, **unknown_fields):
'''
charm_lxd_profiles : typing.Sequence[str]
cloudinit_userdata : typing.Mapping[str, typing.Any]
constraints : Value
controller_config : typing.Mapping[str, typing.Any]
endpoint_bindings : typing.Mapping[str, str]
image_metadata : typing.Sequence[~CloudImageMetadata]
jobs : typing.Sequence[str]
placement : str
series : str
subnets_to_zones : typing.Sequence[str]
tags : typing.Mapping[str, str]
volume_attachments : typing.Sequence[~VolumeAttachmentParams]
volumes : typing.Sequence[~VolumeParams]
'''
charm_lxd_profiles_ = charm_lxd_profiles
cloudinit_userdata_ = cloudinit_userdata
constraints_ = Value.from_json(constraints) if constraints else None
controller_config_ = controller_config
endpoint_bindings_ = endpoint_bindings
image_metadata_ = [CloudImageMetadata.from_json(o) for o in image_metadata or []]
jobs_ = jobs
placement_ = placement
series_ = series
subnets_to_zones_ = subnets_to_zones
tags_ = tags
volume_attachments_ = [VolumeAttachmentParams.from_json(o) for o in volume_attachments or []]
volumes_ = [VolumeParams.from_json(o) for o in volumes or []]
# Validate arguments against known Juju API types.
if charm_lxd_profiles_ is not None and not isinstance(charm_lxd_profiles_, (bytes, str, list)):
raise Exception("Expected charm_lxd_profiles_ to be a Sequence, received: {}".format(type(charm_lxd_profiles_)))
if cloudinit_userdata_ is not None and not isinstance(cloudinit_userdata_, dict):
raise Exception("Expected cloudinit_userdata_ to be a Mapping, received: {}".format(type(cloudinit_userdata_)))
if constraints_ is not None and not isinstance(constraints_, (dict, Value)):
raise Exception("Expected constraints_ to be a Value, received: {}".format(type(constraints_)))
if controller_config_ is not None and not isinstance(controller_config_, dict):
raise Exception("Expected controller_config_ to be a Mapping, received: {}".format(type(controller_config_)))
if endpoint_bindings_ is not None and not isinstance(endpoint_bindings_, dict):
raise Exception("Expected endpoint_bindings_ to be a Mapping, received: {}".format(type(endpoint_bindings_)))
if image_metadata_ is not None and not isinstance(image_metadata_, (bytes, str, list)):
raise Exception("Expected image_metadata_ to be a Sequence, received: {}".format(type(image_metadata_)))
if jobs_ is not None and not isinstance(jobs_, (bytes, str, list)):
raise Exception("Expected jobs_ to be a Sequence, received: {}".format(type(jobs_)))
if placement_ is not None and not isinstance(placement_, (bytes, str)):
raise Exception("Expected placement_ to be a str, received: {}".format(type(placement_)))
if series_ is not None and not isinstance(series_, (bytes, str)):
raise Exception("Expected series_ to be a str, received: {}".format(type(series_)))
if subnets_to_zones_ is not None and not isinstance(subnets_to_zones_, (bytes, str, list)):
raise Exception("Expected subnets_to_zones_ to be a Sequence, received: {}".format(type(subnets_to_zones_)))
if tags_ is not None and not isinstance(tags_, dict):
raise Exception("Expected tags_ to be a Mapping, received: {}".format(type(tags_)))
if volume_attachments_ is not None and not isinstance(volume_attachments_, (bytes, str, list)):
raise Exception("Expected volume_attachments_ to be a Sequence, received: {}".format(type(volume_attachments_)))
if volumes_ is not None and not isinstance(volumes_, (bytes, str, list)):
raise Exception("Expected volumes_ to be a Sequence, received: {}".format(type(volumes_)))
self.charm_lxd_profiles = charm_lxd_profiles_
self.cloudinit_userdata = cloudinit_userdata_
self.constraints = constraints_
self.controller_config = controller_config_
self.endpoint_bindings = endpoint_bindings_
self.image_metadata = image_metadata_
self.jobs = jobs_
self.placement = placement_
self.series = series_
self.subnets_to_zones = subnets_to_zones_
self.tags = tags_
self.volume_attachments = volume_attachments_
self.volumes = volumes_
self.unknown_fields = unknown_fields
class ProvisioningInfoResult(Type):
_toSchema = {'error': 'error', 'result': 'result'}
_toPy = {'error': 'error', 'result': 'result'}
def __init__(self, error=None, result=None, **unknown_fields):
'''
error : Error
result : ProvisioningInfo
'''
error_ = Error.from_json(error) if error else None
result_ = ProvisioningInfo.from_json(result) if result else None
# Validate arguments against known Juju API types.
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if result_ is not None and not isinstance(result_, (dict, ProvisioningInfo)):
raise Exception("Expected result_ to be a ProvisioningInfo, received: {}".format(type(result_)))
self.error = error_
self.result = result_
self.unknown_fields = unknown_fields
class ProvisioningInfoResults(Type):
_toSchema = {'results': 'results'}
_toPy = {'results': 'results'}
def __init__(self, results=None, **unknown_fields):
'''
results : typing.Sequence[~ProvisioningInfoResult]
'''
results_ = [ProvisioningInfoResult.from_json(o) for o in results or []]
# Validate arguments against known Juju API types.
if results_ is not None and not isinstance(results_, (bytes, str, list)):
raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
self.results = results_
self.unknown_fields = unknown_fields
class ProvisioningScriptParams(Type):
_toSchema = {'data_dir': 'data-dir', 'disable_package_commands': 'disable-package-commands', 'machine_id': 'machine-id', 'nonce': 'nonce'}
_toPy = {'data-dir': 'data_dir', 'disable-package-commands': 'disable_package_commands', 'machine-id': 'machine_id', 'nonce': 'nonce'}
def __init__(self, data_dir=None, disable_package_commands=None, machine_id=None, nonce=None, **unknown_fields):
'''
data_dir : str
disable_package_commands : bool
machine_id : str
nonce : str
'''
data_dir_ = data_dir
disable_package_commands_ = disable_package_commands
machine_id_ = machine_id
nonce_ = nonce
# Validate arguments against known Juju API types.
if data_dir_ is not None and not isinstance(data_dir_, (bytes, str)):
raise Exception("Expected data_dir_ to be a str, received: {}".format(type(data_dir_)))
if disable_package_commands_ is not None and not isinstance(disable_package_commands_, bool):
raise Exception("Expected disable_package_commands_ to be a bool, received: {}".format(type(disable_package_commands_)))
if machine_id_ is not None and not isinstance(machine_id_, (bytes, str)):
raise Exception("Expected machine_id_ to be a str, received: {}".format(type(machine_id_)))
if nonce_ is not None and not isinstance(nonce_, (bytes, str)):
raise Exception("Expected nonce_ to be a str, received: {}".format(type(nonce_)))
self.data_dir = data_dir_
self.disable_package_commands = disable_package_commands_
self.machine_id = machine_id_
self.nonce = nonce_
self.unknown_fields = unknown_fields
class ProvisioningScriptResult(Type):
    """Juju API wire type carrying a generated provisioning script."""

    _toSchema = {'script': 'script'}
    _toPy = {'script': 'script'}

    def __init__(self, script=None, **unknown_fields):
        '''
        script : str
        '''
        # Validate arguments against known Juju API types.
        if script is not None and not isinstance(script, (bytes, str)):
            raise Exception("Expected script_ to be a str, received: {}".format(type(script)))
        self.script = script
        self.unknown_fields = unknown_fields
class ProxyConfig(Type):
_toSchema = {'ftp': 'ftp', 'http': 'http', 'https': 'https', 'no_proxy': 'no-proxy'}
_toPy = {'ftp': 'ftp', 'http': 'http', 'https': 'https', 'no-proxy': 'no_proxy'}
def __init__(self, ftp=None, http=None, https=None, no_proxy=None, **unknown_fields):
'''
ftp : str
http : str
https : str
no_proxy : | |
# -*- coding: utf-8 -*-
# SendHub Python bindings
import logging
import re
import platform
import sys
import urllib
import urlparse
import textwrap
import datetime
import requests
from version import VERSION
import simplejson as json
try:
import cStringIO as StringIO
except ImportError:
import StringIO
_httplib = 'requests'
logger = logging.getLogger('sendhub')
# Configuration variables
userName = None
password = <PASSWORD>
internalApi = False
apiBase = 'https://api.sendhub.com'
entitlementsBase = 'https://entitlements.sendhub.com'
profileBase = 'https://profile.sendhub.com'
billingBase = 'https://billing.sendhub.com'
apiVersion = None
# Exceptions
class SendHubError(Exception):
    """Base class for all SendHub binding errors.

    Carries the developer message, numeric error code and "more info"
    text returned by the API alongside the human-readable message.
    """

    def __init__(
            self, message=None, devMessage=None, code=None, moreInfo=None):
        super(SendHubError, self).__init__(message)
        if devMessage is None:
            self.devMessage = ''
        else:
            self.devMessage = devMessage.decode('utf-8')
        self.code = -1 if code is None else code
        if moreInfo is None:
            self.moreInfo = ''
        else:
            self.moreInfo = moreInfo.decode('utf-8')
# Compiled once at import time; camelToSnake may be called frequently.
_underscorer1 = re.compile(r'(.)([A-Z][a-z]+)')
_underscorer2 = re.compile('([a-z0-9])([A-Z])')


def camelToSnake(s):
    """
    Convert a camelCase identifier to snake_case.

    Is it ironic that this function is written in camel case, yet it
    converts to snake case? hmm..
    """
    # Pass 1: break before capitalized words; pass 2: break remaining
    # lower/digit-to-upper boundaries, then lowercase everything.
    partially_split = _underscorer1.sub(r'\1_\2', s)
    return _underscorer2.sub(r'\1_\2', partially_split).lower()
import math as _math
import time as _time
def retry(tries, delay=3, backoff=2, desired_outcome=True, fail_value=None):
    """
    Retry decorator with exponential backoff

    Retries a function or method until it produces a desired outcome.

    @param tries int Maximum number of retries after the initial attempt.
    @param delay int Sets the initial delay in seconds, and backoff sets the
        factor by which the delay should lengthen after each failure.
    @param backoff int Must be greater than 1, or else it isn't really a
        backoff. Tries must be at least 0, and delay greater than 0.
    @param desired_outcome Can be a value or a callable.
        If it is a callable the produced value will be passed and success
        is presumed if the invocation returns True.
    @param fail_value Value to return in the case of failure. When left as
        None, False is returned (the historical behavior).
    """
    if backoff <= 1:
        raise ValueError('backoff must be greater than 1')
    tries = _math.floor(tries)
    if tries < 0:
        raise ValueError('tries must be 0 or greater')
    if delay <= 0:
        raise ValueError('delay must be greater than 0')

    def _succeeded(rv):
        """True when rv matches (or satisfies, if callable) the outcome."""
        return rv == desired_outcome or (
            callable(desired_outcome) and desired_outcome(rv) is True)

    def wrapped_retry(fn):
        """Decorative wrapper."""
        def retry_fn(*args, **kwargs):
            """The function which does the actual retrying."""
            # Make mutable:
            mtries, mdelay = tries, delay
            # First attempt.
            rv = fn(*args, **kwargs)
            while True:
                # BUG FIX: the original loop tested mtries before examining
                # the newest result, so the final attempt's outcome was
                # silently discarded. Check the result first.
                if _succeeded(rv):
                    # Success.
                    return rv
                if mtries <= 0:
                    # Ran out of tries :-(
                    break
                # Consume an attempt.
                mtries -= 1
                # Wait...
                _time.sleep(mdelay)
                # Make future wait longer.
                mdelay *= backoff
                # Try again.
                rv = fn(*args, **kwargs)
            # BUG FIX: fail_value was documented but never used (the code
            # always returned False). Honor it; keep False as the default
            # for backwards compatibility with existing callers.
            return fail_value if fail_value is not None else False
        # True decorator -> decorated function.
        return retry_fn
    # @retry(arg[, ...]) -> decorator.
    return wrapped_retry
class APIError(SendHubError):
    """Generic API failure (unexpected status or malformed response)."""
class APIConnectionError(SendHubError):
    """The HTTP exchange with SendHub could not be completed."""
class EntitlementError(SendHubError):
    """Error reported by the entitlements service."""

    def __init__(self, message, devMessage=None, code=None, moreInfo=None):
        super(EntitlementError, self).__init__(
            message, devMessage, code, moreInfo)
class InvalidRequestError(SendHubError):
    """Raised for HTTP 400/404 responses (see handleApiError)."""

    def __init__(self, message, devMessage=None, code=None, moreInfo=None):
        super(InvalidRequestError, self).__init__(
            message, devMessage, code, moreInfo)
class TryAgainLaterError(SendHubError):
    """Raised for HTTP 409 'Try again later' responses; triggers a retry."""

    def __init__(self, message, devMessage=None, code=None, moreInfo=None):
        super(TryAgainLaterError, self).__init__(
            message, devMessage, code, moreInfo)
class AuthenticationError(SendHubError):
    """Raised for HTTP 401 responses or missing credentials."""
class AuthorizationError(SendHubError):
    """Raised for HTTP 403 responses."""
def convertToSendhubObject(resp):
    """Recursively wrap decoded JSON in SendHubObject (sub)classes.

    Lists are converted element-wise; dicts are dispatched on their
    'object' key ('entitlement' -> Entitlement, anything else ->
    SendHubObject); all other values pass through unchanged.
    """
    types = {'entitlement': Entitlement}
    if isinstance(resp, list):
        return [convertToSendhubObject(item) for item in resp]
    if isinstance(resp, dict):
        # Copy so constructFrom cannot mutate the caller's dict.
        resp = resp.copy()
        klassName = resp.get('object')
        if isinstance(klassName, basestring):
            klass = types.get(klassName, SendHubObject)
        else:
            klass = SendHubObject
        return klass.constructFrom(resp)
    return resp
# Network transport
class APIRequestor(object):
    """Issues authenticated HTTP requests to the SendHub API.

    Credentials come from the module-level ``userName``/``password`` and
    are attached to every request: as apiUsername/apiPassword when
    ``internalApi`` is set, otherwise as username/api_key.

    NOTE(review): written for Python 2 (``except X, e`` syntax,
    ``iteritems``, ``unicode``, ``urllib.urlencode``).
    """

    # Per-instance override of the module-level apiBase, if set.
    apiBase = None

    def apiUrl(self, url=''):
        """Return the absolute URL (with trailing slash) for *url*."""
        return '%s%s/' % \
            (self.apiBase if self.apiBase is not None else apiBase, url)

    @classmethod
    def _utf8(cls, value):
        """UTF-8 encode unicode values on Python 2; pass through otherwise."""
        if isinstance(value, unicode) and sys.version_info < (3, 0):
            return value.encode('utf-8')
        else:
            return value

    @classmethod
    def encodeDatetime(cls, dttime):
        """Serialize a datetime as 'YYYY-MM-DDTHH:MM:SS' (no timezone)."""
        return dttime.strftime('%Y-%m-%dT%H:%M:%S')

    @classmethod
    def encode_list(cls, listvalue):
        # only supports lists of things that can be represented as strings
        return ','.join(map(str, listvalue))

    @classmethod
    def _encodeInner(cls, d):
        """Return a copy of *d* with keys/values encoded for the wire."""
        # special case value encoding
        ENCODERS = {
            list: cls.encode_list,
            datetime.datetime: cls.encodeDatetime
        }
        stk = {}
        for key, value in d.iteritems():
            key = cls._utf8(key)
            try:
                # Exact-class dispatch (value.__class__, not isinstance).
                encoder = ENCODERS[value.__class__]
                stk[key] = encoder(value)
            except KeyError:
                # don't need special encoding
                value = cls._utf8(value)
                stk[key] = value
        return stk

    @classmethod
    def encode(cls, d):
        """
        Internal: encode a dict for URL query-string representation
        """
        return urllib.urlencode(cls._encodeInner(d))

    @classmethod
    def encodeJson(cls, d):
        """
        Internal: encode a dict as a JSON request body
        """
        return json.dumps(cls._encodeInner(d))

    @classmethod
    def buildUrl(cls, url, params, authParamsOnly=False):
        """Append *params* to *url*'s query string.

        When authParamsOnly is True only the credential parameters
        (username/password/apiUsername/apiPassword) are kept.
        """
        if authParamsOnly:
            newParams = {}
            for param in params:
                if (param == 'username' or param == 'password'
                        or param == 'apiUsername' or param == 'apiPassword'):
                    newParams[param] = params[param]
            params = newParams
        baseQuery = urlparse.urlparse(url).query
        # '&' when the URL already carries a query string, '?' otherwise.
        if baseQuery:
            return '%s&%s' % (url, cls.encode(params))
        else:
            return '%s?%s' % (url, cls.encode(params))

    def request(self, meth, url, params={}):
        """Issue an API call, retrying on TryAgainLaterError (409).

        NOTE(review): *params* is a shared mutable default; callers must
        not mutate it.
        """
        # The parsed response is smuggled out of the retried closure via
        # this list; @retry only sees the True/False success flag.
        resp = []

        @retry(tries=3)
        def _wrapped_request():
            rbody, rcode = self.performRequest(meth, url, params)
            try:
                resp.append(self.interpretResponse(rbody, rcode))
            except TryAgainLaterError:
                return False
            return True
        if _wrapped_request():
            return resp[0]
        else:
            raise APIError('API retries failed')

    def handleApiError(self, rbody, rcode, resp):
        """Raise the SendHubError subclass matching an error response."""
        try:
            # message is required
            message = resp['message']
        except (KeyError, TypeError):
            raise APIError(
                "Invalid response object from API: %r (HTTP response code "
                "was %d)" % (rbody, rcode), '', rcode, '')
        if 'devMessage' in resp:
            devMessage = resp['devMessage']
        else:
            devMessage = ''
        if 'code' in resp:
            code = resp['code']
        else:
            code = -1
        if 'moreInfo' in resp:
            moreInfo = resp['moreInfo']
        else:
            moreInfo = ''
        # Status-code to exception-type mapping.
        if rcode in [400, 404]:
            raise InvalidRequestError(message, devMessage, code, moreInfo)
        elif rcode == 401:
            raise AuthenticationError(message, devMessage, code, moreInfo)
        elif rcode == 403:
            raise AuthorizationError(message, devMessage, code, moreInfo)
        elif rcode == 409 and 'Try again later' in message:
            raise TryAgainLaterError(message, devMessage, code, moreInfo)
        else:
            raise APIError(message, devMessage, code, moreInfo)

    def performRequest(self, meth, url, params={}):
        """
        Mechanism for issuing an API call

        Injects credentials and diagnostic headers, delegates the HTTP
        exchange to doSendRequest, and returns (body, status_code).
        """
        if userName is None or password is None:
            raise AuthenticationError('No authentication details provided')
        absUrl = self.apiUrl(url)
        # Copy so the caller's dict is not polluted with credentials.
        params = params.copy()
        if internalApi:
            params['apiUsername'] = userName
            params['apiPassword'] = password
        else:
            params['username'] = userName
            params['api_key'] = password
        # Client diagnostics, reported via X-SendHub-Client-User-Agent.
        ua = {
            'bindingsVersion': VERSION,
            'lang': 'python',
            'publisher': 'sendhub',
            'httplib': _httplib,
        }
        for attr, func in [['langVersion', platform.python_version],
                           ['platform', platform.platform],
                           ['uname', lambda: ' '.join(platform.uname())]]:
            try:
                val = func()
            except Exception, e:
                # Platform probes are best-effort; record the failure text.
                val = "!! %s" % e
            ua[attr] = val
        headers = {
            'Content-Type': 'application/json',
            'X-SendHub-Client-User-Agent': json.dumps(ua),
            'User-Agent': 'SendHub/v1 PythonBindings/%s' % (VERSION, )
        }
        if apiVersion is not None:
            headers['SendHub-Version'] = apiVersion
        rbody, rcode = self.doSendRequest(meth, absUrl, headers, params)
        logger.info(
            'API request to %s returned (response code, response body) '
            'of (%d, %r)' % (absUrl, rcode, rbody))
        return rbody, rcode

    def interpretResponse(self, rbody, rcode):
        """Decode the JSON body; raise the mapped error for non-2xx codes."""
        # special case deleted because the response is empty
        if rcode == 204:
            resp = {
                'message': 'OK'
            }
            return resp
        try:
            resp = json.loads(rbody.decode('utf-8'))
        except Exception:
            raise APIError(
                "Invalid response body from API: %s (HTTP response code "
                "was %d)" % (rbody, rcode), '', rcode)
        if not (200 <= rcode < 300):
            self.handleApiError(rbody, rcode, resp)
        return resp

    def doSendRequest(self, meth, absUrl, headers, params):
        """Perform the HTTP exchange via the ``requests`` library.

        Returns (content, status_code); transport failures are re-raised
        by handleRequestError as APIConnectionError.
        """
        meth = meth.lower()
        if meth == 'get' or meth == 'delete':
            if params:
                absUrl = self.buildUrl(absUrl, params)
            data = None
        elif meth in ('post', 'put', 'patch'):
            # Credentials travel in the query string; everything else
            # goes into the JSON body.
            absUrl = self.buildUrl(absUrl, params, True)
            newParams = {}
            for param in params:
                if (param != 'username' and param != 'password'
                        and param != 'apiUsername' and param != 'apiPassword'):
                    newParams[param] = params[param]
            params = newParams
            data = self.encodeJson(params)
        else:
            raise APIConnectionError(
                'Unrecognized HTTP method %r. This may indicate a bug '
                'in the SendHub bindings. Please contact <EMAIL> '
                'for assistance.' % (meth, ))
        kwargs = {}
        try:
            try:
                result = requests.request(meth, absUrl,
                                          headers=headers, data=data,
                                          timeout=80,
                                          **kwargs)
            except TypeError, e:
                raise TypeError(
                    'Warning: It looks like your installed version of the '
                    '"requests" library is not compatible. The underlying '
                    'error was: %s' % (e, ))
            content = result.content
            statusCode = result.status_code
        except Exception, e:
            # handleRequestError always raises, so content/statusCode are
            # only read on the success path.
            self.handleRequestError(e)
        return content, statusCode

    def handleRequestError(self, e):
        """Re-raise a transport-level failure as APIConnectionError."""
        if isinstance(e, requests.exceptions.RequestException):
            msg = "Unexpected error communicating with SendHub. If this " \
                "problem persists, let us know at <EMAIL>."
            err = "%s: %s" % (type(e).__name__, str(e))
        else:
            msg = "Unexpected error communicating with SendHub. It looks " \
                "like there's probably a configuration issue locally. " \
                "If this problem persists, let us know at " \
                "<EMAIL>."
            err = "A %s was raised" % (type(e).__name__, )
            if str(e):
                err += " with error message %s" % (str(e), )
            else:
                err += " with no error message"
        msg = textwrap.fill(msg) + "\n\n(Network error: " + err + ")"
        raise APIConnectionError(msg)
class SendHubObject(object):
def __init__(self, id=None, **params):
self.__dict__['_values'] = set()
def __setattr__(self, k, v):
self.__dict__[k] | |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for keypoint profiles.
Note: Most of the tests in this file are in one-to-one correspondence with tests
in:
//photos/vision/human_sensing/pose_estimation/e3d/utils/keypoint_profiles_test.cc.
Updates should be synced between the two files.
"""
import tensorflow as tf
from poem.core import keypoint_profiles
class KeypointProfileTest(tf.test.TestCase):
def test_std16_keypoint_profile_3d_is_correct(self):
    """Exhaustively checks every attribute of the 3DSTD16 (16-point) profile."""
    profile = keypoint_profiles.create_keypoint_profile_or_die('3DSTD16')
    self.assertEqual(profile.name, '3DSTD16')
    self.assertEqual(profile.keypoint_dim, 3)
    self.assertEqual(profile.keypoint_num, 16)
    self.assertEqual(profile.keypoint_names, [
        'HEAD', 'NECK', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
        'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'SPINE', 'PELVIS',
        'LEFT_HIP', 'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
        'RIGHT_ANKLE'
    ])
    self.assertEqual(
        profile.keypoint_left_right_type(1),
        keypoint_profiles.LeftRightType.CENTRAL)
    self.assertEqual(
        profile.segment_left_right_type(1, 2),
        keypoint_profiles.LeftRightType.LEFT)
    self.assertEqual(profile.offset_keypoint_index, [9])
    self.assertEqual(profile.scale_keypoint_index_pairs, [([1], [8]),
                                                          ([8], [9])])
    self.assertEqual(profile.keypoint_index('LEFT_SHOULDER'), 2)
    # Unknown keypoint names map to -1.
    self.assertEqual(profile.keypoint_index('dummy'), -1)
    self.assertEqual(profile.segment_index_pairs, [([0], [1]), ([1], [2]),
                                                   ([1], [3]), ([1], [8]),
                                                   ([2], [4]), ([3], [5]),
                                                   ([4], [6]), ([5], [7]),
                                                   ([8], [9]), ([9], [10]),
                                                   ([9], [11]), ([10], [12]),
                                                   ([11], [13]), ([12], [14]),
                                                   ([13], [15])])
    # 16x16 keypoint affinity matrix (1s on the diagonal).
    self.assertAllEqual(profile.keypoint_affinity_matrix, [
        [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
        [0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1],
    ])
    # Per-part accessor indices (single-element lists in this profile).
    self.assertEqual(profile.head_keypoint_index, [0])
    self.assertEqual(profile.neck_keypoint_index, [1])
    self.assertEqual(profile.left_shoulder_keypoint_index, [2])
    self.assertEqual(profile.right_shoulder_keypoint_index, [3])
    self.assertEqual(profile.left_elbow_keypoint_index, [4])
    self.assertEqual(profile.right_elbow_keypoint_index, [5])
    self.assertEqual(profile.left_wrist_keypoint_index, [6])
    self.assertEqual(profile.right_wrist_keypoint_index, [7])
    self.assertEqual(profile.spine_keypoint_index, [8])
    self.assertEqual(profile.pelvis_keypoint_index, [9])
    self.assertEqual(profile.left_hip_keypoint_index, [10])
    self.assertEqual(profile.right_hip_keypoint_index, [11])
    self.assertEqual(profile.left_knee_keypoint_index, [12])
    self.assertEqual(profile.right_knee_keypoint_index, [13])
    self.assertEqual(profile.left_ankle_keypoint_index, [14])
    self.assertEqual(profile.right_ankle_keypoint_index, [15])
def test_std13_keypoint_profile_3d_is_correct(self):
    """Exhaustively checks every attribute of the 3DSTD13 (13-point) profile."""
    profile = keypoint_profiles.create_keypoint_profile_or_die('3DSTD13')
    self.assertEqual(profile.name, '3DSTD13')
    self.assertEqual(profile.keypoint_dim, 3)
    self.assertEqual(profile.keypoint_num, 13)
    self.assertEqual(profile.keypoint_names, [
        'HEAD', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW', 'RIGHT_ELBOW',
        'LEFT_WRIST', 'RIGHT_WRIST', 'LEFT_HIP', 'RIGHT_HIP', 'LEFT_KNEE',
        'RIGHT_KNEE', 'LEFT_ANKLE', 'RIGHT_ANKLE'
    ])
    self.assertEqual(
        profile.keypoint_left_right_type(1),
        keypoint_profiles.LeftRightType.LEFT)
    self.assertEqual(
        profile.segment_left_right_type(1, 2),
        keypoint_profiles.LeftRightType.CENTRAL)
    # Composite parts (NECK, PELVIS, SPINE) are multi-index lists here,
    # since this profile has no dedicated keypoints for them.
    self.assertEqual(profile.offset_keypoint_index, [7, 8])
    self.assertEqual(profile.scale_keypoint_index_pairs, [([1, 2], [7, 8])])
    self.assertEqual(profile.keypoint_index('LEFT_SHOULDER'), 1)
    # Unknown keypoint names map to -1.
    self.assertEqual(profile.keypoint_index('dummy'), -1)
    self.assertEqual(profile.segment_index_pairs, [([0], [1, 2]), ([1, 2], [1]),
                                                   ([1, 2], [2]),
                                                   ([1, 2], [1, 2, 7, 8]),
                                                   ([1], [3]), ([2], [4]),
                                                   ([3], [5]), ([4], [6]),
                                                   ([1, 2, 7, 8], [7, 8]),
                                                   ([7, 8], [7]), ([7, 8], [8]),
                                                   ([7], [9]), ([8], [10]),
                                                   ([9], [11]), ([10], [12])])
    # 13x13 keypoint affinity matrix (1s on the diagonal).
    self.assertAllEqual(profile.keypoint_affinity_matrix, [
        [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0],
        [1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0],
        [0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
        [0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
        [0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1],
    ])
    # Per-part accessor indices.
    self.assertEqual(profile.head_keypoint_index, [0])
    self.assertEqual(profile.neck_keypoint_index, [1, 2])
    self.assertEqual(profile.left_shoulder_keypoint_index, [1])
    self.assertEqual(profile.right_shoulder_keypoint_index, [2])
    self.assertEqual(profile.left_elbow_keypoint_index, [3])
    self.assertEqual(profile.right_elbow_keypoint_index, [4])
    self.assertEqual(profile.left_wrist_keypoint_index, [5])
    self.assertEqual(profile.right_wrist_keypoint_index, [6])
    self.assertEqual(profile.spine_keypoint_index, [1, 2, 7, 8])
    self.assertEqual(profile.pelvis_keypoint_index, [7, 8])
    self.assertEqual(profile.left_hip_keypoint_index, [7])
    self.assertEqual(profile.right_hip_keypoint_index, [8])
    self.assertEqual(profile.left_knee_keypoint_index, [9])
    self.assertEqual(profile.right_knee_keypoint_index, [10])
    self.assertEqual(profile.left_ankle_keypoint_index, [11])
    self.assertEqual(profile.right_ankle_keypoint_index, [12])
    # Name-based lookup mirrors the per-part accessors above.
    self.assertEqual(profile.get_standard_part_index('HEAD'), [0])
    self.assertEqual(profile.get_standard_part_index('NECK'), [1, 2])
    self.assertEqual(profile.get_standard_part_index('LEFT_SHOULDER'), [1])
    self.assertEqual(profile.get_standard_part_index('RIGHT_SHOULDER'), [2])
    self.assertEqual(profile.get_standard_part_index('LEFT_ELBOW'), [3])
    self.assertEqual(profile.get_standard_part_index('RIGHT_ELBOW'), [4])
    self.assertEqual(profile.get_standard_part_index('LEFT_WRIST'), [5])
    self.assertEqual(profile.get_standard_part_index('RIGHT_WRIST'), [6])
    self.assertEqual(profile.get_standard_part_index('SPINE'), [1, 2, 7, 8])
    self.assertEqual(profile.get_standard_part_index('PELVIS'), [7, 8])
    self.assertEqual(profile.get_standard_part_index('LEFT_HIP'), [7])
    self.assertEqual(profile.get_standard_part_index('RIGHT_HIP'), [8])
    self.assertEqual(profile.get_standard_part_index('LEFT_KNEE'), [9])
    self.assertEqual(profile.get_standard_part_index('RIGHT_KNEE'), [10])
    self.assertEqual(profile.get_standard_part_index('LEFT_ANKLE'), [11])
    self.assertEqual(profile.get_standard_part_index('RIGHT_ANKLE'), [12])
def test_legacy_h36m17_keypoint_profile_3d_is_correct(self):
    """Exhaustively checks the LEGACY_3DH36M17 (17-point Human3.6M) profile."""
    profile = keypoint_profiles.create_keypoint_profile_or_die(
        'LEGACY_3DH36M17')
    self.assertEqual(profile.name, 'LEGACY_3DH36M17')
    self.assertEqual(profile.keypoint_dim, 3)
    self.assertEqual(profile.keypoint_num, 17)
    self.assertEqual(profile.keypoint_names, [
        'Hip', 'Head', 'Neck/Nose', 'Thorax', 'LShoulder', 'RShoulder',
        'LElbow', 'RElbow', 'LWrist', 'RWrist', 'Spine', 'LHip', 'RHip',
        'LKnee', 'RKnee', 'LFoot', 'RFoot'
    ])
    self.assertEqual(
        profile.keypoint_left_right_type(1),
        keypoint_profiles.LeftRightType.CENTRAL)
    self.assertEqual(
        profile.segment_left_right_type(1, 4),
        keypoint_profiles.LeftRightType.LEFT)
    self.assertEqual(profile.offset_keypoint_index, [0])
    self.assertEqual(profile.scale_keypoint_index_pairs, [([0], [10]),
                                                          ([10], [3])])
    self.assertEqual(profile.keypoint_index('Thorax'), 3)
    # Unknown keypoint names map to -1.
    self.assertEqual(profile.keypoint_index('dummy'), -1)
    self.assertEqual(profile.segment_index_pairs, [([0], [10]), ([0], [11]),
                                                   ([0], [12]), ([10], [3]),
                                                   ([11], [13]), ([12], [14]),
                                                   ([13], [15]), ([14], [16]),
                                                   ([3], [2]), ([3], [4]),
                                                   ([3], [5]), ([2], [1]),
                                                   ([4], [6]), ([5], [7]),
                                                   ([6], [8]), ([7], [9])])
    # 17x17 keypoint affinity matrix (1s on the diagonal).
    self.assertAllEqual(profile.keypoint_affinity_matrix, [
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
        [0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0],
        [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1],
    ])
    # Per-part accessor indices (H36M naming maps onto standard parts).
    self.assertEqual(profile.head_keypoint_index, [1])
    self.assertEqual(profile.neck_keypoint_index, [3])
    self.assertEqual(profile.left_shoulder_keypoint_index, [4])
    self.assertEqual(profile.right_shoulder_keypoint_index, [5])
    self.assertEqual(profile.left_elbow_keypoint_index, [6])
    self.assertEqual(profile.right_elbow_keypoint_index, [7])
    self.assertEqual(profile.left_wrist_keypoint_index, [8])
    self.assertEqual(profile.right_wrist_keypoint_index, [9])
    self.assertEqual(profile.spine_keypoint_index, [10])
    self.assertEqual(profile.pelvis_keypoint_index, [0])
    self.assertEqual(profile.left_hip_keypoint_index, [11])
    self.assertEqual(profile.right_hip_keypoint_index, [12])
    self.assertEqual(profile.left_knee_keypoint_index, [13])
    self.assertEqual(profile.right_knee_keypoint_index, [14])
    self.assertEqual(profile.left_ankle_keypoint_index, [15])
    self.assertEqual(profile.right_ankle_keypoint_index, [16])
    # Name-based lookup mirrors the per-part accessors above.
    self.assertEqual(profile.get_standard_part_index('HEAD'), [1])
    self.assertEqual(profile.get_standard_part_index('NECK'), [3])
    self.assertEqual(profile.get_standard_part_index('LEFT_SHOULDER'), [4])
    self.assertEqual(profile.get_standard_part_index('RIGHT_SHOULDER'), [5])
    self.assertEqual(profile.get_standard_part_index('LEFT_ELBOW'), [6])
    self.assertEqual(profile.get_standard_part_index('RIGHT_ELBOW'), [7])
    self.assertEqual(profile.get_standard_part_index('LEFT_WRIST'), [8])
    self.assertEqual(profile.get_standard_part_index('RIGHT_WRIST'), [9])
    self.assertEqual(profile.get_standard_part_index('SPINE'), [10])
    self.assertEqual(profile.get_standard_part_index('PELVIS'), [0])
    self.assertEqual(profile.get_standard_part_index('LEFT_HIP'), [11])
    self.assertEqual(profile.get_standard_part_index('RIGHT_HIP'), [12])
    self.assertEqual(profile.get_standard_part_index('LEFT_KNEE'), [13])
    self.assertEqual(profile.get_standard_part_index('RIGHT_KNEE'), [14])
    self.assertEqual(profile.get_standard_part_index('LEFT_ANKLE'), [15])
    self.assertEqual(profile.get_standard_part_index('RIGHT_ANKLE'), [16])
def test_legacy_h36m13_keypoint_profile_3d_is_correct(self):
profile = keypoint_profiles.create_keypoint_profile_or_die(
'LEGACY_3DH36M13')
self.assertEqual(profile.name, 'LEGACY_3DH36M13')
self.assertEqual(profile.keypoint_dim, 3)
self.assertEqual(profile.keypoint_num, 13)
self.assertEqual(profile.keypoint_names, [
'Head', 'LShoulder', 'RShoulder', 'LElbow', 'RElbow', | |
sani = aim_utils.sanitize_display_name
scope = self._scope_name_if_common
d_name = self._display_name(l3out)
filter_name = scope(l3out.tenant_name, 'EXT-%s' % l3out.name)
fltr = resource.Filter(
tenant_name=l3out.tenant_name,
name=filter_name,
display_name=sani(scope(l3out.tenant_name, 'EXT-%s' % d_name)))
entry = resource.FilterEntry(
tenant_name=fltr.tenant_name,
filter_name=fltr.name,
name='Any',
display_name='Any')
contract = self._get_nat_contract(ctx, l3out)
subject = resource.ContractSubject(
tenant_name=contract.tenant_name,
contract_name=contract.name,
name='Allow', display_name='Allow')
subject_filter = resource.ContractSubjFilter(
tenant_name=contract.tenant_name,
contract_name=contract.name,
contract_subject_name='Allow',
filter_name=fltr.name)
bd = self._get_nat_bd(ctx, l3out)
bd.vrf_name = l3out.vrf_name
ap, epg = self._get_nat_ap_epg(ctx, l3out)
vm_doms = getattr(
self, 'vmm_domains',
[{'type': d.type, 'name': d.name} for d in
self.mgr.find(ctx, resource.VMMDomain)])
phy_doms = getattr(
self, 'physical_domains',
[{'name': d.name} for d in
self.mgr.find(ctx, resource.PhysicalDomain)])
epg.bd_name = bd.name
epg.provided_contract_names = [contract.name]
epg.consumed_contract_names = [contract.name]
epg.vmm_domains = vm_doms
epg.physical_domains = phy_doms
return [fltr, entry, contract, subject, subject_filter, bd, ap, epg]
def _select_domains(self, objs, vmm_domains=None, phys_domains=None):
    """Override domain lists on any EndpointGroup objects within objs."""
    for item in objs:
        if not isinstance(item, resource.EndpointGroup):
            continue
        if vmm_domains is not None:
            item.vmm_domains = vmm_domains
        if phys_domains is not None:
            item.physical_domains = phys_domains
def _create_nat_epg(self, ctx, l3out, vmm_domains=None, phys_domains=None):
    """Create the NAT EPG (and supporting objects) for l3out, if missing."""
    objs = self._get_nat_objects(ctx, l3out)
    self._select_domains(objs, vmm_domains=vmm_domains,
                         phys_domains=phys_domains)
    with ctx.store.begin(subtransactions=True):
        # Only create objects that do not already exist.
        for obj in objs:
            if not self.mgr.get(ctx, obj):
                self.mgr.create(ctx, obj)
def _delete_nat_epg(self, ctx, l3out):
    """Delete the NAT EPG and its supporting objects for l3out."""
    with ctx.store.begin(subtransactions=True):
        # Remove all Subnets under the NAT BD first.
        nat_bd = self._get_nat_bd(ctx, l3out)
        for sub in self.mgr.find(ctx, resource.Subnet,
                                 tenant_name=nat_bd.tenant_name,
                                 bd_name=nat_bd.name):
            self.mgr.delete(ctx, sub)
        # Tear objects down in reverse creation order; keep the
        # ApplicationProfile alive while it still holds EPGs.
        for obj in reversed(self._get_nat_objects(ctx, l3out)):
            if isinstance(obj, resource.ApplicationProfile):
                epgs = self.mgr.find(ctx, resource.EndpointGroup,
                                     tenant_name=obj.tenant_name,
                                     app_profile_name=obj.name)
                if epgs:
                    continue
            self.mgr.delete(ctx, obj)
def _update_contract(self, ctx, ext_net, contract, is_remove):
    """Add or remove contract from ext_net's provided/consumed lists."""
    if is_remove:
        prov = [name for name in ext_net.provided_contract_names
                if name != contract.name]
        cons = [name for name in ext_net.consumed_contract_names
                if name != contract.name]
    else:
        # Prepend the contract to the existing lists.
        prov = [contract.name] + list(ext_net.provided_contract_names)
        cons = [contract.name] + list(ext_net.consumed_contract_names)
    return self.mgr.update(ctx, ext_net,
                           provided_contract_names=prov,
                           consumed_contract_names=cons)
def _is_visible(self, target_tenant, from_tenant):
    """A tenant's objects are visible from the same tenant or 'common'."""
    return target_tenant in (from_tenant, 'common')
def _vrf_by_name(self, ctx, vrf_name, tenant_name_hint):
    """Find a VRF by name, preferring tenant_name_hint over 'common'.

    Returns None when no match exists in either tenant.
    """
    for tenant in (tenant_name_hint, 'common'):
        matches = self.mgr.find(ctx, resource.VRF, tenant_name=tenant,
                                name=vrf_name)
        if matches:
            return matches[0]
def _scope_name_if_common(self, tenant_name, name):
    """Prefix name with the common scope for 'common'-tenant objects."""
    if tenant_name != 'common':
        return name
    prefix = getattr(self, 'common_scope', None)
    prefix = (prefix + '_') if prefix else ''
    return aim_utils.sanitize_display_name(prefix + name)
class NoNatStrategy(NatStrategyMixin):
"""No NAT Strategy.
Provides direct external connectivity without any network
address translation.
"""
def __init__(self, mgr):
super(NoNatStrategy, self).__init__(mgr)
def delete_external_network(self, ctx, external_network):
"""Clean-up any connected VRFs before deleting the external network."""
with ctx.store.begin(subtransactions=True):
ext_net = self.mgr.get(ctx, external_network)
if not ext_net:
return
l3out = self.mgr.get(ctx,
self._ext_net_to_l3out(external_network))
vrf = self._vrf_by_name(ctx, l3out.vrf_name, l3out.tenant_name)
if vrf:
self._disconnect_vrf_from_l3out(ctx, l3out, vrf)
self._delete_ext_net(ctx, ext_net)
def connect_vrf(self, ctx, external_network, vrf):
"""Allow external connectivity to VRF.
Make external_network provide/consume specified contracts.
Locate BDs referring to the VRF, and include L3Outside
in their l3out_names.
"""
with ctx.store.begin(subtransactions=True):
if not self._is_visible(vrf.tenant_name,
external_network.tenant_name):
raise VrfNotVisibleFromExternalNetwork(
vrf=vrf, ext_net=external_network)
ext_net = self.mgr.get(ctx, external_network)
if not ext_net:
return
l3out = self.mgr.get(ctx,
self._ext_net_to_l3out(external_network))
old_vrf = self._vrf_by_name(ctx, l3out.vrf_name,
l3out.tenant_name)
if not old_vrf or old_vrf.identity != vrf.identity:
LOG.error('connect_vrf: cannot change VRF connected to '
'no-NAT L3Outside %s',
l3out)
raise L3OutsideVrfChangeDisallowed(l3out=l3out,
old_vrf=old_vrf, vrf=vrf)
nat_bd = self._get_nat_bd(ctx, l3out)
self._set_bd_l3out(ctx, l3out, vrf, exclude_bd=nat_bd)
contract = self._get_nat_contract(ctx, l3out)
prov = list(set(external_network.provided_contract_names +
[contract.name]))
cons = list(set(external_network.consumed_contract_names +
[contract.name]))
self.mgr.update(ctx, external_network,
provided_contract_names=prov,
consumed_contract_names=cons)
def disconnect_vrf(self, ctx, external_network, vrf):
"""Remove external connectivity for VRF.
Remove contracts provided/consumed by external_network.
Locate BDs referring to the VRF, and exclude L3Outside
from their l3out_names.
"""
with ctx.store.begin(subtransactions=True):
ext_net = self.mgr.get(ctx, external_network)
if not ext_net:
return
l3out = self.mgr.get(ctx,
self._ext_net_to_l3out(external_network))
old_vrf = self._vrf_by_name(ctx, l3out.vrf_name,
l3out.tenant_name)
if old_vrf and old_vrf.identity != vrf.identity:
LOG.info('disconnect_vrf: %s is not connected to %s',
ext_net, vrf)
return
self._disconnect_vrf_from_l3out(ctx, l3out, vrf)
contract = self._get_nat_contract(ctx, l3out)
self.mgr.update(ctx, external_network,
provided_contract_names=[contract.name],
consumed_contract_names=[contract.name])
def read_vrfs(self, ctx, external_network):
l3out = self.mgr.get(ctx,
self._ext_net_to_l3out(external_network))
vrf = self._vrf_by_name(ctx, l3out.vrf_name, l3out.tenant_name)
return [vrf] if vrf else []
def set_bd_l3out(self, ctx, bridge_domain, l3outside):
bridge_domain = self.mgr.get(ctx, bridge_domain)
if bridge_domain and l3outside.name not in bridge_domain.l3out_names:
self.mgr.update(
ctx, bridge_domain,
l3out_names=bridge_domain.l3out_names + [l3outside.name])
def unset_bd_l3out(self, ctx, bridge_domain, l3outside):
bridge_domain = self.mgr.get(ctx, bridge_domain)
if bridge_domain and l3outside.name in bridge_domain.l3out_names:
bridge_domain.l3out_names.remove(l3outside.name)
self.mgr.update(ctx, bridge_domain,
l3out_names=bridge_domain.l3out_names)
def _get_bds_in_vrf_for_l3out(self, ctx, vrf, l3out):
if vrf.tenant_name == 'common' and l3out.tenant_name == 'common':
# BDs in all tenants are candidates - locate all BDs whose
# vrf_name matches vrf.name, and exclude those that have a
# local VRF aliasing the given VRF.
all_bds = self.mgr.find(ctx, resource.BridgeDomain,
vrf_name=vrf.name)
bd_tenants = set([b.tenant_name for b in all_bds])
bd_tenants = [t for t in bd_tenants
if not self.mgr.get(
ctx, resource.VRF(tenant_name=t, name=vrf.name))]
return [b for b in all_bds if b.tenant_name in bd_tenants]
elif (vrf.tenant_name == 'common' or
vrf.tenant_name == l3out.tenant_name):
# VRF and L3out are visible only to BDs in l3out's tenant
return self.mgr.find(ctx, resource.BridgeDomain,
tenant_name=l3out.tenant_name,
vrf_name=vrf.name)
# Other combinations of L3Out and VRF are not valid
# configurations and can be excluded:
# 1. L3out in common, VRF not in common: VRF is not
# visible to L3out
# 2. L3Out and VRF are in different non-common tenants:
# VRF is not visible to L3out
return []
def _set_bd_l3out(self, ctx, l3outside, vrf, exclude_bd=None):
# update all the BDs
for bd in self._get_bds_in_vrf_for_l3out(ctx, vrf, l3outside):
if exclude_bd and exclude_bd.identity == bd.identity:
continue
# Add L3Out to existing list
if l3outside.name not in bd.l3out_names:
self.mgr.update(ctx, bd,
l3out_names=bd.l3out_names + [l3outside.name])
def _unset_bd_l3out(self, ctx, l3outside, vrf, exclude_bd=None):
# update all the BDs
for bd in self._get_bds_in_vrf_for_l3out(ctx, vrf, l3outside):
if exclude_bd and exclude_bd.identity == bd.identity:
continue
# Remove L3Out from existing list
if l3outside.name in bd.l3out_names:
bd.l3out_names.remove(l3outside.name)
self.mgr.update(ctx, bd, l3out_names=bd.l3out_names)
def _disconnect_vrf_from_l3out(self, ctx, l3outside, vrf):
    """Detach vrf's BDs from l3outside, leaving the NAT BD untouched."""
    self._unset_bd_l3out(ctx, l3outside, vrf,
                         exclude_bd=self._get_nat_bd(ctx, l3outside))
class DistributedNatStrategy(NatStrategyMixin):
"""Distributed NAT Strategy.
Provides external connectivity with network address
translation (DNAT/SNAT) where the translation is distributed
amongst nodes in the fabric.
"""
def delete_external_network(self, ctx, external_network):
    """Delete external-network from the main L3Out and all its clones."""
    with ctx.store.begin(subtransactions=True):
        l3out = self.mgr.get(ctx,
                             self._ext_net_to_l3out(external_network))
        ext_net_db = self.mgr.get(ctx, external_network)
        if not (l3out and ext_net_db):
            # Nothing to clean up if either object is already gone.
            return
        # Remove the shadow copy from each cloned L3Out, garbage
        # collecting clones that end up empty.
        for shadow_l3out in self._find_l3out_clones(ctx, l3out):
            shadow_ext_net = resource.ExternalNetwork(
                tenant_name=shadow_l3out.tenant_name,
                l3out_name=shadow_l3out.name,
                name=ext_net_db.name)
            self._delete_ext_net(ctx, shadow_ext_net)
            self._delete_unused_l3out(ctx, shadow_l3out)
        # Finally remove the ExternalNetwork from the main L3Out.
        self._delete_ext_net(ctx, ext_net_db)
def update_external_cidrs(self, ctx, external_network, external_cidrs):
    """Update external CIDRs in main and cloned ExternalNetworks."""
    l3out = self.mgr.get(ctx, self._ext_net_to_l3out(external_network))
    ext_net_db = self.mgr.get(ctx, external_network)
    if not (l3out and ext_net_db):
        return
    shadows = self._find_l3out_clones(ctx, l3out)
    with ctx.store.begin(subtransactions=True):
        # Keep every shadow copy's subnet list in sync first, then the
        # main ExternalNetwork.
        for shadow in shadows:
            shadow_ext_net = resource.ExternalNetwork(
                tenant_name=shadow.tenant_name,
                l3out_name=shadow.name,
                name=external_network.name)
            self._manage_external_subnets(ctx, shadow_ext_net,
                                          external_cidrs)
        self._manage_external_subnets(ctx, ext_net_db, external_cidrs)
def connect_vrf(self, ctx, external_network, vrf):
    """Allow external connectivity to VRF.

    Creates, if required, a shadow L3Outside for the L3Outside/VRF
    pair in the VRF's tenant (with its vrf_name pointing at the VRF)
    plus the shadow ExternalNetwork and ExternalSubnet(s).
    """
    with ctx.store.begin(subtransactions=True):
        shadow_ext_net = self._create_shadow(ctx, external_network, vrf)
    return shadow_ext_net
def disconnect_vrf(self, ctx, external_network, vrf):
    """Remove external connectivity for VRF.

    Deletes the ExternalNetwork (and contained ExternalSubnets) from
    the shadow L3Outside; the shadow L3Outside itself is removed once
    it holds no more ExternalNetworks.
    """
    with ctx.store.begin(subtransactions=True):
        self._delete_shadow(ctx, external_network, vrf)
def read_vrfs(self, ctx, external_network):
    """Return the VRFs connected to external_network via clone records."""
    l3out = self.mgr.get(ctx,
                         self._ext_net_to_l3out(external_network))
    vrfs = []
    for record in self.db.get_clones(ctx, l3out):
        # Each clone record is a (tenant_name, name) pair identifying a
        # shadow L3Outside.
        shadow = self.mgr.get(
            ctx, resource.L3Outside(tenant_name=record[0],
                                    name=record[1]))
        if not shadow:
            continue
        vrf = self.mgr.get(
            ctx, resource.VRF(tenant_name=shadow.tenant_name,
                              name=shadow.vrf_name))
        if vrf:
            vrfs.append(vrf)
    return vrfs
def _generate_l3out_name(self, l3outside, vrf):
    """Return (name, display_name) for the shadow of an L3Out/VRF pair."""
    # Both names encode the relationship between the L3Out and the VRF.
    return (
        '%s-%s' % (l3outside.name, vrf.name),
        aim_utils.sanitize_display_name(
            '%s-%s' % (self._display_name(l3outside),
                       self._display_name(vrf))))
def _make_l3out_clone(self, ctx, l3out, vrf):
    """Build (without persisting) the shadow L3Outside for vrf's tenant."""
    name, display_name = self._generate_l3out_name(l3out, vrf)
    return resource.L3Outside(tenant_name=vrf.tenant_name,
                              name=name,
                              display_name=display_name,
                              vrf_name=vrf.name)
def _create_shadow(self, ctx, ext_net, vrf, with_nat_epg=True):
    """Clone ExternalNetwork as a shadow.

    Creates (or overwrites) a shadow L3Outside in the VRF's tenant plus
    a shadow ExternalNetwork mirroring ext_net's contracts and
    ExternalSubnets. When with_nat_epg is True, the shadow is pointed
    at the main L3Out's NAT EPG. Returns the shadow ExternalNetwork,
    or None if ext_net does not exist.
    """
    ext_net_db = self.mgr.get(ctx, ext_net)
    if not ext_net_db:
        # Nothing to clone from.
        return
    l3out = self.mgr.get(ctx, self._ext_net_to_l3out(ext_net_db))
    clone_l3out = self._make_l3out_clone(ctx, l3out, vrf)
    # Copy name and contract lists from the caller-supplied object,
    # display_name from the stored one.
    clone_ext_net = resource.ExternalNetwork(
        tenant_name=clone_l3out.tenant_name,
        l3out_name=clone_l3out.name,
        display_name=ext_net_db.display_name,
        **{k: getattr(ext_net, k)
           for k in ['name',
                     'provided_contract_names',
                     'consumed_contract_names']})
    if with_nat_epg:
        # Point the shadow at the main L3Out's NAT EPG.
        _, nat_epg = self._get_nat_ap_epg(ctx, l3out)
        clone_ext_net.nat_epg_dn = nat_epg.dn
    with ctx.store.begin(subtransactions=True):
        # overwrite=True makes repeated connect calls idempotent.
        self.mgr.create(ctx, clone_l3out, overwrite=True)
        self.mgr.create(ctx, clone_ext_net, overwrite=True)
        # Mirror the original ExternalNetwork's subnets onto the clone.
        cidrs = self.mgr.find(ctx, resource.ExternalSubnet,
                              tenant_name=ext_net_db.tenant_name,
                              l3out_name=ext_net_db.l3out_name,
                              external_network_name=ext_net_db.name)
        cidrs = [c.cidr for c in cidrs]
        self._manage_external_subnets(ctx, clone_ext_net, cidrs)
        # Set this item as a clone (register only once).
        if not self.db.get(ctx, clone_l3out):
            self.db.set(ctx, l3out, clone_l3out)
    return clone_ext_net
def _delete_shadow(self, ctx, ext_net, vrf):
    """Delete the shadow ExternalNetwork for (ext_net, vrf), garbage
    collecting the shadow L3Outside if it became unused."""
    l3out = self.mgr.get(ctx, self._ext_net_to_l3out(ext_net))
    shadow_l3out = resource.L3Outside(
        tenant_name=vrf.tenant_name,
        name=self._generate_l3out_name(l3out, vrf)[0])
    shadow_ext_net = resource.ExternalNetwork(
        tenant_name=shadow_l3out.tenant_name,
        l3out_name=shadow_l3out.name,
        name=ext_net.name)
    with ctx.store.begin(subtransactions=True):
        self._delete_ext_net(ctx, shadow_ext_net)
        self._delete_unused_l3out(ctx, shadow_l3out)
def _find_l3out_clones(self, ctx, l3outside):
clone_keys = self.db.get_clones(ctx, | |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Unit tests for DayNightMask class """
import unittest
from datetime import datetime
import cf_units as unit
import iris
import numpy as np
from iris.tests import IrisTest
from improver.synthetic_data.set_up_test_cubes import (
add_coordinate,
set_up_variable_cube,
)
from improver.utilities.solar import DayNightMask
class Test__init__(IrisTest):
    """Test initialisation of the DayNightMask class."""

    def test_basic_init(self):
        """Check the default day/night flag values on a new instance."""
        plugin = DayNightMask()
        self.assertEqual(plugin.day, 1)
        self.assertEqual(plugin.night, 0)
class Test__repr__(IrisTest):
    """Test string representation."""

    def test_basic_repr(self):
        """A default DayNightMask reports its day and night values."""
        self.assertEqual(
            str(DayNightMask()), "<DayNightMask : Day = 1, Night = 0>")
class Test__create_daynight_mask(IrisTest):
    """Test creation of the blank day-night mask cube."""

    def setUp(self):
        """Set up the cube for testing."""
        data = np.ones((1, 16, 16), dtype=np.float32)
        data[:, 7, 7] = 0.0
        self.cube = set_up_variable_cube(
            data,
            "precipitation_amount",
            "kg m^-2",
            "equalarea",
            attributes={"institution": "Met Office", "title": "A model field"},
        )

    def test_basic_daynight_mask(self):
        """A fresh mask cube is all-night, int32 and correctly labelled."""
        plugin = DayNightMask()
        result = plugin._create_daynight_mask(self.cube)
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertEqual(result.long_name, "day_night_mask")
        self.assertEqual(result.units, unit.Unit("1"))
        # The blank mask starts out entirely "night".
        self.assertEqual(result.data.min(), plugin.night)
        self.assertEqual(result.data.max(), plugin.night)
        self.assertEqual(result.attributes["title"], "Day-Night mask")
        self.assertEqual(result.attributes["institution"], "Met Office")
        self.assertEqual(result.dtype, np.int32)
class Test__daynight_lat_lon_cube(IrisTest):
    """Test the day-night calculation on lat-lon grids."""

    @staticmethod
    def _blank_mask(domain_corner):
        """Build an all-night mask cube for a 16x16 lat-lon grid."""
        cube = set_up_variable_cube(
            np.ones((16, 16), dtype=np.float32),
            "precipitation_amount",
            "kg m^-2",
            grid_spacing=1,
            domain_corner=domain_corner,
        )
        return DayNightMask()._create_daynight_mask(cube)

    def setUp(self):
        """Set up mask cubes: one with negative longitudes, one with
        longitudes expressed in the 0-360 convention."""
        self.mask_cube = self._blank_mask((49, -8))
        self.mask_cube_360 = self._blank_mask((49, 345))

    def test_basic_lat_lon_cube(self):
        """At 1200 UTC on day 10 the whole domain is day."""
        result = DayNightMask()._daynight_lat_lon_cube(
            self.mask_cube, 10, 12.0)
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertArrayEqual(result.data, np.ones((16, 16)))

    def test_basic_lat_lon_cube_360(self):
        """At 0000 UTC on day 10 the 360-longitude domain is all night."""
        result = DayNightMask()._daynight_lat_lon_cube(
            self.mask_cube_360, 10, 0.0)
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertArrayEqual(result.data, np.zeros((16, 16)))
class Test_process(IrisTest):
"""Test DayNight Mask."""
def setUp(self):
    """Set up one equal-area cube and two lat-lon cubes for testing."""
    data = np.ones((16, 16), dtype=np.float32)
    data[7, 7] = 0.0
    vt = datetime(2015, 11, 20, 8, 0)
    # Validity and forecast-reference time coincide for every cube.
    common = {"time": vt, "frt": vt}
    self.cube = set_up_variable_cube(
        data,
        "precipitation_amount",
        "kg m^-2",
        "equalarea",
        grid_spacing=2000,
        domain_corner=(0, -30000),
        **common,
    )
    # Lat-lon cube with negative longitudes.
    self.cube_lat_lon = set_up_variable_cube(
        data,
        "precipitation_amount",
        "kg m^-2",
        grid_spacing=1,
        domain_corner=(49, -8),
        **common,
    )
    # Lat-lon cube with longitudes in the 0-360 convention.
    self.cube_lat_lon_360 = set_up_variable_cube(
        data,
        "precipitation_amount",
        "kg m^-2",
        grid_spacing=1,
        domain_corner=(49, 345),
        **common,
    )
def test_basic_standard_grid_ccrs(self):
    """Test day_night mask with standard_grid_ccrs projection."""
    # At 0800 UTC only the north-east corner of the domain is in
    # daylight: rows 0-4 become day from these columns onwards.
    expected = np.zeros((16, 16), dtype=int)
    for row, first_day_col in enumerate([12, 13, 14, 14, 15]):
        expected[row, first_day_col:] = 1
    result = DayNightMask().process(self.cube)
    self.assertArrayEqual(result.data, expected)
def test_time_as_dimension(self):
    """Test day_night mask for a cube with multiple times."""
    times = [datetime(2015, 11, 20, 8, 0), datetime(2015, 11, 20, 14, 0)]
    cube = add_coordinate(self.cube, times, "time", is_datetime=True)
    # The first time matches the single-time case; by 1400 UTC the
    # whole domain is in daylight.
    expected_first = np.zeros((16, 16), dtype=int)
    for row, first_day_col in enumerate([12, 13, 14, 14, 15]):
        expected_first[row, first_day_col:] = 1
    expected = np.stack([expected_first, np.ones((16, 16), dtype=int)])
    result = DayNightMask().process(cube)
    self.assertArrayEqual(result.data, expected)
    self.assertEqual(result.shape, cube.shape)
def test_basic_lat_lon(self):
"""Test day_night mask with lat lon data."""
result = DayNightMask().process(self.cube_lat_lon)
expected_result = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, | |
{} -g {} -k '{}'"
.format(edge_device_ids[1], LIVE_HUB, LIVE_RG, content_path), checks=[self.check('length([*])', 3)])
self.cmd("iot edge set-modules -d {} -n {} -g {} --content '{}'"
.format(edge_device_ids[1], LIVE_HUB, LIVE_RG, '{generic_content}'), self.check('length([*])', 3))
# With connection string
self.cmd("iot edge set-modules -d {} --login {} -k '{}'"
.format(edge_device_ids[1], LIVE_HUB_CS, content_path_v1), checks=[self.check('length([*])', 4)])
def test_hub_device_twins(self):
    """End-to-end device-twin tests against a live IoT Hub.

    Exercises twin show/update/replace both via hub name + resource
    group and via the hub connection string, plus a negative case for
    the payload type enforcer.
    NOTE(review): the steps are order-dependent - each command mutates
    live hub state that later commands rely on.
    """
    self.kwargs['generic_dict'] = {'key': 'value'}
    # Deliberately malformed JSON used for the error case below.
    self.kwargs['bad_format'] = "{'key: 'value'}"
    device_count = 2
    names = self._create_entity_names(devices=device_count)
    device_ids = names['device_ids']
    for i in range(device_count):
        self.cmd('iot hub device-identity create -d {} -n {} -g {}'.format(device_ids[i], LIVE_HUB, LIVE_RG),
                 checks=[self.check('deviceId', device_ids[i])])
    self.cmd('iot hub device-twin show -d {} -n {} -g {}'.format(device_ids[0], LIVE_HUB, LIVE_RG),
             checks=[self.check('deviceId', device_ids[0]),
                     self.check('status', 'enabled'),
                     self.exists('properties.desired'),
                     self.exists('properties.reported')])
    # With connection string
    self.cmd('iot hub device-twin show -d {} --login {}'.format(device_ids[0], LIVE_HUB_CS),
             checks=[self.check('deviceId', device_ids[0]),
                     self.check('status', 'enabled'),
                     self.exists('properties.desired'),
                     self.exists('properties.reported')])
    result = self.cmd('iot hub device-twin update -d {} -n {} -g {} --set properties.desired.special={}'
                      .format(device_ids[0], LIVE_HUB, LIVE_RG, '"{generic_dict}"')).get_output_in_json()
    assert result['deviceId'] == device_ids[0]
    assert result['properties']['desired']['special']['key'] == 'value'
    # Setting a desired property to "null" removes it from the twin.
    result = self.cmd('iot hub device-twin update -d {} -n {} -g {} --set properties.desired.special="null"'
                      .format(device_ids[0], LIVE_HUB, LIVE_RG)).get_output_in_json()
    assert result['deviceId'] == device_ids[0]
    assert result['properties']['desired'].get('special') is None
    # With connection string
    result = self.cmd('iot hub device-twin update -d {} --login {} --set properties.desired.special={}'
                      .format(device_ids[0], LIVE_HUB_CS, '"{generic_dict}"')).get_output_in_json()
    assert result['deviceId'] == device_ids[0]
    assert result['properties']['desired']['special']['key'] == 'value'
    # Error case, test type enforcer
    self.cmd('iot hub device-twin update -d {} -n {} -g {} --set tags={}'
             .format(device_ids[0], LIVE_HUB, LIVE_RG, '"{bad_format}"'),
             expect_failure=True)
    # Twin replace from a JSON file path...
    content_path = os.path.join(CWD, 'test_generic_replace.json')
    self.cmd("iot hub device-twin replace -d {} -n {} -g {} -j '{}'"
             .format(device_ids[0], LIVE_HUB, LIVE_RG, content_path),
             checks=[self.check('deviceId', device_ids[0]),
                     self.check('properties.desired.awesome', 9001),
                     self.check('properties.desired.temperature.min', 10),
                     self.check('properties.desired.temperature.max', 100),
                     self.check('tags.location.region', 'US')])
    # ...and from an inline JSON payload.
    self.kwargs['twin_payload'] = read_file_content(content_path)
    self.cmd("iot hub device-twin replace -d {} -n {} -g {} -j '{}'"
             .format(device_ids[1], LIVE_HUB, LIVE_RG, '{twin_payload}'),
             checks=[self.check('deviceId', device_ids[1]),
                     self.check('properties.desired.awesome', 9001),
                     self.check('properties.desired.temperature.min', 10),
                     self.check('properties.desired.temperature.max', 100),
                     self.check('tags.location.region', 'US')])
    # With connection string
    self.cmd("iot hub device-twin replace -d {} --login {} -j '{}'"
             .format(device_ids[1], LIVE_HUB_CS, '{twin_payload}'),
             checks=[self.check('deviceId', device_ids[1]),
                     self.check('properties.desired.awesome', 9001),
                     self.check('properties.desired.temperature.min', 10),
                     self.check('properties.desired.temperature.max', 100),
                     self.check('tags.location.region', 'US')])
def test_hub_modules(self):
    """End-to-end module-identity tests against a live IoT Hub.

    Covers module creation with each auth method (symmetric key, X509
    thumbprint, X509 CA), key regeneration via update, list/show,
    connection-string retrieval and deletion, both via hub name +
    resource group and via the hub connection string.
    NOTE(review): steps are order-dependent on live hub state.
    """
    edge_device_count = 2
    device_count = 1
    module_count = 2
    names = self._create_entity_names(edge_devices=edge_device_count, devices=device_count, modules=module_count)
    edge_device_ids = names['edge_device_ids']
    module_ids = names['module_ids']
    device_ids = names['device_ids']
    for edge_device in edge_device_ids:
        self.cmd('iot hub device-identity create -d {} -n {} -g {} --ee'.format(edge_device, LIVE_HUB, LIVE_RG),
                 checks=[self.check('deviceId', edge_device)])
    self.cmd('iot hub device-identity create -d {} -n {} -g {}'.format(device_ids[0], LIVE_HUB, LIVE_RG),
             checks=[self.check('deviceId', device_ids[0])])
    # Symmetric Key
    # With connection string
    self.cmd('iot hub module-identity create --device-id {} --hub-name {} --resource-group {} --module-id {}'
             .format(edge_device_ids[0], LIVE_HUB, LIVE_RG, module_ids[1]),
             checks=[self.check('deviceId', edge_device_ids[0]),
                     self.check('moduleId', module_ids[1]),
                     self.exists('authentication.symmetricKey.primaryKey'),
                     self.exists('authentication.symmetricKey.secondaryKey')])
    self.cmd('iot hub module-identity create -d {} --login {} -m {}'
             .format(edge_device_ids[0], LIVE_HUB_CS, module_ids[0]),
             checks=[self.check('deviceId', edge_device_ids[0]),
                     self.check('moduleId', module_ids[0]),
                     self.exists('authentication.symmetricKey.primaryKey'),
                     self.exists('authentication.symmetricKey.secondaryKey')])
    # X509 Thumbprint
    # With connection string
    self.cmd('''iot hub module-identity create --module-id {} --device-id {} --login {}
                --auth-method x509_thumbprint --primary-thumbprint {} --secondary-thumbprint {}'''
             .format(module_ids[0], device_ids[0], LIVE_HUB_CS, PRIMARY_THUMBPRINT, SECONDARY_THUMBPRINT),
             checks=[self.check('deviceId', device_ids[0]),
                     self.check('moduleId', module_ids[0]),
                     self.check('connectionState', 'Disconnected'),
                     self.check(
                         'authentication.symmetricKey.primaryKey', None),
                     self.check(
                         'authentication.symmetricKey.secondaryKey', None),
                     self.check(
                         'authentication.x509Thumbprint.primaryThumbprint', PRIMARY_THUMBPRINT),
                     self.check('authentication.x509Thumbprint.secondaryThumbprint', SECONDARY_THUMBPRINT)])
    # --vd generates a self-signed cert valid for the given days, so
    # only the primary thumbprint exists afterwards.
    self.cmd('''iot hub module-identity create -m {} -d {} -n {} -g {} --am x509_thumbprint --vd {}'''
             .format(module_ids[1], device_ids[0], LIVE_HUB, LIVE_RG, 10),
             checks=[self.check('deviceId', device_ids[0]),
                     self.check('moduleId', module_ids[1]),
                     self.check('connectionState', 'Disconnected'),
                     self.check(
                         'authentication.symmetricKey.primaryKey', None),
                     self.check(
                         'authentication.symmetricKey.secondaryKey', None),
                     self.exists('authentication.x509Thumbprint.primaryThumbprint'),
                     self.check('authentication.x509Thumbprint.secondaryThumbprint', None)])
    # X509 CA
    # With connection string
    self.cmd('''iot hub module-identity create --module-id {} --device-id {} --login {} --auth-method x509_ca'''
             .format(module_ids[0], edge_device_ids[1], LIVE_HUB_CS),
             checks=[self.check('deviceId', edge_device_ids[1]),
                     self.check('moduleId', module_ids[0]),
                     self.check('connectionState', 'Disconnected'),
                     self.check('authentication.symmetricKey.primaryKey', None),
                     self.check('authentication.symmetricKey.secondaryKey', None),
                     self.check('authentication.x509Thumbprint.primaryThumbprint', None),
                     self.check('authentication.x509Thumbprint.secondaryThumbprint', None)])
    # Includes $edgeAgent && $edgeHub system modules
    result = self.cmd('iot hub query --hub-name {} -g {} -q "{}"'
                      .format(LIVE_HUB, LIVE_RG, "select * from devices.modules where devices.deviceId='{}'"
                              .format(edge_device_ids[0]))).get_output_in_json()
    assert len(result) == 4
    # Blanking the keys via --set triggers regeneration server-side.
    self.cmd('''iot hub module-identity update -d {} -n {} -g {} -m {}
                --set authentication.symmetricKey.primaryKey="" authentication.symmetricKey.secondaryKey=""'''
             .format(edge_device_ids[0], LIVE_HUB, LIVE_RG, module_ids[0]),
             checks=[self.check('deviceId', edge_device_ids[0]),
                     self.check('moduleId', module_ids[0]),
                     self.exists('authentication.symmetricKey.primaryKey'),
                     self.exists('authentication.symmetricKey.secondaryKey')])
    # With connection string
    self.cmd('''iot hub module-identity update -d {} --login {} -m {}
                --set authentication.symmetricKey.primaryKey="" authentication.symmetricKey.secondaryKey=""'''
             .format(edge_device_ids[0], LIVE_HUB_CS, module_ids[0]),
             checks=[self.check('deviceId', edge_device_ids[0]),
                     self.check('moduleId', module_ids[0]),
                     self.exists('authentication.symmetricKey.primaryKey'),
                     self.exists('authentication.symmetricKey.secondaryKey')])
    self.cmd('iot hub module-identity list -d {} -n {} -g {}'.format(edge_device_ids[0], LIVE_HUB, LIVE_RG),
             checks=[self.check('length([*])', 4),
                     self.exists("[?moduleId=='$edgeAgent']"),
                     self.exists("[?moduleId=='$edgeHub']")])
    # --top -1 means "no limit".
    self.cmd('iot hub module-identity list -d {} -n {} -g {} --top -1'.format(edge_device_ids[0], LIVE_HUB, LIVE_RG),
             checks=[self.check('length([*])', 4),
                     self.exists("[?moduleId=='$edgeAgent']"),
                     self.exists("[?moduleId=='$edgeHub']")])
    # With connection string
    self.cmd('iot hub module-identity list -d {} --login {}'.format(edge_device_ids[0], LIVE_HUB_CS),
             checks=[self.check('length([*])', 4),
                     self.exists("[?moduleId=='$edgeAgent']"),
                     self.exists("[?moduleId=='$edgeHub']")])
    self.cmd('iot hub module-identity show -d {} -n {} -g {} -m {}'
             .format(edge_device_ids[0], LIVE_HUB, LIVE_RG, module_ids[0]),
             checks=[
                 self.check('deviceId', edge_device_ids[0]),
                 self.check('moduleId', module_ids[0]),
                 self.exists('authentication.symmetricKey.primaryKey'),
                 self.exists('authentication.symmetricKey.secondaryKey')])
    # With connection string
    self.cmd('iot hub module-identity show -d {} --login {} -m {}'
             .format(edge_device_ids[0], LIVE_HUB_CS, module_ids[0]),
             checks=[
                 self.check('deviceId', edge_device_ids[0]),
                 self.check('moduleId', module_ids[0]),
                 self.exists('authentication.symmetricKey.primaryKey'),
                 self.exists('authentication.symmetricKey.secondaryKey')])
    mod_sym_conn_str_pattern = r'^HostName={}\.azure-devices\.net;DeviceId={};ModuleId={};SharedAccessKey='.format(
        LIVE_HUB, edge_device_ids[0], module_ids[0])
    self.cmd('iot hub module-identity show-connection-string -d {} -n {} -g {} -m {}'
             .format(edge_device_ids[0], LIVE_HUB, LIVE_RG, module_ids[0]),
             checks=[self.check_pattern('cs', mod_sym_conn_str_pattern)])
    # With connection string
    self.cmd('iot hub module-identity show-connection-string -d {} --login {} -m {}'
             .format(edge_device_ids[0], LIVE_HUB_CS, module_ids[0]),
             checks=[self.check_pattern('cs', mod_sym_conn_str_pattern)])
    self.cmd('iot hub module-identity show-connection-string -d {} -n {} -g {} -m {} --kt {}'
             .format(edge_device_ids[0], LIVE_HUB, LIVE_RG, module_ids[0], "secondary"),
             checks=[self.check_pattern('cs', mod_sym_conn_str_pattern)])
    # Delete the last module via connection string, the rest via
    # hub name + resource group.
    for i in module_ids:
        if module_ids.index(i) == (len(module_ids) - 1):
            # With connection string
            self.cmd('iot hub module-identity delete -d {} --login {} --module-id {}'
                     .format(edge_device_ids[0], LIVE_HUB_CS, i), checks=self.is_empty())
        else:
            self.cmd('iot hub module-identity delete -d {} -n {} -g {} --module-id {}'
                     .format(edge_device_ids[0], LIVE_HUB, LIVE_RG, i), checks=self.is_empty())
def test_hub_module_twins(self):
    """End-to-end module-twin tests against a live IoT Hub.

    Covers twin show/update/replace for a module on an edge device,
    both via hub name + resource group and via the hub connection
    string, plus negative (bad payload) cases.
    NOTE(review): steps are order-dependent on live hub state.
    """
    self.kwargs['generic_dict'] = {'key': 'value'}
    # Deliberately malformed JSON used for the error cases below.
    self.kwargs['bad_format'] = "{'key: 'value'}"
    edge_device_count = 1
    device_count = 1
    module_count = 1
    names = self._create_entity_names(edge_devices=edge_device_count, modules=module_count, devices=device_count)
    edge_device_ids = names['edge_device_ids']
    module_ids = names['module_ids']
    device_ids = names['device_ids']
    self.cmd('iot hub device-identity create -d {} -n {} -g {} --ee'.format(edge_device_ids[0], LIVE_HUB, LIVE_RG),
             checks=[self.check('deviceId', edge_device_ids[0])])
    self.cmd('iot hub device-identity create -d {} -n {} -g {}'.format(device_ids[0], LIVE_HUB, LIVE_RG),
             checks=[self.check('deviceId', device_ids[0])])
    self.cmd('iot hub module-identity create -d {} -n {} -g {} -m {}'
             .format(edge_device_ids[0], LIVE_HUB, LIVE_RG, module_ids[0]),
             checks=[self.check('deviceId', edge_device_ids[0]),
                     self.check('moduleId', module_ids[0]),
                     self.exists('authentication.symmetricKey.primaryKey'),
                     self.exists('authentication.symmetricKey.secondaryKey')])
    self.cmd('iot hub module-identity create -d {} -n {} -g {} -m {}'
             .format(device_ids[0], LIVE_HUB, LIVE_RG, module_ids[0]),
             checks=[self.check('deviceId', device_ids[0]),
                     self.check('moduleId', module_ids[0]),
                     self.exists('authentication.symmetricKey.primaryKey'),
                     self.exists('authentication.symmetricKey.secondaryKey')])
    self.cmd('iot hub module-twin show -d {} -n {} -g {} -m {}'
             .format(edge_device_ids[0], LIVE_HUB, LIVE_RG, module_ids[0]),
             checks=[self.check('deviceId', edge_device_ids[0]),
                     self.check('moduleId', module_ids[0]),
                     self.exists('properties.desired'),
                     self.exists('properties.reported')])
    # With connection string
    self.cmd('iot hub module-twin show -d {} --login {} -m {}'
             .format(edge_device_ids[0], LIVE_HUB_CS, module_ids[0]),
             checks=[self.check('deviceId', edge_device_ids[0]),
                     self.check('moduleId', module_ids[0]),
                     self.exists('properties.desired'),
                     self.exists('properties.reported')])
    self.cmd('iot hub module-twin update -d {} -n {} -g {} -m {} --set properties.desired.special={}'
             .format(edge_device_ids[0], LIVE_HUB, LIVE_RG, module_ids[0], '"{generic_dict}"'),
             checks=[self.check('deviceId', edge_device_ids[0]),
                     self.check('moduleId', module_ids[0]),
                     self.check('properties.desired.special.key', 'value')])
    # With connection string
    self.cmd('iot hub module-twin update -d {} --login {} -m {} --set properties.desired.special={}'
             .format(edge_device_ids[0], LIVE_HUB_CS, module_ids[0], '"{generic_dict}"'),
             checks=[self.check('deviceId', edge_device_ids[0]),
                     self.check('moduleId', module_ids[0]),
                     self.check('properties.desired.special.key', 'value')])
    # Error case test type enforcer
    self.cmd('iot hub module-twin update -d {} --login {} -m {} --set properties.desired={}'
             .format(edge_device_ids[0], LIVE_HUB_CS, module_ids[0], '"{bad_format}"'), expect_failure=True)
    self.cmd('iot hub module-twin update -d {} --login {} -m {} --set tags={}'
             .format(edge_device_ids[0], LIVE_HUB_CS, module_ids[0], '"{bad_format}"'), expect_failure=True)
    # Twin replace from a JSON file path...
    content_path = os.path.join(CWD, 'test_generic_replace.json')
    self.cmd("iot hub module-twin replace -d {} -n {} -g {} -m {} -j '{}'"
             .format(edge_device_ids[0], LIVE_HUB, LIVE_RG, module_ids[0], content_path),
             checks=[self.check('deviceId', edge_device_ids[0]),
                     self.check('moduleId', module_ids[0]),
                     self.check('properties.desired.awesome', 9001),
                     self.check('properties.desired.temperature.min', 10),
                     self.check('properties.desired.temperature.max', 100),
                     self.check('tags.location.region', 'US')])
    # With connection string
    self.cmd("iot hub module-twin replace -d {} --login {} -m {} -j '{}'"
             .format(edge_device_ids[0], LIVE_HUB_CS, module_ids[0], content_path),
             checks=[self.check('deviceId', edge_device_ids[0]),
                     self.check('moduleId', module_ids[0]),
                     self.check('properties.desired.awesome', 9001),
                     self.check('properties.desired.temperature.min', 10),
                     self.check('properties.desired.temperature.max', 100),
                     self.check('tags.location.region', 'US')])
    # ...and from an inline JSON payload.
    self.kwargs['twin_payload'] = read_file_content(content_path)
    self.cmd("iot hub module-twin replace -d {} -n {} -g {} -m {} -j '{}'"
             .format(edge_device_ids[0], LIVE_HUB, LIVE_RG, module_ids[0], '{twin_payload}'),
             checks=[self.check('deviceId', edge_device_ids[0]),
                     self.check('moduleId', module_ids[0]),
                     self.check('properties.desired.awesome', 9001),
                     self.check('properties.desired.temperature.min', 10),
                     self.check('properties.desired.temperature.max', 100),
                     self.check('tags.location.region', 'US')])
    for i in module_ids:
        self.cmd('iot hub module-identity delete -d {} -n {} -g {} --module-id {}'
                 .format(edge_device_ids[0], LIVE_HUB, LIVE_RG, i), checks=self.is_empty())
def test_device_configurations(self):
self.kwargs['generic_dict'] = {'key': 'value'}
self.kwargs['bad_format'] = "{'key: 'value'}"
config_count = 5
names = self._create_entity_names(configs=config_count)
config_ids = names['config_ids']
content_path = os.path.join(CWD, 'test_config_device_content.json')
metrics_path = os.path.join(CWD, 'test_config_device_metrics.json')
self.kwargs['configuration_payload'] = read_file_content(content_path)
self.kwargs['metrics_payload'] = read_file_content(metrics_path)
priority = random.randint(1, 10)
condition = 'tags.building=9 and tags.environment=\'test\''
empty_metrics = {'queries': {}, 'results': {}}
# With connection string
self.cmd("iot hub configuration create -c {} --login {} --pri {} --tc \"{}\" --lab {} -k '{}'"
.format(config_ids[0], LIVE_HUB_CS, priority, condition, '"{generic_dict}"', content_path),
checks=[
self.check('id', config_ids[0]),
self.check('priority', priority),
self.check('targetCondition', condition),
self.check('labels', self.kwargs['generic_dict']),
self.check('content.deviceContent', json.loads(
self.kwargs['configuration_payload'])['content']['deviceContent']),
self.check('metrics', empty_metrics)])
self.cmd("""iot hub configuration create --config-id {} --hub-name {} | |
[]
self.speedlines = []
self.accellines = []
for idx, cut in enumerate(self.spacetimeslits):
text = self.axes_dspec.text(cut['cutslit']['xcen'][-1], cut['cutslit']['ycen'][-1],
'{}'.format(idx), color=self.color, transform=self.axes_dspec.transData,
ha='left', va='bottom')
self.slitlines_text.append(text)
slitline, = self.axes_dspec.plot(cut['cutslit']['xcen'], cut['cutslit']['ycen'], color=self.color, ls=':')
self.slitlines.append(slitline)
if self.axes_speed:
speedline, = self.axes_speed.plot(cut['cutslit']['xcen'], cut['cutslit']['speed'], color=self.color,
ls='-')
self.speedlines.append(speedline)
if self.axes_accel:
accelline, = self.axes_accel.plot(cut['cutslit']['xcen'], cut['cutslit']['accel'], color=self.color,
ls='-')
self.accellines.append(accelline)
class CutslitBuilder:
    """Interactive matplotlib helper for drawing a "cut slit" on an axes.

    Left-clicks add control points, right-clicks remove the last one;
    a slit (centre line plus two dotted edge lines offset by the slit
    width) is fitted through the points with FitSlit.
    """

    def __init__(self, axes, cutwidth=5.0, cutlength=150, cutang=0.0, cutsmooth=10.0, scale=1.0):
        """Attach to *axes* and register the mouse-click callback.

        :param axes: matplotlib Axes to draw on.
        :param cutwidth: slit width (multiplied by *scale* when fitting).
        :param cutlength: number of sample points along the slit
            (recomputed from the fitted length in update()).
        :param cutang: slit angle parameter passed through to FitSlit.
        :param cutsmooth: smoothing factor for the spline fit.
        :param scale: conversion factor applied to width/length
            (presumably data units per pixel -- TODO confirm).
        """
        self.axes = axes
        # Artists: clicked control points, solid centre line, and the
        # two dotted slit-edge lines.
        self.clickedpoints, = self.axes.plot([], [], 'o', color='white')
        self.slitline, = self.axes.plot([], [], color='white', ls='solid')
        self.slitline0, = self.axes.plot([], [], color='white', ls='dotted')
        self.slitline1, = self.axes.plot([], [], color='white', ls='dotted')
        self.cutlength = cutlength
        self.cutwidth = cutwidth
        self.cutang = cutang
        self.cutsmooth = cutsmooth
        self.scale = scale
        # Accumulated click coordinates.
        self.xx = list(self.clickedpoints.get_xdata())
        self.yy = list(self.clickedpoints.get_ydata())
        # The instance itself is the button-press handler (see __call__).
        self.cid = self.clickedpoints.figure.canvas.mpl_connect('button_press_event', self)

    def __call__(self, event):
        """Handle a mouse click: add (button 1) or pop (button 3) a point.

        Clicks are ignored while a toolbar mode (zoom/pan) is active or
        when the click falls outside the attached axes.
        """
        tmode = '{}'.format(self.clickedpoints.figure.canvas.toolbar.mode)
        if tmode == '':
            if event.inaxes != self.axes:
                return
            if event.button == 1:
                self.xx.append(event.xdata)
                self.yy.append(event.ydata)
            elif event.button == 3:
                # Right click undoes the most recent point, if any.
                if len(self.xx) > 0:
                    self.xx.pop()
                    self.yy.pop()
            self.clickedpoints.set_data(self.xx, self.yy)
            self.clickedpoints.figure.canvas.draw()
            self.update()
        else:
            if event.inaxes != self.axes:
                return
            if event.button == 1 or event.button == 3:
                # A toolbar tool (e.g. zoom) is active: tell the user to
                # release it instead of silently dropping the click.
                self.clickedpoints.figure.canvas.toolbar.set_message('Uncheck toolbar button {} first!'.format(tmode))

    def update(self, mask=None):
        """Refit the slit through the clicked points and redraw it.

        :param mask: optional boolean mask applied to the fitted slit
            samples (masked samples are hidden from the plot).
        """
        xx = np.array(self.xx, dtype=np.float64)
        yy = np.array(self.yy, dtype=np.float64)
        if len(self.xx) <= 1:
            # Not enough points to fit anything; clear the slit.
            cutslitplt = {'xcen': [], 'ycen': [], 'xs0': [], 'ys0': [], 'xs1': [], 'ys1': [], 'cutwidth': [],
                          'posangs': [], 'posangs2': [],
                          'dist': []}
        else:
            # First fit with a fixed 300 samples just to measure the
            # slit length, then refit with a length-derived sampling.
            # Few points (<=3) use a polynomial fit; more use a
            # parametric spline.
            if len(self.xx) <= 3:
                cutslitplt = FitSlit(xx, yy, self.cutwidth * self.scale, self.cutang, 300, method='Polyfit')
            else:
                cutslitplt = FitSlit(xx, yy, self.cutwidth * self.scale, self.cutang, 300,
                                     s=len(xx) / 10.0 * self.cutsmooth, method='Param_Spline')
            self.cutlength = int(np.ceil(cutslitplt['dist'][-1] / self.scale) * 1.5)
            if len(self.xx) <= 3:
                cutslitplt = FitSlit(xx, yy, self.cutwidth * self.scale, self.cutang, self.cutlength, method='Polyfit')
            else:
                cutslitplt = FitSlit(xx, yy, self.cutwidth * self.scale, self.cutang, self.cutlength,
                                     s=len(xx) / 10.0 * self.cutsmooth, method='Param_Spline')
        self.cutslitplt = cutslitplt
        if mask is None:
            self.slitline.set_data(cutslitplt['xcen'], cutslitplt['ycen'])
            self.slitline0.set_data(cutslitplt['xs0'], cutslitplt['ys0'])
            self.slitline1.set_data(cutslitplt['xs1'], cutslitplt['ys1'])
        else:
            # Hide masked samples while keeping the arrays aligned.
            self.slitline.set_data(ma.masked_array(cutslitplt['xcen'], mask), ma.masked_array(cutslitplt['ycen'], mask))
            self.slitline0.set_data(ma.masked_array(cutslitplt['xs0'], mask), ma.masked_array(cutslitplt['ys0'], mask))
            self.slitline1.set_data(ma.masked_array(cutslitplt['xs1'], mask), ma.masked_array(cutslitplt['ys1'], mask))
        self.slitline.figure.canvas.draw()
        self.slitline0.figure.canvas.draw()
        self.slitline1.figure.canvas.draw()
class Stackplot:
    """Holds a time sequence of solar images (a sunpy MapSequence) and the
    state needed to build time-distance stack plots along a cut slit.

    The class-level attributes below are defaults; instances overwrite them
    as data are loaded and plots are created by the methods that follow.
    """
    # per-instrument metadata; only the SDO/AIA plate scale is predefined
    instrum_meta = {'SDO/AIA': {'scale': 0.6 * u.arcsec / u.pix}}
    # try to find predefined data directory, AIA_LVL1 takes precedence
    aia_lvl1 = os.getenv('AIA_LVL1')
    suncasadb = os.getenv('SUNCASADB')
    if aia_lvl1:
        print('Use ' + aia_lvl1 + ' as the file searching path')
        fitsdir = aia_lvl1
    else:
        if suncasadb:
            fitsdir = suncasadb + '/aiaBrowserData/Download/'
        else:
            print('Environmental variable for either AIA_LVL1 or SUNCASADB not defined')
            print('Use current path')
            fitsdir = './'
    # defaults for instance state, populated while loading/plotting data
    mapcube = None  # sunpy MapSequence of the loaded images
    mapcube_diff = None
    mapcube_plot = None
    cutslitbd = None  # CutslitBuilder instance for interactive slit drawing
    stackplt = None
    trange = None  # time range of the loaded data (see make_mapcube)
    wavelength = None  # AIA passband of the loaded data (see make_mapcube)
    fitsfile = None
    exptime_orig = None  # per-frame exposure times recorded in make_mapcube
    fov = None  # [x0, x1, y0, y1] limits applied in plot_map
    binpix = None
    dt_data = None
    divider_im = None  # axes divider used for the image colorbar (plot_map)
    divider_dspec = None
    sCutwdth = None
    sCutang = None
    fig_mapcube = None
    pixscale = None
    @resettable
    def __init__(self, infile=None):
        """Create a Stackplot, optionally loading data right away.

        Args:
            infile: either a ready-made sunpy MapSequence (adopted directly),
                or anything accepted by `mapcube_fromfile`; None leaves the
                instance empty until data are loaded explicitly.
        """
        if infile:
            if isinstance(infile, MapSequence):
                self.mapcube = infile
                self.mapcube_info()
            else:
                self.mapcube_fromfile(infile)
def get_plot_title(self, smap, title):
titletext = ''
if 'observatory' in title:
titletext = titletext + ' {}'.format(smap.observatory)
if 'detector' in title:
titletext = titletext + ' {}'.format(smap.detector)
if 'wavelength' in title:
titletext = titletext + ' {}'.format(smap.wavelength)
if 'time' in title:
titletext = titletext + ' {}'.format(smap.meta['date-obs'])
return titletext
def plot_map(self, smap, dspec=None, diff=False, norm=None, cmap=None, SymLogNorm=False, linthresh=0.5,
returnImAx=False,
layout_vert=False, uni_cm=False, draw_limb=False, draw_grid=False, colortitle=None,
title=['observatory', 'detector', 'wavelength', 'time'], fov=fov,
*args, **kwargs):
import matplotlib.cm as cm
import matplotlib.colors as colors
def plot_limb(axes, smap):
rsun = smap.rsun_obs
phi = np.linspace(-180, 180, num=181) * u.deg
x = np.cos(phi) * rsun
y = np.sin(phi) * rsun
axes.plot(x, y, color='w', linestyle='-')
def plot_grid(axes, smap, grid_spacing=10. * u.deg):
def hgs2hcc(rsun, lon, lat, B0, L0):
lon_L0 = lon - L0
x = rsun * np.cos(lat) * np.sin(lon)
y = rsun * (np.sin(lat) * np.cos(B0) - np.cos(lat) * np.cos(lon_L0) * np.sin(B0))
z = rsun * (np.sin(lat) * np.sin(B0) + np.cos(lat) * np.cos(lon_L0) * np.cos(B0))
return x, y, z
def hcc2hpc(x, y, z, dsun):
d = np.sqrt(x ** 2 + y ** 2 + (dsun - z) ** 2)
Tx = np.arctan2(x, dsun - z)
Ty = np.arcsin(y / d)
return Tx, Ty
dsun = smap.dsun
rsun = smap.rsun_meters
b0 = smap.heliographic_latitude.to(u.deg)
l0 = smap.heliographic_longitude.to(u.deg)
hg_longitude_deg = np.linspace(-90, 90, num=91) * u.deg
hg_latitude_deg = np.arange(0, 90, grid_spacing.to(u.deg).value)
hg_latitude_deg = np.hstack([-hg_latitude_deg[1:][::-1], hg_latitude_deg]) * u.deg
for lat in hg_latitude_deg:
c = hgs2hcc(rsun, hg_longitude_deg, lat * np.ones(91), b0, l0)
coords = hcc2hpc(c[0], c[1], c[2], dsun)
axes.plot(coords[0].to(u.arcsec), coords[1].to(u.arcsec), color='w', linestyle=':')
hg_longitude_deg = np.arange(0, 90, grid_spacing.to(u.deg).value)
hg_longitude_deg = np.hstack([-hg_longitude_deg[1:][::-1], hg_longitude_deg]) * u.deg
hg_latitude_deg = np.linspace(-90, 90, num=91) * u.deg
for lon in hg_longitude_deg:
c = hgs2hcc(rsun, lon * np.ones(91), hg_latitude_deg, b0, l0)
coords = hcc2hpc(c[0], c[1], c[2], dsun)
axes.plot(coords[0].to(u.arcsec), coords[1].to(u.arcsec), color='w', linestyle=':')
try:
clrange = DButil.sdo_aia_scale_dict(wavelength=smap.meta['wavelnth'])
except:
clrange = {'high': None, 'log': False, 'low': None}
plt.clf()
if dspec:
if layout_vert:
ax = plt.subplot(211)
else:
ax = plt.subplot(121)
else:
ax = plt.subplot()
if 'vmin' in kwargs.keys():
vmin = kwargs['vmin']
else:
vmin = clrange['low']
if 'vmax' in kwargs.keys():
vmax = kwargs['vmax']
else:
vmax = clrange['high']
if uni_cm:
norm = dspec['args']['norm']
if norm is None:
if diff:
if SymLogNorm:
norm = colors.SymLogNorm(linthresh=linthresh, vmin=vmin, vmax=vmax)
else:
norm = colors.Normalize(vmin=vmin, vmax=vmax)
else:
if clrange['log']:
norm = colors.LogNorm(vmin=vmin, vmax=vmax)
else:
norm = colors.Normalize(vmin=vmin, vmax=vmax)
if not cmap:
try:
cmap = cm.get_cmap('sdoaia{}'.format(smap.meta['wavelnth']))
except:
cmap = 'gray_r'
imshow_args = {'cmap': cmap, 'norm': norm, 'interpolation': 'nearest', 'origin': 'lower'}
try:
if smap.coordinate_system.x == 'HG':
xlabel = 'Longitude [{lon}]'.format(lon=smap.spatial_units.x)
else:
xlabel = 'X-position [{xpos}]'.format(xpos=smap.spatial_units.x)
if smap.coordinate_system.y == 'HG':
ylabel = 'Latitude [{lat}]'.format(lat=smap.spatial_units.y)
else:
ylabel = 'Y-position [{ypos}]'.format(ypos=smap.spatial_units.y)
except:
if smap.coordinate_system.axis1 == 'HG':
xlabel = 'Longitude [{lon}]'.format(lon=smap.spatial_units.axis1)
else:
xlabel = 'X-position [{xpos}]'.format(xpos=smap.spatial_units.axis1)
if smap.coordinate_system.axis2 == 'HG':
ylabel = 'Latitude [{lat}]'.format(lat=smap.spatial_units.axis2)
else:
ylabel = 'Y-position [{ypos}]'.format(ypos=smap.spatial_units.axis2)
# try:
# smap.draw_limb()
# except:
# pass
#
# try:
# smap.draw_grid()
# except:
# pass
if draw_limb:
plot_limb(ax, smap)
if draw_grid:
if type(draw_grid) in [int, float]:
plot_grid(ax, smap, draw_grid * u.deg)
else:
plot_grid(ax, smap, 10 * u.deg)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
imshow_args.update({'extent': stpu.get_map_corner_coord(smap).value})
if smap.detector == 'HMI':
im1 = ax.imshow(np.rot90(smap.data, 2), **imshow_args)
else:
im1 = ax.imshow(smap.data, **imshow_args)
# ['observatory', 'detector', 'wavelength', 'time']
titletext = self.get_plot_title(smap, title)
ax.set_title(titletext)
print(imshow_args['extent'])
if fov:
ax.set_xlim(fov[:2])
ax.set_ylim(fov[2:])
self.divider_im = make_axes_locatable(ax)
cax = self.divider_im.append_axes('right', size='1.5%', pad=0.05)
cax.tick_params(direction='in')
if colortitle is None:
colortitle = 'DN counts per second'
plt.colorbar(im1, ax=ax, cax=cax, label=colortitle)
ax.set_autoscale_on(False)
if dspec:
fig = plt.gcf()
# if figsize:
# fig.set_size_inches(figsize)
# else:
# fig.set_size_inches(13, 5)
if layout_vert:
ax2 = plt.subplot(212)
else:
ax2 = plt.subplot(122)
im2 = plt.pcolormesh(dspec['x'], dspec['y'], dspec['dspec'], **dspec['args'])
date_format = mdates.DateFormatter('%H:%M:%S')
ax2.xaxis_date()
ax2.xaxis.set_major_formatter(date_format)
for xlabel in ax2.get_xmajorticklabels():
xlabel.set_rotation(30)
xlabel.set_horizontalalignment("right")
ax2.yaxis.set_label_text(dspec['ytitle'])
# self.divider_dspec = make_axes_locatable(ax2)
# cax = self.divider_dspec.append_axes('right', size='1.5%', pad=0.05)
# cax.tick_params(direction='in')
# plt.colorbar(im2, ax=ax2, cax=cax, label=dspec['ctitle'])
ax2.set_autoscale_on(False)
if 'axvspan' in dspec.keys():
vspan = ax2.axvspan(dspec['axvspan'][0], dspec['axvspan'][1], alpha=0.5, color='white')
if 'xs' in dspec.keys() and 'ys' in dspec.keys():
ax2.plot(dspec['xs'], dspec['ys'], '--', lw=2.0, alpha=0.7, c='black')
if 'xlim' in dspec.keys():
ax2.set_xlim(dspec['xlim'])
if 'ylim' in dspec.keys():
ax2.set_ylim(dspec['ylim'])
if returnImAx:
return ax, im1, ax2, im2, vspan
else:
return ax, ax2
else:
if returnImAx:
return ax, im1
else:
return ax # ax.autoscale(True, 'both', True) # ax.autoscale_view(True, True, True) # ax.relim(visible_only=True)
def make_mapcube(self, trange, outfile=None, fov=None, wavelength='171', binpix=1, dt_data=1, derotate=False,
tosave=True, superpixel=False, aia_prep=False, mapinterp=False, overwrite=False, dtype=None, normalize=True):
if not overwrite:
if outfile is not None:
if os.path.exists(outfile):
return
def map_interp(mapin, xycoord):
from scipy import ndimage
# from astropy.coordinates import SkyCoord
pixelxy = mapin.world_to_pixel(xycoord)
return ndimage.map_coordinates(mapin.data, [pixelxy.y.value, pixelxy.x.value], order=1)
if isinstance(trange, list):
if isinstance(trange[0], Time):
trange = Time([trange[0], trange[-1]])
fitsfile = stpu.readsdofile(datadir=self.fitsdir, wavelength=wavelength, trange=trange)
else:
fitsfile = trange
elif isinstance(trange, Time):
fitsfile = stpu.readsdofile(datadir=self.fitsdir, wavelength=wavelength, trange=trange)
else:
fitsfile = trange
print(
'Input trange format not recognized. trange can either be a file list or a timerange of astropy Time object')
maplist = []
self.exptime_orig = []
print('Loading fits files....')
for idx, ll in enumerate(tqdm(fitsfile[::dt_data])):
if mapinterp:
maptmp = sunpy.map.Map(ll)
if type(maptmp) is list:
maptmp = maptmp[0]
if idx == 0:
all_coord = sunpy.map.all_coordinates_from_map(maptmp)
meta0 = deepcopy(maptmp.meta)
else:
# print('1',meta0['date-obs'])
meta0.update({'date-obs': maptmp.meta['date-obs']})
meta0.update({'date_obs': maptmp.meta['date_obs']})
meta0.update({'date_end': maptmp.meta['date_end']})
# print('2',meta0['date-obs'])
maptmp = sunpy.map.Map(map_interp(maptmp, all_coord), meta0)
# print('3',meta0['date-obs'])
else:
maptmp = sunpy.map.Map(ll)
if type(maptmp) is list:
maptmp = maptmp[0]
self.exptime_orig.append(maptmp.exposure_time.value)
if dtype is not None:
maptmp = sunpy.map.Map(maptmp.data.astype(dtype), maptmp.meta)
if aia_prep:
maptmp = aiaprep(maptmp)
if | |
#
# Copyright (c) 2014, 2016, 2018, 2020 LexisNexis Risk Data Management Inc.
#
# This file is part of the RadSSH software package.
#
# RadSSH is free software, released under the Revised BSD License.
# You are permitted to use, modify, and redistribute this software
# according to the Revised BSD License, a copy of which should be
# included with the distribution as file LICENSE.txt
#
'''
Python wrapper for parallel execution shell
===========================================
*** This module should be run, not imported ***
Usage: ```python -m radssh.shell host [...]```
Will read settings from /etc/radssh_config, and supplement with ~/.radssh_config.
Settings may also be provided on the command line, using the form --keyword=value.
'''
import sys
import os
import time
import socket
import pprint
import readline
import atexit
import logging
from . import ssh
from . import config
from .console import RadSSHConsole, monochrome
# Star (*) commands normally come from the plugins package; when that import
# fails, fall back to a stub object that only reports their absence.
try:
    from . import star_commands as star
    import radssh.plugins
except ImportError:
    class NullStarCommands(object):
        '''Use stub if plugins or star_commands can not be loaded'''
        # Declared with bare *args so the implicit cls and any caller
        # arguments are all absorbed and ignored.
        @classmethod
        def call(*args, **kwargs):
            print('Plugins directory not found - *commands disabled')
        star_help = call
        star_info = call
        commands = {'*help': star_help}

    star = NullStarCommands()
# Try using colorama when running on Windows
if sys.platform.startswith('win'):
    try:
        import colorama
        colorama.initialise.init()
    except Exception as e:
        # Without colorama the ANSI escape sequences used for console output
        # cannot be rendered on Windows, so this is treated as fatal.
        print('Unable to support ANSI escape sequences via colorama module')
        print(e)
        sys.exit(1)

# Ensure ~/.ssh directory exists, with sensible permissions
try:
    os.mkdir(os.path.expanduser('~/.ssh'), 0o700)
except OSError:
    # most likely the directory already exists; proceed either way
    pass

################################################################################
# Plugins can append callables here (see the plugin loading loop below); each
# one gets a chance to inspect/rewrite every command line entered in shell().
command_listeners = []
def shell(cluster, logdir=None, playbackfile=None, defaults=None):
    '''
    Very basic interactive shell.

    Repeatedly reads a command line (from the user, or from `playbackfile`
    when replaying a recorded session), feeds it through the registered
    plugin command listeners, handles the special cases (comments,
    *commands, forbidden/restricted commands), and runs everything else on
    the cluster, printing a summary of per-host return codes.

    Args:
        cluster: ssh.Cluster to run commands on; a *command may return a
            replacement Cluster which is swapped in mid-session.
        logdir: if set, commands and results are logged under this directory.
        playbackfile: optional iterator yielding command lines to replay
            instead of prompting interactively.
        defaults: settings dict; loaded via config.load_default_settings()
            when not supplied.
    '''
    if not defaults:
        defaults = config.load_default_settings()
    while True:
        try:
            if playbackfile:
                # playback mode: echo the recorded command as if typed
                try:
                    cmd = next(playbackfile)
                    print('%s %s' % (defaults['shell.prompt'], cmd.strip()))
                except StopIteration:
                    return
            else:
                try:
                    cmd = input('%s ' % defaults['shell.prompt'])
                except KeyboardInterrupt:
                    print('\n<Ctrl-C> during input\nUse EOF (<Ctrl-D>) or *exit to exit shell\n')
                    continue
            # Feed command line to any registered listeners from plugins
            for feed in command_listeners:
                feed_result = feed(cmd)
                if feed_result:
                    # a truthy result replaces the command line
                    if defaults['show_altered_commands'] == 'on':
                        cluster.console.message('Command modified from "%s" to "%s"' % (cmd, feed_result))
                    cmd = str(feed_result)
            if logdir:
                with open(os.path.join(logdir, 'session.commands'), 'a') as f:
                    f.write('%s\n' % cmd)
            args = cmd.split()
            if len(args) > 0:
                # identify the effective command, looking through a sudo prefix
                if os.path.basename(args[0]) == 'sudo' and len(args) > 1:
                    initial_command = os.path.basename(args[1])
                else:
                    initial_command = os.path.basename(args[0])
                # forbidden commands would hang without a TTY; refuse outright
                if initial_command in defaults['commands.forbidden'].split(','):
                    print('You really don\'t want to run %s without a TTY, do you?' % initial_command)
                    continue
                # restricted commands require explicit interactive confirmation
                if initial_command in defaults['commands.restricted'].split(','):
                    print('STOP! "%s" is listed as a restricted command (Potentially dangerous)' % initial_command)
                    print('and requires explicit confirmation before running.')
                    print('Please double check all parameters, just to be sure...')
                    print(' >>>', cmd)
                    confirm = input('Enter \'100%\' if completely sure: ')
                    if confirm != '100%':
                        continue
                if args[0].startswith('#'):
                    # Comment
                    continue
                if args[0].startswith('*'):
                    # *commands are dispatched locally; they may hand back a
                    # different Cluster object to switch the session to
                    ret = star.call(cluster, logdir, cmd)
                    cluster.console.join()
                    if isinstance(ret, ssh.Cluster):
                        cluster.console.message('Switched cluster from %r to %r' % (cluster, ret))
                        cluster = ret
                    continue
                r = cluster.run_command(cmd)
                if logdir:
                    cluster.log_result(logdir, encoding=defaults['character_encoding'])
                # Quick summary report, if jobs failed
                failures = {}
                completions = []
                completion_time = 0.0
                for k, job in r.items():
                    v = job.result
                    if job.completed:
                        if v.return_code == 0:
                            completions.append(str(k))
                            completion_time += job.end_time - job.start_time
                        else:
                            # group failed hosts by their return code
                            failures.setdefault(v.return_code, []).append(str(k))
                    else:
                        # jobs that never completed are grouped under None
                        failures.setdefault(None, []).append(str(k))
                if failures:
                    print('\nSummary of return codes:')
                    for k, v in [(0, completions)] + list(failures.items()):
                        if len(v) > 5:
                            print(k, '\t- (%d hosts)' % len(v))
                        else:
                            print(k, '\t-', sorted(v))
                if completions:
                    print('Average completion time for %d hosts: %fs' % (len(completions), (completion_time / len(completions))))
        except KeyboardInterrupt:
            print('Ctrl-C during command preparation - command aborted.')
        except EOFError as e:
            # Ctrl-D (or end of playback input) terminates the shell loop
            print(e)
            break
    print('Shell exiting')
    cluster.close_connections()
################################################################################
# Readline/libedit command completion
# Supports *commands, executables (LOCAL), and path (REMOTE) completion
class radssh_tab_handler(object):
    '''Class wrapper for readline TAB key completion'''

    def __init__(self, cluster, star):
        """Install self.complete as the process-wide readline completer.

        Args:
            cluster: ssh.Cluster; provides SFTP sessions for remote path
                completion.
            star: star-command module/stub; its `commands` dict drives
                *command completion.
        """
        # Need access to the cluster object to get SFTP service
        # for remote path completion, and the star command dictionary
        # to know what *commands are available.
        self.cluster = cluster
        self.star = star
        try:
            self.using_libedit = ('libedit' in readline.__doc__)
        except TypeError:
            # pyreadline (windows) readline.__doc__ is None (not iterable)
            self.using_libedit = False
        # cached candidate list, rebuilt whenever a completion starts (state 0)
        self.completion_choices = []
        readline.set_completer()
        readline.set_completer(self.complete)
        # '*' and '/' act as word delimiters so *commands and individual
        # path components are completed piecewise
        readline.set_completer_delims(' \t\n/*')
        if self.using_libedit:
            readline.parse_and_bind('bind ^I rl_complete')
        else:
            readline.parse_and_bind('tab: complete')

    def complete_star_command(self, lead_in, text, state):
        """Complete *command names from the star command table."""
        if state == 0:
            # Rebuild cached list of choices that match
            # Reset list to empty (choices = [] would reference local, not persistent list)
            del self.completion_choices[:]
            for choice in self.star.commands.keys():
                if choice.startswith(lead_in):
                    self.completion_choices.append(choice + ' ')
        # Discrepancy with readline/libedit and handling of leading *
        # NOTE(review): unlike the other completers, no None sentinel is
        # appended here; exhaustion surfaces as an IndexError, which readline
        # appears to suppress and treat as end-of-matches -- confirm.
        if self.using_libedit:
            return self.completion_choices[state]
        else:
            return self.completion_choices[state][1:]

    def complete_executable(self, lead_in, text, state):
        """Complete names of executable (non-directory) files on local $PATH."""
        if state == 0:
            del self.completion_choices[:]
            for path_dir in os.environ['PATH'].split(os.path.pathsep):
                try:
                    for f in os.listdir(path_dir):
                        try:
                            if os.path.isdir(os.path.join(path_dir, f)):
                                continue
                            st = os.stat(os.path.join(path_dir, f))
                            # any execute bit (user/group/other) qualifies
                            if (st.st_mode & 0o111) and f.startswith(text):
                                self.completion_choices.append(f + ' ')
                        except OSError:
                            continue
                except OSError:
                    continue
            # None sentinel tells readline the candidate list is exhausted
            self.completion_choices.append(None)
        return self.completion_choices[state]

    def complete_remote_path(self, lead_in, text, state):
        """Complete a remote path via SFTP on the first authenticated
        connection; directory matches get a trailing '/'."""
        if state == 0:
            del self.completion_choices[:]
            # find any authenticated transport to provide the SFTP session
            for t in self.cluster.connections.values():
                if t.is_authenticated():
                    break
            else:
                print('No authenticated connections')
                raise RuntimeError('Tab Completion unavailable')
            s = t.open_sftp_client()
            parent = os.path.dirname(lead_in)
            partial = os.path.basename(lead_in)
            if not parent:
                parent = './'
            for x in s.listdir(parent):
                if x.startswith(partial):
                    full_path = os.path.join(parent, x)
                    try:
                        # See if target is a directory, and append '/' if it is
                        s.chdir(full_path)
                        x += '/'
                        full_path += '/'
                    except Exception:
                        pass
                    if self.using_libedit:
                        self.completion_choices.append(full_path)
                    else:
                        self.completion_choices.append(x)
            self.completion_choices.append(None)
        return self.completion_choices[state]

    def complete_local_path(self, lead_in, text, state):
        """Complete a local filesystem path; directory matches get a
        trailing '/'."""
        if state == 0:
            del self.completion_choices[:]
            parent = os.path.dirname(lead_in)
            partial = os.path.basename(lead_in)
            if not parent:
                parent = './'
            for x in os.listdir(parent):
                if x.startswith(partial):
                    full_path = os.path.join(parent, x)
                    if os.path.isdir(full_path):
                        # See if target is a directory, and append '/' if it is
                        x += '/'
                        full_path += '/'
                    if self.using_libedit:
                        self.completion_choices.append(full_path)
                    else:
                        self.completion_choices.append(x)
            self.completion_choices.append(None)
        return self.completion_choices[state]

    def complete(self, text, state):
        """Readline entry point: dispatch to *command completion, a
        *command's custom completer, or remote path completion."""
        buffer = readline.get_line_buffer()
        # the token being completed: last whitespace-separated word before
        # the cursor
        lead_in = buffer[:readline.get_endidx()].split()[-1]
        try:
            if buffer.startswith('*') and ' ' in buffer:
                # See if *command has custom tab completion
                star_command = self.star.commands.get(buffer.split()[0], None)
                if star_command and star_command.tab_completion:
                    return star_command.tab_completion(self, buffer, lead_in, text, state)
            if lead_in.startswith('*'):
                # User needs help completing *command...
                return self.complete_star_command(lead_in, text, state)
            else:
                # Default behavior - remote file path completion
                return self.complete_remote_path(lead_in, text, state)
        except Exception:
            # re-raised exceptions are silently discarded by readline;
            # this handler is effectively a no-op kept as a debugging hook
            raise
################################################################################
# Workaround for https://github.com/radssh/radssh/issues/32
# Newer GNU Readline library raise false errno value that the Python
# wrapper reraises as IOError. https://bugs.python.org/issue10350 not
# being backported to Python 2.7, so handle it with more code...
def safe_write_history_file(filename):
    """Write readline history to `filename`, tolerating spurious IOErrors.

    Workaround for https://github.com/radssh/radssh/issues/32: newer GNU
    readline can raise a false-positive IOError even though the history file
    was written successfully. Use the file's modification time to decide:
    if it was modified within the last few seconds the write almost
    certainly succeeded, so log and ignore the error; otherwise re-raise.

    Args:
        filename: path of the history file to write.

    Raises:
        IOError/OSError: when the write genuinely failed (file missing or
        its mtime is stale).
    """
    try:
        readline.write_history_file(filename)
    except IOError as e:
        try:
            post = os.stat(filename).st_mtime
        except OSError:
            # can't even stat the file - the write really failed
            raise e
        if post > time.time() - 3:
            logging.debug('Ignoring "%s" writing history file', str(e))
        else:
            # Bug fix: previously a stale mtime fell through silently,
            # swallowing a genuine write failure.
            raise e
################################################################################
def radssh_shell_main():
args = sys.argv[1:]
defaults = config.load_settings()
# Keep command line options separately, for reuse in sshconfig defaults
cmdline_options = config.command_line_settings(args, defaults.get('user.settings'))
defaults.update(cmdline_options)
if 'socket.timeout' in defaults:
socket.setdefaulttimeout(float(defaults['socket.timeout']))
# Setup Logging
logformat = '%(asctime)s %(levelname)-8s [%(name)s:%(thread)08X] %(message)s'
logdir = os.path.expanduser(time.strftime(defaults.get('logdir', '')))
if logdir:
if not os.path.exists(logdir):
os.mkdir(logdir)
logging.basicConfig(filename=os.path.join(logdir, 'radssh.log'),
format=logformat)
else:
logging.basicConfig(format=logformat)
pass
try:
logging.getLogger().setLevel(getattr(logging, defaults['loglevel'].upper()))
except AttributeError:
raise RuntimeError('RadSSH setting "loglevel" should be set to one of [CRITICAL,ERROR,WARNING,INFO,DEBUG] instead of "%s"', defaults['loglevel'])
logger = logging.getLogger('radssh')
# Make an AuthManager to handle user authentication
a = ssh.AuthManager(defaults['username'],
auth_file=os.path.expanduser(defaults['authfile']),
try_auth_none=(defaults['try_auth_none'] == 'on'))
# Load Plugins to aid in host lookups and add *commands dynamically
loaded_plugins = {}
exe_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
system_plugin_dir = os.path.join(exe_dir, 'plugins')
disable_plugins = defaults['disable_plugins'].split(',')
plugin_dirs = [x for x in defaults['plugins'].split(';') if x]
plugin_dirs.append(system_plugin_dir)
for x in plugin_dirs:
plugin_dir = os.path.abspath(os.path.expanduser(x))
if not os.path.exists(plugin_dir):
continue
for module in sorted(os.listdir(plugin_dir)):
if module.endswith('.py') and not module.startswith('__'):
plugin = module[:-3]
# Skip modules found in more that 1 location, and ones explicitly disabled
if plugin in loaded_plugins or plugin in disable_plugins:
continue
try:
logger.info('Loading plugin module: %s', plugin)
this_plugin = radssh.plugins.load_plugin(os.path.join(plugin_dir, module))
if hasattr(this_plugin, 'settings'):
prefix = 'plugin.%s.' % plugin
user_settings = {}
user_settings = dict([(k[len(prefix):], v) for k, v in defaults.items() if k.startswith(prefix)])
logger.info('Updating settings for plugin %s with: %s', plugin, user_settings)
this_plugin.settings.update(user_settings)
if hasattr(this_plugin, 'init'):
logger.debug('Calling init method for plugin: %s', plugin)
this_plugin.init(defaults=defaults, auth=a, plugins=loaded_plugins, star_commands=star.commands, shell=shell)
if hasattr(this_plugin, 'star_commands'):
logger.debug('Registering *commands for plugin: %s %s', plugin, this_plugin.star_commands.keys())
star.commands.update(this_plugin.star_commands)
if hasattr(this_plugin, 'command_listener'):
command_listeners.append(this_plugin.command_listener)
loaded_plugins[plugin] = this_plugin
except Exception as e:
logger.error('Failed to load plugin (%s): %s', plugin, repr(e))
# Use | |
"""
Implements modification of human attributes at different levels.
###################### Quarantining / Behavior change logic ########################
The following order of precedence handles a person faced with multiple quarantine triggers (each trigger has a suggested duration for quarantine) -
(i) (not app-based) QUARANTINE_DUE_TO_POSITIVE_TEST_RESULT, QUARANTINE_UNTIL_TEST_RESULT. In event of positive result, person is quarantined for 14 days from the day that test was taken. If negative result, person is quarantined until the test results come out
(ii) (non-app based) SELF_DIAGNOSIS
(iii) (app based) RISK_LEVEL_UPDATE: x->MAX LEVEL
Dropout enables non-adherence to quarantine at any time.
To consider household quarantine, residents are divided into two groups:
(i) index cases - they have a quarantine trigger i.e. a reason to believe that they should quarantine
(ii) secondary cases - rest of the residents
non-app quarantining for index cases -
* A trigger higher in precedence overwrites other triggers i.e. quarantining duration is changed based on the trigger
* `human` might already be quarantining at the time of this trigger, so the duration is changed only if trigger requirements require so.
* if there are no non-app triggers, app-based triggers are checked every `human.time_slot` and behavior levels are adjusted accordingly
non-app quarantining for secondary cases -
* All of them quarantine for the same duration unless someone is converted to an index case, in which case, they quarantine and influence household quarantine according to their triggers.
* this duration is defined by the index case who has maximum quarantining restrictions.
app-based recommendations -
Behavior changes for non-app recommendation for household members -
* if there are no non-app quarantining triggers, humans are put on app recommendation
* if MAKE_HOUSEHOLD_BEHAVE_SAME_AS_MAX_RISK_RESIDENT is True, other residents follow the same behavior as the max risk individual in the house
########################################################################
"""
import numpy as np
import warnings
import datetime
from covid19sim.locations.hospital import Hospital, ICU
from covid19sim.utils.constants import SECONDS_PER_DAY
from covid19sim.utils.constants import TEST_TAKEN, SELF_DIAGNOSIS, RISK_LEVEL_UPDATE
from covid19sim.utils.constants import NEGATIVE_TEST_RESULT, POSITIVE_TEST_RESULT
from covid19sim.utils.constants import QUARANTINE_UNTIL_TEST_RESULT, QUARANTINE_DUE_TO_POSITIVE_TEST_RESULT, QUARANTINE_DUE_TO_SELF_DIAGNOSIS
from covid19sim.utils.constants import UNSET_QUARANTINE, QUARANTINE_HOUSEHOLD
from covid19sim.utils.constants import INITIALIZED_BEHAVIOR, INTERVENTION_START_BEHAVIOR, IS_IMMUNE_BEHAVIOR
def convert_intervention_to_behavior_level(intervention_level):
    """
    Maps `human._intervention_level` to `IntervenedBehavior.behavior_level`.

    Behavior levels reserve index 0, so valid (non-negative) intervention
    levels are shifted up by one; the -1 "no intervention" sentinel (and any
    negative value) passes through as -1.
    """
    if intervention_level < 0:
        return -1
    return intervention_level + 1
class Quarantine(object):
    """
    Contains logic to handle different combinations of non-app quarantine triggers.

    Args:
        intervened_behavior (IntervenedBehavior): behavior controller that owns this object
        human (covid19sim.human.Human): `human` whose behavior needs to be changed
        env (simpy.Environment): environment to schedule events
        conf (dict): yaml configuration of the experiment
    """
    def __init__(self, intervened_behavior, human, env, conf):
        self.human = human
        self.intervened_behavior = intervened_behavior
        self.env = env
        self.conf = conf

        # quarantine window; both are None while not quarantining
        self.start_timestamp = None
        self.end_timestamp = None
        # ordered list of triggers that started/extended the current quarantine
        self.reasons = []

        self.quarantine_idx = self.intervened_behavior.quarantine_idx
        self.baseline_behavior_idx = self.intervened_behavior.baseline_behavior_idx

        # once human has recovered (inferred from 14 days after positive test), human no longer quarantines
        self.human_no_longer_needs_quarantining = False

    def update(self, trigger):
        """
        Updates quarantine start and end timestamp based on the new `trigger` and previous triggers.

        Note 1: `human_no_longer_needs_quarantining` is set in `reset_quarantine`. if its True, all calls to this function are ignored.
        Note 2: Test results are treated to have conclusive and ultimate say on the duration of quarantine.
        Note 3: There can be quarantining due to several reasons, so all those combinations are treated in this function through rules described in Quarantining Logic at the top.

        Args:
            trigger (str): reason for quarantine trigger.

        Raises:
            NotImplementedError: for QUARANTINE_DUE_TO_SELF_DIAGNOSIS.
            ValueError: for an unrecognized trigger.
        """
        if self.human_no_longer_needs_quarantining:
            return

        # if `human` is already quarantining due to TEST_TAKEN, then do not change anything
        if (
            QUARANTINE_UNTIL_TEST_RESULT in self.reasons
            or QUARANTINE_DUE_TO_POSITIVE_TEST_RESULT in self.reasons
        ):
            return

        # a household trigger can not shorten a quarantine already in place
        if (
            trigger == QUARANTINE_HOUSEHOLD
            and self.end_timestamp is not None
            and self.end_timestamp >= self.human.household.quarantine_end_timestamp
        ):
            return

        self.reasons.append(trigger)
        if self.start_timestamp is None:
            self.start_timestamp = self.env.timestamp

        # set end timestamp and behavior levels accordingly
        # test taken - quarantine until the test result comes out
        if trigger == QUARANTINE_UNTIL_TEST_RESULT:
            duration = self.human.time_to_test_result * SECONDS_PER_DAY
            self.end_timestamp = self.env.timestamp + datetime.timedelta(seconds=duration)
            self._set_quarantine_behavior(self.reasons, test_recommended=False)

            if self.conf['QUARANTINE_HOUSEHOLD_UPON_INDIVIDUAL_TEST_TAKEN']:
                self.human.household.add_to_index_case(self.human, trigger)

        # positive test result - quarantine for the full configured duration
        elif trigger == QUARANTINE_DUE_TO_POSITIVE_TEST_RESULT:
            duration = self.conf['QUARANTINE_DAYS_ON_POSITIVE_TEST'] * SECONDS_PER_DAY
            self.end_timestamp = self.start_timestamp + datetime.timedelta(seconds=duration)
            self._set_quarantine_behavior(self.reasons, test_recommended=False)

            if self.conf['QUARANTINE_HOUSEHOLD_UPON_INDIVIDUAL_POSITIVE_TEST']:
                self.human.household.add_to_index_case(self.human, trigger)

        elif trigger == QUARANTINE_DUE_TO_SELF_DIAGNOSIS:
            # Bug fix: was `assert False, NotImplementedError(...)`, which is
            # stripped under `python -O`; raise explicitly instead.
            raise NotImplementedError(f"{trigger} quarantine not implemented")

        # household quarantine follows the household-wide end timestamp
        elif trigger == QUARANTINE_HOUSEHOLD:
            self.end_timestamp = self.human.household.quarantine_end_timestamp
            self._set_quarantine_behavior(self.reasons, test_recommended=False)

        else:
            raise ValueError(f"Unknown trigger for quarantine: {trigger}")

    def _set_quarantine_behavior(self, reasons, test_recommended):
        """
        Sets behavior level for quarantining and whether a test is recommended or not. Check Quarantine.update for more.

        Note: It is to be called from `Quarantine.update`

        Args:
            reasons (list): reasons for quarantining.
            test_recommended (bool): whether `human` should get a test or not.
        """
        self.intervened_behavior.set_behavior(level=self.quarantine_idx, reasons=reasons)
        self.human._test_recommended = test_recommended

    def _unset_quarantine_behavior(self, to_level):
        """
        Resets `human` from `quarantine_idx` to `to_level`.

        Note: It is to be called from `Quarantine.reset_quarantine`

        Args:
            to_level (int): the level to which `human`s behavior level should be reset to.
        """
        assert to_level != self.quarantine_idx, "unsetting the quarantine to quarantine_level. Something is wrong."
        self.intervened_behavior.set_behavior(level=to_level, reasons=[UNSET_QUARANTINE, f"{UNSET_QUARANTINE}: {self.intervened_behavior._behavior_level}->{to_level}"])
        self.human._test_recommended = False

    def reset_quarantine(self):
        """
        Resets quarantine related attributes and puts `human` into a relevant behavior level.

        Note 1: Specific to non-binary risk tracing, reset doesn't work if the recommendation is still to quarantine.
        Note 2: It also sets the flag for no more need to quarantine once the test results are positive.
        """
        assert self.start_timestamp is not None, "unsetting quarantine twice not allowed"
        assert not self.human_no_longer_needs_quarantining, f"{self.human} was quarantined while it shouldn't have"
        last_reason = self.reasons[-1]

        # a positive test implies recovery by the end of the quarantine window,
        # so this human never needs to quarantine again
        # (the redundant flag check is kept deliberately: under `python -O`
        # the asserts above are stripped)
        if (
            not self.human_no_longer_needs_quarantining
            and (
                self.human.has_had_positive_test
                or last_reason == QUARANTINE_DUE_TO_POSITIVE_TEST_RESULT
                or self.human.test_result == POSITIVE_TEST_RESULT
            )
        ):
            self.human_no_longer_needs_quarantining = True

        self.start_timestamp = None
        self.end_timestamp = None
        self.reasons = []
        self._unset_quarantine_behavior(self.baseline_behavior_idx)
        self.human.household.reset_index_case(self.human)

    def reset_if_its_time(self):
        """
        Resets `timestamp`s once the quarantine window has elapsed.

        It is called everytime a new activity is to be decided or a trigger is added.
        """
        if self.start_timestamp is not None:
            if self.end_timestamp <= self.env.timestamp:
                self.reset_quarantine()
class IntervenedBehavior(object):
"""
A base class to implement intervened behavior.
Args:
human (covid19sim.human.Human): `human` whose behavior needs to be changed
env (simpy.Environment): environment to schedule events
conf (dict): yaml configuration of the experiment
"""
def __init__(self, human, env, conf):
    # handles to the simulated person, the simpy environment, and the experiment config
    self.human = human
    self.env = env
    self.conf = conf
    self.rng = human.rng
    assert conf['N_BEHAVIOR_LEVELS'] >= 2, "At least 2 behavior levels are required to model behavior changes"
    # we reserve 0-index for the unmitigated (no reduction) level
    self.n_behavior_levels = conf['N_BEHAVIOR_LEVELS'] + 1
    # last level is quarantine; level 1 is the post-intervention baseline
    self.quarantine_idx = self.n_behavior_levels - 1
    self.baseline_behavior_idx = 1
    self._behavior_level = 0  # true behavior level
    self.behavior_level = 0  # its a property.setter
    # per-location fraction by which contacts are reduced at each behavior level;
    # start filling the reduction levels from the end (quarantine = 100% reduction)
    reduction_levels = {
        "HOUSEHOLD": np.zeros(self.n_behavior_levels),
        "WORKPLACE": np.zeros(self.n_behavior_levels),
        "OTHER": np.zeros(self.n_behavior_levels),
        "SCHOOL": np.zeros(self.n_behavior_levels),
    }
    reduction_levels["HOUSEHOLD"][-1] = 1.0
    reduction_levels["WORKPLACE"][-1] = 1.0
    reduction_levels["OTHER"][-1] = 1.0
    reduction_levels["SCHOOL"][-1] = 1.0
    last_filled_index = self.quarantine_idx
    # if number of behavior levels is 2 and interpolation is with respect to lockdown contacts, it is a Lockdown scenario
    if conf['INTERPOLATE_CONTACTS_USING_LOCKDOWN_CONTACTS']:
        # second-to-last level is a scaled-down lockdown contact reduction
        reduction_levels["HOUSEHOLD"][-2] = conf['FRACTION_LOCKDOWN_INTERPOLATION'] * conf['LOCKDOWN_FRACTION_REDUCTION_IN_CONTACTS_AT_HOUSEHOLD']
        reduction_levels["WORKPLACE"][-2] = conf['FRACTION_LOCKDOWN_INTERPOLATION'] * conf['LOCKDOWN_FRACTION_REDUCTION_IN_CONTACTS_AT_WORKPLACE']
        reduction_levels["OTHER"][-2] = conf['FRACTION_LOCKDOWN_INTERPOLATION'] * conf['LOCKDOWN_FRACTION_REDUCTION_IN_CONTACTS_AT_OTHER']
        reduction_levels["SCHOOL"][-2] = conf['FRACTION_LOCKDOWN_INTERPOLATION'] * conf['LOCKDOWN_FRACTION_REDUCTION_IN_CONTACTS_AT_SCHOOL']
        last_filled_index -= 1
    else:
        # if its a non-tracing scenario, and lockdown is not desired, its an unmitigated scenario with 0% reduction in the first level
        if conf["RISK_MODEL"] == "" and conf['N_BEHAVIOR_LEVELS'] == 2:
            last_filled_index -= 1
            assert last_filled_index == self.baseline_behavior_idx, "unmitigated scenario should not have non-zero reduction in baseline_behavior"
    # in a non-tracing scenario, baseline_behavior is not defined so we populate levels until baseline_behavior
    # each lower level halves the reduction of the level above it
    while last_filled_index > self.baseline_behavior_idx:
        to_fill_index = last_filled_index - 1
        for location_type in ["HOUSEHOLD", "WORKPLACE", "OTHER", "SCHOOL"]:
            reduction_levels[location_type][to_fill_index] = reduction_levels[location_type][last_filled_index] / 2
        last_filled_index = to_fill_index
    self.reduction_levels = reduction_levels
    # start everyone at the zero level by default (unmitigated scenario i.e. no reduction in contacts)
    self.quarantine = Quarantine(self, self.human, self.env, self.conf)
    self.set_behavior(level=0, reasons=[INITIALIZED_BEHAVIOR])
    # dropout: whether `human` follows the recommendation today (decided daily)
    self._follow_recommendation_today = None
    self.last_date_to_decide_dropout = None
    # flags toggled once the intervention begins / once triggers should be ignored
    self.intervention_started = False
    self.pay_no_attention_to_triggers = False
def initialize(self, check_has_app=False):
"""
Sets up a baseline behavior on the day intervention starts.
Args:
check_has_app (bool): whether to initialize a baseline beahvior only for humans with the app
"""
assert self.conf['INTERVENTION_DAY'] >= 0, "negative intervention day and yet intialization is called."
assert self.n_behavior_levels >= 2, "with 2 behavior levels and a risk model, behavior level 1 will quarantine everyone"
if check_has_app and self.human.has_app:
warnings.warn("An unrealistic scenario - initilization of baseline behavior is only for humans with an | |
import numpy as np
import random,sys
import scipy
from scipy.spatial.distance import pdist,squareform,cdist
#from scipy.spatial import distance_matrix
import matplotlib.pyplot as plt
import scipy
### "for loop" version
### faster than "matrix version"
### because only need to consider points within h_k
### for loop version
### run this cell to overwrite the previous matrix version
### because this version is faster
def adaptive_cluster(data, gap_par=0.5, n0=None, debug=False, assign_outliers='nearest_cluster'):
    '''
    Adaptive weights clustering.

    Grows a sequence of radii h_0 < h_1 < ... and, at each step, updates a
    point-to-point weight (connectivity) matrix by testing every nearby pair
    with a likelihood-ratio "gap" statistic on the overlap of their current
    neighbourhoods.  Connected components of the final weight matrix become
    the clusters.

    Args:
        data: a numeric numpy array of shape (n_points, n_features)
        gap_par: the lambda parameter used to test the gap
        n0: the initial neighbors for each data point (default 2*n_features+2)
        debug: run sanity checks on the constructed radius sequence
        assign_outliers: nearest_cluster, assign outliers to nearest cluster.
            new_cluster, assign outliers to a new cluster

    Returns:
        dict with keys "S" (sum of all weights), "weight_matrix",
        "cluster_label" (float labels, NaN only possible for outliers),
        and "weight_matrix_history" (list of (h, weight_matrix) snapshots).
    '''
    weight_matrix_history = []
    (n_points, n_features) = data.shape
    # pairwise euclidean distances; cdist is faster than distance_matrix
    distance_matrix = scipy.spatial.distance.cdist(data, data, 'euclidean')
    weight_matrix = np.zeros(shape=(n_points, n_points))
    weight_matrix_history.append((0, weight_matrix))
    # each row sorted ascending: column c is the distance to the c-th nearest point
    sorted_distance_matrix = np.sort(distance_matrix, axis=1)
    # number of initial neighbors
    if n0 is None:
        n0 = 2 * n_features + 2
    # h0[i] is the radius at which point i has n0 neighbors
    h0 = sorted_distance_matrix[:, n0]
    # initial weights: connect (i, j) iff d(i, j) <= max(h0[i], h0[j])
    h0_matrix = np.tile(h0, (n_points, 1))
    h0_matrix_max = np.maximum(h0_matrix, h0_matrix.T)
    weight_matrix = (distance_matrix <= h0_matrix_max).astype('int')
    #################################################################
    ### build the increasing radius sequence h_array
    a = 1.4142135623730951  # sqrt(2): cap on neighbourhood growth per step
    b = 1.95                # cap on radius growth per step
    max_distance = np.max(sorted_distance_matrix)
    # max(h0) guarantees every point has at least n0 neighbors at the first radius
    h_array = np.array([np.max(h0)])
    k = 0
    weight_matrix_history.append((h_array[k], weight_matrix.copy()))
    while h_array[k] <= max_distance:
        # n(Xi, h_k): neighbors of each point within h_k (-1 excludes the point itself);
        # the next radius may grow the count by at most a factor of `a`
        n_upper = a * np.array([np.sum(sorted_distance_matrix[i, :] <= h_array[k]) - 1 for i in np.arange(n_points)])
        n_upper = (np.floor(n_upper)).astype('int')
        # for large h the bound can exceed the available number of points
        n_upper = np.clip(n_upper, a_min=None, a_max=(n_points - 1))
        # radius implied by the neighbor-count bound
        h_upper_by_n_upper = np.min(np.array([sorted_distance_matrix[i, n_upper[i]] for i in np.arange(n_points)]))
        # both the count bound and the geometric bound h_{k+1} <= b*h_k must hold
        min_h_upper = np.minimum(h_upper_by_n_upper, b * h_array[k])
        if min_h_upper <= max_distance:
            # no further growth possible: the sequence is complete
            if min_h_upper <= h_array[k]:
                break
            h_array = np.append(h_array, min_h_upper)
            k = k + 1
    #################################################################
    ### optional sanity checks on the constructed h sequence
    if debug:
        # bug fix: the original used for/else here, and a for-else runs the
        # else whenever the loop finishes without `break` -- so 'h error' /
        # 'n error' were printed even when every check passed.  Track
        # violations explicitly instead.
        h_ok = True
        for k in range(1, len(h_array)):
            if h_array[k] <= b * h_array[k - 1]:
                continue
            h_ok = False
            print('k', k, h_array[k], h_array[k - 1], b * h_array[k - 1], end=',')
            print(h_array[k] / h_array[k - 1])
        if not h_ok:
            print('h error')
        n_ok = True
        for k in range(1, len(h_array)):
            for i in range(n_points):
                n1 = np.sum(sorted_distance_matrix[i, :] <= h_array[k - 1]) - 1
                n2 = np.sum(sorted_distance_matrix[i, :] <= h_array[k]) - 1
                if n2 <= a * n1 and n1 >= n0 and n2 >= n0:
                    continue
                n_ok = False
                print('n', k, n1, n2, a * n1, end=',')
                print(n2 / n1)
        if not n_ok:
            print('n error')
    #################################################################
    ### gap test constants: q is derived from the incomplete beta function
    beta_a = (n_features + 1.0) / 2.0
    beta_b = 0.5
    beta_function = scipy.special.beta(beta_a, beta_b)
    # boundary pairs legitimately produce 0/0 and log(0); those entries are
    # overridden below, so silence the warnings for the duration of the loop
    np.seterr(divide='ignore', invalid='ignore')
    print('h_k', h_array[0])
    for k in range(1, len(h_array)):
        print('h_k', h_array[k])
        for i in range(n_points):
            weight_matrix[i, i] = 1
            for j in range(i, n_points):
                # only test pairs within the current radius whose neighbourhoods
                # were already defined at the previous radius
                if distance_matrix[i, j] <= h_array[k] and h_array[k - 1] >= h0[i] and h_array[k - 1] >= h0[j]:
                    #### points connected to both i and j
                    N_overlap = np.dot(weight_matrix[i, :], weight_matrix[j, :])
                    #### points connected to exactly one of them (outside the
                    #### other's previous radius)
                    if k > 1:
                        ind1 = (distance_matrix[j, :] > h_array[k - 1]) + 0.0
                        ind2 = (distance_matrix[i, :] > h_array[k - 1]) + 0.0
                    else:
                        ind1 = (distance_matrix[j, :] > h0_matrix_max[i, j]) + 0.0
                        ind2 = (distance_matrix[i, :] > h0_matrix_max[i, j]) + 0.0
                    N_complement = np.dot(weight_matrix[i, :], ind1) + np.dot(weight_matrix[j, :], ind2)
                    N_union = N_overlap + N_complement
                    #### observed overlap fraction
                    theta = N_overlap / N_union
                    #### expected overlap fraction q under the "same cluster" model
                    t = distance_matrix[i, j] / h_array[k - 1]
                    beta_x = 1.0 - (t ** 2) / 4.0
                    incomplete_beta_function = scipy.special.betainc(beta_a, beta_b, beta_x)
                    q = incomplete_beta_function / (2 * beta_function - incomplete_beta_function)
                    #### likelihood-ratio statistic T = N * KL(theta || q),
                    #### signed so that theta > q (more overlap than expected)
                    #### gives a negative value and keeps the edge
                    T1 = N_union
                    T2 = theta * np.log(theta / q) + (1.0 - theta) * np.log((1.0 - theta) / (1.0 - q))
                    if theta <= q:
                        T = T1 * T2
                    else:
                        T = -(T1 * T2)
                    weight_matrix[i, j] = (T <= gap_par) + 0.0
                    #### boundary cases: theta==0 means no overlap at all,
                    #### theta==1 means complete overlap; decide them directly
                    if theta == 0:
                        weight_matrix[i, j] = 0
                    if theta == 1:
                        weight_matrix[i, j] = 1
                    #### keep the matrix symmetric
                    weight_matrix[j, i] = weight_matrix[i, j]
        weight_matrix_history.append((h_array[k], weight_matrix.copy()))
    ### reset to default
    np.seterr(divide='warn', invalid='warn')
    ### total connectivity score
    S = np.sum(weight_matrix)
    ### extract clusters from the weight matrix (label propagation over edges)
    labels = (np.zeros(shape=weight_matrix.shape[0]))
    labels.fill(np.nan)
    cluster_ind = 0
    for i in range(len(labels)):
        for j in range(len(labels)):
            if i == j:
                continue
            if weight_matrix[i, j] == 1:
                if np.isnan(labels[i]) and np.isnan(labels[j]):
                    # neither endpoint labelled yet: open a new cluster
                    labels[i] = cluster_ind
                    labels[j] = cluster_ind
                    cluster_ind = cluster_ind + 1
                elif not np.isnan(labels[i]) and np.isnan(labels[j]):
                    labels[j] = labels[i]
                elif np.isnan(labels[i]) and not np.isnan(labels[j]):
                    labels[i] = labels[j]
                elif not np.isnan(labels[i]) and not np.isnan(labels[j]):
                    continue
                else:
                    # defensive: the four branches above are exhaustive
                    print(i, j, labels[i], labels[j])
                    print('cluster assignment error')
    ### some points may not belong to any cluster; either attach them to the
    ### nearest labelled point or group them into one extra cluster
    ### (otherwise they keep np.nan and can be treated as outliers)
    if assign_outliers == 'nearest_cluster':
        if np.sum(np.isnan(labels)) > 0:
            nan_ind = np.argwhere(np.isnan(labels)).flatten()
            for i in nan_ind:
                dist = distance_matrix[i, :].copy()
                dist[i] = np.max(dist)  # exclude self from the argmin
                nearest_ind = np.argmin(dist)
                labels[i] = labels[nearest_ind]
    elif assign_outliers == 'new_cluster':
        if np.sum(np.isnan(labels)) > 0:
            nan_ind = np.argwhere(np.isnan(labels)).flatten()
            outlier_label = np.nanmax(np.unique(labels)) + 1
            for i in nan_ind:
                labels[i] = outlier_label
    else:
        print('assign_outliers parameter is not correct')
    return ({
        "S": S,
        "weight_matrix": weight_matrix,
        "cluster_label": labels,
        "weight_matrix_history": weight_matrix_history,
    })
def k_means(data, n_clusters=3, n_init=20, max_iter=100, kernel=None,
verbose=False,sigma = 1.0,use_kmean_controid=False):
'''
data: a numeric numpy array
n_clusters: number of clusters
n_init: number of different initializations to run kmeans
max_iter: number of max iterations
kernel: "None", regular k means; "gaussian", k means with gaussian kernel
verbose: output detailed information
sigma: the sigma parameter in the gaussian kernel
use_kmean_controid: for kenel K means, use the best controids from K means as initialization points.
'''
### may not be efficient in terms of memory use
### no need to save whole history
### get whole hitory for debugging purpose
controid_history = {}
cluster_label_history = {}
sse_history = np.zeros(shape=(n_init,1))
### start k-means
n_points = data.shape[0]
### calculate the kernel matrix
if kernel == 'gaussian':
### 'sqeuclidean': squared Euclidean distance
kernel_matrix = np.exp(-0.5/(sigma**2)*squareform(pdist(data,'sqeuclidean')))
### repeat k-means n_init times
### return the best one
np.seterr(divide='ignore', invalid='ignore')
for i_init in range(n_init):
if verbose: print('Random seed',i_init)
#### set random seed
np.random.seed(i_init)
#### generate initial cluster labels
cluster_labels = np.random.choice(range(n_clusters),size=n_points, replace=True)
#### generate initial centroids
#### randomly choose n_clusters points from the data as centroids
if use_kmean_controid:
#### run one K means
print('Use best K means centroid')
km_result = k_means(data, n_clusters, n_init=20, max_iter=100, kernel=None)
centroids = km_result['best_controids']
else:
#### randomly choose n_clusters points from the data as centroids
centroids = data[np.random.choice(np.arange(n_points), n_clusters, replace=False),:]
for i_iter in range(max_iter):
if verbose: print('Iteration',i_iter,end=', ')
distance_to_centroids = np.zeros(shape=(data.shape[0],n_clusters))
######
if kernel is None:
distance_to_centroids = scipy.spatial.distance.cdist(data, centroids, 'euclidean')
######
elif kernel == 'gaussian':
dist1 = np.diag(kernel_matrix)
cluster_ind_matrix = np.zeros(shape=(data.shape[0],n_clusters))
for i_centroid in range(n_clusters):
cluster_ind_matrix[:,i_centroid] = (cluster_labels == i_centroid) + 0.0
kth_cluster_ind = (cluster_labels == i_centroid) + 0.0
kth_cluster_matrix = np.outer(kth_cluster_ind,kth_cluster_ind)
dist2 = 2.0*np.sum(np.tile(kth_cluster_ind,(n_points,1))*kernel_matrix,axis=1)/np.sum(kth_cluster_ind)
dist3 = np.sum(kth_cluster_matrix*kernel_matrix)/np.sum(kth_cluster_matrix)
#print(dist1.shape,dist2.shape,dist3.shape,)
### ord=2 is L2 distance
### axis=1 is to calculate norm along columns
distance_to_centroids[:,i_centroid] = dist1-dist2+dist3
#break
else:
sys.exit('Kernel parameter is not correct!')
#print(distance_to_centroids)
### assign the cluster labels
cluster_labels = np.argmin(distance_to_centroids,axis=1)
sse = np.sum((np.min(distance_to_centroids,axis=1))**2)
if verbose: print('SSE',sse)
### re-calculate centroids
previous_centroids = centroids
centroids = np.array([data[cluster_labels == i_centroid].mean(axis = 0) for i_centroid in range(n_clusters)])
### if centroids don't | |
import numpy as np
import copy
import cv2
import os
import PIL.Image as Image
from PIL import ImageDraw
import lmdb
from tqdm import tqdm
import carla
from collect_pm import CollectPerspectiveImage, InversePerspectiveMapping
# experiment configuration for collecting the CICT training dataset in CARLA
config = dict(
    env=dict(
        simulator=dict(
            disable_two_wheels=True,
            waypoint_num=32,
            planner=dict(
                type='behavior',
                resolution=1,
            ),
            # sensor suite attached to the ego vehicle
            obs=(
                dict(
                    name='rgb',
                    type='rgb',
                    size=[640, 360],
                    position=[0.5, 0.0, 2.5],
                    rotation=[0, 0, 0],
                    sensor_tick=1. / 30,
                ),
                dict(
                    name='lidar',
                    type='lidar',
                    channels=64,
                    range=50,
                    points_per_second=100000,
                    rotation_frequency=30,
                    upper_fov=10,
                    lower_fov=-30,
                    position=[0.5, 0.0, 2.5],
                    rotation=[0, 0, 0],
                    sensor_tick=0.05,
                )
            ),
            verbose=True,
        ),
        # visualize=dict(
        #     type='rgb',
        #     outputs=['show']
        # ),
        # episode termination conditions
        col_is_failure=True,
        stuck_is_failure=True
    ),
    env_num=5,
    episode_nums=40,
    env_manager=dict(
        auto_reset=False,
        shared_memory=False,
    ),
    env_wrapper=dict(suite='FullTown01-v3', ),
    server=[
        dict(carla_host='localhost', carla_ports=[9000, 9010, 2]),
    ],
    policy=dict(
        target_speed=25,
        noise=False,
    ),
    collector=dict(suite='FullTown01-v3', ),
    dir_path='datasets/cict_datasets_train',
    npy_prefix='_preloads'
)
# world coordinates -> global map pixels: pixel = scale * world + offset
scale = 12.0
x_offset = 800
y_offset = 1000
MAX_SPEED = 50
# target trajectory arc length (meters) and the minimum acceptable length
TRAJ_LENGTH = 25
MIN_TRAJ_LENGTH = 15
vehicle_width = 2.0
# sampling densities for the potential-map rasterization
longitudinal_sample_number_near = 8
longitudinal_sample_number_far = 0.5
longitudinal_length = 25.0
lateral_sample_number = 20
lateral_step_factor = 1.0
# smoothing kernel size
ksize = 21
# intrinsics/extrinsics of the front RGB camera
sensor_config = {'rgb': {'img_height': 360, 'img_width': 640, 'fov': 120, 'location': [0.5, 0, 2.5]}}
class Param(object):
    """Read-only bundle of the module-level trajectory/sampling constants,
    in the shape expected by CollectPerspectiveImage / InversePerspectiveMapping."""

    def __init__(self):
        # trajectory geometry
        self.traj_length = float(TRAJ_LENGTH)
        self.target_speed = float(MAX_SPEED)
        self.vehicle_width = float(vehicle_width)
        self.longitudinal_length = longitudinal_length
        # sampling densities
        self.longitudinal_sample_number_near = longitudinal_sample_number_near
        self.longitudinal_sample_number_far = longitudinal_sample_number_far
        self.lateral_sample_number = lateral_sample_number
        self.lateral_step_factor = lateral_step_factor
        # smoothing kernel and camera description
        self.ksize = ksize
        self.sensor_config = sensor_config


# shared parameter singleton used by the helpers in this module
params = Param()
def get_map():
    """Return a blank (all-black) 6000x6000 RGB PIL image used as the base map."""
    blank = np.zeros((6000, 6000, 3), dtype="uint8")
    return Image.fromarray(blank)
def draw_point(waypoint_list, origin_map):
    """Plot each waypoint as a single white pixel on `origin_map` (modified in
    place) and return the map."""
    pixels = []
    for wp in waypoint_list:
        # world -> map pixel coordinates
        pixels.append(scale * wp[0] + x_offset)
        pixels.append(scale * wp[1] + y_offset)
    ImageDraw.Draw(origin_map).point(pixels, fill=(255, 255, 255))
    return origin_map
def draw_route(waypoint_list, origin_map):
    """Draw the waypoint polyline as a thick red route on `origin_map`
    (modified in place) and return the map."""
    vertices = []
    for wp in waypoint_list:
        # world -> map pixel coordinates
        vertices.append(scale * wp[0] + x_offset)
        vertices.append(scale * wp[1] + y_offset)
    ImageDraw.Draw(origin_map).line(vertices, 'red', width=30)
    return origin_map
def find_dest_with_fix_length(start, waypoint_list, traj_length=None):
    """Walk along `waypoint_list` until the accumulated arc length reaches the
    target trajectory length.

    Args:
        start: arc length already travelled before waypoint_list[0].
        waypoint_list: sequence of waypoints; only the first two components
            (x, y) of each are used.
        traj_length: target length in meters. New optional parameter
            (backward compatible): defaults to the module-level
            ``params.traj_length`` as before.

    Returns:
        (dest_xy, index): the first waypoint at/after the target length and its
        index, or (last waypoint, -1) when the list is too short.
    """
    if traj_length is None:
        traj_length = params.traj_length
    length = start
    for i in range(len(waypoint_list) - 1):
        length += np.linalg.norm(waypoint_list[i + 1][:2] - waypoint_list[i][:2])
        if length >= traj_length:
            return waypoint_list[i + 1][:2], i + 1
    return waypoint_list[-1][:2], -1
def draw_destination(location, waypoint_list, origin_map):
    """Mark the fixed-length trajectory endpoint on `origin_map` (modified in
    place) as a filled red circle and return the map."""
    # arc length already covered between the vehicle and the first waypoint
    travelled = np.linalg.norm(waypoint_list[0][:2] - location[:2])
    dest, _ = find_dest_with_fix_length(travelled, waypoint_list)
    # world -> map pixel coordinates
    px = scale * dest[0] + x_offset
    py = scale * dest[1] + y_offset
    painter = ImageDraw.Draw(origin_map)
    painter.ellipse((px - 15, py - 15, px + 15, py + 15), fill='red', outline='red', width=30)
    return origin_map
def get_nav(location, rotation, plan_map, town=1):
    """Crop a 640x360 ego-centred, heading-aligned navigation view out of the
    global plan map.

    Args:
        location: world-frame (x, y, z) of the ego vehicle.
        rotation: world-frame rotation; rotation[1] (yaw) aligns the crop.
        plan_map: global PIL map with the route/destination already drawn.
        town: CARLA town id (1 or 2) selecting the map origin offsets.

    Returns:
        BGR->RGB converted numpy image of the forward-facing crop.

    Raises:
        ValueError: for unsupported `town` values.  (Bug fix: `x_offset` /
        `y_offset` are locals here, so the original raised an opaque
        UnboundLocalError for any other town.)
    """
    if town == 1:
        x_offset = 800
        y_offset = 1000
    elif town == 2:
        x_offset = 1500
        y_offset = 0
    else:
        raise ValueError("unsupported town: %r (expected 1 or 2)" % (town, ))
    # world -> map pixel coordinates of the ego vehicle
    x = int(scale * location[0] + x_offset)
    y = int(scale * location[1] + y_offset)
    # take a generous square crop, rotate it so the heading points up, then
    # keep only the 640x360 region in front of the vehicle
    _nav = plan_map.crop((x - 400, y - 400, x + 400, y + 400))
    im_rotate = _nav.rotate(rotation[1] + 90)
    nav = im_rotate.crop((_nav.size[0] // 2 - 320, _nav.size[1] // 2 - 360, _nav.size[0] // 2 + 320, _nav.size[1] // 2))
    nav = cv2.cvtColor(np.asarray(nav), cv2.COLOR_BGR2RGB)
    return nav
'''
def get_bezier(location, waypoint_list):
total_length = [np.linalg.norm(location[:2] - waypoint_list[0][:2])]
for i in range(len(waypoint_list)-1):
total_length.append(np.linalg.norm(waypoint_list[i][:2] - waypoint_list[i+1][:2]) + total_length[-1])
t = np.array(total_length[:-1]).reshape(-1, 1) / total_length[-1]
b0 = location[:2].reshape(1, 2)
b4 = waypoint_list[-1][:2].reshape(1, 2)
B0 = (1 - t) ** 4
B4 = t ** 4
p = waypoint_list[:-1, :2] - np.dot(np.concatenate([B0, B4], axis=1), np.concatenate([b0, b4], axis=0))
B1 = 4 * t * ((1 - t) ** 3)
B2 = 6 * (t ** 2) * ((1 - t) ** 2)
B3 = 4 * (1 - t) * (t ** 3)
Bm = np.concatenate([B1, B2, B3], axis=1)
bm = np.dot(np.linalg.inv(np.dot(Bm.T, Bm)), Bm.T)
bm = np.dot(bm, p)
b = np.concatenate([b0, bm, b4], axis=0)
t = np.linspace(0, 1, 100)
t = t.reshape(100, 1)
B0 = (1 - t) ** 4
B4 = t ** 4
B1 = 4 * t * ((1 - t) ** 3)
B2 = 6 * (t ** 2) * ((1 - t) ** 2)
B3 = 4 * (1 - t) * (t ** 3)
B = np.concatenate([B0, B1, B2, B3, B4], axis=1)
bezier_list = np.dot(B, b)
#print(b)
return bezier_list, b
'''
def destination(save_dir, episode_path):
    """For every saved waypoint file of an episode, draw the fixed-length
    destination marker on the global map and save the ego-centred crop as
    ``dest_%05d.png`` next to the episode data."""
    lmdb_file = lmdb.open(os.path.join(save_dir, episode_path, 'measurements.lmdb')).begin()
    # files named way*.npy hold the per-frame waypoint lists
    waypoint_file = [
        x for x in os.listdir(os.path.join(save_dir, episode_path)) if (x.endswith('npy') and x.startswith('way'))
    ]
    waypoint_file.sort()
    for k in tqdm(waypoint_file):
        # frame index encoded in the file name: way..._<index>.npy
        index = k.split('_')[1].split('.')[0]
        # measurements_%05d entries are flat float32 vectors; indices 7-9 are
        # the location and 18-20 the rotation — matches save_as_npy below
        measurements = np.frombuffer(lmdb_file.get(('measurements_%05d' % int(index)).encode()), np.float32)
        location = np.array([measurements[7], measurements[8], measurements[9]]).astype(np.float32)
        rotation = np.array([measurements[18], measurements[19], measurements[20]]).astype(np.float32)
        waypoint_list = np.load(os.path.join(save_dir, episode_path, k))
        origin_map = get_map()
        # deepcopy keeps the pristine base map reusable across frames
        plan_map = draw_destination(location, waypoint_list, copy.deepcopy(origin_map))
        dest = get_nav(location, rotation, plan_map, town=1)
        cv2.imwrite(os.path.join(save_dir, episode_path, 'dest_%05d.png' % int(index)), dest)
class Sensor(object):
    """Lightweight stand-in for a CARLA RGB camera actor, exposing just the
    ``type_id``, ``attributes`` and ``get_transform()`` that the perspective
    mapping helpers consume."""

    def __init__(self, config):
        self.type_id = 'sensor.camera.rgb'
        loc = config['location']
        self.transform = carla.Transform(carla.Location(x=loc[0], y=loc[1], z=loc[2]))
        # CARLA exposes sensor attributes as strings
        self.attributes = {
            'role_name': 'front',
            'image_size_x': str(config['img_width']),
            'image_size_y': str(config['img_height']),
            'fov': str(config['fov']),
        }

    def get_transform(self):
        """Return the camera mounting transform."""
        return self.transform
def find_traj_with_fix_length(start_index, pose_list, traj_length=None):
    """Find the pose index that closes a trajectory of the target arc length.

    Args:
        start_index: index in `pose_list` to start accumulating from.
        pose_list: sequence of transforms exposing ``.location.distance(other)``.
        traj_length: target length in meters. New optional parameter
            (backward compatible): defaults to the module-level
            ``params.traj_length`` as before.

    Returns:
        The first index whose cumulative distance from `start_index` reaches
        the target length, or -1 when the remaining poses are too short.
    """
    if traj_length is None:
        traj_length = params.traj_length
    length = 0.0
    for i in range(start_index, len(pose_list) - 1):
        length += pose_list[i].location.distance(pose_list[i + 1].location)
        if length >= traj_length:
            return i + 1
    return -1
def destination2(save_dir, episode_path):
    """Variant of :func:`destination` that projects the fixed-length endpoint
    into the camera view (via CollectPerspectiveImage) instead of cropping the
    global map, saving each frame as ``dest2_%05d.png``."""
    lmdb_file = lmdb.open(os.path.join(save_dir, episode_path, 'measurements.lmdb')).begin()
    # files named way*.npy hold the per-frame waypoint lists
    waypoint_file = [
        x for x in os.listdir(os.path.join(save_dir, episode_path)) if (x.endswith('npy') and x.startswith('way'))
    ]
    waypoint_file.sort()
    sensor = Sensor(params.sensor_config['rgb'])
    collect_perspective = CollectPerspectiveImage(params, sensor)
    commands = []
    for k in tqdm(waypoint_file):
        # frame index encoded in the file name: way..._<index>.npy
        index = k.split('_')[1].split('.')[0]
        # indices 7-9: location, 18-20: rotation (see save_as_npy)
        measurements = np.frombuffer(lmdb_file.get(('measurements_%05d' % int(index)).encode()), np.float32)
        location = np.array([measurements[7], measurements[8], measurements[9]]).astype(np.float32)
        rotation = np.array([measurements[18], measurements[19], measurements[20]]).astype(np.float32)
        waypoint_list = np.load(os.path.join(save_dir, episode_path, k))
        # arc length already covered before the first waypoint
        start = np.linalg.norm(waypoint_list[0][:2] - location[:2])
        dest, _ = find_dest_with_fix_length(start, waypoint_list)
        #if location[0] - dest[0] > 0.5:
        #    commands.append(1)
        #elif location[0] - dest[0] < -0.5:
        #    commands.append(2)
        #else:
        #    commands.append(0)
        # drawDestInImage expects a 3x1 column vector (z left at 0)
        zero = np.zeros((3, 1))
        zero[:2, 0] = dest
        dest_map = collect_perspective.drawDestInImage(zero, location, rotation)
        cv2.imwrite(os.path.join(save_dir, episode_path, 'dest2_%05d.png' % int(index)), dest_map)
    #np.save(os.path.join(save_dir, episode_path, 'commands.npy'), np.array(commands))
def get_potential_map(save_dir, episode_path, measurements, img_file):
    """Render a camera-view potential map (ground-truth trajectory heat map)
    for every frame of an episode into ``<episode>/pm/%05d.png``.

    Stops early once the remaining poses cannot cover a full trajectory
    length.  Returns `measurements` unchanged.
    """
    pm_dir = os.path.join(save_dir, episode_path, 'pm')
    print(pm_dir)
    if not os.path.exists(pm_dir):
        os.mkdir(pm_dir)
    # rebuild carla transforms from the recorded location/rotation dicts
    pose_list = []
    loc_list = []
    for measurement in measurements:
        transform = carla.Transform()
        transform.location.x = float(measurement['location'][0])
        transform.location.y = float(measurement['location'][1])
        transform.location.z = float(measurement['location'][2])
        transform.rotation.pitch = float(measurement['rotation'][0])
        transform.rotation.yaw = float(measurement['rotation'][1])
        transform.rotation.roll = float(measurement['rotation'][2])
        pose_list.append(transform)
        loc_list.append(measurement['location'])
    sensor = Sensor(params.sensor_config['rgb'])
    collect_perspective = CollectPerspectiveImage(params, sensor)
    for index in tqdm(range(len(pose_list))):
        # index of the pose that closes a fixed-length future trajectory
        end_index = find_traj_with_fix_length(index, pose_list)
        if end_index < 0:
            # tail of the episode is shorter than the required trajectory
            print('no enough traj: ', index, index / len(pose_list))
            break
        vehicle_transform = pose_list[index]  # in world coordinate
        traj_pose_list = []
        traj_list = []
        for i in range(index, end_index):
            traj_pose_list.append((i, pose_list[i]))
            traj_list.append(loc_list[i])
        '''
        bezier_list, bezier_coff = get_bezier(traj_list[0], np.stack(traj_list[1:], axis=0))
        measurements[index]['bezier_coff'] = bezier_coff
        origin_map = get_map()
        plan_map = draw_route(bezier_list, copy.deepcopy(origin_map))
        plan_map = draw_point(traj_list, plan_map)
        nav = get_nav(measurements[index]['location'], measurements[index]['rotation'], plan_map, town=1)
        cv2.imwrite(os.path.join(save_dir, episode_path, 'nav_%05d.png' % int(index)), nav)
        '''
        # project the future trajectory into the camera frame
        empty_image = collect_perspective.getPM(traj_pose_list, vehicle_transform)
        cv2.imwrite(os.path.join(pm_dir, '%05d.png' % index), empty_image)
    return measurements
def get_inverse_potential_map(save_dir, episode_path, pm_file, lidar_file):
    """Back-project every potential map into a bird's-eye cost map, fused with
    the matching lidar frame, and save it under ``<episode>/ipm/%05d.png``.

    `pm_file` and `lidar_file` are parallel lists of paths relative to
    `save_dir` (assumed index-aligned per frame — confirm against the caller).
    """
    ipm_dir = os.path.join(save_dir, episode_path, 'ipm')
    if not os.path.exists(ipm_dir):
        os.mkdir(ipm_dir)
    sensor = Sensor(params.sensor_config['rgb'])
    inverse_perspective_mapping = InversePerspectiveMapping(params, sensor)
    for i in tqdm(range(len(pm_file))):
        pm = cv2.imread(os.path.join(save_dir, pm_file[i]))
        lidar = np.load(os.path.join(save_dir, lidar_file[i]))
        # camera-view potential map -> ground plane, then fuse with lidar
        ipm = inverse_perspective_mapping.getIPM(pm)
        img = inverse_perspective_mapping.get_cost_map(ipm, lidar)
        cv2.imwrite(os.path.join(ipm_dir, '%05d.png' % i), img)
def get_option(option_name, end_ind):
    """Derive the high-level driving option from a saved direction sequence.

    Args:
        option_name: path of a ``.npy`` file holding a 1-D sequence of
            direction codes (4 appears to mean "follow lane").
        end_ind: index of the trajectory end within that sequence, or -1 to
            scan the whole sequence.

    Returns:
        The first non-4 code after the start (minus 1), or the start code
        minus 1 if no such code exists up to `end_ind`.
    """
    x = np.load(option_name, allow_pickle=True)
    option = x[0] - 1
    # bug fix: the slice bound must come from the loaded array, not from the
    # length of the file *path* (original used len(option_name))
    end_ind = len(x) if end_ind == -1 else end_ind + 1
    for o in x[1:end_ind]:
        if o != 4:
            option = o - 1
            break
    return option
def save_as_npy(save_dir, episode_path):
lmdb_file = lmdb.open(os.path.join(save_dir, episode_path, 'measurements.lmdb')).begin()
dest_file = [
x for x in os.listdir(os.path.join(save_dir, episode_path)) if (x.endswith('png') and x.startswith('dest_'))
]
dest_file.sort()
dest_file2 = [
x for x in os.listdir(os.path.join(save_dir, episode_path)) if (x.endswith('png') and x.startswith('dest2_'))
]
dest_file2.sort()
img_file = [
x for x in os.listdir(os.path.join(save_dir, episode_path)) if (x.endswith('png') and x.startswith('rgb'))
]
img_file.sort()
#print(waypoint_file)
measurements_list = []
for k in tqdm(img_file):
index = k.split('_')[1].split('.')[0]
measurements = np.frombuffer(lmdb_file.get(('measurements_%05d' % int(index)).encode()), np.float32)
data = {}
data['time'] = float(measurements[1])
data['acceleration'] = np.array([measurements[4], measurements[5], measurements[6]], dtype=np.float32)
data['location'] = np.array([measurements[7], measurements[8], measurements[9]], dtype=np.float32)
data['direction'] = float(measurements[11]) - 1.
data['velocity'] = np.array([measurements[12], measurements[13], measurements[14]], dtype=np.float32)
data['angular_velocity'] = np.array([measurements[15], measurements[16], measurements[17]], dtype=np.float32)
data['rotation'] = np.array([measurements[18], measurements[19], measurements[20]]).astype(np.float32)
data['steer'] = float(measurements[21])
data['throttle'] = float(measurements[22])
data['brake'] = float(measurements[23])
data['real_steer'] = float(measurements[24])
data['real_throttle'] = float(measurements[25])
data['real_brake'] = float(measurements[26])
data['tl_state'] = float(measurements[27])
data['tl_distance'] = float(measurements[28])
waypoint_list = np.load(os.path.join(save_dir, episode_path, 'waypoints_%05d.npy' % int(index)))
start = np.linalg.norm(data['location'][:2] - waypoint_list[0][:2])
_, end_ind = find_dest_with_fix_length(start, waypoint_list)
data['option'] = get_option(os.path.join(save_dir, episode_path, 'direction_%05d.npy' %
int(index)), end_ind) if data['direction'] == 3 else data['direction']
#print(episode_path, int(index), data['option'], data['command'])
measurements_list.append(data)
dest_file = [os.path.join(episode_path, x) for x in dest_file]
dest_file2 = [os.path.join(episode_path, x) for x in dest_file2]
img_file = [os.path.join(episode_path, x) for | |
import torch
import torch.nn as nn
import numpy as np
class ScaledDotProductAttention(nn.Module):
    '''Scaled dot-product attention: softmax(q k^T / sqrt(d)) v.

    NOTE(review): the additive ``mask`` is applied *after* the softmax (and the
    returned/used weights are the post-dropout ones) — unusual, but preserved
    here exactly as in the original implementation.
    '''

    def __init__(self, attn_dropout=0.5):
        super().__init__()
        self.dropout = nn.Dropout(attn_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v, mask=None):
        # scale scores by sqrt(d_k), taken from the key's last dimension
        scale = k.shape[-1] ** 0.5
        scores = torch.bmm(q, k.permute(0, 2, 1)) / scale
        attn = self.softmax(scores)
        if mask is not None:
            attn = attn + mask
        attn = self.dropout(attn)
        output = torch.bmm(attn, v)
        return output, attn
class ScaledDotProductAttentionPoolVariableKSTD(nn.Module):
    ''' Scaled Dot-Product Attention with std-based token pooling.

    After building the attention map, the `k` query rows whose attention
    distribution has the lowest standard deviation (the most uniform, least
    selective rows) are dropped, and the surviving rows are renormalized with
    a second softmax.  The output therefore has `k` fewer query positions.
    '''
    def __init__(self, attn_dropout=0.1, k=2):
        super().__init__()
        self.dropout = nn.Dropout(attn_dropout)  # constructed but unused: the dropout call below is commented out
        self.softmax = nn.Softmax(dim=2)
        self.k = k  # number of query rows to discard
    def forward(self, q, k, v, mask=None):
        # q: (B, Lq, D), k/v: (B, Lk, D) — batched matmul; D from k's last dim
        temperature = np.power(k.shape[-1], 0.5)
        attn = torch.bmm(q, k.permute(0, 2, 1))
        attn = attn / temperature
        attn = self.softmax(attn)
        # NOTE(review): mask is added after the softmax, not to the raw scores
        # as in conventional attention — confirm this is intended
        if mask is not None:
            attn = attn + mask
        # attn = self.dropout(attn)
        # per-query-row std of the attention weights; low std ~ uninformative
        std_token = torch.std(attn, dim=2)
        # indices of the (Lq - k) highest-std rows, restored to original order
        idx = torch.topk(std_token, std_token.shape[1] - self.k, dim=1)[1].sort(dim=1)[0]
        # expand to (B, Lq-k, Lk) so gather picks whole attention rows
        idx = idx.view(idx.shape[0], idx.shape[1], 1).repeat(1, 1, attn.shape[2])
        attn_pool = attn.gather(1, idx)
        # attn = torch.cat([attn[:, :1, :], attn_pool], dim=1)
        # renormalize the surviving rows
        attn = self.softmax(attn_pool)
        output = torch.bmm(attn, v)
        # idx is returned so callers can pool companion tensors consistently
        return output, attn, idx
class ScaledDotProductAttentionPoolVariableKSTDDual(nn.Module):
    ''' Scaled Dot-Product Attention with std-based token pooling, preserving
    the first query row.

    Same pruning as ScaledDotProductAttentionPoolVariableKSTD, but row 0
    (presumably a CLS-style token — confirm against the caller) is always kept:
    the std ranking and the drop of `k` rows apply only to rows 1..Lq-1.
    '''
    def __init__(self, attn_dropout=0.1, k=2):
        super().__init__()
        self.dropout = nn.Dropout(attn_dropout)  # constructed but unused: the dropout call below is commented out
        self.softmax = nn.Softmax(dim=2)
        self.k = k  # number of non-CLS query rows to discard
    def forward(self, q, k, v, mask=None):
        # q: (B, Lq, D), k/v: (B, Lk, D) — batched matmul; D from k's last dim
        temperature = np.power(k.shape[-1], 0.5)
        attn = torch.bmm(q, k.permute(0, 2, 1))
        attn = attn / temperature
        attn = self.softmax(attn)
        # NOTE(review): mask is added after the softmax, not to the raw scores
        # as in conventional attention — confirm this is intended
        if mask is not None:
            attn = attn + mask
        # attn = self.dropout(attn)
        # split off the always-kept first row; prune among the rest
        attn_cls = attn[:, :1, :]
        attn_t = attn[:, 1:, :]
        std_token = torch.std(attn_t, dim=2)
        # indices of the highest-std non-CLS rows, restored to original order
        idx = torch.topk(std_token, std_token.shape[1] - self.k, dim=1)[1].sort(dim=1)[0]
        idx = idx.view(idx.shape[0], idx.shape[1], 1).repeat(1, 1, attn_t.shape[2])
        attn_pool = attn_t.gather(1, idx)
        attn = torch.cat([attn_cls, attn_pool], dim=1)
        # renormalize the surviving rows
        attn = self.softmax(attn)
        output = torch.bmm(attn, v)
        # idx indexes rows of attn_t (offset by 1 relative to the full map)
        return output, attn, idx
class _Linear(nn.Module):
def __init__(self, d_model):
super().__init__()
self.weight = nn.Parameter(torch.Tensor(d_model, d_model))
self.bias = nn.Parameter(torch.Tensor(d_model, ))
def forward(self, x):
return x @ self.weight.t() + self.bias
class MultiHeadSplitAttentionSpatioToken(nn.Module):
    ''' Multi-Head Attention module with factorized spatio-temporal attention.

    The input sequence is interpreted as t frames of (w*h + 1) tokens each
    (spatial grid plus one extra token per frame).  Attention is applied in
    two passes: first along the temporal axis per spatial position
    (attention_t), then along the spatial axis per frame (attention_s).

    NOTE(review): `value` and `key_padding_mask` are accepted but never used —
    q alone feeds the fused in-projection (self-attention); the same
    `attn_mask` is passed to both passes.  `avg_pool` is constructed but not
    used in forward.  Confirm these against the training code.
    '''
    def __init__(self, head_num, d_model, dropout=0.1):
        super().__init__()
        self.head_num = head_num
        self.d_model = d_model
        # fused q/k/v projection; NOTE(review): allocated uninitialized — the
        # weights are presumably loaded/initialized elsewhere, confirm
        self.in_proj_weight = nn.Parameter(torch.Tensor(d_model * 3, d_model))
        self.in_proj_bias = nn.Parameter(torch.Tensor(d_model * 3, ))
        self.attention_t = ScaledDotProductAttention(attn_dropout=dropout)
        self.attention_s = ScaledDotProductAttention(attn_dropout=dropout)
        self.avg_pool = nn.AvgPool2d(kernel_size=(2, 1), stride=(2, 1))
        self.out_proj = _Linear(d_model)
        self.dropout = nn.Dropout(dropout)
    def forward(self, q, k, value, orig_shape, attn_mask=None, key_padding_mask=None):
        mask = attn_mask
        # original video shape; q is its flattened token sequence
        b, c, t, w, h = orig_shape
        # q: (seq_l, batch, channels); note `c` is rebound to the channel dim here
        seq_l, sz_b, c = q.shape
        # fused projection, then split into q/k/v along the channel axis
        qkv = q @ self.in_proj_weight.t() + self.in_proj_bias
        q = qkv[:, :, :c]
        k = qkv[:, :, c: 2 * c]
        v = qkv[:, :, 2 * c:]
        # spatial-pass q/k: (head*b, c/head, t, w*h+1)
        q_s = q.view(t, w * h + 1, b, self.head_num, c // self.head_num).permute(3, 2, 4, 0, 1).contiguous().view(
            self.head_num * b, c // self.head_num, t, w * h + 1)
        k_s = k.view(t, w * h + 1, b, self.head_num, c // self.head_num).permute(3, 2, 4, 0, 1).contiguous().view(
            self.head_num * b, c // self.head_num, t, w * h + 1)
        # temporal-pass q/k/v: (head*b*(w*h+1), t, c/head) — attention over time
        q_t = q.view(t, w * h + 1, b, self.head_num, c // self.head_num).permute(3, 2, 1, 0, 4).contiguous().view(
            -1, t, self.d_model // self.head_num)
        k_t = k.view(t, w * h + 1, b, self.head_num, c // self.head_num).permute(3, 2, 1, 0, 4).contiguous().view(
            -1, t, self.d_model // self.head_num)
        v_t = v.view(t, w * h + 1, b, self.head_num, c // self.head_num).permute(3, 2, 1, 0, 4).contiguous().view(
            -1, t, self.d_model // self.head_num)
        # pass 1: temporal attention (attnx is computed but not returned)
        output_t, attnx = self.attention_t(q_t, k_t, v_t, mask=mask)
        # temporally-attended values become the spatial pass's v:
        # (head*b*t, w*h+1, c/head)
        v_s = output_t.view(self.head_num, b, w * h + 1, t, self.d_model // self.head_num).permute(0, 1, 3, 2, 4).contiguous().view(self.head_num * sz_b * t, w * h + 1, -1)
        q_s = q_s.permute(0, 2, 3, 1).contiguous().view(-1, w * h + 1, c // self.head_num)
        k_s = k_s.permute(0, 2, 3, 1).contiguous().view(-1, w * h + 1, c // self.head_num)
        # pass 2: spatial attention within each frame
        output_s, attn = self.attention_s(q_s, k_s, v_s, mask=mask)
        _, seq_l, _ = output_s.shape  # NOTE(review): rebinds seq_l; unused afterwards
        # merge heads back and restore (seq_l, batch, channels)
        output = output_s.view(self.head_num, b, -1, w * h + 1, self.d_model // self.head_num).permute(
            2, 3, 1, 0, 4).contiguous().view(-1, b, c)
        output = self.dropout(self.out_proj(output))
        return output, attn
class MultiHeadSplitAttentionDualToken(nn.Module):
    ''' Multi-Head Attention module '''
    # Same factorized space-time attention as MultiHeadSplitAttentionSpatioToken
    # but with one extra temporal position (t + 1), i.e. an additional token
    # along the time axis.

    def __init__(self, head_num, d_model, dropout=0.1):
        super().__init__()
        self.head_num = head_num
        self.d_model = d_model
        # Fused q/k/v projection; NOTE(review): never explicitly initialized
        # here — presumably an external init is applied; confirm.
        self.in_proj_weight = nn.Parameter(torch.Tensor(d_model * 3, d_model))
        self.in_proj_bias = nn.Parameter(torch.Tensor(d_model * 3, ))
        self.attention_t = ScaledDotProductAttention(attn_dropout=dropout)  # temporal
        self.attention_s = ScaledDotProductAttention(attn_dropout=dropout)  # spatial
        self.avg_pool = nn.AvgPool2d(kernel_size=(2, 1), stride=(2, 1))  # NOTE(review): unused in forward
        self.out_proj = _Linear(d_model)
        self.dropout = nn.Dropout(dropout)  # NOTE(review): unused here, unlike the SpatioToken sibling — confirm

    def forward(self, q, k, value, orig_shape, attn_mask=None, key_padding_mask=None):
        # q: (seq_len, batch, channels); k, value, key_padding_mask unused.
        # Assumes seq_l factors as (t + 1) * (w*h + 1) — TODO confirm.
        mask = attn_mask
        b, c, t, w, h = orig_shape
        seq_l, sz_b, c = q.shape
        qkv = q @ self.in_proj_weight.t() + self.in_proj_bias
        q = qkv[:, :, :c]
        k = qkv[:, :, c: 2 * c]
        v = qkv[:, :, 2 * c:]
        q_s = q.view(t + 1, w * h + 1, b, self.head_num, c // self.head_num).permute(3, 2, 4, 0, 1).contiguous().view(
            self.head_num * b, c // self.head_num, t + 1, w * h + 1)
        k_s = k.view(t + 1, w * h + 1, b, self.head_num, c // self.head_num).permute(3, 2, 4, 0, 1).contiguous().view(
            self.head_num * b, c // self.head_num, t + 1, w * h + 1)
        q_t = q.view(t + 1, w * h + 1, b, self.head_num, c // self.head_num).permute(3, 2, 1, 0, 4).contiguous().view(-1, t + 1,
                                                                                                                      self.d_model // self.head_num)
        k_t = k.view(t + 1, w * h + 1, b, self.head_num, c // self.head_num).permute(3, 2, 1, 0, 4).contiguous().view(-1, t + 1,
                                                                                                                      self.d_model // self.head_num)
        v_t = v.view(t + 1, w * h + 1, b, self.head_num, c // self.head_num).permute(3, 2, 1, 0, 4).contiguous().view(-1, t + 1,
                                                                                                                      self.d_model // self.head_num)
        output_t, attnx = self.attention_t(q_t, k_t, v_t, mask=mask)
        # Temporal output becomes the value stream for spatial attention.
        v_s = output_t.view(self.head_num, b, w * h + 1, t + 1, self.d_model // self.head_num).permute(0, 1, 3, 2, 4).contiguous().view(self.head_num * sz_b * (t + 1), w * h + 1, -1)
        q_s = q_s.permute(0, 2, 3, 1).contiguous().view(-1, w * h + 1, c // self.head_num)
        k_s = k_s.permute(0, 2, 3, 1).contiguous().view(-1, w * h + 1, c // self.head_num)
        output_s, attn = self.attention_s(q_s, k_s, v_s, mask=mask)
        _, seq_l, _ = output_s.shape
        # Merge heads and restore (seq_len, batch, channels) layout.
        output = output_s.view(self.head_num, b, -1, w * h + 1, self.d_model // self.head_num).permute(2, 3, 1, 0,
                                                                                                       4).contiguous().view(
            -1, b, c)
        output = self.out_proj(output)
        return output, attn
class MultiHeadSequentialPoolAttentionVariableKSTDReverse(nn.Module):
''' Multi-Head Attention module '''
''' Multi-Head Attention module '''
def __init__(self, head_num, d_model, dropout=0.1, pool=False, k=2):
super().__init__()
self.head_num = head_num
self.d_model = d_model
self.in_proj_weight = nn.Parameter(torch.Tensor(d_model * 3, d_model))
self.in_proj_bias = nn.Parameter(torch.Tensor(d_model * 3, ))
self.pool = pool
self.k = k
torch.nn.init.xavier_uniform(self.in_proj_weight)
self.in_proj_bias.data.fill_(0.01)
if pool:
self.attention_t = ScaledDotProductAttentionPoolVariableKSTD(attn_dropout=dropout, k=k)
else:
self.attention_t = ScaledDotProductAttention(attn_dropout=dropout)
self.attention_s = ScaledDotProductAttention(attn_dropout=dropout)
self.out_proj = _Linear(d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, value, orig_shape, attn_mask=None, key_padding_mask=None):
mask = attn_mask
b, c, t, w, h = orig_shape
seq_l, sz_b, c = q.shape
q_cls = q[:1, :, :]
q = q[1:, :, :]
qkv = q @ self.in_proj_weight.t() + self.in_proj_bias
q = qkv[:, :, :c]
k = qkv[:, :, c: 2 * c]
v = qkv[:, :, 2 * c:]
q_t = q.view(t, w * h, b, self.head_num, c // self.head_num).permute(3, 2, 1, 0, 4).contiguous().view(-1, t,
self.d_model // self.head_num)
k_t = k.view(t, w * h, b, self.head_num, c // self.head_num).permute(3, 2, 1, 0, 4).contiguous().view(-1, t,
self.d_model // self.head_num)
v_t = v.view(t, w * h, b, self.head_num, c // self.head_num).permute(3, 2, 1, 0, 4).contiguous().view(-1, t,
self.d_model // self.head_num)
if self.pool:
output_t, attnx, idx = self.attention_t(q_t, k_t, v_t, mask=mask)
v_s = output_t.view(self.head_num, b, w * h, t - self.k, self.d_model // self.head_num).permute(0, 1, 3, 2, 4).contiguous().view(
self.head_num * sz_b * (t - self.k), w * h, -1)
idx_ = idx[:, :, :1].repeat(1, 1, c // self.head_num)
q_t = q_t.gather(1, idx_)
k_t = k_t.gather(1, idx_)
q_s = q_t.view(self.head_num, b, w * h, t - self.k, self.d_model // self.head_num).permute(0, 1, 3, 2,
4).contiguous().view(
self.head_num * sz_b * (t - self.k), w * h, -1)
k_s = k_t.view(self.head_num, b, w * h, t - self.k, self.d_model // self.head_num).permute(0, 1, 3, 2,
4).contiguous().view(
self.head_num * sz_b * (t - self.k), w * h, -1)
else:
output_t, attnx = self.attention_t(q_t, k_t, v_t, mask=mask)
idx = None
v_s = output_t.view(self.head_num, b, w * h, t, self.d_model // self.head_num).permute(0, 1, 3, 2, 4).contiguous().view(self.head_num * sz_b * t, w | |
from common.diagrams import Diagram
from common.definitions import G_PROTEIN_SEGMENTS
from residue.models import Residue
from residue.models import ResidueGenericNumber
from residue.models import ResidueNumberingScheme
from django.utils.safestring import mark_safe
from math import cos, sin, pi, floor,sqrt
from datetime import datetime
from collections import OrderedDict
class DrawGproteinPlot(Diagram):
    def __init__(self, residue_list, protein_class, protein_name, nobuttons = None):
        """Build the snake-plot SVG fragments for a G-protein.

        Groups *residue_list* into protein segments, then renders helices and
        sheets (into ``self.helixoutput``) and loops (into ``self.output`` /
        ``self.traceoutput``) for later assembly in ``__str__``.

        NOTE(review): the *nobuttons* parameter is ignored — the attribute is
        hard-coded to 'gprotein'; confirm that is intentional.
        """
        self.nobuttons = 'gprotein'
        self.type = 'snakeplot'
        plot_data = {}
        plot_data['direction'] = [0, 0, 1, 0, 1, 0, 1, 0] # 0: EC->IC, 1: IC->EC
        plot_data['helixRadius'] = 70
        self.receptorId = protein_name
        self.family = protein_class
        self.output = ''
        # FIXME DO PUREIMAGE
        # $pureImage = isset($_GET['pureimage']) && $_GET['pureimage'] == 'TRUE' ? TRUE : FALSE;
        # get sequence, baldwin, and bw information of this receptor
        self.sequence = residue_list
        self.segments = {}
        self.segments_full = OrderedDict()
        i = 0
        for r in self.sequence:
            # NOTE(review): if a residue has neither protein_segment nor
            # segment_slug, `segment` keeps its previous value (or is unbound
            # on the first iteration) — confirm inputs guarantee one of them.
            if r.protein_segment:
                segment = str(r.protein_segment.slug)
            elif r.segment_slug: #from family aligment
                segment = str(r.segment_slug)
            if segment not in self.segments:
                self.segments[segment] = []
                self.segments_full[segment] = r.protein_segment
            label = ''
            displaylabel = ''
            if r.generic_number:
                label = r.generic_number.label
            elif hasattr(r, 'family_generic_number'):
                label = r.family_generic_number
            if r.display_generic_number: displaylabel = r.display_generic_number.label
            displaylabel = r.amino_acid + str(r.sequence_number) + " \n " + displaylabel
            if hasattr(r, 'frequency'):
                displaylabel = displaylabel + "\n" + r.frequency
            # Per-residue record: [seq number, amino acid, generic label, tooltip label]
            self.segments[segment].append([r.sequence_number, r.amino_acid,label,displaylabel])
            i += 1
        # for helix_num in range(1,2): #FIX for missing generic numbers
        #     rs = self.segments['H5']
        #     for i in range(0,len(rs)):
        #         if not rs[i][2]:
        #             if i+1<len(rs): #if there is a next one
        #                 if rs[i+1][2]: #if it has generic number
        #                     number = str(int(rs[i+1][2].split('x')[1])-1)
        #                     rs[i][2] = str(helix_num) + "x" + number
        #                     print(rs[i][2])
        self.helixWidth = 85 # Width of helix
        self.resNumPerRow = 4 # Residue number per row in helix
        self.angleDeg = 22.0 # Angle size of each helix turn
        self.residue_radius = 12 # Radius of the residue circle
        # svg image padding offset
        self.offsetX = 0 #-200
        self.offsetY = 0 #-50
        # margin between two helixes
        self.margin = 10
        # highest and lowest bound of this svg
        self.high =0
        self.low = 0
        # keep track of max Y positions of intra/extra loops
        self.maxY = {'bottom':0,'top':0}
        self.maxX = {'left':0,'right':0}
        # helices length
        # helicesLength = Svg::getSnakePlotHelicesLength($baldwin, $helixWidth, $angleDeg) #FIXME
        # top and bottom residue coords in each helix
        self.TBCoords = {}
        self.output = ""
        self.traceoutput = ""
        self.helixoutput = ""
        # Draw sheets and helices
        self.count = 1
        self.count_sheet = 0
        for s in G_PROTEIN_SEGMENTS['Full']:
            if s in self.segments_full:
                if self.segments_full[s].category=='helix':
                    self.helixoutput += self.drawSnakePlotHelix(s)
                    self.count += 1
                elif self.segments_full[s].category=='sheet':
                    self.helixoutput += self.drawSnakePlotSheet(s)
                    self.count += 1
                    self.count_sheet += 1
        # Draw loops
        self.count = 0
        for s in G_PROTEIN_SEGMENTS['Full']:
            if s in self.segments_full and self.segments_full[s].category=='loop':
                #pass
                self.drawSnakePlotLoop(s)
            else:
                self.count += 1
def __str__(self):
self.output = "<g id=snake transform='translate(0, " + str(-self.low+ self.offsetY) + ")'>" + self.traceoutput+self.output+self.helixoutput+self.drawToolTip() + "</g>"; #for resizing height
return mark_safe(self.create(self.output,self.maxX['right']+30,self.high-self.low+self.offsetY*2,"snakeplot", self.nobuttons))
    def drawSnakePlotHelix(self, segment):
        """Render one helical segment as SVG circles plus connecting traces.

        Residues are laid out in alternating rows of 3 and 4 per helix turn,
        with special-case handling for bulges (extra residue in a turn) and
        skips (missing generic numbers). Returns the trace paths followed by
        the residue markup; also updates self.TBCoords and self.maxX.
        """
        rs = self.segments[segment]
        helix_num = self.count
        self.TBCoords[helix_num] = {}
        if helix_num%2!=0: rs.reverse() # reverse direction for even helix because they go from inside to outside
        output_residues = []
        res_num = len(self.segments[segment])
        output_residue_in = ''
        output_residue_out = ''
        output_trace = ''
        # Each helix is placed one column to the right of the previous one;
        # sheets drawn so far shift everything 20px left each.
        startX = self.helixWidth+self.offsetX+(self.margin+self.helixWidth)*(helix_num-1)-(self.count_sheet*20)
        startY = self.offsetY
        row_length = 3
        row_pos = 0
        row = 0
        prevGeneric = '0.0.0'
        bulgeX = 0
        bulgeY = 0
        bulge = 0
        skip = 0
        indentX = -self.residue_radius+3
        indentY = 3
        for i in range(0,res_num):
            # Compare consecutive generic numbers (third dotted field) to
            # detect bulges ('...1' suffix) and gaps in the numbering.
            prevGeneric_number = prevGeneric.split('.')[2]
            currGeneric_number = rs[i][2].split('.')[2]
            if ((helix_num%2==0 and prevGeneric_number+'1'==currGeneric_number) or (helix_num%2!=0 and str(int(prevGeneric_number)-1)+'1'==currGeneric_number)) and i!=0:
                bulge = 1
                if row_pos==0: # if first in row, use space for bulge
                    bulgeY = 5
                    bulgeX = 7
                else:
                    bulgeY = 5
                    bulgeX = 5
                row_length+=1
            elif i!=0 and ((helix_num%2!=0 and int(prevGeneric_number)-1!= int(currGeneric_number)) or (helix_num%2==0 and int(prevGeneric_number)+1!= int(currGeneric_number))):
                # Missing residue(s): advance layout and draw a dashed line.
                skip = 1
                if row_pos!=0 and row_pos+1<row_length:
                    nextX =round(startX-(row_pos+1)*self.residue_radius*1.5+indentX+bulgeX)
                    nextY = round(startY+row*self.residue_radius*2.4+(row_pos+1)*self.residue_radius*0.5+indentY+bulgeY)
                    # NOTE(review): prevX/prevY are only defined after the
                    # first iteration completes; a skip at i==1 relies on that.
                    output_trace += "<line x1="+str(prevX)+" y1="+str(prevY)+" x2="+str(nextX)+" y2="+str(nextY)+" stroke='grey' fill='none' stroke-width='1' stroke-dasharray='1,1' />"
                    row_pos +=1
                elif row_pos+1==row_length:
                    row+=1
                    row_pos=0
                    row_length = 3 if row_length == 4 else 4
                else:
                    row_pos +=1
            # move left as you go down a row
            x = round(startX-row_pos*self.residue_radius*1.6+indentX+bulgeX)
            # Move down with right amount
            y = round(startY+row*self.residue_radius*2.4+row_pos*self.residue_radius*0.5+indentY+bulgeY)
            output_residue = self.DrawResidue(x,y,rs[i][1], rs[i][0], rs[i][3], self.residue_radius)
            if x<self.maxX['left']: self.maxX['left'] = x
            if x>self.maxX['right']: self.maxX['right'] = x
            row_pos += 1
            if bulge==1:
                if row_pos==1: # if first in row, use space for bulge
                    bulgeY = -3
                    bulgeX = 10
                else:
                    bulgeY = -3
                    bulgeX = 7
                rs[i][2] = prevGeneric # make it the prev one, to catch missing ones correctly
                bulge = 0
            if row_length==3:
                output_residue_in += output_residue
            else:
                output_residue_out += output_residue
            output_residues.append(output_residue)
            # Record the helix endpoints for the loop-drawing pass.
            if i==0: self.TBCoords[helix_num]['top'] = [x,y]
            if i==res_num-1: self.TBCoords[helix_num]['bottom'] = [x,y]
            if (row_pos==1 and row!=0) or (skip==1 and row_pos==2): # if need for trace
                if row_length==3: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-21)+" "+str(y-8)+" T"+str(x)+" "+str(y)
                if row_length>=4: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-24)+" "+str(y-7)+" T"+str(x)+" "+str(y)
                output_trace += "<path d='" + points + "' stroke='grey' fill='none' stroke-width='2' />"
            # alternate between 4 and 3 res per row
            if row_length>3 and row_pos>=row_length:
                row_length=3
                row_pos = 0
                row += 1
                bulgeX = 0
                bulgeY = 0
                indentX = -self.residue_radius+3
                indentY = 3
            elif row_length==3 and row_pos>=3:
                row_length=4
                row_pos = 0
                row += 1
                bulgeX = 0
                bulgeY = 0
                indentX = 0
                indentY = 0
            skip = 0
            prevX = x
            prevY = y
            prevGeneric = rs[i][2]
        temp = ''
        if helix_num%2!=0: output_residues.reverse()
        for res in output_residues:
            temp += res
        return output_trace+temp
    def drawSnakePlotSheet(self, segment):
        """Render one beta-sheet segment as a vertical column of squares.

        Mirrors drawSnakePlotHelix's bookkeeping (bulges, skips, row state)
        but places residues in a single vertical column (no row offsets) and
        uses DrawResidueSquare; trace lines are computed but commented out.
        """
        rs = self.segments[segment]
        helix_num = self.count
        self.TBCoords[helix_num] = {}
        if helix_num%2!=0: rs.reverse() # reverse direction for even helix because they go from inside to outside
        output_residues = []
        res_num = len(self.segments[segment])
        output_residue_in = ''
        output_residue_out = ''
        output_trace = ''
        startX = 50+self.offsetX+(self.margin+self.helixWidth)*(helix_num-1)-(self.count_sheet*20)
        startY = self.offsetY
        row_length = 3
        row_pos = 0
        row = 0
        prevGeneric = '0.0.0'
        bulgeX = 0
        bulgeY = 0
        bulge = 0
        skip = 0
        indentX = -self.residue_radius+3
        indentY = 3
        for i in range(0,res_num):
            prevGeneric_number = prevGeneric.split('.')[2]
            currGeneric_number = rs[i][2].split('.')[2]
            # NOTE(review): unlike the helix version, this bulge test has no
            # `i != 0` guard — confirm whether a bulge can fire on the first
            # residue here intentionally.
            if (helix_num%2==0 and prevGeneric_number+'1'==currGeneric_number) or (helix_num%2!=0 and str(int(prevGeneric_number)-1)+'1'==currGeneric_number):
                bulge = 1
                if row_pos==0: # if first in row, use space for bulge
                    bulgeY = 5
                    bulgeX = 7
                else:
                    bulgeY = 5
                    bulgeX = 5
                row_length+=1
            elif i!=0 and ((helix_num%2!=0 and int(prevGeneric_number)-1!= int(currGeneric_number)) or (helix_num%2==0 and int(prevGeneric_number)+1!= int(currGeneric_number))):
                skip = 1
                if row_pos!=0 and row_pos+1<row_length:
                    nextX =round(startX-(row_pos+1)*self.residue_radius*1.5+indentX+bulgeX)
                    nextY = round(startY+row*self.residue_radius*2.4+(row_pos+1)*self.residue_radius*0.5+indentY+bulgeY)
                    #output_trace += "<line x1="+str(prevX)+" y1="+str(prevY)+" x2="+str(nextX)+" y2="+str(nextY)+" stroke='grey' fill='none' stroke-width='1' stroke-dasharray='1,1' />"
                    row_pos +=1
                elif row_pos+1==row_length:
                    row+=1
                    row_pos=0
                    row_length = 3 if row_length == 4 else 4
                else:
                    row_pos +=1
            # move left as you go down a row
            x = round(startX) #+indentX+bulgeX
            # Move down with right amount
            y = round(startY+i*self.residue_radius*1.5)
            output_residue = self.DrawResidueSquare(x,y,rs[i][1], rs[i][0], rs[i][3], self.residue_radius)
            if x<self.maxX['left']: self.maxX['left'] = x
            if x>self.maxX['right']: self.maxX['right'] = x
            row_pos += 1
            if bulge==1:
                if row_pos==1: # if first in row, use space for bulge
                    bulgeY = -3
                    bulgeX = 10
                else:
                    bulgeY = -3
                    bulgeX = 7
                rs[i][2] = prevGeneric # make it the prev one, to catch missing ones correctly
                bulge = 0
            if row_length==3:
                output_residue_in += output_residue
            else:
                output_residue_out += output_residue
            output_residues.append(output_residue)
            # Record the sheet endpoints for the loop-drawing pass.
            if i==0: self.TBCoords[helix_num]['top'] = [x,y]
            if i==res_num-1: self.TBCoords[helix_num]['bottom'] = [x,y]
            if (row_pos==1 and row!=0) or (skip==1 and row_pos==2): # if need for trace
                if row_length==3: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-21)+" "+str(y-8)+" T"+str(x)+" "+str(y)
                if row_length>=4: points = "M "+str(prevX)+" "+str(prevY)+" Q"+str(prevX-40)+" "+str(prevY+30)+", "+str(x-24)+" "+str(y-7)+" T"+str(x)+" "+str(y)
                # output_trace += "<path d='" + points + "' stroke='grey' fill='none' stroke-width='2' />"
            # alternate between 4 and 3 res per row
            if row_length>3 and row_pos>=row_length:
                row_length=3
                row_pos = 0
                row += 1
                bulgeX = 0
                bulgeY = 0
                indentX = -self.residue_radius+3
                indentY = 3
            elif row_length==3 and row_pos>=3:
                row_length=4
                row_pos = 0
                row += 1
                bulgeX = 0
                bulgeY = 0
                indentX = 0
                indentY = 0
            skip = 0
            prevX = x
            prevY = y
            prevGeneric = rs[i][2]
        temp = ''
        if helix_num%2!=0: output_residues.reverse()
        for res in output_residues:
            temp += res
        return output_trace+temp
def drawSnakePlotLoop(self, | |
len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.pix2foc, None, *args)
sip_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention.
`Paper IV`_ table lookup distortion correction is not applied,
even if that information existed in the FITS file that
initialized this :class:`~astropy.wcs.WCS` object. To correct
for that, use `~astropy.wcs.WCS.pix2foc` or
`~astropy.wcs.WCS.p4_pix2foc`.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(__.TWO_OR_MORE_ARGS('2', 8),
__.RETURNS('focal coordinates', 8))
def sip_foc2pix(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.foc2pix, None, *args)
sip_foc2pix.__doc__ = """
Convert focal plane coordinates to pixel coordinates using the
`SIP`_ polynomial distortion convention.
`Paper IV`_ table lookup distortion correction is not applied,
even if that information existed in the FITS file that
initialized this `~astropy.wcs.WCS` object.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(__.TWO_OR_MORE_ARGS('2', 8),
__.RETURNS('pixel coordinates', 8))
def to_fits(self, relax=False, key=None):
"""
Generate an `astropy.io.fits.HDUList` object with all of the
information stored in this object. This should be logically identical
to the input FITS file, but it will be normalized in a number of ways.
See `to_header` for some warnings about the output produced.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`relaxwrite` for details.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
hdulist : `astropy.io.fits.HDUList`
"""
header = self.to_header(relax=relax, key=key)
hdu = fits.PrimaryHDU(header=header)
hdulist = fits.HDUList(hdu)
self._write_det2im(hdulist)
self._write_distortion_kw(hdulist)
return hdulist
def to_header(self, relax=False, key=None):
"""
Generate an `astropy.io.fits.Header` object with the basic WCS and SIP
information stored in this object. This should be logically
identical to the input FITS file, but it will be normalized in
a number of ways.
.. warning::
This function does not write out Paper IV distortion
information, since that requires multiple FITS header data
units. To get a full representation of everything in this
object, use `to_fits`.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`relaxwrite` for details.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
header : `astropy.io.fits.Header`
Notes
-----
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required
keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or
``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will
be translated to standard (this is partially dependent on
whether `fix` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and
only if they differ from the unit matrix. Thus, if the
matrix is unity then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`to_header` tries hard to write meaningful comments.
8. Keyword order may be changed.
"""
if key is not None:
self.wcs.alt = key
if relax not in (True, False):
do_sip = relax & WCSHDO_SIP
relax &= ~WCSHDO_SIP
else:
do_sip = relax
if self.wcs is not None:
header_string = self.wcs.to_header(relax)
header = fits.Header.fromstring(header_string)
else:
header = fits.Header()
if do_sip and self.sip is not None:
for key, val in self._write_sip_kw().items():
header[key] = val
return header
def to_header_string(self, relax=False):
"""
Identical to `to_header`, but returns a string containing the
header cards.
"""
return str(self.to_header(relax))
def footprint_to_file(self, filename=None, color='green', width=2):
"""
Writes out a `ds9`_ style regions file. It can be loaded
directly by `ds9`_.
Parameters
----------
filename : str, optional
Output file name - default is ``'footprint.reg'``
color : str, optional
Color to use when plotting the line.
width : int, optional
Width of the region line.
"""
if not filename:
filename = 'footprint.reg'
comments = '# Region file format: DS9 version 4.0 \n'
comments += ('# global color=green font="helvetica 12 bold ' +
'select=1 highlite=1 edit=1 move=1 delete=1 ' +
'include=1 fixed=0 source\n')
f = open(filename, 'a')
f.write(comments)
f.write('linear\n')
f.write('polygon(')
self.calcFootprint().tofile(f, sep=',')
f.write(') # color={0}, width={1:d} \n'.format(color, width))
f.close()
    # Deprecated public aliases for the image dimensions cached from the
    # header by _get_naxis().
    naxis1 = deprecated_attribute('naxis1', '0.2')
    naxis2 = deprecated_attribute('naxis2', '0.2')

    @deprecated('0.2', message='This method should not be public')
    def get_naxis(self, header=None):
        # Thin deprecated wrapper around the private implementation.
        return self._get_naxis(header=header)
    def _get_naxis(self, header=None):
        """Cache ``NAXIS1``/``NAXIS2`` from *header* onto the instance,
        defaulting both to 0 when absent or when *header* is raw text."""
        self._naxis1 = 0
        self._naxis2 = 0
        # Strings are raw header text, not mappings, so they are skipped
        # here (six covers both the Py2 and Py3 string types).
        if (header is not None and
            not isinstance(header, (six.text_type, six.binary_type))):
            self._naxis1 = header.get('NAXIS1', 0)
            self._naxis2 = header.get('NAXIS2', 0)
def rotateCD(self, theta):
_theta = np.deg2rad(theta)
_mrot = np.zeros(shape=(2, 2), dtype=np.double)
_mrot[0] = (np.cos(_theta), np.sin(_theta))
_mrot[1] = (-np.sin(_theta), np.cos(_theta))
new_cd = np.dot(self.wcs.cd, _mrot)
self.wcs.cd = new_cd
def printwcs(self):
"""
Temporary function for internal use.
"""
print('WCS Keywords\n')
if hasattr(self.wcs, 'cd'):
print('CD_11 CD_12: {!r} {!r}'.format(
self.wcs.cd[0, 0], self.wcs.cd[0, 1]))
print('CD_21 CD_22: {!r} {!r}'.format(
self.wcs.cd[1, 0], self.wcs.cd[1, 1]))
else:
print('PC_11 PC_12: {!r} {!r}'.format(
self.wcs.pc[0, 0], self.wcs.pc[0, 1]))
print('PC_21 PC_22: {!r} {!r}'.format(
self.wcs.pc[1, 0], self.wcs.pc[1, 1]))
print('CRVAL : {!r} {!r}'.format(
self.wcs.crval[0], self.wcs.crval[1]))
print('CRPIX : {!r} {!r}'.format(
self.wcs.crpix[0], self.wcs.crpix[1]))
if not self.wcs.has_cd():
print('CDELT : {!r} {!r}'.format(
self.wcs.cdelt[0], self.wcs.cdelt[1]))
print('NAXIS : {!r} {!r}'.format(
self.naxis1, self.naxis2))
def get_axis_types(self):
"""
Similar to `self.wcsprm.axis_types <_wcs.Wcsprm.axis_types>`
but provides the information in a more Python-friendly format.
Returns
-------
result : list of dicts
Returns a list of dictionaries, one for each axis, each
containing attributes about the type of that axis.
Each dictionary has the following keys:
- 'coordinate_type':
- None: Non-specific coordinate type.
- 'stokes': Stokes coordinate.
- 'celestial': Celestial coordinate (including ``CUBEFACE``).
- 'spectral': Spectral coordinate.
- 'scale':
- 'linear': Linear axis.
- 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``).
- 'non-linear celestial': Non-linear celestial axis.
- 'non-linear spectral': Non-linear spectral axis.
- 'logarithmic': Logarithmic axis.
- 'tabular': Tabular axis.
- 'group'
- Group number, e.g. lookup table number
- 'number'
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables:
- the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
generate an error.
"""
if self.wcs is None:
raise AttributeError(
"This WCS object does not have a wcsprm object.")
coordinate_type_map = {
0: None,
1: 'stokes',
2: 'celestial',
3: 'spectral'}
scale_map = {
0: 'linear',
1: 'quantized',
2: 'non-linear celestial',
3: 'non-linear spectral',
4: 'logarithmic',
5: 'tabular'}
result = []
for axis_type in self.wcs.axis_types:
subresult = {}
coordinate_type = (axis_type // 1000) % 10
subresult['coordinate_type'] = coordinate_type_map[coordinate_type]
scale = (axis_type // 100) % 10
subresult['scale'] = scale_map[scale]
group = (axis_type // 10) % 10
subresult['group'] = group
number = axis_type % 10
subresult['number'] = number
result.append(subresult)
return result
    def __reduce__(self):
        """
        Support pickling of WCS objects.  This is done by serializing
        to an in-memory FITS file and dumping that as a string.
        """
        # Round-trip through to_fits so the C-level wcsprm state (which
        # cannot be pickled directly) is captured as FITS bytes.
        hdulist = self.to_fits(relax=True)

        buffer = io.BytesIO()
        hdulist.writeto(buffer)

        # Unpickling calls __WCS_unpickle__(class, __dict__, fits_bytes).
        return (__WCS_unpickle__,
                (self.__class__, self.__dict__, buffer.getvalue(),))
def __WCS_unpickle__(cls, dct, fits_data):
    """
    Unpickles a WCS object from a serialized FITS string.
    """
    # Restore the plain instance attributes first, then re-run WCS.__init__
    # from the serialized FITS header so the C-level wcsprm structures are
    # rebuilt (they cannot be pickled directly).
    self = cls.__new__(cls)
    self.__dict__.update(dct)

    buffer = io.BytesIO(fits_data)
    hdulist = fits.open(buffer)

    WCS.__init__(self, hdulist[0].header, hdulist)

    return self
def find_all_wcs(header, relax=True, keysel=None, fix=True,
translate_units='',
_do_set=True):
"""
| |
to ignore zeros points caused by stopped/aborted scans
new_v_shape = len(channel_data) // datashape[1]
new_data = np.vstack(channel_data)
new_data = new_data.astype(np.float32, copy=False) # Change representation to np.float32
new_data = new_data[: new_v_shape * datashape[1], :]
new_data = new_data.reshape([new_v_shape, datashape[1], len(channel_data[1])])
if new_data.shape[2] != spectrum_len:
# merlin detector has spectrum len 2048
# make all the spectrum len to 4096, to avoid unpredicted error in fitting part
new_tmp = np.zeros([new_data.shape[0], new_data.shape[1], spectrum_len])
new_tmp[:, :, : new_data.shape[2]] = new_data
new_data = new_tmp
if fly_type in ("pyramid",):
new_data = flip_data(new_data, subscan_dims=subscan_dims)
if sum_data is None:
sum_data = np.copy(new_data)
else:
sum_data += new_data
if create_each_det:
data_assembled[detname] = new_data
if sum_data is not None:
data_assembled["det_sum"] = sum_data
# position data
pos_names, pos_data = get_name_value_from_db(pos_list, data, datashape)
# I don't have knowledge of all possible scenarios to change the following algorithm for
# naming 'x_pos' and 'y_pos'. It definitely covers the basic cases of having x and y axis.
# It will also produce good dataset if the naming is inconsistent.
for i in range(len(pos_names)):
if "x" in pos_names[i]:
pos_names[i] = "x_pos"
elif "y" in pos_names[i]:
pos_names[i] = "y_pos"
if "x_pos" not in pos_names or "y_pos" not in pos_names:
pos_names = ["x_pos", "y_pos"]
# need to change shape to sth like [2, 100, 100]
n_pos = min(pos_data.shape[2], len(pos_names))
data_temp = np.zeros([n_pos, pos_data.shape[0], pos_data.shape[1]])
for i in range(n_pos):
data_temp[i, :, :] = pos_data[:, :, i]
if fly_type in ("pyramid",):
for i in range(data_temp.shape[0]):
# flip position the same as data flip on det counts
data_temp[i, :, :] = flip_data(data_temp[i, :, :], subscan_dims=subscan_dims)
data_assembled["pos_names"] = pos_names
data_assembled["pos_data"] = data_temp[:, :new_v_shape, :]
# scaler data
scaler_names, scaler_data = get_name_value_from_db(scaler_list, data, datashape)
if fly_type in ("pyramid",):
scaler_data = flip_data(scaler_data, subscan_dims=subscan_dims)
if base_val is not None: # base line shift for detector, for SRX
base_val = np.array([base_val])
if len(base_val) == 1:
scaler_data = np.abs(scaler_data - base_val)
else:
for i in scaler_data.shape[2]:
scaler_data[:, :, i] = np.abs(scaler_data[:, :, i] - base_val[i])
data_assembled["scaler_names"] = scaler_names
data_assembled["scaler_data"] = scaler_data[:new_v_shape, :]
return data_assembled
def get_name_value_from_db(name_list, data, datashape):
    """
    Get name and data from db.

    Each channel listed in *name_list* is pulled from *data* and reshaped to
    the 2D scan grid given by *datashape*; channels shorter than the grid
    (stopped/aborted scans) are zero-padded.

    Parameters
    ----------
    name_list : iterable
        Keys to extract from *data*.
    data : mapping
        Maps each key to a 1D sequence of values.  Side effect (kept for
        backward compatibility): each accessed entry is replaced in place
        by its ndarray form.
    datashape : tuple
        (rows, cols) of the scan grid.

    Returns
    -------
    (list of str, ndarray)
        Channel names and an array of shape (rows, cols, len(name_list)).
    """
    pos_names = []
    pos_data = np.zeros([datashape[0], datashape[1], len(name_list)])
    for i, v in enumerate(name_list):
        # Fixed-size buffer keeps the output shape stable even when the scan
        # was stopped/aborted and fewer points were recorded.
        posv = np.zeros(datashape[0] * datashape[1])
        # Convert once (the original converted twice); data[v] might be a list.
        arr = np.asarray(data[v])
        data[v] = arr  # preserve the historical in-place side effect
        posv[: arr.shape[0]] = arr
        pos_data[:, :, i] = posv.reshape([datashape[0], datashape[1]])
        pos_names.append(str(v))
    return pos_names, pos_data
def map_data2D(
    data,
    datashape,
    det_list=("xspress3_ch1", "xspress3_ch2", "xspress3_ch3"),
    pos_list=("zpssx[um]", "zpssy[um]"),
    scaler_list=("sclr1_ch3", "sclr1_ch4"),
    create_each_det=False,
    fly_type=None,
    subscan_dims=None,
    spectrum_len=4096,
):
    """
    Data is obained from databroker. Transfer items from data to a dictionay of
    numpy array, which has 2D shape same as scanning area.

    This function can handle stopped/aborted scans. Raster scan (snake scan) is
    also considered.

    Parameters
    ----------
    data : pandas.core.frame.DataFrame
        data from data broker
    datashape : tuple or list
        shape of two D image
    det_list : list, tuple, optional
        list of detector channels
    pos_list : list, tuple, optional
        list of pos pv
    scaler_list : list, tuple, optional
        list of scaler pv
    create_each_det : bool, optional
        if True, also store each individual detector channel ('det1', 'det2',
        ...) in the output, not only their sum
    fly_type : string or optional
        raster scan (snake scan) or normal
    subscan_dims : 1D array or optional
        used at HXN, 2D of a large area is split into small area scans
    spectrum_len : int, optional
        standard spectrum length

    Returns
    -------
    dict of numpy array
        keys: optionally 'det1', 'det2', ..., always 'det_sum',
        'pos_names', 'pos_data', 'scaler_names', 'scaler_data'
    """
    data_output = {}
    new_v_shape = datashape[0]  # updated if scan is not completed
    sum_data = None
    for n, c_name in enumerate(det_list):
        if c_name in data:
            detname = "det" + str(n + 1)
            logger.info("read data from %s" % c_name)
            channel_data = data[c_name]
            # new veritcal shape is defined to ignore zeros points caused by stopped/aborted scans
            new_v_shape = len(channel_data) // datashape[1]
            new_data = np.vstack(channel_data)
            new_data = new_data.astype(np.float32, copy=False)  # Change representation to np.float32
            new_data = new_data[: new_v_shape * datashape[1], :]
            new_data = new_data.reshape([new_v_shape, datashape[1], len(channel_data[1])])
            if new_data.shape[2] != spectrum_len:
                # merlin detector has spectrum len 2048
                # make all the spectrum len to 4096, to avoid unpredicted error in fitting part
                new_tmp = np.zeros([new_data.shape[0], new_data.shape[1], spectrum_len], dtype=np.float32)
                new_tmp[:, :, : new_data.shape[2]] = new_data
                new_data = new_tmp
            if fly_type in ("pyramid",):
                # Even rows of a snake scan are recorded right-to-left; flip them back.
                new_data = flip_data(new_data, subscan_dims=subscan_dims)
            if create_each_det:
                data_output[detname] = new_data
            if sum_data is None:
                # Note: Here is the place where the error was found!!!
                # The assignment in the next line used to be written as
                #     sum_data = new_data
                # i.e. reference to data from 'det1' was assigned to 'sum_data'.
                # After computation of the sum, both 'sum_data' and detector 'det1'
                # were referencing the same ndarray, holding the sum of values
                # from detector channels 'det1', 'det2' and 'det3'. In addition, the sum is
                # computed again before data is saved into '.h5' file.
                # The algorithm for computing of the second sum is working correctly,
                # but since 'det1' already contains the true sum 'det1'+'det2'+'det3',
                # the computed sum equals 'det1'+2*'det2'+2*'det3'.
                # The problem was fixed by replacing assignment of reference during
                # initalization of 'sum_data' by copying the array.
                # The error is documented because the code was used for a long time
                # for initial processing of XRF imaging data at HXN beamline.
                sum_data = np.copy(new_data)
            else:
                sum_data += new_data
    data_output["det_sum"] = sum_data

    # scanning position data
    pos_names, pos_data = get_name_value_from_db(pos_list, data, datashape)
    # Normalize axis names: anything containing 'x'/'y' becomes x_pos/y_pos;
    # fall back to a fixed pair when the naming is inconsistent.
    for i in range(len(pos_names)):
        if "x" in pos_names[i]:
            pos_names[i] = "x_pos"
        elif "y" in pos_names[i]:
            pos_names[i] = "y_pos"
    if "x_pos" not in pos_names or "y_pos" not in pos_names:
        pos_names = ["x_pos", "y_pos"]
    if fly_type in ("pyramid",):
        for i in range(pos_data.shape[2]):
            # flip position the same as data flip on det counts
            pos_data[:, :, i] = flip_data(pos_data[:, :, i], subscan_dims=subscan_dims)
    # Reorder to [axis, rows, cols] as expected downstream.
    new_p = np.zeros([len(pos_names), pos_data.shape[0], pos_data.shape[1]])
    for i in range(len(pos_names)):
        new_p[i, :, :] = pos_data[:, :, i]
    data_output["pos_names"] = pos_names
    data_output["pos_data"] = new_p

    # scaler data
    scaler_names, scaler_data = get_name_value_from_db(scaler_list, data, datashape)
    if fly_type in ("pyramid",):
        scaler_data = flip_data(scaler_data, subscan_dims=subscan_dims)
    data_output["scaler_names"] = scaler_names
    data_output["scaler_data"] = scaler_data
    return data_output
def _get_fpath_not_existing(fpath):
# Returns path to the new file that is guaranteed to not exist
# The function cycles through paths obtained by inserting
# version number between name and extension in the prototype path ``fpath``
# The version number is inserted in the form ``filename_v2.ext``
if os.path.exists(fpath):
p, e = os.path.splitext(fpath)
n = 1
while True:
fpath = f"{p}_v{n}{e}"
if not os.path.exists(fpath):
break
n += 1
return fpath
def save_data_to_hdf5(
fpath, data, *, metadata=None, fname_add_version=False, file_overwrite_existing=False, create_each_det=True
):
"""
This is the function used to save raw experiment data into HDF5 file. The raw data is
represented as a dictionary with the following keys:
keys 'det1', 'det2' etc. - 3D ndarrays of size (N, M, K) where NxM are dimensions of the map
and K is the number of spectrum points (4096) contain data from the detector channels 1, 2, 3 etc.
key 'det_sum' - 3D ndarray with the same dimensions as 'det1' contains the sum of the channels
key 'scaler_names' - the list of scaler names
key 'scaler_data' - 3D ndarray of scaler values. The array shape is (N, M, P), where P is
the number of scaler names.
key 'pos_names' - the list of position (axis) names, must contain the names 'x_pos' and 'y_pos'
in correct order.
key 'pos_data' - 3D ndarray with position values. The array must have size (2, N, M). The first
index is the number of the position name 'pos_names' list.
Parameters
----------
fpath: str
Full path to the HDF5 file. The function creates an new HDF5 file. If file already exists
and ``file_overwrite_existing=False``, then the IOError exception is raised.
data : dict
The dictionary of raw data.
metadata : dict
Metadata to be saved in the HDF5 file. The function will add or overwrite the existing
metadata fields: ``file_type``, ``file_format``, ``file_format_version``, ``file_created_time``.
User may define metadata fields ``file_software`` and ``file_software_version``. If ``file_software``
is not defined, then the default values for ``file_software`` and ``file_software_version`` are added.
fname_add_version : boolean
True: if | |
a dictionary with
NumSPIBytesTransferred and SPIBytes.
"""
if not isinstance(SPIBytes, list):
raise LabJackException("SPIBytes MUST be a list of bytes")
numSPIBytes = len(SPIBytes)
oddPacket = False
if numSPIBytes%2 != 0:
SPIBytes.append(0)
numSPIBytes = numSPIBytes + 1
oddPacket = True
command = [ 0 ] * (13 + numSPIBytes)
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 4 + (numSPIBytes//2)
command[3] = 0x3A
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
if AutoCS:
command[6] |= (1 << 7)
if DisableDirConfig:
command[6] |= (1 << 6)
command[6] |= ( self.SPIModes[SPIMode] & 3 )
command[7] = SPIClockFactor
#command[8] = Reserved
command[9] = CSPINNum
command[10] = CLKPinNum
command[11] = MISOPinNum
command[12] = MOSIPinNum
command[13] = numSPIBytes
if oddPacket:
command[13] = numSPIBytes - 1
command[14:] = SPIBytes
result = self._writeRead(command, 8+numSPIBytes, [ 0xF8, 1+(numSPIBytes//2), 0x3A ])
if result[6] != 0:
raise LowlevelErrorException(result[6], "The spi command returned an error:\n %s" % lowlevelErrorToString(result[6]))
return { 'NumSPIBytesTransferred' : result[7], 'SPIBytes' : result[8:] }
spi.section = 2
def asynchConfig(self, Update = True, UARTEnable = True, DesiredBaud = 9600, olderHardware = False, configurePins = True ):
    """
    Name: U3.asynchConfig(Update = True, UARTEnable = True,
                          DesiredBaud = 9600, olderHardware = False,
                          configurePins = True)
    Args: See section 5.2.16 of the User's Guide.
          olderHardware, If using hardware 1.21, please set olderHardware
                         to True and read the timer configuration first.
          configurePins, Will call the configIO to set up pins for you.
    Desc: Configures the U3 UART for asynchronous communication.
          returns a dictionary:
          {
              'Update' : True means new parameters were written
              'UARTEnable' : True means the UART is enabled
              'BaudFactor' : The baud factor being used
          }
    Note: Requires U3 hardware version 1.21+.
    """
    if configurePins:
        self.configIO(EnableUART=True)

    command = [ 0 ] * 10

    # Bytes 0, 4 and 5 are checksums filled in by _writeRead; byte 6 is 0x00.
    command[1] = 0xF8
    command[2] = 0x02
    command[3] = 0x14
    if Update:
        command[7] |= ( 1 << 7 )
    if UARTEnable:
        command[7] |= ( 1 << 6 )
    #command[8] = Reserved
    if olderHardware:
        # Hardware 1.21: single-byte baud factor derived from the timer clock.
        command[9] = (2**8) - self.timerClockBase//DesiredBaud
    else:
        # Newer hardware: 16-bit baud factor, packed little-endian into bytes 8-9.
        BaudFactor = (2**16) - 48000000//(2 * DesiredBaud)
        t = struct.pack("<H", BaudFactor)
        command[8] = t[0]
        command[9] = t[1]

    # Fixed: the old code had ``if olderHardware: ... else: ...`` around two
    # byte-identical _writeRead calls — a dead conditional.
    result = self._writeRead(command, 10, [0xF8, 0x02, 0x14])

    returnDict = {}
    returnDict['Update'] = bool(( result[7] >> 7 ) & 1)
    returnDict['UARTEnable'] = bool(( result[7] >> 6 ) & 1)
    if olderHardware:
        returnDict['BaudFactor'] = result[9]
    else:
        returnDict['BaudFactor'] = struct.unpack("<H", struct.pack("BB", *result[8:]))[0]

    return returnDict
asynchConfig.section = 2
def asynchTX(self, AsynchBytes):
    """
    Name: U3.asynchTX(AsynchBytes)
    Args: AsynchBytes, must be a list of bytes to transfer.
    Desc: Sends bytes to the U3 UART which will be sent asynchronously on
          the transmit line. See section 5.2.17 of the user's guide.
          returns a dictionary:
          {
              'NumAsynchBytesSent' : Number of Asynch Bytes Sent
              'NumAsynchBytesInRXBuffer' : How many bytes are currently in
                                           the RX buffer.
          }
    Note: Requires U3 hardware version 1.21 or greater.
    """
    if not isinstance(AsynchBytes, list):
        raise LabJackException("AsynchBytes must be a list")

    numBytes = len(AsynchBytes)
    oddPacket = numBytes % 2 != 0
    if oddPacket:
        # Pad the payload to an even length; the true (odd) byte count is
        # still reported to the device in command byte 7.
        AsynchBytes.append(0)
        numBytes += 1

    command = [0] * (8 + numBytes)
    # Bytes 0, 4 and 5 are checksums computed by _writeRead; byte 6 is 0x00.
    command[1] = 0xF8
    command[2] = 1 + numBytes // 2
    command[3] = 0x15
    command[7] = numBytes - 1 if oddPacket else numBytes
    command[8:] = AsynchBytes

    result = self._writeRead(command, 10, [0xF8, 0x02, 0x15])
    return {'NumAsynchBytesSent': result[7],
            'NumAsynchBytesInRXBuffer': result[8]}
asynchTX.section = 2
def asynchRX(self, Flush = False):
    """
    Name: U3.asynchRX(Flush = False)
    Args: Flush, Set to True to flush
    Desc: Reads the oldest 32 bytes from the U3 UART RX buffer
          (received on receive terminal). The buffer holds 256 bytes. See
          section 5.2.18 of the User's Guide.
          returns a dictionary:
          {
              'AsynchBytes' : List of received bytes
              'NumAsynchBytesInRXBuffer' : Number of AsynchBytes in the RX
                                           buffer.
          }
    Note: Requires U3 hardware version 1.21 or greater.
    """
    command = [0] * 8
    # Bytes 0, 4 and 5 are checksums computed by _writeRead; byte 6 is 0x00.
    command[1] = 0xF8
    command[2] = 0x01
    command[3] = 0x16
    command[7] = 1 if Flush else 0

    result = self._writeRead(command, 40, [0xF8, 0x11, 0x16])
    return {'AsynchBytes': result[8:],
            'NumAsynchBytesInRXBuffer': result[7]}
asynchRX.section = 2
def i2c(self, Address, I2CBytes, EnableClockStretching = False, NoStopWhenRestarting = False, ResetAtStart = False, SpeedAdjust = 0, SDAPinNum = 6, SCLPinNum = 7, NumI2CBytesToReceive = 0, AddressByte = None):
    """
    Name: U3.i2c(Address, I2CBytes, EnableClockStretching = False,
                 NoStopWhenRestarting = False, ResetAtStart = False,
                 SpeedAdjust = 0, SDAPinNum = 6, SCLPinNum = 7,
                 NumI2CBytesToReceive = 0, AddressByte = None)
    Args: Address, the address (not shifted over)
          I2CBytes, must be a list of bytes to send.
          See section 5.2.19 of the user's guide.
          AddressByte, use this if you don't want a shift applied.
                       This address will be put in the low-level
                       packet directly and overrides Address. Optional.
    Desc: Sends and receives serial data using I2C synchronous
          communication.
    Note: Requires hardware version 1.21 or greater.
    """
    if not isinstance(I2CBytes, list):
        raise LabJackException("I2CBytes must be a list")

    numBytes = len(I2CBytes)
    oddPacket = False
    if numBytes%2 != 0:
        # Pad to an even payload; the real (odd) count still goes in byte 12.
        I2CBytes.append(0)
        numBytes = numBytes + 1
        oddPacket = True

    command = [ 0 ] * (14 + numBytes)
    # Bytes 0, 4 and 5 are checksums filled in by _writeRead.
    command[1] = 0xF8
    command[2] = 4 + (numBytes//2)
    command[3] = 0x3B
    if ResetAtStart:
        command[6] |= (1 << 1)
    if NoStopWhenRestarting:
        command[6] |= (1 << 2)
    if EnableClockStretching:
        command[6] |= (1 << 3)

    command[7] = SpeedAdjust
    command[8] = SDAPinNum
    command[9] = SCLPinNum
    # Fixed idiom: compare with ``is not None`` rather than ``!= None``.
    if AddressByte is not None:
        command[10] = AddressByte
    else:
        command[10] = Address << 1
    command[12] = numBytes
    if oddPacket:
        command[12] = numBytes-1
    command[13] = NumI2CBytesToReceive
    command[14:] = I2CBytes

    oddResponse = False
    if NumI2CBytesToReceive%2 != 0:
        # Responses are also padded to an even length by the device.
        NumI2CBytesToReceive = NumI2CBytesToReceive+1
        oddResponse = True

    result = self._writeRead(command, 12+NumI2CBytesToReceive, [0xF8, (3+(NumI2CBytesToReceive//2)), 0x3B])

    if len(result) > 12:
        if oddResponse:
            # Drop the padding byte appended to make the response even.
            return { 'AckArray' : result[8:12], 'I2CBytes' : result[12:-1] }
        else:
            return { 'AckArray' : result[8:12], 'I2CBytes' : result[12:] }
    else:
        return { 'AckArray' : result[8:], 'I2CBytes' : [] }
i2c.section = 2
def sht1x(self, DataPinNum = 4, ClockPinNum = 5, SHTOptions = 0xc0):
"""
Name: U3.sht1x(DataPinNum = 4, ClockPinNum = 5, SHTOptions = 0xc0)
Args: See section 5.2.20 of the user's guide.
SHTOptions, see below.
Desc: Reads temperature and humidity from a Sensirion SHT1X sensor
(which is used by the EI-1050).
Returns a dictonary:
{
'StatusReg' : SHT1X status register
'StatusRegCRC' : SHT1X status register CRC value
'Temperature' : The temperature in C
'TemperatureCRC' : The CRC value for the temperature
'Humidity' : The humidity
'HumidityCRC' : The CRC value for the humidity
}
Note: Requires hardware version 1.21 or greater.
SHTOptions (and proof people read documentation):
bit 7 = Read Temperature
bit 6 = Read Realtive Humidity
bit 2 = Heater. 1 = on, 0 = off
bit 1 = Reserved at 0
bit 0 = Resolution. 1 = 8 bit RH, 12 bit T; 0 = 12 RH, 14 bit T
"""
command = [ 0 ] * 10
#command[0] = Checksum8
command[1] = 0xF8
command[2] = 0x02
command[3] = 0x39
#command[4] = Checksum16 (LSB)
#command[5] = Checksum16 (MSB)
command[6] = DataPinNum
command[7] = ClockPinNum
#command[8] = Reserved
command[9] = SHTOptions
result = self._writeRead(command, 16, [0xF8, 0x05, 0x39])
val = (result[11]*256) + result[10]
temp = -39.60 + | |
the default of `align_corners` was ``None``, and it becomes ``True``
if `mode` is linear, otherwise ``False``.
.. warning::
Up to the version 1.8.0, the nearest `mode` interpolation corresponds to
the nearest `mode` and `half_pixel_for_nn` = ``True`` after the version 1.8.0.
'''
from .function_bases import interpolate as interpolate_base
import math
if scale is None and output_size is None:
raise ValueError('Either scale or output_size must be given')
elif output_size is None:
input_size = x.shape[-len(scale)-1:-1] if channel_last \
else x.shape[-len(scale):]
output_size = [int(math.floor(s * d))
for d, s in zip(input_size, scale)]
return interpolate_base(x, output_size, mode, align_corners, half_pixel, half_pixel_for_nn, channel_last)
def sort(x, axis=-1, reverse=False, with_index=False, only_index=False):
    """Sorts the elements of `x` along a given `axis` in ascending order
    by value. A negative `axis` counts from the last dimension of `x`,
    so the default of -1 sorts along the last dimension. If `reverse`
    is True, then the elements are sorted in descending order.

    If `with_index` is True, result is a tuple ``(sorted, indices)``
    or only ``indices`` if `only_index` is True. Setting `only_index`
    to True implies that `with_index` is also True.

    .. code-block:: python

        import numpy as np
        import nnabla as nn
        import nnabla.functions as F

        nn.set_auto_forward(True)
        x = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4))

        sorted = F.sort(x)
        assert np.allclose(sorted.d, np.sort(x.d))

        sorted, indices = F.sort(x, with_index=True)
        assert np.allclose(sorted.d, np.sort(x.d))
        assert np.all(indices.d == np.argsort(x.d))

        indices = F.sort(x, only_index=True)
        assert np.all(indices.d == np.argsort(x.d))

    Args:
        x(~nnabla.Variable): N-D array
        axis(int): Axis along which to sort.
        reverse(bool): Sort in descending order.
        with_index(bool): Return sorted values and index.
        only_index(bool): Return only the sort index.

    Returns: ~nnabla.Variable `sorted` or ~nnabla.Variable `indices` or (~nnabla.Variable `sorted`, ~nnabla.Variable `indices`)
    """
    from .function_bases import sort as sort_base
    # Two outputs (values, indices) only when the caller wants both;
    # docstring typo "soreted" fixed above.
    n_outputs = 2 if with_index and not only_index else 1
    return sort_base(x, axis, reverse, with_index, only_index, n_outputs)
def tile(x, reps):
    """Forward `x` repeated the number of times given by `reps`. If `reps` is
    a sequence, the output has dimension of ``d = max(len(reps), x.ndim)`` and
    either `x` is promoted to be d-dimensional by prepending new axes or `reps`
    is promoted to x.ndim by prepending 1's.

    Args:
        x(~nnabla.Variable): Input N-D array.
        reps(int or sequence of int): Repetitions of `x` along each axis.

    Returns:
        ~nnabla.Variable: N-D array.

    >>> import numpy as np, nnabla as nn, nnabla.functions as F
    >>> F.tile(nn.Variable([2, 3]), 3).shape    # reps is promoted to [1, 3]
    (2, 9)
    >>> F.tile(nn.Variable([3]), [2, 3]).shape  # x is promoted to shape (1, 3)
    (2, 9)
    >>> nn.set_auto_forward(True)
    >>> x = nn.Variable.from_numpy_array(np.array([1, 2, 3]))
    >>> print(F.tile(x, 3).d)
    [1. 2. 3. 1. 2. 3. 1. 2. 3.]
    >>> print(F.tile(x, [2, 3]).d)
    [[1. 2. 3. 1. 2. 3. 1. 2. 3.]
     [1. 2. 3. 1. 2. 3. 1. 2. 3.]]
    >>> x = nn.Variable.from_numpy_array(np.array([[1, 3], [2, 4]]))
    >>> print(F.tile(x, 3).d)
    [[1. 3. 1. 3. 1. 3.]
     [2. 4. 2. 4. 2. 4.]]
    >>> print(F.tile(x, [2, 3]).d)
    [[1. 3. 1. 3. 1. 3.]
     [2. 4. 2. 4. 2. 4.]
     [1. 3. 1. 3. 1. 3.]
     [2. 4. 2. 4. 2. 4.]]
    """
    from .function_bases import tile as tile_base
    # A bare int is treated as a one-element repetition list.
    if isinstance(reps, int):
        reps = [reps]
    return tile_base(x, reps)
def stft(x, window_size, stride, fft_size, window_type='hanning', center=True, pad_mode='reflect'):
    """Computes the short-time Fourier transform.

    Args:
        x (~nnabla.Variable): Time domain sequence of size `batch_size x sample_size`.
        window_size (int): Size of STFT analysis window.
        stride (int): Number of samples that we shift the window, also called `hop size`.
        fft_size (int): Size of the FFT, the output will have `fft_size // 2 + 1` frequency bins.
        window_type (str): Analysis window, can be either `hanning`, `hamming` or `rectangular`.
            For convenience, also `window_type=None` is supported which is equivalent to `window_type='rectangular'`.
        center (bool): If `True`, then the signal `x` is padded by half the FFT size using reflection padding.
        pad_mode (str): Padding mode, which can be `'constant'` or `'reflect'`. `'constant'` pads with `0`.

    Returns:
        Returns real and imaginary parts of STFT result.

        * :obj:`~nnabla.Variable`: Real part of STFT of size `batch_size x fft_size//2 + 1 x frame_size`.
        * :obj:`~nnabla.Variable`: Imaginary part of STFT of size `batch x fft_size//2 + 1 x frame_size`.
    """
    from .function_bases import stft as stft_base
    # ``None`` is accepted as a convenience alias for a rectangular window.
    effective_window = "rectangular" if window_type is None else window_type
    return stft_base(x, window_size, stride, fft_size, effective_window, center, pad_mode)
def _stft_v1(x, window_size, stride, fft_size, window_type='hanning', center=True, pad_mode='reflect'):
    """Computes the short-time Fourier transform.

    Implemented as a pair of 1-D convolutions with fixed DFT filter banks
    (one for the real part, one for the imaginary part), which are cached
    in the parameter scope under ``conv_r``/``conv_i``.

    Args:
        x (~nnabla.Variable): Time domain sequence of size `batch_size x sample_size`.
        window_size (int): Size of STFT analysis window.
        stride (int): Number of samples that we shift the window, also called `hop size`.
        fft_size (int): Size of the FFT, the output will have `fft_size // 2 + 1` frequency bins.
        window_type (str): Analysis window, can be either `hanning`, `hamming` or `rectangular`.
            For convenience, also `window_type=None` is supported which is equivalent to `window_type='rectangular'`.
        center (bool): If `True`, then the signal `x` is padded by half the FFT size using reflection padding.
        pad_mode (str): Padding mode, which can be `'constant'` or `'reflect'`. `'constant'` pads with `0`.

    Returns:
        Returns real and imaginary parts of STFT result.

        * :obj:`~nnabla.Variable`: Real part of STFT of size `batch_size x fft_size//2 + 1 x frame_size`.
        * :obj:`~nnabla.Variable`: Imaginary part of STFT of size `batch x fft_size//2 + 1 x frame_size`.
    """
    from nnabla.parameter import get_parameter, get_parameter_or_create
    # Reuse filter banks if they were already created in this parameter scope.
    conv_r = get_parameter('conv_r')
    conv_i = get_parameter('conv_i')
    if conv_r is None or conv_i is None:
        if window_type == 'hanning':
            # Periodic window: compute one extra point and drop the last.
            window_func = np.hanning(window_size + 1)[:-1]
        elif window_type == 'hamming':
            window_func = np.hamming(window_size + 1)[:-1]
        elif window_type == 'rectangular' or window_type is None:
            window_func = np.ones(window_size)
        else:
            raise ValueError("Unknown window type {}.".format(window_type))
        # pad window if `fft_size > window_size`
        if fft_size > window_size:
            diff = fft_size - window_size
            window_func = np.pad(
                window_func, (diff//2, diff - diff//2), mode='constant')
        elif fft_size < window_size:
            raise ValueError(
                "FFT size has to be as least as large as window size.")
        # compute STFT filter coefficients:
        # mat_r[w, 0, t] / mat_i[w, 0, t] hold the real / imaginary DFT basis
        # for frequency bin w at sample t, later weighted by the window.
        mat_r = np.zeros((fft_size//2 + 1, 1, fft_size))
        mat_i = np.zeros((fft_size//2 + 1, 1, fft_size))
        for w in range(fft_size//2+1):
            for t in range(fft_size):
                mat_r[w, 0, t] = np.cos(2. * np.pi * w * t / fft_size)
                mat_i[w, 0, t] = -np.sin(2. * np.pi * w * t / fft_size)
        mat_r = mat_r * window_func
        mat_i = mat_i * window_func
        # Cache the banks as non-trainable parameters.
        conv_r = get_parameter_or_create(
            'conv_r', initializer=mat_r, need_grad=False)
        conv_i = get_parameter_or_create(
            'conv_i', initializer=mat_i, need_grad=False)
    if center:
        # pad at begin/end (per default this is a reflection padding)
        x = pad(x, (fft_size // 2, fft_size // 2), mode=pad_mode)
    # add channel dimension
    x = reshape(x, (x.shape[0], 1, x.shape[1]))
    # compute STFT
    y_r = convolution(x, conv_r, stride=(stride,))
    y_i = convolution(x, conv_i, stride=(stride,))
    return y_r, y_i
def istft(y_r, y_i, window_size, stride, fft_size, window_type='hanning', center=True):
    """Computes the inverse short-time Fourier transform.

    Note: We use a constant square inverse window for the reconstruction
    of the time-domain signal, therefore, the first and last
    `window_size - stride` are not perfectly reconstructed.

    Args:
        y_r (~nnabla.Variable): Real part of STFT of size `batch_size x fft_size//2 + 1 x frame_size`.
        y_i (~nnabla.Variable): Imaginary part of STFT of size `batch_size x fft_size//2 + 1 x frame_size`.
        window_size (int): Size of STFT analysis window.
        stride (int): Number of samples that we shift the window, also called `hop size`.
        fft_size (int): Size of the FFT, (STFT has `fft_size // 2 + 1` frequency bins).
        window_type (str): Analysis window, can be either `hanning`, `hamming` or `rectangular`.
            For convenience, also `window_type=None` is supported which is equivalent to `window_type='rectangular'`.
        center (bool): If `True`, then it is assumed that the time-domain signal has centered frames.

    Returns:
        ~nnabla.Variable: Time domain sequence of size `batch_size x sample_size`.
    """
    from .function_bases import istft as istft_base
    # ``None`` is accepted as a convenience alias for a rectangular window.
    effective_window = "rectangular" if window_type is None else window_type
    return istft_base(y_r, y_i, window_size, stride, fft_size, effective_window, center)
def _istft_v1(y_r, y_i, window_size, stride, fft_size, window_type='hanning', center=True):
"""Computes the inverse shoft-time Fourier transform
Note: We use a constant square inverse window for the reconstruction
of the time-domain signal, therefore, the first and last
`window_size - stride` are not perfectly reconstructed.
Args:
y_r (~nnabla.Variable): Real part of STFT of size `batch_size x fft_size//2 + 1 x frame_size`.
| |
"""
Copyright: <NAME>
eMail: <EMAIL>
License: MIT
File which handles Network connections.
We have 5 different classes in this File. Decide the class you need:
NetServer: Simple TCP server with an update method and callbacks.
NetClient: Handles clients of a NetServer with receive and send method.
NetworkServer: TCP server with thread for handling incoming connections.
ClientHandler: Handles clients of a NetworkServer with threads for sending and receiving.
NetworkClient: TCP client that connects to a TCP server.
"""
import socket
import select
import threading
import sys
import struct
import time
from retrying import retry
from queue import Queue
DEBUG = False
ECHO_BACK = False
class NetServer():
    """
    Simple single-threaded TCP server.

    Newly connected sockets are wrapped in ``NetClient`` instances and
    returned from :meth:`update` (or delivered via ``connectedCallback``
    when polling runs in a background thread).

    .. code-block:: python3

        netServer = NetServer("0.0.0.0", 54321)
        netServer.start()
        netClients = []
        while True:
            netClient = netServer.update()
            if netClient is not None:
                netClient.sendMsg("Hello")
                netClients.append(netClient)
            time.sleep(1.0)
        netServer.stop()
    """

    def __init__(self, address, port, connectedCallback=None, logger=print, updateInThread=False):
        """
        Init the Network class (socket is created but not yet bound).

        :param address: Ip-adress
        :type address: str
        :param port: The port on which the server should listen
        :type port: int
        :param connectedCallback: Optional CB invoked with the new NetClient
            when a client connects
        :type connectedCallback: function
        :param logger: Logging function, or None to disable logging
        :param updateInThread: If True, start() spawns a daemon thread that
            polls update() periodically
        :type updateInThread: bool
        """
        self.logger = logger
        self.address = address
        self.port = port
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow quick restarts on the same port.
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.clients = []
        self.connectedCallback = connectedCallback
        self.updateInThread = updateInThread
        self._running = False
        self._updateThread = threading.Thread(target=self._update, daemon=True)

    def start(self):
        """ Bind the socket and start listening (optionally in a thread). """
        self.server.bind((self.address, self.port))
        # start listening for clients
        self.server.listen(5)
        # Non-blocking so update() never stalls in accept().
        self.server.setblocking(0)
        if self.logger: self.logger("+++ Server listening on port %i" % self.port)
        if self.updateInThread:
            self._running = True
            self._updateThread.start()

    def stop(self):
        """ Signal the update thread to exit and close the listening socket. """
        # Fixed: this used to set ``self._running = True``, which kept the
        # background update thread alive after stop() was called.
        self._running = False
        if self.logger: self.logger("Stopping Server")
        if self.updateInThread:
            # The daemon thread exits on its own once _running is False;
            # joining here could block, so we deliberately don't.
            pass
        self.server.close()

    def _update(self):
        # Poll loop used when updateInThread=True.
        while self._running:
            self.update()
            time.sleep(0.1)

    def update(self):
        """
        Poll for a newly connected client.

        :return: Client connected or None
        :rtype: NetClient
        """
        try:
            readable, writable, errored = select.select([self.server], [], [], 0)
        except ValueError:
            # The server socket was closed during/before the select call.
            return None
        for s in readable:
            if s is self.server:
                clientSock, addr = self.server.accept()
                client = NetClient(clientSock, addr[0], addr[1], logger=self.logger)
                if self.connectedCallback: self.connectedCallback(client)
                return client
        return None
class NetClient():
    """
    Class which handles a single non-blocking tcp client socket.

    This class uses no threads; call :meth:`handleSend` in a loop to drain
    the outgoing queue and :meth:`receive` to read newline-terminated
    messages.
    """

    # Init function
    def __init__(self, clientsocket, address, port, disconnectedCallback=None, logger=print):
        """
        Init the netclient.

        :param clientsocket: The socket the client reads from and writes to
        :type clientsocket: socket
        :param address: The address of the given socket
        :type address: str
        :param port: The port of the given socket
        :type port: int
        :param disconnectedCallback: Optional CB invoked with this client
            when the connection drops
        :param logger: Logging function, or None to disable logging
        """
        self.logger = logger
        self.sock = clientsocket
        self.address = address
        self.port = port
        self.connected()
        # Non-blocking: send/receive never stall the caller.
        self.sock.setblocking(0)
        self.receiveBuffer = b''
        self.send_queue = Queue(maxsize=0)
        self.send_data = b''
        self.disconnectedCallback = disconnectedCallback

    def connected(self):
        """ Is called in init to indicate connection. """
        if self.logger: self.logger("Client with address: %s connected at port %s" % (self.address, str(self.port)))

    def disconnected(self):
        """ Is called when the client disconnects, CB is notified. """
        if self.logger: self.logger("Client with address: %s disconnected" % (self.address))
        if self.disconnectedCallback: self.disconnectedCallback(self)

    def close(self):
        """
        Close the underlying socket and report the disconnect.

        Fixed: the send paths below always called ``self.close()``, but no
        such method existed, so every fatal send error raised AttributeError.
        """
        try:
            self.sock.close()
        finally:
            self.disconnected()

    def sendMsg(self, msg, blocking=False):
        """
        Send a text message over the socket; ``\\r\\n`` is appended.

        :param msg: The message to send to the client
        :type msg: str
        :param blocking: If the msg should be sent blocking or non blocking.
        :type blocking: bool, default=False
        """
        reply = bytes(msg + "\r\n", 'utf-8')
        self.sendData(reply, lengthPrefix=False, blocking=blocking)

    def sendData(self, data, lengthPrefix=False, blocking=False):
        """
        Send raw data over the socket.

        :param data: The payload to send to the client
        :type data: bytes
        :param lengthPrefix: If True, prefix the payload with its length as
            a 4-byte big-endian unsigned int
        :type lengthPrefix: bool, default=False
        :param blocking: If the data should be sent blocking (sendall) or
            queued for :meth:`handleSend`
        :type blocking: bool, default=False
        """
        s_data = b''
        if lengthPrefix: s_data = struct.pack('!I', len(data))
        s_data += data
        if blocking:
            try:
                self.sock.sendall(s_data)
            except socket.error as err:
                if self.logger: self.logger("Send failed. Error: %s" % str(err))
                if isinstance(err.args, tuple):
                    # errno 11 is EAGAIN/EWOULDBLOCK (socket buffer full,
                    # not fatal) -- the old comment calling it "Broken Pipe"
                    # was wrong.
                    if err.errno == 11:
                        if self.logger: self.logger("ignoring..")
                    else: self.close()
                else: self.close()
        else:
            self.send_queue.put(s_data)

    def isSending(self):
        """
        Returns true if sendbuffer is non empty.

        :return: If still sth in send buffer
        :rtype: bool
        """
        return len(self.send_data) > 0 or not self.send_queue.empty()

    def handleSend(self):
        """ Drain the outgoing queue; call this periodically. """
        # handle non blocking sending
        if len(self.send_data) > 0:
            try:
                sent = self.sock.send(self.send_data)
                self.send_data = self.send_data[sent:]
            except socket.error as err:
                if self.logger: self.logger("Send failed. Error: %s" % str(err))
                if isinstance(err.args, tuple):
                    # errno 11 is EAGAIN/EWOULDBLOCK -- retry next call.
                    if err.errno == 11:
                        if self.logger: self.logger("ignoring..")
                    else: self.close()
                else: self.close()
        else:
            if not self.send_queue.empty():
                self.send_data = self.send_queue.get()
                self.send_queue.task_done()

    def available(self, timeout=0):
        """
        Returns true if data is available to read.

        :param timeout: Optional timeout parameter for waiting.
        :type timeout: float
        :return: If data is available to be read
        :rtype: bool
        """
        ready_to_read, ready_to_write, in_error = select.select([self.sock], [], [self.sock], timeout)
        return len(ready_to_read) > 0 or len(self.receiveBuffer) != 0

    def receive(self, msgLen=512):
        """
        Read one newline-terminated message.

        :param msgLen: Optional msg length to be read. It will however stop
            if a newline char is found.
        :type msgLen: int
        :return: Data which is read. b'' if nothing is read, None if client
            disconnected.
        :rtype: binary data
        """
        def _pop_line():
            # Split one complete line (including '\n') off the buffer.
            # Fixed: the old check was ``if index:`` which is falsy for a
            # newline at position 0 and truthy for find()'s -1 "not found".
            index = self.receiveBuffer.find(b'\n')
            if index != -1:
                line = self.receiveBuffer[:index + 1]
                self.receiveBuffer = self.receiveBuffer[index + 1:]
                return line
            return None

        # Fixed: serve already-buffered lines before touching the
        # non-blocking socket, which would raise EWOULDBLOCK and falsely
        # report a disconnect.
        msg = _pop_line()
        if msg is not None:
            return msg
        bytesRcvd = 0
        msg = b''
        try:
            while bytesRcvd < msgLen:
                chunk = self.sock.recv(msgLen - bytesRcvd)
                if not chunk:
                    if not self.receiveBuffer:
                        self.disconnected()
                        return None
                    # Peer closed with a partial line buffered: flush it
                    # instead of looping forever on empty recv() results.
                    msg = self.receiveBuffer
                    self.receiveBuffer = b''
                    break
                bytesRcvd += len(chunk)
                self.receiveBuffer += chunk
                line = _pop_line()
                if line is not None:
                    msg = line
                    break
            return msg
        except socket.error:
            self.disconnected()
            # indicate that socket is dead
            return None
class NetworkServer():
r"""
Class for a more advanced tcp server.
Using this module is as easy as:
.. code-block:: python3
net = NetworkServer(port=54321, ip="0.0.0.0", requestHandler=handlerFunction)
net.startServer()
while True:
net.broadcastMessage("Hello all")
time.sleep(1.0)
net.stopServer()
Can be singleton, using threads for incoming clients. Can broadcast data to all
Clients. You can pass a CB to be notified when clients connect.
"""
# Here will be the instance stored.
__instance = None
@staticmethod
def getInstance(port=2000, ip='127.0.0.1', requestHandler=None, logger=print):
    """ Static access method: return the singleton, creating it on first use. """
    # Fixed: this previously referenced the nonexistent name
    # ``SingletonNetwork`` and raised NameError on every call.
    if NetworkServer.__instance is None:
        NetworkServer(port=port, ip=ip, requestHandler=requestHandler, singleton=True, logger=logger)
    return NetworkServer.__instance
def __init__(self, port=2000, ip='127.0.0.1', requestHandler=None, singleton=False, logger=print):
    """
    Set up (but do not start) the TCP server.

    :param port: The port on which the server should listen
    :type port: int
    :param ip: Ip-adress
    :type ip: str
    :param requestHandler:
        Callback that is called when a tcp client sends a request.
        Parameters are address, message and object of class ClientHandler.
    :type requestHandler: function
    :param singleton: Register this instance as the class-wide singleton.
    :type singleton: bool
    :param logger: Logging function, or None to disable logging.
    """
    if singleton:
        if NetworkServer.__instance != None:
            raise Exception("This class is a singleton!")
        NetworkServer.__instance = self
    self.logger = logger
    # Bind address and port chosen by the user.
    self.TCP_IP = ip
    self.TCP_PORT = port
    self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.clientHandlers = []
    self.running = False
    # Daemon thread that will accept incoming clients once started.
    self.thread = threading.Thread(target=self.__handleClients, daemon=True)
    self.requestHandlers = [] if requestHandler is None else [requestHandler]
def addRequestHandler(self, reqHandler):
    """
    Register a request handler (ignored if already registered).

    :param reqHandler:
        Callback that is called when a tcp client sends a request.
        Parameters are address, message and object of class ClientHandler.
    :type reqHandler: function
    """
    if reqHandler not in self.requestHandlers:
        self.requestHandlers.append(reqHandler)
        # Propagate the handler to every already-connected client handler.
        for handler in self.clientHandlers:
            handler.requestHandlers.append(reqHandler)
def removeRequestHandler(self, reqHandler):
    """
    Remove a previously registered request handler (no-op if unknown).

    Fixed docstring: this method removes a handler; the old docstring was a
    copy-paste of addRequestHandler and claimed it adds one.

    :param reqHandler:
        Callback that was registered via addRequestHandler (or the
        constructor).
    :type reqHandler: function
    """
    if reqHandler not in self.requestHandlers:
        return
    # Remove it from all client handlers as well
    for ch in self.clientHandlers:
        if reqHandler in ch.requestHandlers:
            ch.requestHandlers.remove(reqHandler)
    self.requestHandlers.remove(reqHandler)
def startServer(self):
"""
Call to start the server.
This function will start the tcp server on the specified port.
An exception is called if the socket fails to open.
"""
# start server at given ip adress and port
try:
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((self.TCP_IP, self.TCP_PORT))
| |
start: str or Link, optional
:raises ValueError: link not known or ambiguous
:raises ValueError: [description]
:raises TypeError: unknown type provided
:return: end-effector link, base link, and tool transform of gripper
if applicable
:rtype: Link, Elink, SE3 or None
Helper method to find or validate an end-effector and base link.
"""
# Try cache
# if self._cache_end is not None:
# return self._cache_end, self._cache_start, self._cache_end_tool
tool = None
if end is None:
if len(self.grippers) > 1:
end_ret = self.grippers[0].links[0]
tool = self.grippers[0].tool
if len(self.grippers) > 1:
# Warn user: more than one gripper
print("More than one gripper present, using robot.grippers[0]")
elif len(self.grippers) == 1:
end_ret = self.grippers[0].links[0]
tool = self.grippers[0].tool
# no grippers, use ee link if just one
elif len(self.ee_links) > 1:
end_ret = self.ee_links[0]
if len(self.ee_links) > 1:
# Warn user: more than one EE
print("More than one end-effector present, using robot.ee_links[0]")
else:
end_ret = self.ee_links[0]
# Cache result
self._cache_end = end
self._cache_end_tool = tool
else:
# Check if end corresponds to gripper
for gripper in self.grippers:
if end == gripper or end == gripper.name:
tool = gripper.tool
# end_ret = gripper.links[0]
# otherwise check for end in the links
end_ret = self._getlink(end)
if start is None:
start_ret = self.base_link
# Cache result
self._cache_start = start
else:
# start effector is specified
start_ret = self._getlink(start)
return end_ret, start_ret, tool
def _getlink(
    self,
    link: Union[Link, Gripper, str, None],
    default: Union[Link, Gripper, str, None] = None,
) -> Link:
    """
    Validate a reference to a Link.

    :param link: link, link name, Gripper, or None (meaning ``default``)
    :raises ValueError: link does not belong to this ERobot
    :raises TypeError: bad argument
    :return: link reference

    ``robot._getlink(link)`` is a validated reference to a Link within
    the ERobot ``robot``. If ``link`` is:

    - a ``Link`` reference it is validated as belonging to ``robot``.
    - a string, then it is looked up in the robot's link name dictionary,
      and a Link reference returned.
    - a ``Gripper``, the gripper's base link is returned.
    """
    target = default if link is None else link

    if isinstance(target, str):
        if target in self.link_dict:
            return self.link_dict[target]
        raise ValueError(f"no link named {target}")

    if isinstance(target, BaseLink):
        # Accept links belonging to the robot itself or to any gripper.
        if target in self.links:
            return target
        for gripper in self.grippers:
            if target in gripper.links:
                return target
        raise ValueError("link not in robot links")

    if isinstance(target, Gripper):
        for gripper in self.grippers:
            if target is gripper:
                return gripper.links[0]
        raise ValueError("Gripper not in robot")

    raise TypeError("unknown argument")
# =========================================================================== #
class ERobot(BaseERobot):
def __init__(self, arg, urdf_string=None, urdf_filepath=None, **kwargs):
    """
    Construct an ERobot from another robot, an ETS, or a list of Links.

    :param arg: robot description: an ``ERobot`` to clone, a ``DHRobot``,
        an ``ETS``, or a list of ``Link`` objects
    :param urdf_string: original URDF text, if the robot came from URDF
        (ignored when cloning — the clone inherits the source's string)
    :param urdf_filepath: path of the source URDF file, if any
    :param kwargs: passed through to the ``BaseERobot`` constructor
    :raises TypeError: ``arg`` is not one of the supported types
    """
    if isinstance(arg, ERobot):
        # We're passed an ERobot, clone it
        # We need to preserve the parent link as we copy

        # Copy each link within the robot
        links = [deepcopy(link) for link in arg.links]
        gripper_links = []

        for gripper in arg.grippers:
            glinks = []
            for link in gripper.links:
                glinks.append(deepcopy(link))
            # the first gripper link is the gripper's attachment point
            gripper_links.append(glinks[0])
            links = links + glinks

        # print(links[9] is gripper_links[0])
        # print(gripper_links)

        # Sever parent connection, but save the string
        # The constructor will piece this together for us
        for link in links:
            link._children = []
            if link.parent is not None:
                link._parent_name = link.parent.name
                link._parent = None

        super().__init__(links, gripper_links=gripper_links, **kwargs)

        # copy each gripper's tool transform from the source robot
        for i, gripper in enumerate(self.grippers):
            gripper.tool = arg.grippers[i].tool.copy()

        # if arg.qdlim is not None:
        #     self.qdlim = arg.qdlim

        self._urdf_string = arg.urdf_string
        self._urdf_filepath = arg.urdf_filepath

    else:
        self._urdf_string = urdf_string
        self._urdf_filepath = urdf_filepath

        if isinstance(arg, DHRobot):
            # we're passed a DHRobot object
            # TODO handle dynamic parameters if given
            arg = arg.ets

        if isinstance(arg, ETS):
            # we're passed an ETS string
            links = []
            # chop it up into segments, a link frame after every joint
            parent = None
            for j, ets_j in enumerate(arg.split()):
                elink = Link(ETS(ets_j), parent=parent, name=f"link{j:d}")
                if (
                    elink.qlim is None
                    and elink.v is not None
                    and elink.v.qlim is not None
                ):
                    # joint variable carries limits; promote them to the link
                    elink.qlim = elink.v.qlim
                parent = elink
                links.append(elink)

        elif islistof(arg, Link):
            links = arg

        else:
            raise TypeError("constructor argument must be ETS or list of Link")

        super().__init__(links, **kwargs)
@classmethod
def URDF(cls, file_path, gripper=None):
    """
    Construct an ERobot object from a URDF file.

    :param file_path: path to the URDF file, relative to the xacro folder
    :type file_path: str
    :param gripper: index or name of the gripper link
    :type gripper: int or str
    :raises ValueError: no link with the given name exists
    :raises TypeError: ``gripper`` is neither an int nor a str
    :return: the constructed robot
    :rtype: ERobot

    If ``gripper`` is specified, links from that link outward are removed
    from the rigid-body tree and folded into a ``Gripper`` object.
    """
    # Read the file exactly once.  The previous implementation read it
    # twice: the gripper link was resolved against the first read while
    # the constructor received links from the second, so the gripper
    # link object was not part of the link list passed on.
    links, name, urdf_string, urdf_filepath = ERobot.URDF_read(file_path)

    if gripper is not None:
        if isinstance(gripper, int):
            gripper = links[gripper]
        elif isinstance(gripper, str):
            for link in links:
                if link.name == gripper:
                    gripper = link
                    break
            else:
                raise ValueError(f"no link named {gripper}")
        else:
            raise TypeError("bad argument passed as gripper")

    return cls(
        links,
        name=name,
        gripper_links=gripper,
        urdf_string=urdf_string,
        urdf_filepath=urdf_filepath,
    )
@property
def urdf_string(self):
    """URDF text the robot was built from (``None`` when not URDF-sourced)."""
    return self._urdf_string
@property
def urdf_filepath(self):
    """Path of the source URDF file (``None`` when not URDF-sourced)."""
    return self._urdf_filepath
# --------------------------------------------------------------------- #
def _to_dict(self, robot_alpha=1.0, collision_alpha=0.0):
    """
    Serialise the robot's shapes to a list of dictionaries.

    Visual geometry is included when ``robot_alpha > 0`` and collision
    geometry when ``collision_alpha > 0``; the alpha value is applied
    to each shape before serialisation.  Robot links are emitted first,
    followed by the links of each gripper, in order.
    """
    # self._set_link_fk(self.q)
    shapes = []

    # robot links first, then every gripper's links — same order as the
    # consumer of this list expects
    all_links = list(self.links)
    for gripper in self.grippers:
        all_links.extend(gripper.links)

    for link in all_links:
        if robot_alpha > 0:
            for shape in link.geometry:
                shape.set_alpha(robot_alpha)
                shapes.append(shape.to_dict())
        if collision_alpha > 0:
            for shape in link.collision:
                shape.set_alpha(collision_alpha)
                shapes.append(shape.to_dict())

    return shapes
def _fk_dict(self, robot_alpha=1.0, collision_alpha=0.0):
    """
    Collect forward-kinematics dictionaries for the robot's shapes.

    Visual geometry is included when ``robot_alpha > 0`` and collision
    geometry when ``collision_alpha > 0``.  Robot links come first,
    followed by the links of each gripper, in order.
    """
    shapes = []

    all_links = list(self.links)
    for gripper in self.grippers:
        all_links.extend(gripper.links)

    for link in all_links:
        if robot_alpha > 0:
            shapes.extend(shape.fk_dict() for shape in link.geometry)
        if collision_alpha > 0:
            shapes.extend(shape.fk_dict() for shape in link.collision)

    return shapes
# --------------------------------------------------------------------- #
@staticmethod
def URDF_read(file_path, tld=None, xacro_tld=None):
    """
    Read a URDF file as Links.

    :param file_path: File path relative to the xacro folder
    :type file_path: str, in Posix file path format
    :param tld: A custom top-level directory which holds the xacro data,
        defaults to None
    :type tld: str, optional
    :param xacro_tld: A custom top-level within the xacro data,
        defaults to None
    :type xacro_tld: str, optional
    :return: links, robot name, URDF string, and resolved file path
    :rtype: tuple(Link list, str, str, PurePosixPath)

    File should be specified relative to ``RTBDATA/URDF/xacro``.

    .. note:: If ``tld`` is not supplied, filepath pointing to xacro data
        should be directly under ``RTBDATA/URDF/xacro`` OR under ``./xacro``
        relative to the model file calling this method. If ``tld`` is
        supplied, then ``file_path`` needs to be relative to ``tld``.
    """
    # resolve the top-level directory holding the xacro data
    if tld is None:
        base_path = rtb_path_to_datafile("xacro")
    else:
        base_path = PurePosixPath(tld)

    # add on relative path to get to the URDF or xacro file
    file_path = base_path / PurePosixPath(file_path)
    _, ext = splitext(file_path)

    if ext == ".xacro":
        # it's a xacro file, preprocess it into a URDF string
        if xacro_tld is not None:
            xacro_tld = base_path / PurePosixPath(xacro_tld)
        urdf_string = xacro.main(file_path, xacro_tld)
        try:
            urdf = URDF.loadstr(urdf_string, file_path, base_path)
        except BaseException as e:
            print("error parsing URDF file", file_path)
            raise e
    else:  # pragma nocover
        # plain URDF: use a context manager so the file handle is closed
        # (the previous code leaked the handle from open(...).read())
        with open(file_path) as f:
            urdf_string = f.read()
        urdf = URDF.loadstr(urdf_string, file_path, base_path)

    return urdf.elinks, urdf.name, urdf_string, file_path
# --------------------------------------------------------------------- #
def get_path(self, end=None, start=None):
"""
Find a path from start to end. The end must come after
the start (ie end must be further away from the base link
of the robot than start) in the kinematic chain and both links
must be a part of the same branch within the robot structure. This
method is a work in progress while an approach which generalises
to all applications is designed.
:param end: end-effector or gripper to | |
Backup. If set to true, cloud_backup must also be set to true.
"""
return pulumi.get(self, "pit_enabled")
# NOTE(review): generated Pulumi property accessors (getter/setter pairs).
# Keep getter docstrings in sync with the upstream MongoDB Atlas API docs.
@pit_enabled.setter
def pit_enabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "pit_enabled", value)

@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
    """
    The unique ID for the project to create the cluster.
    """
    return pulumi.get(self, "project_id")

@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "project_id", value)

@property
@pulumi.getter(name="providerAutoScalingComputeMaxInstanceSize")
def provider_auto_scaling_compute_max_instance_size(self) -> Optional[pulumi.Input[str]]:
    """
    Maximum instance size to which your cluster can automatically scale (e.g., M40). Required if `autoScaling.compute.enabled` is `true`.
    """
    return pulumi.get(self, "provider_auto_scaling_compute_max_instance_size")

@provider_auto_scaling_compute_max_instance_size.setter
def provider_auto_scaling_compute_max_instance_size(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "provider_auto_scaling_compute_max_instance_size", value)

@property
@pulumi.getter(name="providerAutoScalingComputeMinInstanceSize")
def provider_auto_scaling_compute_min_instance_size(self) -> Optional[pulumi.Input[str]]:
    """
    Minimum instance size to which your cluster can automatically scale (e.g., M10). Required if `autoScaling.compute.scaleDownEnabled` is `true`.
    """
    return pulumi.get(self, "provider_auto_scaling_compute_min_instance_size")

@provider_auto_scaling_compute_min_instance_size.setter
def provider_auto_scaling_compute_min_instance_size(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "provider_auto_scaling_compute_min_instance_size", value)

@property
@pulumi.getter(name="providerBackupEnabled")
def provider_backup_enabled(self) -> Optional[pulumi.Input[bool]]:
    """
    Flag indicating if the cluster uses Cloud Backup for backups. **Deprecated** use `cloud_backup` instead.
    """
    return pulumi.get(self, "provider_backup_enabled")

@provider_backup_enabled.setter
def provider_backup_enabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "provider_backup_enabled", value)
# NOTE(review): generated Pulumi property accessors (getter/setter pairs).
@property
@pulumi.getter(name="providerDiskIops")
def provider_disk_iops(self) -> Optional[pulumi.Input[int]]:
    """
    The maximum input/output operations per second (IOPS) the system can perform. The possible values depend on the selected `provider_instance_size_name` and `disk_size_gb`. This setting requires that `provider_instance_size_name` to be M30 or greater and cannot be used with clusters with local NVMe SSDs. The default value for `provider_disk_iops` is the same as the cluster tier's Standard IOPS value, as viewable in the Atlas console. It is used in cases where a higher number of IOPS is needed and possible. If a value is submitted that is lower or equal to the default IOPS value for the cluster tier Atlas ignores the requested value and uses the default. More details available under the providerSettings.diskIOPS parameter: [MongoDB API Clusters](https://docs.atlas.mongodb.com/reference/api/clusters-create-one/)
    * You do not need to configure IOPS for a STANDARD disk configuration but only for a PROVISIONED configuration.
    """
    return pulumi.get(self, "provider_disk_iops")

@provider_disk_iops.setter
def provider_disk_iops(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "provider_disk_iops", value)

@property
@pulumi.getter(name="providerDiskTypeName")
def provider_disk_type_name(self) -> Optional[pulumi.Input[str]]:
    """
    Azure disk type of the server’s root volume. If omitted, Atlas uses the default disk type for the selected providerSettings.instanceSizeName. Example disk types and associated storage sizes: P4 - 32GB, P6 - 64GB, P10 - 128GB, P15 - 256GB, P20 - 512GB, P30 - 1024GB, P40 - 2048GB, P50 - 4095GB. More information and the most update to date disk types/storage sizes can be located at https://docs.atlas.mongodb.com/reference/api/clusters-create-one/.
    """
    return pulumi.get(self, "provider_disk_type_name")

@provider_disk_type_name.setter
def provider_disk_type_name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "provider_disk_type_name", value)

@property
@pulumi.getter(name="providerEncryptEbsVolume")
def provider_encrypt_ebs_volume(self) -> Optional[pulumi.Input[bool]]:
    """
    **(Deprecated) The Flag is always true.** Flag that indicates whether the Amazon EBS encryption feature encrypts the host's root volume for both data at rest within the volume and for data moving between the volume and the cluster. Note: This setting is always enabled for clusters with local NVMe SSDs. **Atlas encrypts all cluster storage and snapshot volumes, securing all cluster data on disk: a concept known as encryption at rest, by default.**.
    """
    return pulumi.get(self, "provider_encrypt_ebs_volume")

@provider_encrypt_ebs_volume.setter
def provider_encrypt_ebs_volume(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "provider_encrypt_ebs_volume", value)

@property
@pulumi.getter(name="providerEncryptEbsVolumeFlag")
def provider_encrypt_ebs_volume_flag(self) -> Optional[pulumi.Input[bool]]:
    """
    Companion flag for ``provider_encrypt_ebs_volume``; undocumented
    upstream — presumably mirrors the deprecated EBS-encryption setting
    (TODO confirm against the Atlas provider schema).
    """
    return pulumi.get(self, "provider_encrypt_ebs_volume_flag")

@provider_encrypt_ebs_volume_flag.setter
def provider_encrypt_ebs_volume_flag(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "provider_encrypt_ebs_volume_flag", value)

@property
@pulumi.getter(name="providerInstanceSizeName")
def provider_instance_size_name(self) -> Optional[pulumi.Input[str]]:
    """
    Atlas provides different instance sizes, each with a default storage capacity and RAM size. The instance size you select is used for all the data-bearing servers in your cluster. See [Create a Cluster](https://docs.atlas.mongodb.com/reference/api/clusters-create-one/) `providerSettings.instanceSizeName` for valid values and default resources.
    **Note** free tier (M0) creation is not supported by the Atlas API and hence not supported by this provider.)
    """
    return pulumi.get(self, "provider_instance_size_name")

@provider_instance_size_name.setter
def provider_instance_size_name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "provider_instance_size_name", value)
# NOTE(review): generated Pulumi property accessors (getter/setter pairs).
@property
@pulumi.getter(name="providerName")
def provider_name(self) -> Optional[pulumi.Input[str]]:
    """
    Cloud service provider on which the servers are provisioned.
    """
    return pulumi.get(self, "provider_name")

@provider_name.setter
def provider_name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "provider_name", value)

@property
@pulumi.getter(name="providerRegionName")
def provider_region_name(self) -> Optional[pulumi.Input[str]]:
    """
    Physical location of your MongoDB cluster. The region you choose can affect network latency for clients accessing your databases. Requires the **Atlas region name**, see the reference list for [AWS](https://docs.atlas.mongodb.com/reference/amazon-aws/), [GCP](https://docs.atlas.mongodb.com/reference/google-gcp/), [Azure](https://docs.atlas.mongodb.com/reference/microsoft-azure/).
    Do not specify this field when creating a multi-region cluster using the replicationSpec document or a Global Cluster with the replicationSpecs array.
    """
    return pulumi.get(self, "provider_region_name")

@provider_region_name.setter
def provider_region_name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "provider_region_name", value)

@property
@pulumi.getter(name="providerVolumeType")
def provider_volume_type(self) -> Optional[pulumi.Input[str]]:
    """
    The type of the volume. The possible values are: `STANDARD` and `PROVISIONED`. `PROVISIONED` is ONLY required if setting IOPS higher than the default instance IOPS.
    """
    return pulumi.get(self, "provider_volume_type")

@provider_volume_type.setter
def provider_volume_type(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "provider_volume_type", value)

@property
@pulumi.getter(name="replicationFactor")
def replication_factor(self) -> Optional[pulumi.Input[int]]:
    """
    Number of replica set members. Each member keeps a copy of your databases, providing high availability and data redundancy. The possible values are 3, 5, or 7. The default value is 3.
    """
    return pulumi.get(self, "replication_factor")

@replication_factor.setter
def replication_factor(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "replication_factor", value)

@property
@pulumi.getter(name="replicationSpecs")
def replication_specs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterReplicationSpecArgs']]]]:
    """
    Configuration for cluster regions. See Replication Spec below for more details.
    """
    return pulumi.get(self, "replication_specs")

@replication_specs.setter
def replication_specs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterReplicationSpecArgs']]]]):
    pulumi.set(self, "replication_specs", value)
# NOTE(review): generated Pulumi property accessors (getter/setter pairs).
@property
@pulumi.getter(name="snapshotBackupPolicies")
def snapshot_backup_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClusterSnapshotBackupPolicyArgs']]]]:
    """
    current snapshot schedule and retention settings for the cluster.
    """
    return pulumi.get(self, "snapshot_backup_policies")

@snapshot_backup_policies.setter
def snapshot_backup_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClusterSnapshotBackupPolicyArgs']]]]):
    pulumi.set(self, "snapshot_backup_policies", value)

@property
@pulumi.getter(name="srvAddress")
def srv_address(self) -> Optional[pulumi.Input[str]]:
    """
    Connection string for connecting to the Atlas cluster. The +srv modifier forces the connection to use TLS/SSL. See the mongoURI for additional options.
    """
    return pulumi.get(self, "srv_address")

@srv_address.setter
def srv_address(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "srv_address", value)

@property
@pulumi.getter(name="stateName")
def state_name(self) -> Optional[pulumi.Input[str]]:
    """
    Current state of the cluster. The possible states are:
    - IDLE
    - CREATING
    - UPDATING
    - DELETING
    - DELETED
    - REPAIRING
    """
    return pulumi.get(self, "state_name")

@state_name.setter
def state_name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "state_name", value)
class Cluster(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
advanced_configuration: Optional[pulumi.Input[pulumi.InputType['ClusterAdvancedConfigurationArgs']]] = None,
auto_scaling_compute_enabled: Optional[pulumi.Input[bool]] = None,
auto_scaling_compute_scale_down_enabled: Optional[pulumi.Input[bool]] = None,
auto_scaling_disk_gb_enabled: Optional[pulumi.Input[bool]] = None,
backing_provider_name: Optional[pulumi.Input[str]] = None,
backup_enabled: Optional[pulumi.Input[bool]] = None,
bi_connector: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
bi_connector_config: Optional[pulumi.Input[pulumi.InputType['ClusterBiConnectorConfigArgs']]] = None,
cloud_backup: Optional[pulumi.Input[bool]] = None,
cluster_type: Optional[pulumi.Input[str]] = None,
disk_size_gb: Optional[pulumi.Input[float]] = None,
encryption_at_rest_provider: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterLabelArgs']]]]] = None,
mongo_db_major_version: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
num_shards: Optional[pulumi.Input[int]] = None,
pit_enabled: Optional[pulumi.Input[bool]] = None,
project_id: Optional[pulumi.Input[str]] = None,
provider_auto_scaling_compute_max_instance_size: Optional[pulumi.Input[str]] = None,
provider_auto_scaling_compute_min_instance_size: Optional[pulumi.Input[str]] = None,
provider_backup_enabled: Optional[pulumi.Input[bool]] = None,
provider_disk_iops: Optional[pulumi.Input[int]] = None,
provider_disk_type_name: Optional[pulumi.Input[str]] = None,
provider_encrypt_ebs_volume: Optional[pulumi.Input[bool]] = None,
provider_instance_size_name: Optional[pulumi.Input[str]] = None,
provider_name: Optional[pulumi.Input[str]] = None,
provider_region_name: Optional[pulumi.Input[str]] = None,
provider_volume_type: Optional[pulumi.Input[str]] = None,
replication_factor: Optional[pulumi.Input[int]] = None,
replication_specs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterReplicationSpecArgs']]]]] = None,
__props__=None):
"""
## Import
Clusters can be imported using project ID and cluster name, in the format `PROJECTID-CLUSTERNAME`, e.g.
```sh
$ pulumi import mongodbatlas:index/cluster:Cluster my_cluster 1112222b3bf99403840e8934-Cluster0
```
See detailed information for arguments and attributes[MongoDB API Clusters](https://docs.atlas.mongodb.com/reference/api/clusters-create-one/)
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_scaling_compute_enabled: Specifies whether cluster tier auto-scaling is enabled. The default is false.
- Set to `true` to enable cluster tier auto-scaling. If enabled, you must specify a value for `providerSettings.autoScaling.compute.maxInstanceSize`.
- Set to `false` to disable cluster tier auto-scaling.
:param pulumi.Input[bool] auto_scaling_compute_scale_down_enabled: Set to `true` to enable the cluster tier to scale down. This option is only available if `autoScaling.compute.enabled` is `true`.
- If this option is enabled, you must specify a value for `providerSettings.autoScaling.compute.minInstanceSize`
:param pulumi.Input[bool] auto_scaling_disk_gb_enabled: Specifies whether disk auto-scaling is enabled. The default is true.
- Set to `true` to enable disk auto-scaling.
- Set to `false` to disable disk auto-scaling.
:param pulumi.Input[str] backing_provider_name: Cloud service provider on which the server for a multi-tenant cluster is provisioned.
:param pulumi.Input[bool] backup_enabled: Clusters running MongoDB FCV 4.2 or later and any new Atlas clusters of any type do not support this parameter
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] bi_connector: Specifies BI Connector for Atlas configuration on this cluster. BI Connector for Atlas is only available for M10+ clusters. See BI Connector below for more details. **DEPRECATED** | |
# Copyright (c) 2020, MD2K Center of Excellence
# - <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import pandas as pd
from pyspark.sql import functions as F
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.group import GroupedData
from pyspark.sql.types import *
from pyspark.sql.types import StructType
from pyspark.sql.window import Window
from cerebralcortex.core.datatypes.datastream import DataStream
from cerebralcortex.core.metadata_manager.stream.metadata import Metadata
def complementary_filter(ds, freq: int = 16, accelerometer_x: str = "accelerometer_x",
                         accelerometer_y: str = "accelerometer_y", accelerometer_z: str = "accelerometer_z",
                         gyroscope_x: str = "gyroscope_x", gyroscope_y: str = "gyroscope_y",
                         gyroscope_z: str = "gyroscope_z"):
    """
    Fuse accelerometer and gyroscope columns with a complementary filter.

    Adds ``roll``, ``pitch`` and ``yaw`` columns to the data, each a
    weighted blend (0.90 gyro / 0.10 accel) of the integrated gyroscope
    rate and the accelerometer-derived tilt angle, computed per user in
    timestamp order.  Rows containing nulls (e.g. the first row of each
    user, where the window lag is undefined) are dropped.

    Args:
        ds (DataStream): Non-Windowed/grouped dataframe
        freq (int): frequency of accel/gyro. Assumption is that frequency
            is equal for both gyro and accel.
        accelerometer_x (str): name of the column
        accelerometer_y (str): name of the column
        accelerometer_z (str): name of the column
        gyroscope_x (str): name of the column
        gyroscope_y (str): name of the column
        gyroscope_z (str): name of the column
    """
    dt = 1.0 / freq          # integration step for the gyro rate
    hpf = 0.90               # weight of the gyro (high-pass) path
    lpf = 0.10               # weight of the accel (low-pass) path

    window = Window.partitionBy(ds._data['user']).orderBy(ds._data['timestamp'])

    # (output column, accel numerator column, accel denominator column,
    #  gyro rate column) for each rotation axis
    axes = (
        ("roll", accelerometer_z, accelerometer_y, gyroscope_x),
        ("pitch", accelerometer_x, accelerometer_z, gyroscope_y),
        ("yaw", accelerometer_y, accelerometer_x, gyroscope_z),
    )

    data = ds._data
    for out_col, num_col, den_col, gyro_col in axes:
        accel_angle = (F.atan2(-F.col(num_col), F.col(den_col)) * 180 / math.pi) * lpf
        data = (
            data.withColumn("_accel_angle", accel_angle)
            .withColumn(
                out_col,
                (F.lag("_accel_angle").over(window) + F.col(gyro_col) * dt) * hpf
                + F.col("_accel_angle"),
            )
            .drop("_accel_angle")
        )

    return DataStream(data=data.dropna(), metadata=Metadata())
def compute_zero_cross_rate(ds, exclude_col_names=None,
                            feature_names=['zero_cross_rate']):
    """
    Compute the zero-crossing-rate feature for every data column.

    Args:
        ds (DataStream): Windowed/grouped dataframe
        exclude_col_names list(str): name of the columns on which features
            should not be computed; defaults to none.  The bookkeeping
            columns (timestamp, localtime, user, version) are always
            excluded.
        feature_names list(str): names of the features. The supported
            feature is 'zero_cross_rate'.

    Returns:
        DataStream object

    Raises:
        Exception: if ``ds`` has not been windowed/grouped first.
    """
    # Copy the caller's list before extending it.  The previous version
    # used a mutable default argument ([]) and extended it in place, so
    # the exclusion list silently grew across successive calls.
    exclude_col_names = list(exclude_col_names) if exclude_col_names else []
    exclude_col_names.extend(["timestamp", "localtime", "user", "version"])

    data = ds._data.drop(*exclude_col_names)
    df_column_names = data.columns

    # fixed bookkeeping columns of the output schema
    basic_schema = StructType([
        StructField("timestamp", TimestampType()),
        StructField("localtime", TimestampType()),
        StructField("user", StringType()),
        StructField("version", IntegerType()),
        StructField("start_time", TimestampType()),
        StructField("end_time", TimestampType())
    ])

    # one output column per (data column, feature) pair
    features_list = []
    for cn in df_column_names:
        for sf in feature_names:
            features_list.append(StructField(cn + "_" + sf, FloatType(), True))

    features_schema = StructType(basic_schema.fields + features_list)

    def calculate_zero_cross_rate(series):
        """
        How often the signal changes sign (+/-), after removing the mean.
        """
        series_mean = np.mean(series)
        series = [v - series_mean for v in series]
        zero_cross_count = (np.diff(np.sign(series)) != 0).sum()
        return zero_cross_count / len(series)

    @pandas_udf(features_schema, PandasUDFType.GROUPED_MAP)
    def get_features_udf(df):
        results = []
        timestamp = df['timestamp'].iloc[0]
        localtime = df['localtime'].iloc[0]
        user = df['user'].iloc[0]
        version = df['version'].iloc[0]
        start_time = timestamp
        end_time = df['timestamp'].iloc[-1]

        df.drop(exclude_col_names, axis=1, inplace=True)
        if "zero_cross_rate" in feature_names:
            df_zero_cross_rate = df.apply(calculate_zero_cross_rate)
            df_zero_cross_rate.index += '_zero_cross_rate'
            results.append(df_zero_cross_rate)

        output = pd.DataFrame(pd.concat(results)).T
        basic_df = pd.DataFrame([[timestamp, localtime, user, int(version), start_time, end_time]],
                                columns=['timestamp', 'localtime', 'user', 'version', 'start_time', 'end_time'])
        return basic_df.assign(**output)

    # check if datastream object contains grouped type of DataFrame
    if not isinstance(ds._data, GroupedData):
        raise Exception(
            "DataStream object is not grouped data type. Please use 'window' operation on datastream object before running this algorithm")

    data = ds._data.apply(get_features_udf)
    return DataStream(data=data, metadata=Metadata())
def compute_FFT_features(ds, exclude_col_names: list = [],
feature_names=["fft_centroid", 'fft_spread', 'spectral_entropy', 'fft_flux',
'spectral_falloff']):
"""
Transforms data from time domain to frequency domain.
Args:
exclude_col_names list(str): name of the columns on which features should not be computed
feature_names list(str): names of the features. Supported features are fft_centroid, fft_spread, spectral_entropy, spectral_entropy_old, fft_flux, spectral_falloff
windowDuration (int): duration of a window in seconds
slideDuration (int): slide duration of a window
groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
Returns:
DataStream object with all the existing data columns and FFT features
"""
eps = 0.00000001
exclude_col_names.extend(["timestamp", "localtime", "user", "version"])
data = ds._data.drop(*exclude_col_names)
df_column_names = data.columns
basic_schema = StructType([
StructField("timestamp", TimestampType()),
StructField("localtime", TimestampType()),
StructField("user", StringType()),
StructField("version", IntegerType()),
StructField("start_time", TimestampType()),
StructField("end_time", TimestampType())
])
features_list = []
for cn in df_column_names:
for sf in feature_names:
features_list.append(StructField(cn + "_" + sf, FloatType(), True))
features_schema = StructType(basic_schema.fields + features_list)
def stSpectralCentroidAndSpread(X, fs):
    """
    Compute spectral centroid and spread of a frame (given abs(FFT)).

    :param X: abs(FFT) magnitudes of the current frame
    :param fs: sampling frequency in Hz
    :return: tuple ``(centroid, spread)``, both normalised to [0, 1]
        by the Nyquist frequency (fs / 2)
    """
    # frequency (Hz) associated with each FFT bin
    ind = (np.arange(1, len(X) + 1)) * (fs / (2.0 * len(X)))
    Xt = X.copy()
    Xt = Xt / Xt.max()  # normalise magnitudes to a peak of 1
    NUM = np.sum(ind * Xt)
    DEN = np.sum(Xt) + eps  # eps (enclosing scope) guards division by zero
    # Centroid:
    C = (NUM / DEN)
    # Spread:
    S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN)
    # Normalize:
    C = C / (fs / 2.0)
    S = S / (fs / 2.0)
    return (C, S)
def stSpectralFlux(X, Xprev):
    """
    Compute the spectral flux feature of the current frame.

    ARGUMENTS:
        X:     the abs(fft) of the current frame
        Xprev: the abs(fft) of the previous frame
    """
    # compute the spectral flux as the sum of square distances between
    # the two sum-normalised spectra
    # NOTE(review): local name F shadows the module-level pyspark
    # functions alias F inside this helper only.
    sumX = np.sum(X + eps)
    sumPrevX = np.sum(Xprev + eps)
    F = np.sum((X / sumX - Xprev / sumPrevX) ** 2)
    return F
def stSpectralRollOff(X, c, fs):
    """
    Compute spectral roll-off of a frame.

    :param X: abs(FFT) magnitudes of the frame
    :param c: energy fraction threshold (e.g. 0.90)
    :param fs: sampling frequency (accepted for interface symmetry;
        not used in the computation)
    :return: roll-off position as a fraction of the FFT length
    """
    totalEnergy = np.sum(X ** 2)
    fftLength = len(X)
    Thres = c * totalEnergy
    # Find the spectral rolloff as the frequency position where the
    # cumulative spectral energy first exceeds c*totalEnergy
    CumSum = np.cumsum(X ** 2) + eps
    [a, ] = np.nonzero(CumSum > Thres)
    if len(a) > 0:
        mC = np.float64(a[0]) / (float(fftLength))
    else:
        mC = 0.0
    return (mC)
def stSpectralEntropy(X, numOfShortBlocks=10):
    """
    Compute the spectral entropy of a frame.

    :param X: abs(FFT) magnitudes of the frame
    :param numOfShortBlocks: number of sub-frames the spectrum is
        divided into
    :return: entropy (base-2) of the normalised sub-band energies
    """
    L = len(X)  # number of frame samples
    Eol = np.sum(X ** 2)  # total spectral energy
    subWinLength = int(np.floor(L / numOfShortBlocks))  # length of sub-frame
    # truncate so the spectrum divides evenly into sub-frames
    if L != subWinLength * numOfShortBlocks:
        X = X[0:subWinLength * numOfShortBlocks]
    subWindows = X.reshape(subWinLength, numOfShortBlocks,
                           order='F').copy()  # define sub-frames (using matrix reshape)
    s = np.sum(subWindows ** 2, axis=0) / (Eol + eps)  # compute spectral sub-energies
    En = -np.sum(s * np.log2(s + eps))  # compute spectral entropy
    return En
def spectral_entropy(data, sampling_freq, bands=None):
psd = np.abs(np.fft.rfft(data)) ** 2
psd /= np.sum(psd) # psd as a pdf (normalised to one)
if bands is None:
power_per_band = psd[psd > 0]
else:
freqs = np.fft.rfftfreq(data.size, 1 / float(sampling_freq))
bands = np.asarray(bands)
freq_limits_low = np.concatenate([[0.0], bands])
freq_limits_up = np.concatenate([bands, [np.Inf]])
power_per_band = [np.sum(psd[np.bitwise_and(freqs >= low, freqs < up)])
for low, up in zip(freq_limits_low, freq_limits_up)]
power_per_band = | |
<filename>rstfinder/tree_util.py
"""
Classes and functions pertaining to syntactic (constituency) trees.
:author: <NAME>
:author: <NAME>
:organization: ETS
"""
import re
from nltk.tree import ParentedTree
# Margin used when pretty-printing trees; the very large value presumably
# keeps each tree on a single line — TODO confirm against call sites.
TREE_PRINT_MARGIN = 1000000000

# Penn Treebank escape tokens for bracket characters, which would
# otherwise clash with the parenthesised tree notation.
_ptb_paren_mapping = {'(': r"-LRB-",
                      ')': r"-RRB-",
                      '[': r"-LSB-",
                      ']': r"-RSB-",
                      '{': r"-LCB-",
                      '}': r"-RCB-"}

# Inverse mapping: PTB escape token -> original bracket character.
_reverse_ptb_paren_mapping = {bracket_replacement: bracket_type
                              for bracket_type, bracket_replacement
                              in _ptb_paren_mapping.items()}
class HeadedParentedTree(ParentedTree):
    """
    Modify ``nltk.tree.ParentedTree`` to also return heads.
    This subclass of ``nltk.tree.ParentedTree`` also returns heads
    using head rules from Michael Collins's 1999 thesis, Appendix A.
    See the ``head()`` method below.
    """
    # For each nonterminal label, the end of the child list where the
    # head search starts: "L" = leftmost child first, "R" = rightmost.
    start_points = {"ADJP": "L",
                    "ADVP": "R",
                    "CONJP": "R",
                    "FRAG": "R",
                    "INTJ": "L",
                    "LST": "R",
                    "NAC": "L",
                    "PP": "R",
                    "PRN": "L",
                    "PRT": "R",
                    "QP": "L",
                    "RRC": "R",
                    "S": "L",
                    "SBAR": "L",
                    "SBARQ": "L",
                    "SINV": "L",
                    "SQ": "L",
                    "UCP": "R",
                    "VP": "L",
                    "WHADJP": "L",
                    "WHADVP": "R",
                    "WHNP": "L",
                    "WHPP": "R",
                    "NX": "L",
                    "X": "L"}
    # For each nonterminal label, the child labels to try (in order of
    # priority) when searching for the head child.
    priority_list = {"ADJP": ["NNS", "QP", "NN", "$", "ADVP", "JJ", "VBN",
                              "VBG", "ADJP", "JJR", "NP", "JJS", "DT", "FW",
                              "RBR", "RBS", "SBAR", "RB"],
                     "ADVP": ["RB", "RBR", "RBS", "FW", "ADVP", "TO", "CD",
                              "JJR", "JJ", "IN", "NP", "JJS", "NN"],
                     "CONJP": ["CC", "RB", "IN"],
                     "FRAG": [],
                     "INTJ": [],
                     "LST": ["LS", ":"],
                     "NAC": ["NN", "NNS", "NNP", "NNPS", "NP", "NAC", "EX",
                             "$", "CD", "QP", "PRP", "VBG", "JJ", "JJS",
                             "JJR", "ADJP", "FW"],
                     "PP": ["IN", "TO", "VBG", "VBN", "RP", "FW"],
                     "PRN": [],
                     "PRT": ["RP"],
                     "QP": ["$", "IN", "NNS", "NN", "JJ", "RB", "DT", "CD",
                            "NCD", "QP", "JJR", "JJS"],
                     "RRC": ["VP", "NP", "ADVP", "ADJP", "PP"],
                     "S": ["TO", "IN", "VP", "S", "SBAR", "ADJP", "UCP", "NP"],
                     "SBAR": ["WHNP", "WHPP", "WHADVP", "WHADJP", "IN", "DT",
                              "S", "SQ", "SINV", "SBAR", "FRAG"],
                     "SBARQ": ["SQ", "S", "SINV", "SBARQ", "FRAG"],
                     "SINV": ["VBZ", "VBD", "VBP", "VB", "MD", "VP", "S",
                              "SINV", "ADJP", "NP"],
                     "SQ": ["VBZ", "VBD", "VBP", "VB", "MD", "VP", "SQ"],
                     "UCP": [],
                     "VP": ["TO", "VBD", "VBN", "MD", "VBZ", "VB", "VBG",
                            "VBP", "VP", "ADJP", "NN", "NNS", "NP"],
                     "WHADJP": ["CC", "WRB", "JJ", "ADJP"],
                     "WHADVP": ["CC", "WRB"],
                     "WHNP": ["WDT", "WP", "WP$", "WHADJP", "WHPP", "WHNP"],
                     "WHPP": ["IN", "TO", "FW"],
                     "NX": [],
                     "X": []}
    def __init__(self, node_or_str, children=None):
        """Initialize the tree."""
        # cached head child; computed lazily by head()
        self._head = None
        super(HeadedParentedTree, self).__init__(node_or_str, children)
    def _search_children(self, search_list, start_point):
        """
        Find heads of noun phrases.
        This is a helper function for finding heads of noun phrases.
        It finds the first node whose label is in search_list, starting
        from start_point, either "L" for left (i.e., 0) or "R" for right.
        Parameters
        ----------
        search_list : list
            List of labels.
        start_point : str
            The starting point for the search.
        Returns
        -------
        head_index : int
            The positional index of the head node (None if no child's
            label is in search_list).
        """
        assert start_point == "L" or start_point == "R"
        head_index = None
        num_children = len(self)
        children = list(self)
        # reverse the list if we start from the right
        if start_point == "R":
            children.reverse()
        for i, child in enumerate(children):
            if child.label() in search_list:
                head_index = i
                break
        # correct the index if we reversed the list to start from the right
        if start_point == "R" and head_index is not None:
            head_index = num_children - 1 - head_index
        return head_index
    def head(self):
        """
        Find the head of the tree.
        This method uses the head finding rules, following Michael Collins's
        head rules (from his 1999 Ph.D. thesis, Appendix A).
        A default of the leftmost child was added for NX nodes, which aren't
        discussed in Collin's thesis. This follows the Stanford Parser
        (http://nlp.stanford.edu/nlp/javadoc/javanlp/edu/stanford/nlp/trees/CollinsHeadFinder.html).
        """
        if self._head is None:
            num_children = len(self)
            head_index = None
            if num_children < 2:
                # shortcut for when there is only one child
                self._head = self[0]
                return self._head
            # special case: NPs
            if self.label() == 'NP':
                # If last node is POS, that's the head
                if self[-1].label() == "POS":
                    head_index = num_children - 1
                # Otherwise, look right to left for NN, NNP, NNPS, NNS, NX,
                # POS, or JJR.
                if head_index is None:
                    head_index = self._search_children(["NN", "NNP", "NNPS",
                                                        "NNS", "NX", "POS",
                                                        "JJR"],
                                                       "R")
                # Otherwise, search left to right for NP.
                if head_index is None:
                    head_index = self._search_children(["NP"],
                                                       "L")
                # Otherwise, search right to left for $, ADJP, PRN.
                if head_index is None:
                    head_index = self._search_children(["$", "ADJP", "PRN"],
                                                       "R")
                # Otherwise, search right to left for CD.
                if head_index is None:
                    head_index = self._search_children(["CD"],
                                                       "R")
                # Otherwise, search right to left for JJ, JJS, RB, or QP.
                if head_index is None:
                    head_index = self._search_children(["JJ", "JJS", "RB",
                                                        "QP"],
                                                       "R")
                # Otherwise, return the last child.
                if head_index is None:
                    head_index = num_children - 1
            else:  # typical cases
                start_point = self.start_points[self.label()]
                # Try looking for each symbol in the priority list.
                # Stop at the first match.
                for symbol in self.priority_list[self.label()]:
                    head_index = self._search_children([symbol], start_point)
                    if head_index is not None:
                        break
                if head_index is None:
                    # If none of the symbols given in the priority list
                    # for this label was found, then default to the first
                    # child from the left or right, as specified by the
                    # starting points table.
                    head_index = 0 if start_point == 'L' else num_children - 1
            # special case: coordination.
            # After finding the head, check to see if its left sibling is a
            # conjunction. If so, move the head index left 2.
            # NOTE(review): the > 2 guard means heads at indices 1 and 2 are
            # never shifted past a conjunction — confirm this is intended.
            if 'CC' in {x.label() for x in self}:
                if head_index > 2 and self[head_index - 1].label() == 'CC':
                    head_index -= 2
            # cache the result
            self._head = self[head_index]
        return self._head
    def find_maximal_head_node(self):
        """
        Find the topmost node that has this node as its head.
        Returns itself if the parent has a different head.
        """
        res = self
        parent = res.parent()
        # climb while each parent's head is the node we came from
        while parent is not None and parent.head() == res:
            res = parent
            parent = res.parent()
        return res
    def head_preterminal(self):
        """Return the head preterminal."""
        res = self
        # descend through heads until a node dominating a bare word
        while not isinstance(res[0], str):
            res = res.head()
        return res
    def head_word(self):
        """Return the head word."""
        return self.head_preterminal()[0]
    def head_pos(self):
        """Return the part-of-speech for the head."""
        return self.head_preterminal().label()
def extract_preterminals(tree):
    """Extract the preterminal nodes (POS nodes directly above words)."""
    preterminals = []
    # a preterminal dominates only a leaf, so its height is exactly 2
    for node in tree.subtrees():
        if node.height() == 2:
            preterminals.append(node)
    return preterminals
def convert_paren_tokens_to_ptb_format(toks):
    """Convert parenthesis/bracket tokens in given list to PTB escapes."""
    converted = []
    for token in toks:
        # non-bracket tokens pass through unchanged
        converted.append(_ptb_paren_mapping.get(token, token))
    return converted
def convert_parens_to_ptb_format(sent):
    """Convert parentheses in given string to PTB format."""
    for bracket, replacement in _ptb_paren_mapping.items():
        sent = sent.replace(bracket, f" {replacement} ")
    # collapse the extra spaces introduced around the escape tokens
    return re.sub(r'\s+', r' ', sent).strip()
def extract_converted_terminals(tree):
    """Extract tree leaves, undoing PTB escapes for parens and quotes."""
    words = []
    previous = ""
    for word in tree.leaves():
        # drop a period token that directly follows "U.S."
        if previous == "U.S." and word == '.':
            continue
        if word in _reverse_ptb_paren_mapping:
            # restore the original bracket character
            word = _reverse_ptb_paren_mapping[word]
        elif word in ('``', "''"):
            # normalize PTB quote tokens to a plain double quote
            word = '"'
        previous = word
        words.append(word)
    return words
def convert_ptb_tree(tree):
    """Convert PTB tree to remove traces etc."""
    # delete "-NONE-" (trace) nodes, then walk upward deleting any
    # ancestors left empty by the removal
    for subtree in [st for st in
                    tree.subtrees(filter=lambda x: x.label() == "-NONE-")]:
        curtree = subtree
        while curtree.label() == "-NONE-" or len(curtree) == 0:
            parent = curtree.parent()
            parent.remove(curtree)
            curtree = parent
    # remove suffixes that don't appear in typical parser output
    # (e.g., "-SBJ-1" in "NP-SBJ-1"); leave labels starting with
    # "-" as is (e.g., "-LRB-").
    for subtree in tree.subtrees():
        label = subtree.label()
        if '-' in label and label[0] != '-':
            subtree.set_label(label[:label.index('-')])
        # re-read the label: the "-" stripping above may have changed it
        label = subtree.label()
        if '=' in label and label[0] != '=':
            subtree.set_label(label[:label.index('=')])
    # remove escape sequences from words (e.g., "3\\/4")
    for subtree in tree.subtrees():
        if isinstance(subtree[0], str):
            for i in range(len(subtree)):
                subtree[i] = re.sub(r'\\', r'', subtree[i])
def find_first_common_ancestor(node1, node2):
"""
Find the first common ancestor for two nodes in the same tree.
Parameters
----------
node1 : nltk.tree.ParentedTree
The first node.
node2 : nltk.tree.ParentedTree
The second node.
Returns
-------
ancestor_node : nltk.tree.ParentedTree
The first common ancestor for two nodes in the same tree.
"""
# make sure we are in the same tree
assert node1.root() == node2.root()
# make a set of all ancestors of node1
node1_ancestor_treepositions = set()
node1_parent = node1.parent()
while node1_parent is not None:
# note: storing treepositions isn't particularly efficient since
# treeposition() walks up the tree; using memory addresses like
# id(node1_parent) would be faster, but seems potentially
# hazardous/confusing
node1_ancestor_treepositions.add(node1_parent.treeposition())
node1_parent = node1_parent.parent()
# find the first ancestor of node2 | |
<reponame>kos-kaggle/pytorch_advanced
"""
第2章SSDで実装した内容をまとめたファイル
"""
# パッケージのimport
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Function
import torch.utils.data as data
import torch
import cv2
import numpy as np
import os.path as osp
from itertools import product as product
from math import sqrt as sqrt
# XMLをファイルやテキストから読み込んだり、加工したり、保存したりするためのライブラリ
import xml.etree.ElementTree as ET
# フォルダ「utils」のdata_augumentation.pyからimport。入力画像の前処理をするクラス
from utils.data_augumentation import Compose, ConvertFromInts, ToAbsoluteCoords, PhotometricDistort, Expand, RandomSampleCrop, RandomMirror, ToPercentCoords, Resize, SubtractMeans
# フォルダ「utils」にある関数matchを記述したmatch.pyからimport
from utils.match import match
# 学習、検証の画像データとアノテーションデータへのファイルパスリストを作成する
def make_datapath_list(rootpath):
    """
    Create lists of file paths to the image and annotation data.

    Parameters
    ----------
    rootpath : str
        Path to the data folder (VOC directory layout).

    Returns
    -------
    ret : train_img_list, train_anno_list, val_img_list, val_anno_list
        Lists of file paths to the data.
    """
    # templates for image and annotation file paths
    imgpath_template = osp.join(rootpath, 'JPEGImages', '%s.jpg')
    annopath_template = osp.join(rootpath, 'Annotations', '%s.xml')

    # files listing the IDs (file names) for training and validation
    # (was rootpath + 'ImageSets/Main/...', which silently required a
    # trailing slash on rootpath; osp.join handles both cases)
    train_id_names = osp.join(rootpath, 'ImageSets', 'Main', 'train.txt')
    val_id_names = osp.join(rootpath, 'ImageSets', 'Main', 'val.txt')

    def _build_lists(id_file):
        # one (image path, annotation path) pair per non-blank ID line
        img_list = []
        anno_list = []
        # 'with' closes the file deterministically (it was leaked before)
        with open(id_file) as f:
            for line in f:
                file_id = line.strip()  # drop whitespace and newline
                if not file_id:
                    continue  # ignore blank lines (e.g., trailing newline)
                img_list.append(imgpath_template % file_id)
                anno_list.append(annopath_template % file_id)
        return img_list, anno_list

    train_img_list, train_anno_list = _build_lists(train_id_names)
    val_img_list, val_anno_list = _build_lists(val_id_names)
    return train_img_list, train_anno_list, val_img_list, val_anno_list
# 「XML形式のアノテーション」を、リスト形式に変換するクラス
class Anno_xml2list(object):
    """
    Convert one image's XML annotation into a list, normalizing the
    bounding-box coordinates by the image size.

    Attributes
    ----------
    classes : list
        List of the VOC class names.
    """

    def __init__(self, classes):
        self.classes = classes

    def __call__(self, xml_path, width, height):
        """
        Convert the XML annotation of one image to a normalized list.

        Parameters
        ----------
        xml_path : str
            Path to the XML file.
        width : int
            Width of the target image.
        height : int
            Height of the target image.

        Returns
        -------
        ret : np.ndarray of [[xmin, ymin, xmax, ymax, label_ind], ...]
            One row per (non-difficult) annotated object in the image.
        """
        annotations = []
        root = ET.parse(xml_path).getroot()
        # iterate over every annotated object in the image
        for obj in root.iter('object'):
            # skip objects flagged as 'difficult' in the annotation
            if int(obj.find('difficult').text) == 1:
                continue
            name = obj.find('name').text.lower().strip()  # object class name
            bbox = obj.find('bndbox')  # bounding-box element
            row = []
            for corner in ('xmin', 'ymin', 'xmax', 'ymax'):
                # VOC origin is (1, 1); subtract 1 to shift to (0, 0)
                pixel = int(bbox.find(corner).text) - 1
                # normalize x coordinates by width, y coordinates by height
                denominator = width if corner in ('xmin', 'xmax') else height
                row.append(pixel / denominator)
            # append the index of the class label
            row.append(self.classes.index(name))
            annotations.append(row)
        return np.array(annotations)  # [[xmin, ymin, xmax, ymax, label_ind], ...]
# 入力画像の前処理をするクラス
class DataTransform():
    """
    Preprocessing class for images and annotations; behaves differently
    for training and inference. Resizes images to input_size x input_size
    and applies data augmentation during training.

    Attributes
    ----------
    input_size : int
        Target image size after resizing.
    color_mean : (B, G, R)
        Mean value of each color channel.
    """

    def __init__(self, input_size, color_mean):
        train_pipeline = Compose([
            ConvertFromInts(),  # convert int to float32
            ToAbsoluteCoords(),  # undo the annotation normalization
            PhotometricDistort(),  # randomly perturb color etc.
            Expand(color_mean),  # enlarge the image canvas
            RandomSampleCrop(),  # randomly crop a region of the image
            RandomMirror(),  # randomly flip the image
            ToPercentCoords(),  # re-normalize annotations to 0-1
            Resize(input_size),  # resize to input_size x input_size
            SubtractMeans(color_mean)  # subtract the BGR channel means
        ])
        val_pipeline = Compose([
            ConvertFromInts(),  # convert int to float
            Resize(input_size),  # resize to input_size x input_size
            SubtractMeans(color_mean)  # subtract the BGR channel means
        ])
        self.data_transform = {'train': train_pipeline, 'val': val_pipeline}

    def __call__(self, img, phase, boxes, labels):
        """
        Apply the preprocessing pipeline selected by *phase*.

        Parameters
        ----------
        phase : 'train' or 'val'
            Which preprocessing mode to use.
        """
        return self.data_transform[phase](img, boxes, labels)
class VOCDataset(data.Dataset):
    """
    Dataset class for VOC2012. Inherits from PyTorch's Dataset class.

    Attributes
    ----------
    img_list : list
        List of paths to the images.
    anno_list : list
        List of paths to the annotations.
    phase : 'train' or 'test'
        Whether the dataset is used for training or testing.
    transform : object
        Instance of the preprocessing class.
    transform_anno : object
        Instance that converts an XML annotation into a list.
    """
    def __init__(self, img_list, anno_list, phase, transform, transform_anno):
        self.img_list = img_list
        self.anno_list = anno_list
        self.phase = phase  # 'train' or 'val'
        self.transform = transform  # image transformation
        self.transform_anno = transform_anno  # XML annotation -> list
    def __len__(self):
        '''Return the number of images.'''
        return len(self.img_list)
    def __getitem__(self, index):
        '''
        Return the preprocessed image tensor and its annotation.
        '''
        im, gt, h, w = self.pull_item(index)
        return im, gt
    def pull_item(self, index):
        '''Return the preprocessed image tensor, annotation, height and width.'''
        # 1. load the image
        image_file_path = self.img_list[index]
        img = cv2.imread(image_file_path)  # [height][width][BGR]
        height, width, channels = img.shape  # image dimensions
        # 2. convert the XML annotation into a list
        anno_file_path = self.anno_list[index]
        anno_list = self.transform_anno(anno_file_path, width, height)
        # 3. apply the preprocessing
        img, boxes, labels = self.transform(
            img, self.phase, anno_list[:, :4], anno_list[:, 4])
        # reorder channels from BGR to RGB, then convert
        # (height, width, channels) to (channels, height, width)
        img = torch.from_numpy(img[:, :, (2, 1, 0)]).permute(2, 0, 1)
        # stack boxes and labels into one np.array; "gt" = ground truth
        gt = np.hstack((boxes, np.expand_dims(labels, axis=1)))
        return img, gt, height, width
def od_collate_fn(batch):
    """
    Custom collate_fn for object-detection mini-batches.

    The number of annotated objects differs per image, so ground-truth
    arrays have varying first dimensions ((2, 5), (3, 5), ...). Images
    are stacked into a single tensor of shape [batch_num, 3, 300, 300];
    annotations stay a list of per-image FloatTensors of shape [n, 5]
    ([xmin, ymin, xmax, ymax, class_index] per object).
    """
    # sample[0] is the image, sample[1] the annotation ground truth
    imgs = [sample[0] for sample in batch]
    targets = [torch.FloatTensor(sample[1]) for sample in batch]
    # list of torch.Size([3, H, W]) -> torch.Size([batch_num, 3, H, W])
    return torch.stack(imgs, dim=0), targets
# 35層にわたる、vggモジュールを作成
def make_vgg():
    """Build the 35-layer VGG backbone module as an nn.ModuleList."""
    # channel configuration; 'M' and 'MC' mark max-pooling layers
    cfg = [64, 64, 'M', 128, 128, 'M', 256, 256,
           256, 'MC', 512, 512, 512, 'M', 512, 512, 512]
    layers = []
    in_channels = 3  # number of input color channels
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        elif v == 'MC':
            # ceil_mode rounds the output size up instead of down (floor)
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2,
                                       ceil_mode=True))
        else:
            layers.append(nn.Conv2d(in_channels, v, kernel_size=3, padding=1))
            layers.append(nn.ReLU(inplace=True))
            in_channels = v
    # tail: pool5, dilated conv6, conv7 (each conv followed by ReLU)
    layers.append(nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
    layers.append(nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
    layers.append(nn.ReLU(inplace=True))
    layers.append(nn.Conv2d(1024, 1024, kernel_size=1))
    layers.append(nn.ReLU(inplace=True))
    return nn.ModuleList(layers)
# 8層にわたる、extrasモジュールを作成
def make_extras():
    """Build the 8 extra feature layers (after VGG) as an nn.ModuleList."""
    # output-channel configuration of the extra conv layers
    cfg = [256, 512, 128, 256, 128, 256, 128, 256]
    in_channels = 1024  # channels of the image output by the vgg module
    layers = []
    for i, out_channels in enumerate(cfg):
        if i % 2 == 0:
            # 1x1 channel-reduction convolution
            layers.append(nn.Conv2d(in_channels, out_channels,
                                    kernel_size=(1)))
        elif i < 4:
            # 3x3 stride-2 convolution (halves the spatial resolution)
            layers.append(nn.Conv2d(in_channels, out_channels,
                                    kernel_size=(3), stride=2, padding=1))
        else:
            # 3x3 convolution without padding
            layers.append(nn.Conv2d(in_channels, out_channels,
                                    kernel_size=(3)))
        in_channels = out_channels
    return nn.ModuleList(layers)
# デフォルトボックスのオフセットを出力するloc_layers、
# デフォルトボックスに対する各クラスの確率を出力するconf_layersを作成
def make_loc_conf(num_classes=21, bbox_aspect_num=[4, 6, 6, 6, 4, 4]):
    """
    Build the prediction heads: loc_layers output the default-box
    offsets, conf_layers output the per-class confidences for each
    default box.

    Parameters
    ----------
    num_classes : int
        Number of classes (including background).
    bbox_aspect_num : list of int
        Number of default boxes per cell for each of the six sources.
    """
    # input channels of source1..source6:
    # conv4_3, VGG final layer, then the four extra sources
    in_channels_per_source = [512, 1024, 512, 256, 256, 256]
    loc_layers = []
    conf_layers = []
    for in_ch, num_boxes in zip(in_channels_per_source, bbox_aspect_num):
        # 4 offset values (cx, cy, w, h) per default box
        loc_layers.append(nn.Conv2d(in_ch, num_boxes * 4,
                                    kernel_size=3, padding=1))
        # num_classes confidences per default box
        conf_layers.append(nn.Conv2d(in_ch, num_boxes * num_classes,
                                     kernel_size=3, padding=1))
    return nn.ModuleList(loc_layers), nn.ModuleList(conf_layers)
# convC4_3からの出力をscale=20のL2Normで正規化する層
class L2Norm(nn.Module):
    """
    Channel-wise L2 normalization with a learnable per-channel scale
    (initialized to 20), applied to the conv4_3 feature map.
    """

    def __init__(self, input_channels=512, scale=20):
        super(L2Norm, self).__init__()  # run the parent constructor
        self.weight = nn.Parameter(torch.Tensor(input_channels))
        self.scale = scale  # initial value for every weight entry
        self.reset_parameters()  # initialize the parameters
        self.eps = 1e-10  # avoids division by zero in forward()

    def reset_parameters(self):
        '''Initialize every entry of the weight to the value `scale`.'''
        init.constant_(self.weight, self.scale)  # all weights become scale (=20)

    def forward(self, x):
        '''Normalize each spatial position of x across its channels,
        then multiply by the learned per-channel coefficients.'''
        # root of the channel-wise sum of squares at every position;
        # norm has shape torch.Size([batch_num, 1, H, W])
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        normalized = x / norm
        # broadcast the per-channel weights (shape [C]) over
        # torch.Size([batch_num, C, H, W])
        coeff = self.weight.view(1, -1, 1, 1).expand_as(normalized)
        return coeff * normalized
# デフォルトボックスを出力するクラス
class DBox(object):
    """Generate the SSD default boxes (DBoxes)."""

    def __init__(self, cfg):
        super(DBox, self).__init__()
        self.image_size = cfg['input_size']  # image size, e.g. 300
        self.feature_maps = cfg['feature_maps']  # sizes of each source map
        self.num_priors = len(cfg["feature_maps"])  # number of sources (=6)
        self.steps = cfg['steps']  # pixel step size per source
        self.min_sizes = cfg['min_sizes']  # small square DBox sizes (area)
        self.max_sizes = cfg['max_sizes']  # large square DBox sizes (area)
        self.aspect_ratios = cfg['aspect_ratios']  # rectangular aspect ratios

    def make_dbox_list(self):
        '''Create the default boxes as a [num_boxes, 4] tensor.'''
        coords = []
        for k, fmap_size in enumerate(self.feature_maps):
            # effective feature-map size: image_size / step
            f_k = self.image_size / self.steps[k]
            # normalized side of the small square box (loop-invariant)
            s_k = self.min_sizes[k] / self.image_size
            # side of the large square box (geometric mean of min/max)
            s_k_prime = sqrt(s_k * (self.max_sizes[k] / self.image_size))
            # every (row, col) cell of the k-th feature map
            for i, j in product(range(fmap_size), repeat=2):
                # box center (cx, cy), normalized to [0, 1]
                cx = (j + 0.5) / f_k
                cy = (i + 0.5) / f_k
                # small square box [cx, cy, width, height]
                coords.extend([cx, cy, s_k, s_k])
                # large square box [cx, cy, width, height]
                coords.extend([cx, cy, s_k_prime, s_k_prime])
                # rectangular boxes for every extra aspect ratio
                for ar in self.aspect_ratios[k]:
                    root = sqrt(ar)
                    coords.extend([cx, cy, s_k * root, s_k / root])
                    coords.extend([cx, cy, s_k / root, s_k * root])
        # flat list -> tensor of shape [num_boxes, 4]
        output = torch.Tensor(coords).view(-1, 4)
        # clamp coordinates into [0, 1]
        output.clamp_(max=1, min=0)
        return output
# オフセット情報を使い、DBoxをBBoxに変換する関数
def decode(loc, dbox_list):
"""
オフセット情報を使い、DBoxをBBoxに変換する。
Parameters
----------
loc: [8732,4]
SSDモデルで推論するオフセット情報。
dbox_list: [8732,4]
DBoxの情報
Returns
-------
boxes : [xmin, ymin, xmax, ymax]
BBoxの情報
"""
# DBoxは[cx, cy, width, height]で格納されている
# locも[Δcx, Δcy, Δwidth, Δheight]で格納されている
# オフセット情報からBBoxを求める
boxes = torch.cat((
dbox_list[:, :2] + loc[:, :2] * 0.1 * dbox_list[:, 2:],
dbox_list[:, 2:] * torch.exp(loc[:, 2:] * 0.2)), dim=1)
# boxesのサイズはtorch.Size([8732, 4])となります
# BBoxの座標情報を[cx, cy, width, height]から[xmin, ymin, xmax, ymax] に
boxes[:, | |
<gh_stars>100-1000
from __future__ import annotations
import logging
from types import TracebackType
from typing import Optional, Tuple, Type, Union
from typing_extensions import Protocol
from .effect import Depends, Success, add_repr, depend, from_io_bound_callable
from .either import Right
from .functions import curry
from .immutable import Immutable
# (exception type, exception instance, traceback) triple — matches the
# shape of the tuple returned by sys.exc_info()
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
class Logger(Immutable):
    """
    Wrapper around built-in `logging.Logger` class that
    calls logging methods as effects
    """
    logger: logging.Logger

    def _effect(self, log_method, msg, stack_info, exc_info):
        """
        Build an effect that calls *log_method* (a bound method of
        ``self.logger``) with `msg` when the effect is run.

        All the public logging methods below share this implementation;
        they differ only in which logger method they bind.
        """
        def f(_: object) -> Right[None]:
            log_method(msg, stack_info=stack_info, exc_info=exc_info)
            return Right(None)
        return from_io_bound_callable(f)

    def debug(
        self,
        msg: str,
        stack_info: bool = False,
        exc_info: Union[bool, ExcInfo] = False
    ) -> Success[None]:
        """
        Create an effect that calls built-in `logging.debug`
        Example:
            >>> import logging
            >>> Logger(logging.getLogger('foo')).debug('hello!').run(None)
            DEBUG:foo:hello!
        Args:
            msg: the log message
            stack_info: whether to include stack information in the \
            log message
            exc_info: whether to include exception info in the log message
        Return:
            `Effect` that calls `logging.debug` with `msg`
        """
        return self._effect(self.logger.debug, msg, stack_info, exc_info)

    def info(
        self,
        msg: str,
        stack_info: bool = False,
        exc_info: Union[bool, ExcInfo] = False
    ) -> Success[None]:
        """
        Create an effect that calls built-in `logging.info`
        Example:
            >>> import logging
            >>> Logger(logging.getLogger('foo')).info('hello!').run(None)
            INFO:foo:hello!
        Args:
            msg: the log message
            stack_info: whether to include stack information in the \
            log message
            exc_info: whether to include exception info in the log message
        Return:
            `Effect` that calls `logging.info` with `msg`
        """
        return self._effect(self.logger.info, msg, stack_info, exc_info)

    def warning(
        self,
        msg: str,
        stack_info: bool = False,
        exc_info: Union[bool, ExcInfo] = False
    ) -> Success[None]:
        """
        Create an effect that calls built-in `logging.warning`
        Example:
            >>> import logging
            >>> Logger(logging.getLogger('foo')).warning('hello!').run(None)
            WARNING:foo:hello!
        Args:
            msg: the log message
            stack_info: whether to include stack information in the \
            log message
            exc_info: whether to include exception info in the log message
        Return:
            `Effect` that calls `logging.warning` with `msg`
        """
        return self._effect(self.logger.warning, msg, stack_info, exc_info)

    def error(
        self,
        msg: str,
        stack_info: bool = False,
        exc_info: Union[bool, ExcInfo] = False
    ) -> Success[None]:
        """
        Create an effect that calls built-in `logging.error`
        Example:
            >>> import logging
            >>> Logger(logging.getLogger('foo')).error('hello!').run(None)
            ERROR:foo:hello!
        Args:
            msg: the log message
            stack_info: whether to include stack information in the \
            log message
            exc_info: whether to include exception info in the log message
        Return:
            `Effect` that calls `logging.error` with `msg`
        """
        return self._effect(self.logger.error, msg, stack_info, exc_info)

    def critical(
        self,
        msg: str,
        stack_info: bool = False,
        exc_info: Union[bool, ExcInfo] = False
    ) -> Success[None]:
        """
        Create an effect that calls built-in `logging.critical`
        Example:
            >>> import logging
            >>> Logger(logging.getLogger('foo')).critical('hello!').run(None)
            CRITICAL:foo:hello!
        Args:
            msg: the log message
            stack_info: whether to include stack information in the \
            log message
            exc_info: whether to include exception info in the log message
        Return:
            `Effect` that calls `logging.critical` with `msg`
        """
        return self._effect(self.logger.critical, msg, stack_info, exc_info)

    def exception(
        self,
        msg: str,
        stack_info: bool = True,
        exc_info: Union[bool, ExcInfo] = True
    ) -> Success[None]:
        """
        Create an effect that calls built-in `logging.exception`.
        Note that `logging.exception` logs at the ERROR level (there is
        no EXCEPTION level), hence the ERROR prefix in the example.
        Example:
            >>> import logging
            >>> Logger(logging.getLogger('foo')).exception('hello!').run(None)
            ERROR:foo:hello!
        Args:
            msg: the log message
            stack_info: whether to include stack information in the \
            log message
            exc_info: whether to include exception info in the log message
        Return:
            `Effect` that calls `logging.exception` with `msg`
        """
        return self._effect(self.logger.exception, msg, stack_info, exc_info)
class Logging:
    """
    Module providing logging capability
    """

    def _effect(self, log_function, msg, stack_info, exc_info):
        """
        Build an effect that calls *log_function* (a module-level
        function of the built-in ``logging`` module) with `msg` when
        the effect is run. Shared by all the logging methods below.
        """
        def f(_: object) -> Right[None]:
            log_function(msg, stack_info=stack_info, exc_info=exc_info)
            return Right(None)
        return from_io_bound_callable(f)

    def get_logger(self, name: Optional[str] = None) -> Logger:
        """
        Create a `Logger` wrapping the result of built-in
        `logging.getLogger`. Note that the `Logger` is returned
        directly (not as an effect); its methods produce effects.
        Example:
            >>> Logging().get_logger('foo').info('hello!').run(None)
            INFO:foo:hello!
        Args:
            name: name of logger
        Return:
            `Logger` wrapping `logging.getLogger(name)`
        """
        return Logger(logging.getLogger(name))

    def debug(
        self,
        msg: str,
        stack_info: bool = False,
        exc_info: Union[bool, ExcInfo] = False
    ) -> Success[None]:
        """
        Create an effect that calls built-in `logging.debug`
        Example:
            >>> Logging().debug('hello!').run(None)
            DEBUG:root:hello!
        Args:
            msg: the log message
            stack_info: whether to include stack information in the \
            log message
            exc_info: whether to include exception info in the log message
        Return:
            `Effect` that calls `logging.debug` with `msg`
        """
        return self._effect(logging.debug, msg, stack_info, exc_info)

    def info(
        self,
        msg: str,
        stack_info: bool = False,
        exc_info: Union[bool, ExcInfo] = False
    ) -> Success[None]:
        """
        Create an effect that calls built-in `logging.info`
        Example:
            >>> Logging().info('hello!').run(None)
            INFO:root:hello!
        Args:
            msg: the log message
            stack_info: whether to include stack information in the \
            log message
            exc_info: whether to include exception info in the log message
        Return:
            `Effect` that calls `logging.info` with `msg`
        """
        return self._effect(logging.info, msg, stack_info, exc_info)

    def warning(
        self,
        msg: str,
        stack_info: bool = False,
        exc_info: Union[bool, ExcInfo] = False
    ) -> Success[None]:
        """
        Create an effect that calls built-in `logging.warning`
        Example:
            >>> Logging().warning('hello!').run(None)
            WARNING:root:hello!
        Args:
            msg: the log message
            stack_info: whether to include stack information in the \
            log message
            exc_info: whether to include exception info in the log message
        Return:
            `Effect` that calls `logging.warning` with `msg`
        """
        return self._effect(logging.warning, msg, stack_info, exc_info)

    def error(
        self,
        msg: str,
        stack_info: bool = False,
        exc_info: Union[bool, ExcInfo] = False
    ) -> Success[None]:
        """
        Create an effect that calls built-in `logging.error`
        Example:
            >>> Logging().error('hello!').run(None)
            ERROR:root:hello!
        Args:
            msg: the log message
            stack_info: whether to include stack information in the \
            log message
            exc_info: whether to include exception info in the log message
        Return:
            `Effect` that calls `logging.error` with `msg`
        """
        return self._effect(logging.error, msg, stack_info, exc_info)

    def critical(
        self,
        msg: str,
        stack_info: bool = False,
        exc_info: Union[bool, ExcInfo] = False
    ) -> Success[None]:
        """
        Create an effect that calls built-in `logging.critical`
        Example:
            >>> Logging().critical('hello!').run(None)
            CRITICAL:root:hello!
        Args:
            msg: the log message
            stack_info: whether to include stack information in the \
            log message
            exc_info: whether to include exception info in the log message
        Return:
            `Effect` that calls `logging.critical` with `msg`
        """
        return self._effect(logging.critical, msg, stack_info, exc_info)

    def exception(
        self,
        msg: str,
        stack_info: bool = True,
        exc_info: Union[bool, ExcInfo] = True
    ) -> Success[None]:
        """
        Create an effect that calls built-in `logging.exception`
        (which logs at the ERROR level)
        Example:
            >>> Logging().exception('hello!').run(None)
            ERROR:root:hello!
        Args:
            msg: the log message
            stack_info: whether to include stack information in the \
            log message
            exc_info: whether to include exception info in the log message
        Return:
            `Effect` that calls `logging.exception` with `msg`
        """
        return self._effect(logging.exception, msg, stack_info, exc_info)
class HasLogging(Protocol):
    """
    Module provider for logging capability
    :type logging: Logging
    :attribute logging: The logging instance
    """
    # structural requirement: any environment object with a `logging`
    # attribute of type `Logging` satisfies this protocol
    logging: Logging
@add_repr
def get_logger(name: Optional[str] = None) -> Depends[HasLogging, Logger]:
    """
    Create an effect that produces a `Logger` by calling built-in
    logging.getLogger
    Example:
        >>> class Env:
        ...     logging = Logging()
        >>> get_logger('foo').and_then(
        ...     lambda logger: logger.info('hello!')
        ... ).run(Env())
        INFO:foo:hello!
    Args:
        name: name of logger
    Return:
        `Effect` that produces a `Logger`
    """
    # read the environment and delegate to its logging module; the
    # effect therefore needs an env satisfying HasLogging when run
    return depend().map(lambda env: env.logging.get_logger(name))
@curry
@add_repr
def debug(
    msg: str, stack_info: bool = False, exc_info: Union[bool, ExcInfo] = False
) -> Depends[HasLogging, None]:
    """
    Create an effect that calls built-in `logging.debug`
    Example:
        >>> class Env:
        ...     logging = Logging()
        >>> debug('hello!').run(Env())
        DEBUG:root:hello!
    Args:
        msg: the log message
        stack_info: whether to include stack information in the log message
        exc_info: whether to include exception info in the log message
    Return:
        `Effect` that calls `logging.debug` with `msg`
    """
    def delegate(env: HasLogging) -> Success[None]:
        # forward the call to the environment's logging module
        return env.logging.debug(
            msg, stack_info=stack_info, exc_info=exc_info
        )
    return depend().and_then(delegate)
@curry
@add_repr
def info(
    msg: str, stack_info: bool = False, exc_info: Union[bool, ExcInfo] = False
) -> Depends[HasLogging, None]:
    """
    Create an effect that calls built-in `logging.info`
    Example:
        >>> class Env:
        ...     logging = Logging()
        >>> info('hello!').run(Env())
        INFO:root:hello!
    Args:
        msg: the log message
        stack_info: whether to include stack information in the log message
        exc_info: whether to include exception info in the log message
    Return:
        `Effect` that calls `logging.info` with `msg`
    """
    def delegate(env: HasLogging) -> Success[None]:
        # forward the call to the environment's logging module
        return env.logging.info(
            msg, stack_info=stack_info, exc_info=exc_info
        )
    return depend().and_then(delegate)
@curry
@add_repr
def warning(
msg: str, stack_info: bool = False, exc_info: Union[bool, ExcInfo] = False
) -> Depends[HasLogging, None]:
"""
Create an effect that calls built-in `logging.warning`
Example:
>>> class Env:
... logging = Logging()
>>> warning('hello!').run(Env())
WARNING:root:hello!
Args:
msg: | |
#!/usr/bin/env python2
# Copyright 2016 <NAME>. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
builtins.py - Metadata for all builtins, and some implementations.
Metadata:
- Is used for lookup in cmd_exec.py
- Should be used for completion
- complete names of builtins
- complete flags they take
- handle aliases : . and source, [ and test
- Should be reflected in the contents of the 'help' builtin
NOTE: bash has help -d -m -s. Default is -s, like a man page.
Links on special builtins:
http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_14
- syntax errors in special builtins may cause the shell to abort, but NOT for
regular builtins?
"""
from __future__ import print_function
import termios # for read -n
import sys
from _devbuild.gen import osh_help # generated file
from _devbuild.gen.runtime_asdl import (
value_e, scope_e, span_e, builtin_e
)
from core import ui
from core import util
from frontend import args
from pylib import os_path
from osh import state
import libc
import posix_ as posix
# Special builtins can't be redefined by functions. On the other hand, 'cd'
# CAN be redefined.
#
# http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_14
# https://www.gnu.org/software/bash/manual/html_node/Special-Builtins.html
# Maps builtin name -> builtin_e id for POSIX "special" builtins.
_SPECIAL_BUILTINS = {
    ":": builtin_e.COLON,
    ".": builtin_e.DOT,
    "eval": builtin_e.EVAL,
    "exec": builtin_e.EXEC,
    "set": builtin_e.SET,
    "shift": builtin_e.SHIFT,
    #"times": builtin_e.TIMES,  # not implemented
    "trap": builtin_e.TRAP,
    "unset": builtin_e.UNSET,
    "builtin": builtin_e.BUILTIN,

    # Not treated as builtins by OSH. TODO: Need to auto-complete these
    # break continue return
}

# Builtins that may also behave as assignments (e.g. 'local x=1').
_SPECIAL_ASSIGN_BUILTINS = {
    # May be a builtin or an assignment
    "readonly": builtin_e.READONLY,
    "local": builtin_e.LOCAL,
    "declare": builtin_e.DECLARE,
    "typeset": builtin_e.TYPESET,
    "export": builtin_e.EXPORT,
}

# All remaining builtins.  NOTE(review): "declare"/"typeset" also appear in
# _SPECIAL_ASSIGN_BUILTINS, so they occur twice in BUILTIN_NAMES below —
# presumably harmless for completion, but confirm.
_NORMAL_BUILTINS = {
    "read": builtin_e.READ,
    "echo": builtin_e.ECHO,
    "printf": builtin_e.PRINTF,

    "cd": builtin_e.CD,
    "pushd": builtin_e.PUSHD,
    "popd": builtin_e.POPD,
    "dirs": builtin_e.DIRS,
    "pwd": builtin_e.PWD,

    "source": builtin_e.SOURCE,  # note that . alias is special
    "umask": builtin_e.UMASK,
    "wait": builtin_e.WAIT,
    "jobs": builtin_e.JOBS,
    "fg": builtin_e.FG,
    "bg": builtin_e.BG,

    "shopt": builtin_e.SHOPT,
    "complete": builtin_e.COMPLETE,
    "compgen": builtin_e.COMPGEN,
    "compopt": builtin_e.COMPOPT,
    "compadjust": builtin_e.COMPADJUST,

    "true": builtin_e.TRUE,
    "false": builtin_e.FALSE,

    "test": builtin_e.TEST,
    "[": builtin_e.BRACKET,

    "getopts": builtin_e.GETOPTS,

    "command": builtin_e.COMMAND,
    "type": builtin_e.TYPE,
    "hash": builtin_e.HASH,
    "help": builtin_e.HELP,
    "history": builtin_e.HISTORY,

    "declare": builtin_e.DECLARE,
    "typeset": builtin_e.TYPESET,

    "alias": builtin_e.ALIAS,
    "unalias": builtin_e.UNALIAS,

    # OSH only
    "repr": builtin_e.REPR,
    "push": builtin_e.PUSH,
    "use": builtin_e.USE,
    "json": builtin_e.JSON,
}

# This is used by completion.
# Wrapped in list() so this works on both Python 2 and Python 3: on Python 3
# dict.keys() returns a view, and views don't support the + operator.
BUILTIN_NAMES = (
    list(_SPECIAL_BUILTINS) + list(_SPECIAL_ASSIGN_BUILTINS) +
    list(_NORMAL_BUILTINS)
)
class BuiltinDef(object):
    """Registry from builtin name to its flag arg spec.

    NOTE: This isn't used anywhere! We're registering nothing.
    We want to complete the flags to builtins, so this maps a builtin name to
    its arg spec. There might not be any flags.
    """

    def __init__(self):
        # Is this what we want?
        known_names = set()
        for table in (_NORMAL_BUILTINS, _SPECIAL_BUILTINS, _SPECIAL_ASSIGN_BUILTINS):
            known_names.update(table)

        # TODO: Also complete keywords first for, while, etc.  Bash/zsh/fish/yash
        # all do this.  See osh/lex/{_KEYWORDS, _MORE_KEYWORDS}.

        self.arg_specs = {}
        self.to_complete = sorted(known_names)

    def Register(self, name, help_topic=None):
        # The help topics are in the quick ref.  TODO: We should match them up?
        #help_topic = help_topic or name
        spec = args.BuiltinFlags()
        self.arg_specs[name] = spec
        return spec
# Global instance for "metaprogramming" before main().
BUILTIN_DEF = BuiltinDef()


def _Register(name, help_topic=None):
    """Shorthand for registering a flag spec on the global BUILTIN_DEF."""
    return BUILTIN_DEF.Register(name, help_topic=help_topic)


def ResolveSpecial(argv0):
    """Is it a special builtin?"""
    try:
        return _SPECIAL_BUILTINS[argv0]
    except KeyError:
        return builtin_e.NONE


def ResolveAssign(argv0):
    """Is it an assignment builtin?"""
    try:
        return _SPECIAL_ASSIGN_BUILTINS[argv0]
    except KeyError:
        return builtin_e.NONE


def Resolve(argv0):
    """Is it any other builtin?"""
    try:
        return _NORMAL_BUILTINS[argv0]
    except KeyError:
        return builtin_e.NONE
#
# Implementation of builtins.
#
# The Read builtin splits using IFS.
#
# Summary:
# - Split with IFS, except \ can escape them! This is different than the
# algorithm for splitting words (at least the way I've represented it.)
# Bash manual:
# - If there are more words than names, the remaining words and their
# intervening delimiters are assigned to the last name.
# - If there are fewer words read from the input stream than names, the
# remaining names are assigned empty values.
# - The characters in the value of the IFS variable are used to split the line
# into words using the same rules the shell uses for expansion (described
# above in Word Splitting).
# - The backslash character '\' may be used to remove any special meaning for
# the next character read and for line continuation.
def _AppendParts(s, spans, max_results, join_next, parts):
    """Append the pieces of one split line onto `parts` (mutated in place).

    Args:
        s: The original string
        spans: List of (span, end_index)
        max_results: the maximum number of parts we want
        join_next: Whether to join the next span to the previous part.  This
            happens in two cases:
            - when we have '\\ '
            - and when we have more spans than max_results.

    Returns:
        (done, join_next): done is False when the line ends with a backslash
        span (line continuation), telling the caller to read another line and
        call this again with the returned join_next.
    """
    start_index = 0
    # If the last span was black, and we get a backslash, set join_next to merge
    # two black spans.
    last_span_was_black = False

    for span_type, end_index in spans:
        if span_type == span_e.Black:
            if join_next and parts:
                # Glue onto the previous part ('\ ' escape or over the cap).
                parts[-1] += s[start_index:end_index]
                join_next = False
            else:
                parts.append(s[start_index:end_index])
            last_span_was_black = True

        elif span_type == span_e.Delim:
            if join_next:
                # Delimiter text itself is kept when we're over the cap.
                parts[-1] += s[start_index:end_index]
                join_next = False
            last_span_was_black = False

        elif span_type == span_e.Backslash:
            if last_span_was_black:
                # '\' right after a black span escapes the following delimiter.
                join_next = True
            last_span_was_black = False

        # Once we've produced max_results parts, everything further is merged
        # into the last part (bash: leftover words go to the last name).
        if max_results and len(parts) >= max_results:
            join_next = True

        start_index = end_index

    done = True
    if spans:
        #log('%s %s', s, spans)
        #log('%s', spans[-1])
        last_span_type, _ = spans[-1]
        if last_span_type == span_e.Backslash:
            # Trailing backslash: line continuation, caller must keep reading.
            done = False

    #log('PARTS %s', parts)
    return done, join_next
# Flag spec for the 'read' builtin, consumed by READ_SPEC.ParseVec in
# Read.__call__ below.
READ_SPEC = _Register('read')
READ_SPEC.ShortFlag('-r')  # raw: don't treat backslash as an escape
READ_SPEC.ShortFlag('-n', args.Int)  # read this many bytes instead of a line
READ_SPEC.ShortFlag('-a', args.Str)  # name of array to read into
# sys.stdin.readline() in Python has buffering! TODO: Rewrite this tight loop
# in C? Less garbage probably.
# NOTE that dash, mksh, and zsh all read a single byte at a time. It appears
# to be required by POSIX? Could try libc getline and make this an option.
def ReadLineFromStdin():
    """Read one line from fd 0, one byte at a time, including the newline.

    Returns the empty string at EOF.  Byte-at-a-time reads avoid Python's
    stdin buffering (see the note above about dash/mksh/zsh doing the same).
    """
    pieces = []
    byte = posix.read(0, 1)
    while byte:
        pieces.append(byte)
        if byte == '\n':
            break
        byte = posix.read(0, 1)
    return ''.join(pieces)
class Read(object):
    """The 'read' builtin: read from stdin into shell variables.

    Two modes: -n N reads exactly N bytes into one variable; otherwise read
    line(s), split with IFS, and assign words to the given names (default
    REPLY), continuing past backslash line-continuations unless -r is given.
    """

    def __init__(self, splitter, mem):
        # splitter: provides SplitForRead (IFS-aware span splitting)
        self.splitter = splitter
        # mem: shell variable state (state.Set*Dynamic / SetLocalString)
        self.mem = mem

    def __call__(self, arg_vec):
        arg, i = READ_SPEC.ParseVec(arg_vec)
        names = arg_vec.strs[i:]  # variable names to assign into

        if arg.n is not None:  # read a certain number of bytes
            stdin = sys.stdin.fileno()
            try:
                name = names[0]
            except IndexError:
                name = 'REPLY'  # default variable name
            s = ""
            if sys.stdin.isatty():  # set stdin to read in unbuffered mode
                orig_attrs = termios.tcgetattr(stdin)
                attrs = termios.tcgetattr(stdin)
                # disable canonical (buffered) mode
                # see `man termios` for an extended discussion
                attrs[3] &= ~termios.ICANON
                try:
                    termios.tcsetattr(stdin, termios.TCSANOW, attrs)
                    # posix.read always returns a single character in unbuffered mode
                    while arg.n > 0:
                        s += posix.read(stdin, 1)
                        arg.n -= 1
                finally:
                    # Always restore the original terminal attributes.
                    termios.tcsetattr(stdin, termios.TCSANOW, orig_attrs)
            else:
                s_len = 0  # NOTE(review): unused local — confirm before removing
                while arg.n > 0:
                    buf = posix.read(stdin, arg.n)
                    # EOF
                    if buf == '':
                        break
                    arg.n -= len(buf)
                    s += buf

            state.SetLocalString(self.mem, name, s)
            # NOTE: Even if we don't get n bytes back, there is no error?
            return 0

        if not names:
            names.append('REPLY')

        # leftover words assigned to the last name
        if arg.a:
            max_results = 0  # no max
        else:
            max_results = len(names)

        # We have to read more than one line if there is a line continuation (and
        # it's not -r).
        parts = []
        join_next = False
        while True:
            line = ReadLineFromStdin()
            #log('LINE %r', line)
            if not line:  # EOF
                status = 1
                break

            if line.endswith('\n'):  # strip trailing newline
                line = line[:-1]
                status = 0
            else:
                # odd bash behavior: fail even if we can set variables.
                status = 1

            spans = self.splitter.SplitForRead(line, not arg.r)
            done, join_next = _AppendParts(line, spans, max_results, join_next, parts)

            #log('PARTS %s continued %s', parts, continued)
            if done:
                break

        if arg.a:
            state.SetArrayDynamic(self.mem, arg.a, parts)
        else:
            for i in xrange(max_results):
                try:
                    s = parts[i]
                except IndexError:
                    s = ''  # if there are too many variables
                #log('read: %s = %s', names[i], s)
                state.SetStringDynamic(self.mem, names[i], s)

        return status
# Flag spec for the 'cd' builtin: -L (logical) and -P (physical) are accepted.
CD_SPEC = _Register('cd')
CD_SPEC.ShortFlag('-L')
CD_SPEC.ShortFlag('-P')
class Cd(object):
def __init__(self, mem, dir_stack, ex, errfmt):
self.mem = mem
self.dir_stack = dir_stack
self.ex = ex # To run blocks
self.errfmt = errfmt
def __call__(self, cmd_val):
arg, i = CD_SPEC.ParseCmdVal(cmd_val)
try:
dest_dir = cmd_val.argv[i]
except IndexError:
val = self.mem.GetVar('HOME')
if val.tag == value_e.Undef:
self.errfmt.Print("$HOME isn't defined")
return 1
elif val.tag == value_e.Str:
dest_dir = val.s
elif val.tag == value_e.MaybeStrArray:
# User would have to unset $HOME to get rid of exported flag
self.errfmt.Print("$HOME shouldn't be an array")
return 1
if dest_dir == '-':
old = self.mem.GetVar('OLDPWD', scope_e.GlobalOnly)
if old.tag == value_e.Undef:
self.errfmt.Print('$OLDPWD not set')
return 1
elif old.tag == value_e.Str:
dest_dir = old.s
| |
},
"outputs": [
{
"data": {
"text/plain": [
"[(1, 'Aliquid iste optio reiciendi', 0, 0, 10, 1, 1, 1, 1),\n",
" (2, 'Optio dolorem ex a', 0, 0, 10, 1, 1, 1, 1),\n",
" (3, 'Minus c', 0, 0, 10, 1, 1, 1, 1),\n",
" (4, 'Sit ut repr', 0, 0, 10, 1, 1, 1, 1),\n",
" (5, 'At id recusandae expl', 0, 0, 10, 1, 1, 1, 1),\n",
" (6, 'Non nobis et of', 0, 0, 10, 1, 1, 1, 1),\n",
" (7, 'Perferendis', 0, 0, 10, 1, 1, 1, 1),\n",
" (8, 'Accusantium amet quidem eve', 0, 0, 10, 1, 1, 1, 1),\n",
" (9, 'Sed nostrum inventore error m', 0, 0, 10, 1, 1, 1, 1),\n",
" (10, 'Harum repellendus omnis od', 0, 0, 10, 1, 1, 1, 1),\n",
" (11, 'Itaque ut commodi,', 0, 0, 10, 1, 1, 1, 1),\n",
" (12, 'Molestiae quis', 0, 0, 10, 1, 1, 1, 1),\n",
" (13, 'Ali', 0, 0, 10, 1, 1, 1, 1),\n",
" (14, 'Tempora quod optio possimus il', 0, 0, 10, 1, 1, 1, 1),\n",
" (15, 'Sed itaque beatae pari', 0, 0, 10, 1, 1, 1, 1),\n",
" (16, 'Quam dolor', 0, 0, 10, 1, 1, 1, 1),\n",
" (17, 'Molestias expedita', 0, 0, 10, 1, 1, 1, 1),\n",
" (18, 'Lauda', 0, 0, 10, 1, 1, 1, 1),\n",
" (19, 'Incidunt sint perferen', 0, 0, 10, 1, 1, 1, 1),\n",
" (20, 'Laboriosa', 0, 0, 10, 1, 1, 1, 1),\n",
" (21, 'Dolore esse nesciunt fugit com', 0, 0, 10, 1, 1, 1, 1),\n",
" (22, 'Dolorum nam reic', 0, 0, 10, 1, 1, 1, 1),\n",
" (23, 'Repellat ad numquam volu', 0, 0, 10, 1, 1, 1, 1),\n",
" (24, 'Facere enim velit eligend', 0, 0, 10, 1, 1, 1, 1),\n",
" (25, 'Sed ratione quis rep', 0, 0, 10, 1, 1, 1, 1),\n",
" (26, 'Doloribus neque', 0, 0, 10, 1, 1, 1, 1),\n",
" (27, 'Ab voluptas se', 0, 0, 10, 1, 1, 1, 1),\n",
" (28, 'Molestias m', 0, 0, 10, 1, 1, 1, 1),\n",
" (29, 'In pariatur corpori', 0, 0, 10, 1, 1, 1, 1),\n",
" (30, 'Possimus ad dignissimos vel, a', 0, 0, 10, 1, 1, 1, 1),\n",
" (31, 'At minus accusa', 0, 0, 10, 1, 1, 1, 1),\n",
" (32, 'Ad necess', 0, 0, 10, 1, 1, 1, 1),\n",
" (33, 'Expedita c', 0, 0, 10, 1, 1, 1, 1),\n",
" (34, 'Voluptates sunt voluptas volu', 0, 0, 10, 1, 1, 1, 1),\n",
" (35, 'Autem mollitia fuga lauda', 0, 0, 10, 1, 1, 1, 1),\n",
" (36, 'Sint quibusdam ob', 0, 0, 10, 1, 1, 1, 1),\n",
" (37, 'Rerum et o', 0, 0, 10, 1, 1, 1, 1),\n",
" (38, 'Doloribus dolore r', 0, 0, 10, 1, 1, 1, 1),\n",
" (39, 'Eaque su', 0, 0, 10, 1, 1, 1, 1),\n",
" (40, 'Vel molestias numqua', 0, 0, 10, 1, 1, 1, 1),\n",
" (41, 'Iste assumenda repellat q', 0, 0, 10, 1, 1, 1, 1),\n",
" (42, 'Animi labo', 0, 0, 10, 1, 1, 1, 1),\n",
" (43, 'Eum culpa eaque ea omn', 0, 0, 10, 1, 1, 1, 1),\n",
" (44, 'Harum provident vel quam', 0, 0, 10, 1, 1, 1, 1),\n",
" (45, 'Aspe', 0, 0, 10, 1, 1, 1, 1),\n",
" (46, 'Nisi nequ', 0, 0, 10, 1, 1, 1, 1),\n",
" (47, 'Quod tempora', 0, 0, 10, 1, 1, 1, 1),\n",
" (48, 'Porro aliq', 0, 0, 10, 1, 1, 1, 1),\n",
" (49, 'Quas', 0, 0, 10, 1, 1, 1, 1),\n",
" (50, 'Magnam eligendi quia animi', 0, 0, 10, 1, 1, 1, 1),\n",
" (51, 'Officiis se', 0, 0, 10, 1, 1, 1, 1),\n",
" (52, 'Id assumend', 0, 0, 10, 1, 1, 1, 1),\n",
" (53, 'Voluptatibus fu', 0, 0, 10, 1, 1, 1, 1),\n",
" (54, 'Odit rat', 0, 0, 10, 1, 1, 1, 1),\n",
" (55, 'Debit', 0, 0, 10, 1, 1, 1, 1),\n",
" (56, 'Cum aut quas repudia', 0, 0, 10, 1, 1, 1, 1),\n",
" (57, 'Deleniti qui quae quidem', 0, 0, 10, 1, 1, 1, 1),\n",
" (58, 'Adipisci voluptas', 0, 0, 10, 1, 1, 1, 1),\n",
" (59, 'Debitis sit ratione eos nam', 0, 0, 10, 1, 1, 1, 1),\n",
" (60, 'Esse illo molestias archi', 0, 0, 10, 1, 1, 1, 1),\n",
" (61, 'Sunt at itaque voluptatum d', 0, 0, 10, 1, 1, 1, 1),\n",
" (62, 'Est totam', 0, 0, 10, 1, 1, 1, 1),\n",
" (63, 'Reprehenderit commodi eius', 0, 0, 10, 1, 1, 1, 1),\n",
" (64, 'Debit', 0, 0, 10, 1, 1, 1, 1),\n",
" (65, 'Soluta dol', 0, 0, 10, 1, 1, 1, 1),\n",
" (66, 'Vel nesc', 0, 0, 10, 1, 1, 1, 1),\n",
" (67, 'Ratione quia ali', 0, 0, 10, 1, 1, 1, 1),\n",
" (68, 'Rerum recusandae minima', 0, 0, 10, 1, 1, 1, 1),\n",
" (69, 'Totam natus eius fugiat volu', 0, 0, 10, 1, 1, 1, 1),\n",
" (70, 'Perferendis commodi null', 0, 0, 10, 1, 1, 1, 1),\n",
" (71, 'Laudantiu', 0, 0, 10, 1, 1, 1, 1),\n",
" (72, 'Voluptat', 0, 0, 10, 1, 1, 1, 1),\n",
" (73, 'Incidunt nesciun', 0, 0, 10, 1, 1, 1, 1),\n",
" (74, 'Illum amet vero', 0, 0, 10, 1, 1, 1, 1),\n",
" (75, 'Suscipit exercitationem re', 0, 0, 10, 1, 1, 1, 1),\n",
" (76, 'Quas enim error maxime nisi m', 0, 0, 10, 1, 1, 1, 1),\n",
" (77, 'Labore qu', 0, 0, 10, 1, 1, 1, 1),\n",
" (78, 'Repudiandae deleniti unde', 0, 0, 10, 1, 1, 1, 1),\n",
" (79, 'Ut do', 0, 0, 10, 1, 1, 1, 1),\n",
" (80, 'Quaerat esse labore q', 0, 0, 10, 1, 1, 1, 1),\n",
" (81, 'Quidem aliq', 0, 0, 10, 1, 1, 1, 1),\n",
" (82, 'Aperiam vitae eos dolor sed', 0, 0, 10, 1, 1, 1, 1),\n",
" (83, 'Minus nobis porro', 0, 0, 10, 1, 1, 1, 1),\n",
" (84, 'In similique', 0, 0, 10, 1, 1, 1, 1),\n",
" (85, 'Culpa repellat unde', 0, 0, 10, 1, 1, 1, 1),\n",
" (86, 'Architecto i', 0, 0, 10, 1, 1, 1, 1),\n",
" (87, 'A sed pariatur qua', 0, 0, 10, 1, 1, 1, 1),\n",
" (88, 'Tempore assumenda aperiam', 0, 0, 10, 1, 1, 1, 1),\n",
" (89, 'Sed ullam tempora iusto co', 0, 0, 10, 1, 1, 1, 1),\n",
" (90, 'Ipsa', 0, 0, 10, 1, 1, 1, 1),\n",
" (91, 'Fugiat incidun', 0, 0, 10, 1, 1, 1, 1),\n",
" (92, 'Molestiae of', 0, 0, 10, 1, 1, 1, 1),\n",
" (93, 'Quae quisquam cons', 0, 0, 10, 1, 1, 1, 1),\n",
" (94, 'Repellendus ea non facil', 0, 0, 10, 1, 1, 1, 1),\n",
" (95, 'Quod non quibu', 0, 0, 10, 1, 1, 1, 1),\n",
" (96, 'Numquam velit distinctio', 0, 0, 10, 1, 1, 1, 1),\n",
" (97, 'Necessitatibus nihil ex debi', 0, 0, 10, 1, 1, 1, 1),\n",
" (98, 'Velit tempore nemo, na', 0, 0, 10, 1, 1, 1, 1),\n",
" (99, 'Nesciunt v', 0, 0, 10, 1, 1, 1, 1),\n",
" (100, 'Dicta enim debitis accusantiu', 0, 0, 10, 1, 1, 1, 1),\n",
" (101, 'Vitae a', 0, 0, 10, 1, 1, 1, 1),\n",
" (102, 'Praesentium voluptas u', 0, 0, 10, 1, 1, 1, 1),\n",
" (103, 'Unde ullam mollitia? Nu', 0, 0, 10, 1, 1, 1, 1),\n",
" (104, 'Neque molestias qu', 0, 0, 10, 1, 1, 1, 1),\n",
" (105, 'Officiis es', 0, 0, 10, 1, 1, 1, 1),\n",
" (106, 'Beatae mi', 0, 0, 10, 1, 1, 1, 1),\n",
" (107, 'Mollitia nam corporis temp', 0, 0, | |
self.cmbSegments.clear()
seasonOn = 1
if (self.cmbSeries.currentIndex() != 0):
series = self.cmbSeries.currentText()
episodes = self.d.getEpisodesBySeries(series)
for segment in self.d.getSegments():
name = segment[0]
length = segment[1]
if (name == 'Sleep'):
toAdd = name + " - " + str(length / 60) + " hours"
else:
toAdd = name + " - " + str(length) + " mins"
self.cmbSegments.addItem(toAdd)
self.cmbSegments.addItem('--------------------')
for episode in episodes:
season = episode[2]
episodeNo = episode[3]
name = episode[4]
length = episode[5]
if (season != seasonOn):
self.cmbSegments.addItem('--------------------')
seasonOn += 1
if (name == "Season " + str(season) + " Episode " + str(episodeNo)):
name = ""
else:
name = ": " + name
toAdd = series + " - S" + str(season) + "E" + str(episodeNo) + name + " - " + str(length) + " mins"
self.cmbSegments.addItem(toAdd)
else:
self.cmbSegments.addItem('Select a segment...')
def save(self):
name = self.edtName.text()
chosenSeries = (self.cmbSeries.currentIndex() != 0)
addedSegment = (self.lstSegments.count() > 0)
addedEpisode = self.episodeAdded()
enteredName = (len(name) > 0)
if not chosenSeries:
QtWidgets.QMessageBox.warning(self, 'Choose a series!', 'Choose a series!',
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.Ok)
if not addedSegment:
QtWidgets.QMessageBox.warning(self, 'Choose at least one segment!', 'Choose at least one segment!',
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.Ok)
if not addedEpisode:
QtWidgets.QMessageBox.warning(self, 'Choose at least one episode!', 'Choose at least one episode!',
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.Ok)
if not enteredName:
QtWidgets.QMessageBox.warning(self, 'Enter a namet!', 'Enter a name!',
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.Ok)
if chosenSeries and addedSegment and addedEpisode and enteredName:
foodLength = self.lstFood.count()
addedFood = (foodLength != 0)
foodContinue = addedFood
if not foodContinue:
yesButton = 16384
result = QtWidgets.QMessageBox.warning(self, 'Add food?', 'No food added! Continue?',
QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Yes,
QtWidgets.QMessageBox.No)
foodContinue = (result == yesButton)
if foodContinue:
series = self.cmbSeries.currentText()
length = self.lblLength.text().split(':')[1].lstrip()
cost = ''.join(c for c in self.lblCost.text().split(':')[1].lstrip() if c.isnumeric() or c == '.')
bingeID = self.d.saveBinge(series, name, length, cost, 'Normal')
for i in range(self.lstSegments.count()):
segment = self.lstSegments.item(i).text()
split = segment.split('-')
if (len(split) == 3):
segName = segment.split(':')[0]
else:
segName = segment.split('-')[0].rstrip()
segLength = int(segment.split()[-2])
self.d.saveBingeSegment(bingeID, segName, segLength)
for i in range(self.lstFood.count()):
food = self.lstFood.item(i).text()
split = food.split()
item = split[0]
shop = split[2]
price = ''.join(c for c in split[4] if c.isnumeric() or c == '.')
self.d.saveBingeShop(bingeID, item, shop, price)
QtWidgets.QMessageBox.information(self, 'Saved!', 'Saved!',
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.Ok)
self.edtName.setText('')
self.cmbSeries.clear()
self.cmbSeries.addItem('Select a series...')
self.cmbSegments.clear()
self.cmbSegments.addItem('Select a segment...')
self.lstSegments.clear()
self.lstFood.clear()
self.updateLabels()
self.edtName.setFocus()
    def back(self):
        """Close the add-binge window and return to the main menu."""
        # NOTE(review): `l` appears to be a module-level launcher object — confirm.
        l.addBinge.close()
        l.launchMenu()
def keyPressEvent(self, event):
ENTER_KEY = 16777220
if (event.key() == ENTER_KEY):
self.save()
else:
super().keyPressEvent(event)
class ViewBinge(QtWidgets.QMainWindow, viewBingeUi):
    """Window for browsing previously saved binges, filtered by series."""

    def __init__(self, parent=None):
        QtWidgets.QMainWindow.__init__(self, parent)
        self.setupUi(self)
        self.d = Databases()
        # Binge ids parallel to cmbBinges entries, offset by the placeholder row.
        self.bIDs = []
        for series in self.d.getSeriesNames():
            self.cmbSeries.addItem(series)
        self.cmbSeries.currentIndexChanged.connect(self.findBinges)
        self.btnGetDetails.clicked.connect(self.getBingeDetails)
        self.btnBack.clicked.connect(self.back)
        self.updateLabels()

    def updateLabels(self):
        """Refresh the estimated length/cost labels from the list widgets."""
        if (self.lstSegments.count() == 0):
            self.lblLength.setText("Estimated Length: 0 Days 0 Hours 0 Mins")
            self.lblLength.repaint()
        else:
            self.lblLength.setText("Estimated Length: " + self.getCurrentLength())
            self.lblLength.repaint()
        if (self.lstFood.count() == 0):
            self.lblCost.setText("Estimated Cost: " + self.d.symbol + "0.00")
            self.lblCost.repaint()
        else:
            self.lblCost.setText("Estimated Cost: " + self.d.symbol + self.getCurrentCost())
            self.lblCost.repaint()

    def getCurrentCost(self):
        """Sum the trailing price field of every food row, formatted to 2 dp."""
        cost = 0.0
        for i in range(self.lstFood.count()):
            cost += float(''.join(c for c in self.lstFood.item(i).text().split('-')[-1] if c.isnumeric() or c == '.'))
        return '{0:,.2f}'.format(cost)

    def getCurrentLength(self):
        """Total all segment lengths (minutes) and render as 'D Days H Hours M Mins'."""
        length = 0
        minsInHour = 60
        hoursInDay = 24
        minsInDay = minsInHour * hoursInDay
        for i in range(self.lstSegments.count()):
            temp = self.lstSegments.item(i).text().split('-')[-1].lstrip()
            mult = 1
            if (temp.split()[1].lstrip().rstrip() == 'hours'):
                mult = 60  # sleep segments are displayed in hours
            length += math.ceil(float(temp.split()[0]) * mult)
        days = math.floor(length / minsInDay)
        length -= (days * minsInDay)
        hours = math.floor(length / minsInHour)
        length -= (hours * minsInHour)
        mins = length
        result = str(days) + " Days " + str(hours) + " Hours " + str(mins) + " Mins"
        return result

    def getBingeDetails(self):
        """Populate the segment and food lists for the selected binge."""
        name = self.cmbBinges.currentText()
        if (name != 'Select a binge...'):
            self.lstSegments.clear()
            self.lstFood.clear()
            # Index 0 is the 'Select a binge...' placeholder, hence the -1.
            bingeID = self.bIDs[self.cmbBinges.currentIndex() - 1]
            segments = self.d.getSegmentsByBingeID(bingeID)
            food = self.d.getFoodByBingeID(bingeID)
            for segment in segments:
                segType = segment[3]
                if segType == 'Episode':
                    # Episode names are stored like "Series - S1E2"; re-expand
                    # them into the full display row.
                    epTitle = segment[1]
                    split = epTitle.split()
                    series = split[0]
                    season = int(split[2].split('S')[1].split('E')[0])
                    episode = int(split[2].split('E')[1])
                    data = self.d.getEpisode(series, season, episode)
                    name = data[0]
                    length = data[1]
                    if (name == "Season " + str(season) + " Episode " + str(episode)):
                        name = ""
                    else:
                        name = ": " + name
                    toAdd = series + " - S" + str(season) + "E" + str(episode) + name + " - " + str(length) + " mins"
                else:
                    name = segment[1]
                    if name == 'Sleep':
                        # Stored in minutes; displayed in hours.
                        toAdd = name + " - " + str(segment[2] / 60) + " hours"
                    else:
                        toAdd = name + " - " + str(segment[2]) + " mins"
                self.lstSegments.addItem(toAdd)
            for item in food:
                name = item[1]
                shop = item[2]
                cost = item[3]
                toAdd = name + " - " + shop + " - " + self.d.getSymbol() + '{0:,.2f}'.format(float(cost))
                self.lstFood.addItem(toAdd)
            self.updateLabels()

    def findBinges(self):
        """Repopulate the binge combo box for the currently selected series."""
        self.cmbBinges.clear()
        self.bIDs = []
        if (self.cmbSeries.currentIndex() != 0):
            series = self.cmbSeries.currentText()
            bingeNames = self.d.getBingeNamesBySeries(series)
            if (len(bingeNames) > 0):
                # BUG FIX: getBingeDetails indexes self.bIDs with
                # currentIndex() - 1, assuming a placeholder row at index 0.
                # Previously no placeholder was added when binges existed, so
                # selecting the first binge showed the LAST binge's details.
                self.cmbBinges.addItem('Select a binge...')
                for name in bingeNames:
                    self.bIDs.append(name[0])
                    self.cmbBinges.addItem(name[1])
            else:
                self.cmbBinges.addItem('Select a binge...')
        else:
            self.cmbBinges.addItem('Select a binge...')
        self.lstSegments.clear()
        self.lstFood.clear()
        self.updateLabels()

    def back(self):
        """Close this window and return to the main menu."""
        # NOTE(review): `l` appears to be a module-level launcher object — confirm.
        l.viewBinge.close()
        l.launchMenu()
class DesignBinge(QtWidgets.QMainWindow, designUi):
    def __init__(self, parent=None):
        """Build the design-binge window and wire up its signals."""
        QtWidgets.QMainWindow.__init__(self, parent)
        self.setupUi(self)
        self.d = Databases()  # persistence layer
        # NOTE(review): unlike the other windows, no 'Select a series...'
        # placeholder is added here, yet make() treats index 0 as "no series
        # chosen" — confirm setupUi provides the placeholder.
        for series in self.d.getSeriesNames():
            self.cmbSeries.addItem(series)
        self.getFood()  # populate the food combo box
        self.btnBack.clicked.connect(self.back)
        self.btnMake.clicked.connect(self.make)
        self.btnAddFood.clicked.connect(self.addFood)
        self.btnRemoveFood.clicked.connect(self.removeFood)
def getFood(self):
for food in self.d.getShoppingData():
name = food[0]
shop = food[1]
cost = food[2]
toAdd = name + " - " + shop + " - " + self.d.symbol + ('{0:,.2f}'.format(float(cost)))
self.cmbFood.addItem(toAdd)
def addFood(self):
if (self.cmbFood.currentIndex() != 0):
self.lstFood.addItem(self.cmbFood.currentText())
def removeFood(self):
listItems = self.lstFood.selectedItems()
if not listItems:
QtWidgets.QMessageBox.warning(self, 'Please select a food item!', 'Please select a food item!',
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.Ok)
return
for item in listItems:
self.lstFood.takeItem(self.lstFood.row(item))
def make(self):
chosenSeries = (self.cmbSeries.currentIndex() != 0)
name = self.edtName.text()
validName = (len(name) > 0)
breakView = self.spnTimeWithoutBreak.value()
breakLength = self.spnBreakLength.value()
withoutSleep = self.spnTimeWithoutSleep.value()
sleepLength = self.spnSleepLength.value()
validBreakTime = (breakView > 0)
validBreakLength = (breakLength > 0)
validSleepTime = (withoutSleep > 0)
validSleepLength = (sleepLength > 0)
if not chosenSeries:
QtWidgets.QMessageBox.warning(self, 'Choose a series!', 'Choose a series!',
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.Ok)
if not validName:
QtWidgets.QMessageBox.warning(self, 'Enter a name!', 'Enter a name!',
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.Ok)
if not validBreakTime:
QtWidgets.QMessageBox.warning(self, 'Select a time without a break!', 'Select a time without a break!',
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.Ok)
if not validBreakLength:
QtWidgets.QMessageBox.warning(self, 'Select a break length!', 'Select a break length!',
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.Ok)
if not validSleepTime:
QtWidgets.QMessageBox.warning(self, 'Select a time without sleep!', 'Select a time without sleep!',
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.Ok)
if not validSleepLength:
QtWidgets.QMessageBox.warning(self, 'Select a sleep length!', 'Select a sleep length!',
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.Ok)
if chosenSeries and validName and validBreakTime and validBreakLength and validSleepTime and validSleepLength:
foodLength = self.lstFood.count()
addedFood = (foodLength != 0)
foodContinue = addedFood
if not foodContinue:
yesButton = 16384
result = QtWidgets.QMessageBox.warning(self, 'Add food?', 'No food added! Continue?',
QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
foodContinue = (result == yesButton)
if foodContinue:
series = self.cmbSeries.currentText()
minLength = self.d.getSeriesMinLength(series)
minsInHour = 60
breakTot = (math.floor(minLength / breakView) * breakLength)
breakTot += math.ceil((math.floor(minLength / (withoutSleep * minsInHour)) * (sleepLength * minsInHour)))
length = minLength + breakTot
cost = 0.0
for i in range(self.lstFood.count()):
foodItem = self.lstFood.item(i).text()
cost += float(''.join(c for c in foodItem.split()[4] if c.isnumeric() or c == '.'))
bingeID = self.d.saveBinge(series, name, length, cost, 'Design')
for i in range(self.lstFood.count()):
foodItem = self.lstFood.item(i).text()
split = foodItem.split()
name = split[0]
shop = split[2]
cost = ''.join(c for c in split[4] if c.isnumeric() or c == '.')
self.d.saveBingeShop(bingeID, name, shop, cost)
episodes = self.d.getEpisodesBySeries(series)
segments = []
episodeCount = 0
done = False
timeSinceBreak = 0
timeSinceSleep = 0
self.d.saveSegment('Sleep', sleepLength * minsInHour)
self.d.saveSegment('Break', breakLength)
while not done:
episode = episodes[episodeCount]
season = episode[2]
episodeNo = episode[3]
name = episode[4]
length = episode[5]
if (name == "Season " + str(season) + " Episode " + str(episodeNo)):
name = ""
else:
name = ": " + name
toAdd = series + " - S" + str(season) + "E" + str(episodeNo) + name + " - " + str(length) + " mins"
segments.append(toAdd)
timeSinceBreak += length
timeSinceSleep += length
if episodeCount < len(episodes):
if timeSinceSleep >= (withoutSleep * minsInHour):
toAdd = "Sleep - " + str(sleepLength) + " hours"
timeSinceSleep = 0
timeSinceBreak = 0
segments.append(toAdd)
elif timeSinceBreak >= breakView:
toAdd = "Break - " + str(breakLength) + " mins"
timeSinceBreak = 0
segments.append(toAdd)
episodeCount += 1
if (episodeCount == len(episodes)):
done | |
# <gh_stars>10-100  (scraper artifact — kept as a comment so the file parses)
import glob
import math
import os.path as osp
import pickle
import random
import gin
import numpy as np
import torch
import torch.nn.functional as F
from absl import flags
from skimage.measure import regionprops
from torch.utils.data import Dataset
from . import image_utils
from . import transformations
curr_path = osp.dirname(osp.abspath(__file__))
cache_path = osp.join(curr_path, '..', 'misc', 'cachedir')
opts = flags.FLAGS
def quaternion_from_matrix(matrix, isprecise=False):
    """Return the quaternion (w, x, y, z) for a rotation matrix.

    Args:
        matrix: array-like rotation matrix; only the upper-left 4x4 block is
            used.  The precise path additionally requires M[3, 3] (so a full
            4x4 homogeneous matrix).
        isprecise: if True, assume `matrix` is a precise rotation matrix and
            use the faster trace-based algorithm; otherwise use the robust
            symmetric-K eigenvector method, which tolerates slightly
            non-orthogonal input.

    Returns:
        (4,) float64 ndarray, sign-normalized so the scalar part w >= 0.
    """
    # BUG FIX: np.array(..., copy=False) raises under NumPy 2.x whenever a
    # copy is unavoidable (e.g. a plain nested-list input); np.asarray keeps
    # the old "copy only if needed" semantics on both NumPy 1.x and 2.x.
    M = np.asarray(matrix, dtype=np.float64)[:4, :4]
    if isprecise:
        q = np.empty((4,))
        t = np.trace(M)
        if t > M[3, 3]:
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            # Pick the dominant diagonal element to keep the sqrt well-conditioned.
            i, j, k = 0, 1, 2
            if M[1, 1] > M[0, 0]:
                i, j, k = 1, 2, 0
            if M[2, 2] > M[i, i]:
                i, j, k = 2, 0, 1
            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
            q = q[[3, 0, 1, 2]]
        q *= 0.5 / math.sqrt(t * M[3, 3])
    else:
        # Symmetric 4x4 matrix K whose dominant eigenvector is the quaternion.
        K = np.array([
            [M[0, 0] - M[1, 1] - M[2, 2], 0.0, 0.0, 0.0],
            [M[0, 1] + M[1, 0], M[1, 1] - M[0, 0] - M[2, 2], 0.0, 0.0],
            [M[0, 2] + M[2, 0], M[1, 2] + M[2, 1],
             M[2, 2] - M[0, 0] - M[1, 1], 0.0],
            [M[2, 1] - M[1, 2], M[0, 2] - M[2, 0], M[1, 0] - M[0, 1],
             M[0, 0] + M[1, 1] + M[2, 2]],
        ]) / 3.0
        # np.linalg.eigh reads the lower triangle by default, which is where
        # K is filled in.
        w, V = np.linalg.eigh(K)
        q = V[[3, 0, 1, 2], np.argmax(w)]
    if q[0] < 0.0:
        np.negative(q, q)
    return q
@gin.configurable
class TigDogDataset_MultiFrame(Dataset):
"""TigDog dataset."""
    def __init__(self, root, category, sample_to_vid, samples_per_vid, num_frames=2, transforms=None, normalize=True,
                 remove_neck_kp=True, split='train', img_size=256, mirror=False, scale=True, crop=True, offset=5,
                 padding_frac=0.05, tight_bboxes=True, v2_crop=True, sequential=False):
        """
        Args:
            root: directory holding one sub-directory of per-sample .pkl files
                per category.
            category: animal category name (e.g. 'cow'); selects the keypoint
                permutation used for mirroring.
            sample_to_vid: maps sample index -> video id.
            samples_per_vid: maps video id -> list of sample indices.
            num_frames: number of frames returned per item.
            transforms: if not None, a RandomAffine augmentation is created.
            offset: neighborhood half-width for sampling extra frames.
        """
        print('Num frames:', num_frames)
        self.mirror = mirror
        self.sequential = sequential  # sequential: take the next frame instead of a random neighbor
        self.tight_bboxes = tight_bboxes  # recompute boxes from segmentation masks
        self.v2_crop = v2_crop
        self.category = category
        self.root_dir = root
        self.num_frames = num_frames
        self.normalize = normalize
        # Sorted array of all per-sample pickle paths for this category.
        self.file_paths = glob.glob(self.root_dir + category + '/*.pkl')
        self.file_paths.sort()
        self.file_paths = np.array(self.file_paths)
        self.transforms = transforms
        self.scale = scale
        self.crop = crop
        self.split = split
        self.img_size = img_size
        self.sample_to_vid = sample_to_vid
        self.samples_per_vid = samples_per_vid
        # neck keypoint is inconsistent wrt to orientation
        self.remove_neck_kp = remove_neck_kp
        # NOTE(review): hard-coded to 3, silently overriding the `offset`
        # parameter (default 5) — confirm this is intentional.
        self.offset = 3
        # Left/right keypoint swap indices applied when mirroring.
        if category == 'cow':
            self.kp_perm = np.array([1, 0, 2, 4, 3, 5, 6, 7, 9, 8, 11, 10, 13, 12, 15, 14])
        else:
            self.kp_perm = np.array([2, 1, 3, 5, 4, 7, 6, 8, 10, 9, 12, 11, 14, 13, 16, 15, 18, 17, 19]) - 1
        self.jitter_frac = 0
        self.padding_frac = padding_frac
        # NOTE(review): stored as `self.transform` (singular) while the raw
        # argument is kept in `self.transforms` — confirm which one callers use.
        if transforms is not None:
            self.transform = transformations.RandomAffine(scale=(0.8, 1.05), translate=(0.05, 0.05), resample=3)
def __len__(self):
return len(self.file_paths)
def centralize(self, img1, img2):
rgb_mean = torch.cat((img1, img2), 2)
rgb_mean = rgb_mean.view(rgb_mean.shape[0], 3, -1).mean(2)
rgb_mean = rgb_mean.view(rgb_mean.shape[0], 3, 1, 1)
return img1 - rgb_mean, img2 - rgb_mean, rgb_mean
def __getitem__(self, idx_loader):
file_path = self.root_dir + self.category + '/' + str(idx_loader) + '.pkl'
idx_loader = file_path.split('/')[-1]
idx_loader = int(idx_loader.replace('.pkl', ''))
vid = self.sample_to_vid[idx_loader]
samples = self.samples_per_vid[vid].copy()
if self.sequential:
frames = [idx_loader]
if self.num_frames > 1:
frames += [min(idx_loader + 1, len(samples) + samples[0] - 1)]
else:
idx_loader_pose = samples.index(idx_loader)
of_left = max(idx_loader_pose - self.offset - 1, 0)
of_right = min(idx_loader_pose + self.offset - 1, len(samples))
samples = samples[of_left:of_right]
samples.remove(idx_loader)
frames = [idx_loader]
if self.num_frames > 1:
frames += random.sample(samples, self.num_frames - 1)
frames.sort()
images = []
segmentations = []
bboxes = []
landmarks = []
sfm_poses = []
for f in frames:
sample = pickle.load(open(self.root_dir + self.category + '/' + str(f) + '.pkl', 'rb'))
images.append(sample['video'])
segmentations.append(sample['segmentations'])
bboxes.append(sample['bboxes'])
landmarks.append(sample['landmarks'])
sfm_poses.append(sample['sfm_poses'])
images = np.array(images)
segmentations = np.array(segmentations)
bboxes = np.array(bboxes)
landmarks = np.array(landmarks)
sfm_poses = np.array(sfm_poses)
if self.tight_bboxes:
bboxes_new = []
for s in segmentations:
rp = regionprops(s.astype(np.uint8))
bbox = rp[0].bbox
bboxes_new.append(bbox)
bboxes = np.array(bboxes_new)
bboxes_ = bboxes.copy()
bboxes[:, 0] = bboxes[:, 1]
bboxes[:, 1] = bboxes_[:, 0]
bboxes[:, 2] = bboxes[:, 3]
bboxes[:, 3] = bboxes_[:, 2]
bboxes = image_utils.peturb_bbox(
np.array(bboxes), pf=self.padding_frac, jf=0)
bboxes = image_utils.square_bbox(bboxes)
if self.crop:
# crop image around bbox, translate kps
images, segmentations, landmarks, sfm_poses = self.crop_image(images, segmentations, bboxes,
landmarks, sfm_poses, self.v2_crop)
if self.scale:
# scale image, and mask. And scale kps.
images, segmentations, landmarks, sfm_poses = self.scale_image(images, segmentations,
landmarks, sfm_poses)
# Mirror image on random.
mirror_flag = torch.zeros(images.shape[0])
if self.mirror:
images, segmentations, landmarks, sfm_poses, mirror_flag = self.mirror_image(images,
segmentations,
landmarks,
sfm_poses)
transform_params = np.zeros((images.shape[0], 4))
transform_params[:, 0] = 1 # default scale is 1
if self.transforms:
images, segmentations, landmarks, transform_params = self.transform(images, segmentations, landmarks)
optical_flows = np.zeros((images.shape[0], images.shape[1], images.shape[2], 2), dtype=np.float32)
# Normalize kp to be [-1, 1]
if self.normalize:
img_h, img_w = images.shape[1:3]
landmarks, sfm_poses, optical_flows = self.normalize_kp(landmarks, sfm_poses, optical_flows, img_h, img_w)
if self.remove_neck_kp:
landmarks = landmarks[:, :-1]
sample = {'img': images.transpose(0, 3, 1, 2).astype(np.float32), 'kp': landmarks.astype(np.float32),
'mask': segmentations.astype(np.float32), 'sfm_pose': sfm_poses, 'optical_flows': optical_flows,
'frames_idx': frames, 'mirror_flag': mirror_flag, 'transforms': transform_params.astype(np.float32)}
return sample
def mirror_image(self, images, segmentations_pred, landmarks, sfm_poses):
kp_perm = self.kp_perm
flag = torch.zeros(images.shape[0])
if torch.rand(1) > 0.5:
flag = torch.ones(images.shape[0])
# Need copy bc torch collate doesnt like neg strides
images_flip = images[:, :, ::-1, :].copy()
segmentations_pred_flip = segmentations_pred[:, :, ::-1].copy()
# Flip kps.
new_x = images.shape[2] - landmarks[:, :, 0] - 1
kp_flip = np.concatenate((new_x[:, :, None], landmarks[:, :, 1:]), axis=-1)
kp_flip = kp_flip[:, kp_perm, :]
# Flip sfm_pose Rot.
sfm_poses_flip = sfm_poses.copy()
for sfm_pose in sfm_poses_flip:
R = transformations.quaternion_matrix(sfm_pose[3:])
flip_R = np.diag([-1, 1, 1, 1]).dot(R.dot(np.diag([-1, 1, 1, 1])))
sfm_pose[3:] = transformations.quaternion_from_matrix(flip_R, isprecise=True)
# Flip tx
tx = -1 * sfm_pose[1]
sfm_pose[1] = tx
return images_flip, segmentations_pred_flip, kp_flip, sfm_poses_flip, flag
else:
return images, segmentations_pred, landmarks, sfm_poses, flag
def crop_image(self, images, segmentations, bboxes_pred, landmarks, sfm_poses, v2_crop=True):
# crop image and mask and translate kps
images_new, segmentations_new, landmarks_new, sfm_poses_new, optical_flows_new = [], [], [], [], []
for img, mask, bbox, landmark, sfm_pose in zip(images, segmentations,
bboxes_pred, landmarks,
sfm_poses):
if v2_crop:
landmark = image_utils.crop_landmarks(landmark, img, bbox, img.shape)
img = image_utils.crop(img, bbox, bgval=1, mode='img')
mask = image_utils.crop(mask[..., None], bbox, bgval=0, mode=None)[..., 0]
else:
img = image_utils.crop_v2(img, bbox, bgval=1)
mask = image_utils.crop_v2(mask[..., None], bbox, bgval=0)[..., 0]
landmark[:, 0] -= bbox[0].astype(int)
landmark[:, 1] -= bbox[1].astype(int)
vis = (landmark[:, 0] > 0) & (landmark[:, 1] > 0)
landmark[..., 2] = vis
images_new.append(img)
segmentations_new.append(mask)
landmarks_new.append(landmark)
sfm_poses_new.append(sfm_pose)
return images_new, segmentations_new, landmarks_new, sfm_poses_new
def scale_image(self, images, segmentations_pred, landmarks, sfm_poses):
# Scale image so largest bbox size is img_size
images_new, segmentations_pred_new, landmarks_new, sfm_poses_new, optical_flows_new = [], [], [], [], []
for img, mask, landmark, sfm_pose in zip(images, segmentations_pred, landmarks,
sfm_poses):
bwidth = np.shape(img)[0]
bheight = np.shape(img)[1]
scale = self.img_size / float(max(bwidth, bheight))
img_scale, _ = resize_img(img, scale)
vis = landmark[:, 2] > 0
mask_scale, _ = resize_img(mask.astype(np.float32), scale)
mask_scale = mask_scale.astype(np.bool)
landmark[vis, :2] = np.round(landmark[vis, :2].astype(np.float32) * scale)
images_new.append(img_scale)
segmentations_pred_new.append(mask_scale)
landmarks_new.append(landmark)
sfm_poses_new.append(sfm_pose)
return np.array(images_new), np.array(segmentations_pred_new), np.array(landmarks_new), \
np.array(sfm_poses_new)
def normalize_kp(self, landmarks, sfm_poses, optical_flows, img_h, img_w):
sfm_poses_new = sfm_poses.copy()
kp = landmarks[:, :, :2]
vis_kp = landmarks[:, :, 2][..., None]
new_kp = np.stack([2 * (kp[:, :, 0] / img_w) - 1,
2 * (kp[:, :, 1] / img_h) - 1]).transpose(1, 2, 0)
new_landmarks = np.concatenate((vis_kp * new_kp, vis_kp), axis=-1)
return new_landmarks, sfm_poses_new, optical_flows
import cv2
def resize_img(img, scale_factor):
    """Resize *img* by *scale_factor*; return (resized_img, actual_factor).

    actual_factor is [y_factor, x_factor] -- the factor actually applied
    per axis after rounding the target size to whole pixels.
    """
    target_hw = np.round(np.array(img.shape[:2]) * scale_factor).astype(int)
    resized = cv2.resize(img, (target_hw[1], target_hw[0]))
    # cv2 works in (width, height); report the per-axis factor actually used
    actual_factor = [target_hw[0] / float(img.shape[0]),
                     target_hw[1] / float(img.shape[1])]
    return resized, actual_factor
# a simple custom collate function, just to show the idea
def TigDog_collate(batch):
# find max number of frames
min_f = | |
<gh_stars>0
import os
import sys
import time
import lxml.etree
import Bcfg2.Server
import Bcfg2.Server.Plugin
from mock import Mock, MagicMock, patch
# Walk upward from this file's directory, adding every ancestor whose
# basename starts with "test" to sys.path; this lets the (mostly) relative
# imports below resolve even under Python 2.4.  Stop at the "testsuite"
# root, or at the filesystem root.
path = os.path.dirname(__file__)
while path != "/":
    if os.path.basename(path).lower().startswith("test"):
        sys.path.append(path)
    if os.path.basename(path) == "testsuite":
        break
    path = os.path.dirname(path)
from common import XI_NAMESPACE, XI, inPy3k, call, builtins, u, can_skip, \
skip, skipIf, skipUnless, Bcfg2TestCase, DBModelTestCase, syncdb, \
patchIf, datastore
from Bcfg2.Server.Plugins.Probes import *
from TestPlugin import TestEntrySet, TestProbing, TestConnector, \
TestDatabaseBacked
# Shared fixture: a small structure round-tripped through the JSON and
# YAML probe-data tests below.
test_data = dict(a=1, b=[1, 2, 3], c="test")
class FakeList(list):
    """Plain ``list`` subclass; used so ``@patch`` can substitute it for the
    ``list`` builtin in test_get_probe_data."""
    pass
class TestProbesDB(DBModelTestCase):
    """Declares the Django models that the DB-backed Probes tests sync."""
    if has_django:
        models = [ProbesGroupsModel, ProbesDataModel]
class TestClientProbeDataSet(Bcfg2TestCase):
    """Tests for ClientProbeDataSet, a dict carrying a creation timestamp."""

    def test__init(self):
        """timestamp defaults to "now", honors an explicit value, and is
        never stored as a dict key."""
        ds = ClientProbeDataSet()
        self.assertLessEqual(ds.timestamp, time.time())
        self.assertIsInstance(ds, dict)
        self.assertNotIn("timestamp", ds)
        ds = ClientProbeDataSet(timestamp=123)
        self.assertEqual(ds.timestamp, 123)
        self.assertNotIn("timestamp", ds)
class TestProbeData(Bcfg2TestCase):
    """Tests for ProbeData's string behavior and lazy XML/JSON/YAML accessors."""

    def test_str(self):
        """A payload that parses as nothing behaves as a plain str and all
        format accessors return None."""
        # a value that is not valid XML, JSON, or YAML
        val = "'test"
        # test string behavior
        data = ProbeData(val)
        self.assertIsInstance(data, str)
        self.assertEqual(data, val)
        # test 1.2.0-1.2.2 broken behavior
        self.assertEqual(data.data, val)
        # test that formatted data accessors return None
        self.assertIsNone(data.xdata)
        self.assertIsNone(data.yaml)
        self.assertIsNone(data.json)

    def test_xdata(self):
        """An XML payload is parsed and exposed via .xdata."""
        xdata = lxml.etree.Element("test")
        lxml.etree.SubElement(xdata, "test2")
        data = ProbeData(lxml.etree.tostring(xdata,
                                             xml_declaration=False).decode('UTF-8'))
        self.assertIsNotNone(data.xdata)
        self.assertIsNotNone(data.xdata.find("test2"))

    @skipUnless(has_json, "JSON libraries not found, skipping JSON tests")
    def test_json(self):
        """A JSON payload is parsed and exposed via .json."""
        jdata = json.dumps(test_data)
        data = ProbeData(jdata)
        self.assertIsNotNone(data.json)
        self.assertItemsEqual(test_data, data.json)

    @skipUnless(has_yaml, "YAML libraries not found, skipping YAML tests")
    def test_yaml(self):
        """A YAML payload is parsed and exposed via .yaml."""
        jdata = yaml.dump(test_data)
        data = ProbeData(jdata)
        self.assertIsNotNone(data.yaml)
        self.assertItemsEqual(test_data, data.yaml)
class TestProbeSet(TestEntrySet):
    """Tests for ProbeSet, the entry set that collects probe scripts."""

    test_obj = ProbeSet
    basenames = ["test", "_test", "test-test"]
    ignore = ["foo~", ".#foo", ".foo.swp", ".foo.swx", "probed.xml"]
    bogus_names = ["test.py"]

    def get_obj(self, path=datastore, fam=None, encoding=None,
                plugin_name="Probes", basename=None):
        """Build a ProbeSet with a mocked FAM and entry type."""
        # get_obj() accepts the basename argument, accepted by the
        # parent get_obj() method, and just throws it away, since
        # ProbeSet uses a regex for the "basename"
        if fam is None:
            fam = Mock()
        rv = self.test_obj(path, fam, encoding, plugin_name)
        rv.entry_type = MagicMock()
        return rv

    def test__init(self):
        """Constructor registers a FAM monitor on the datastore."""
        fam = Mock()
        ps = self.get_obj(fam=fam)
        self.assertEqual(ps.plugin_name, "Probes")
        fam.AddMonitor.assert_called_with(datastore, ps)
        TestEntrySet.test__init(self)

    def test_HandleEvent(self):
        """Events on the datastore itself and probed.xml are skipped; all
        other events are dispatched to handle_event()."""
        ps = self.get_obj()
        ps.handle_event = Mock()
        # test that events on the data store itself are skipped
        evt = Mock()
        evt.filename = datastore
        ps.HandleEvent(evt)
        self.assertFalse(ps.handle_event.called)
        # test that events on probed.xml are skipped
        evt.reset_mock()
        evt.filename = "probed.xml"
        ps.HandleEvent(evt)
        self.assertFalse(ps.handle_event.called)
        # test that other events are processed appropriately
        evt.reset_mock()
        evt.filename = "fooprobe"
        ps.HandleEvent(evt)
        ps.handle_event.assert_called_with(evt)

    @patch("%s.list" % builtins, FakeList)
    def test_get_probe_data(self):
        """get_probe_data() keeps only the most specific probe per basename
        and derives the interpreter from the shebang line."""
        ps = self.get_obj()
        # build some fairly complex test data for this. in the end,
        # we want the probe data to include only the most specific
        # version of a given probe, and by basename only, not full
        # (specific) name. We don't fully test the specificity stuff,
        # we just check to make sure sort() is called and trust that
        # sort() does the right thing on Specificity objects. (I.e.,
        # trust that Specificity is well-tested. Hah!) We also test
        # to make sure the interpreter is determined correctly.
        ps.get_matching = Mock()
        matching = FakeList()
        matching.sort = Mock()
        p1 = Mock()
        p1.specific = Bcfg2.Server.Plugin.Specificity(group=True, prio=10)
        p1.name = "fooprobe.G10_foogroup"
        p1.data = """#!/bin/bash
group-specific"""
        matching.append(p1)
        p2 = Mock()
        p2.specific = Bcfg2.Server.Plugin.Specificity(all=True)
        p2.name = "fooprobe"
        p2.data = "#!/bin/bash"
        matching.append(p2)
        p3 = Mock()
        p3.specific = Bcfg2.Server.Plugin.Specificity(all=True)
        p3.name = "barprobe"
        p3.data = "#! /usr/bin/env python"
        matching.append(p3)
        p4 = Mock()
        p4.specific = Bcfg2.Server.Plugin.Specificity(all=True)
        p4.name = "bazprobe"
        p4.data = ""
        matching.append(p4)
        ps.get_matching.return_value = matching
        metadata = Mock()
        pdata = ps.get_probe_data(metadata)
        ps.get_matching.assert_called_with(metadata)
        # we can't create a matching operator.attrgetter object, and I
        # don't feel the need to mock that out -- this is a good
        # enough check
        self.assertTrue(matching.sort.called)
        self.assertEqual(len(pdata), 3,
                         "Found: %s" % [p.get("name") for p in pdata])
        for probe in pdata:
            if probe.get("name") == "fooprobe":
                self.assertIn("group-specific", probe.text)
                self.assertEqual(probe.get("interpreter"), "/bin/bash")
            elif probe.get("name") == "barprobe":
                self.assertEqual(probe.get("interpreter"),
                                 "/usr/bin/env python")
            elif probe.get("name") == "bazprobe":
                self.assertIsNotNone(probe.get("interpreter"))
            else:
                assert False, "Strange probe found in get_probe_data() return"
class TestProbes(TestProbing, TestConnector, TestDatabaseBacked):
test_obj = Probes
    def get_test_probedata(self):
        """Return canned probe data for two clients, covering XML, plain
        text, multiline, empty, YAML, and JSON payloads."""
        test_xdata = lxml.etree.Element("test")
        lxml.etree.SubElement(test_xdata, "test", foo="foo")
        rv = dict()
        rv["foo.example.com"] = ClientProbeDataSet(timestamp=time.time())
        rv["foo.example.com"]["xml"] = \
            ProbeData(lxml.etree.tostring(test_xdata,
                                          xml_declaration=False).decode('UTF-8'))
        rv["foo.example.com"]["text"] = ProbeData("freeform text")
        rv["foo.example.com"]["multiline"] = ProbeData("""multiple
lines
of
freeform
text
""")
        rv["bar.example.com"] = ClientProbeDataSet(timestamp=time.time())
        rv["bar.example.com"]["empty"] = ProbeData("")
        if has_yaml:
            rv["bar.example.com"]["yaml"] = ProbeData(yaml.dump(test_data))
        if has_json:
            rv["bar.example.com"]["json"] = ProbeData(json.dumps(test_data))
        return rv
def get_test_cgroups(self):
return {"foo.example.com": ["group", "group with spaces",
"group-with-dashes"],
"bar.example.com": []}
    def get_probes_object(self, use_db=False, load_data=None):
        """Construct a Probes plugin against a mocked core, with load_data()
        patched out so nothing is read at init time."""
        core = Mock()
        core.setup.cfp.getboolean = Mock()
        core.setup.cfp.getboolean.return_value = use_db
        if load_data is None:
            load_data = MagicMock()
        # we have to patch load_data() in a funny way because
        # different versions of Mock have different scopes for
        # patching. in some versions, a patch applied to
        # get_probes_object() would only apply to that function, while
        # in others it would also apply to the calling function (e.g.,
        # test__init(), which relies on being able to check the calls
        # of load_data(), and thus on load_data() being consistently
        # mocked)
        @patch("Bcfg2.Server.Plugins.Probes.Probes.load_data", new=load_data)
        def inner():
            return Probes(core, datastore)

        return inner()
    def test__init(self):
        """Constructor monitors the probe directory, loads data once, and
        starts with empty probe-data and group caches."""
        mock_load_data = Mock()
        probes = self.get_probes_object(load_data=mock_load_data)
        probes.core.fam.AddMonitor.assert_called_with(os.path.join(datastore,
                                                                   probes.name),
                                                      probes.probes)
        mock_load_data.assert_any_call()
        self.assertEqual(probes.probedata, ClientProbeDataSet())
        self.assertEqual(probes.cgroups, dict())
    @patch("Bcfg2.Server.Plugins.Probes.Probes.load_data", Mock())
    def test__use_db(self):
        """_use_db mirrors the probes/use_database config option."""
        probes = self.get_probes_object()
        self.assertFalse(probes._use_db)
        probes.core.setup.cfp.getboolean.assert_called_with("probes",
                                                            "use_database",
                                                            default=False)
    @skipUnless(has_django, "Django not found, skipping")
    @patch("Bcfg2.Server.Plugins.Probes.Probes._write_data_db", Mock())
    @patch("Bcfg2.Server.Plugins.Probes.Probes._write_data_xml", Mock())
    def test_write_data_xml(self):
        """write_data() dispatches to the XML backend when use_db is off."""
        probes = self.get_probes_object(use_db=False)
        probes.write_data("test")
        probes._write_data_xml.assert_called_with("test")
        self.assertFalse(probes._write_data_db.called)
    @skipUnless(has_django, "Django not found, skipping")
    @patch("Bcfg2.Server.Plugins.Probes.Probes._write_data_db", Mock())
    @patch("Bcfg2.Server.Plugins.Probes.Probes._write_data_xml", Mock())
    def test_write_data_db(self):
        """write_data() dispatches to the DB backend when use_db is on."""
        probes = self.get_probes_object(use_db=True)
        probes.write_data("test")
        probes._write_data_db.assert_called_with("test")
        self.assertFalse(probes._write_data_xml.called)
@patch("%s.open" % builtins)
def test__write_data_xml(self, mock_open):
probes = self.get_probes_object(use_db=False)
probes.probedata = self.get_test_probedata()
probes.cgroups = self.get_test_cgroups()
probes._write_data_xml(None)
mock_open.assert_called_with(os.path.join(datastore, probes.name,
"probed.xml"), "w")
data = lxml.etree.XML(mock_open.return_value.write.call_args[0][0])
self.assertEqual(len(data.xpath("//Client")), 2)
foodata = data.find("Client[@<EMAIL>='<EMAIL>']")
self.assertIsNotNone(foodata)
self.assertIsNotNone(foodata.get("timestamp"))
self.assertEqual(len(foodata.findall("Probe")),
len(probes.probedata['foo.example.com']))
self.assertEqual(len(foodata.findall("Group")),
len(probes.cgroups['foo.example.com']))
xml = foodata.find("Probe[@name='xml']")
self.assertIsNotNone(xml)
self.assertIsNotNone(xml.get("value"))
xdata = lxml.etree.XML(xml.get("value"))
self.assertIsNotNone(xdata)
self.assertIsNotNone(xdata.find("test"))
self.assertEqual(xdata.find("test").get("foo"), "foo")
text = foodata.find("Probe[@name='text']")
self.assertIsNotNone(text)
self.assertIsNotNone(text.get("value"))
multiline = foodata.find("Probe[@name='multiline']")
self.assertIsNotNone(multiline)
self.assertIsNotNone(multiline.get("value"))
self.assertGreater(len(multiline.get("value").splitlines()), 1)
bardata = data.find("Client[<EMAIL>='<EMAIL>']")
self.assertIsNotNone(bardata)
self.assertIsNotNone(bardata.get("timestamp"))
self.assertEqual(len(bardata.findall("Probe")),
len(probes.probedata['bar.example.com']))
self.assertEqual(len(bardata.findall("Group")),
len(probes.cgroups['bar.example.com']))
empty = bardata.find("Probe[@name='empty']")
self.assertIsNotNone(empty)
self.assertIsNotNone(empty.get("value"))
self.assertEqual(empty.get("value"), "")
if has_yaml:
ydata = bardata.find("Probe[@name='yaml']")
self.assertIsNotNone(ydata)
self.assertIsNotNone(ydata.get("value"))
self.assertItemsEqual(test_data, yaml.load(ydata.get("value")))
if has_json:
jdata = bardata.find("Probe[@name='json']")
self.assertIsNotNone(jdata)
self.assertIsNotNone(jdata.get("value"))
self.assertItemsEqual(test_data, json.loads(jdata.get("value")))
    @skipUnless(has_django, "Django not found, skipping")
    def test__write_data_db(self):
        """_write_data_db() persists one row per probe and per group, and
        prunes rows for probes/groups that have disappeared."""
        syncdb(TestProbesDB)
        probes = self.get_probes_object(use_db=True)
        probes.probedata = self.get_test_probedata()
        probes.cgroups = self.get_test_cgroups()
        for cname in ["foo.example.com", "bar.example.com"]:
            client = Mock()
            client.hostname = cname
            probes._write_data_db(client)
            pdata = ProbesDataModel.objects.filter(hostname=cname).all()
            self.assertEqual(len(pdata), len(probes.probedata[cname]))
            for probe in pdata:
                self.assertEqual(probe.hostname, client.hostname)
                self.assertIsNotNone(probe.data)
                if probe.probe == "xml":
                    xdata = lxml.etree.XML(probe.data)
                    self.assertIsNotNone(xdata)
                    self.assertIsNotNone(xdata.find("test"))
                    self.assertEqual(xdata.find("test").get("foo"), "foo")
                elif probe.probe == "text":
                    pass
                elif probe.probe == "multiline":
                    self.assertGreater(len(probe.data.splitlines()), 1)
                elif probe.probe == "empty":
                    self.assertEqual(probe.data, "")
                elif probe.probe == "yaml":
                    # NOTE(review): yaml.load() without a Loader is unsafe on
                    # untrusted input; fine here with local test data.
                    self.assertItemsEqual(test_data, yaml.load(probe.data))
                elif probe.probe == "json":
                    self.assertItemsEqual(test_data, json.loads(probe.data))
                else:
                    assert False, "Strange probe found in _write_data_db data"
            pgroups = ProbesGroupsModel.objects.filter(hostname=cname).all()
            self.assertEqual(len(pgroups), len(probes.cgroups[cname]))
        # test that old probe data is removed properly
        cname = 'foo.example.com'
        del probes.probedata[cname]['text']
        probes.cgroups[cname].pop()
        client = Mock()
        client.hostname = cname
        probes._write_data_db(client)
        pdata = ProbesDataModel.objects.filter(hostname=cname).all()
        self.assertEqual(len(pdata), len(probes.probedata[cname]))
        pgroups = ProbesGroupsModel.objects.filter(hostname=cname).all()
        self.assertEqual(len(pgroups), len(probes.cgroups[cname]))
    @skipUnless(has_django, "Django not found, skipping")
    @patch("Bcfg2.Server.Plugins.Probes.Probes._load_data_db", Mock())
    @patch("Bcfg2.Server.Plugins.Probes.Probes._load_data_xml", Mock())
    def test_load_data_xml(self):
        """load_data() dispatches to the XML backend when use_db is off."""
        probes = self.get_probes_object(use_db=False)
        probes.load_data()
        probes._load_data_xml.assert_any_call()
        self.assertFalse(probes._load_data_db.called)
    @skipUnless(has_django, "Django not found, skipping")
    @patch("Bcfg2.Server.Plugins.Probes.Probes._load_data_db", Mock())
    @patch("Bcfg2.Server.Plugins.Probes.Probes._load_data_xml", Mock())
    def test_load_data_db(self):
        """load_data() dispatches to the DB backend when use_db is on."""
        probes = self.get_probes_object(use_db=True)
        probes.load_data()
        probes._load_data_db.assert_any_call()
        self.assertFalse(probes._load_data_xml.called)
    @patch("%s.open" % builtins)
    @patch("lxml.etree.parse")
    def test__load_data_xml(self, mock_parse, mock_open):
        """Round-trip: write probe data to XML, feed that XML back through a
        mocked lxml.etree.parse, and check the caches are reconstructed."""
        probes = self.get_probes_object(use_db=False)
        # to get the value for lxml.etree.parse to parse, we call
        # _write_data_xml, mock the open() call, and grab the data
        # that gets "written" to probed.xml
        probes.probedata = self.get_test_probedata()
        probes.cgroups = self.get_test_cgroups()
        probes._write_data_xml(None)
        xdata = \
            lxml.etree.XML(str(mock_open.return_value.write.call_args[0][0]))
        mock_parse.return_value = xdata.getroottree()
        probes.probedata = dict()
        probes.cgroups = dict()
        probes._load_data_xml()
        mock_parse.assert_called_with(os.path.join(datastore, probes.name,
                                                   'probed.xml'),
                                      parser=Bcfg2.Server.XMLParser)
        self.assertItemsEqual(probes.probedata, self.get_test_probedata())
        self.assertItemsEqual(probes.cgroups, self.get_test_cgroups())
    @skipUnless(has_django, "Django not found, skipping")
    def test__load_data_db(self):
        """Round-trip through the database backend: write each client's
        data, clear the caches, reload, and compare."""
        syncdb(TestProbesDB)
        probes = self.get_probes_object(use_db=True)
        probes.probedata = self.get_test_probedata()
        probes.cgroups = self.get_test_cgroups()
        for cname in probes.probedata.keys():
            client = Mock()
            client.hostname = cname
            probes._write_data_db(client)
        probes.probedata = dict()
        probes.cgroups = dict()
        probes._load_data_db()
        self.assertItemsEqual(probes.probedata, self.get_test_probedata())
        # the db backend does not store groups at all if a client has
        # no groups set, so we can't just use assertItemsEqual here,
        # because loading saved data may _not_ result in the original
        # data if some clients had no groups set.
        test_cgroups = self.get_test_cgroups()
        for cname, groups in test_cgroups.items():
            if cname in probes.cgroups:
                self.assertEqual(groups, probes.cgroups[cname])
            else:
                self.assertEqual(groups, [])
    @patch("Bcfg2.Server.Plugins.Probes.ProbeSet.get_probe_data")
    def test_GetProbes(self, mock_get_probe_data):
        """GetProbes() delegates to ProbeSet.get_probe_data(metadata)."""
        probes = self.get_probes_object()
        metadata = Mock()
        probes.GetProbes(metadata)
        mock_get_probe_data.assert_called_with(metadata)
@patch("Bcfg2.Server.Plugins.Probes.Probes.write_data")
@patch("Bcfg2.Server.Plugins.Probes.Probes.ReceiveDataItem")
def test_ReceiveData(self, mock_ReceiveDataItem, mock_write_data):
# we use a simple (read: bogus) datalist here to make this
# easy to test
datalist = | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
import os,sys
import re
import inspect
import codecs
import cpp_header_parser as chp
import glob
import json
import fnmatch
#import CppToPyUtil #
import copy
######
def get_chp(filename, declspec_list=[]):
    """Parse *filename* with cpp_header_parser.

    Returns the CppHeader object, or None when parsing fails (the parse
    error is printed rather than raised).
    """
    try:
        return chp.CppHeader(filename, declspec_list=declspec_list)
    except chp.CppParseError as e:
        print(e)
        return None
######
########################
def has_keywords_for_ptr(name, keywords_for_ptr, ignore_keywords_for_ptr):
    """True when *name* contains a ptr keyword, or contains none of the
    ignore keywords."""
    if is_any_of_words_in_str(keywords_for_ptr, name):
        return True
    return not is_any_of_words_in_str(ignore_keywords_for_ptr, name)
def is_any_of_words_in_str(words=[], s=''):
    """Return True when at least one entry of *words* is a substring of *s*."""
    return any(w in s for w in words)
def is_all_of_words_in_str(words=[], s=''):
    """Return True when every entry of *words* is a substring of *s*
    (vacuously True for an empty list)."""
    return all(w in s for w in words)
######
def get_files_by_pattern(directory='./', pattern='*'):
    """Non-recursively glob entries under *directory* matching *pattern*."""
    return glob.glob('%s/%s' % (directory, pattern))
def get_filenames_by_pattern(directory='./', pattern='*'):
    """Basenames of glob matches under *directory*.

    Returns None (not an empty list -- legacy contract) when nothing
    matches; handles both '/' and '\\' separators.
    """
    files = get_files_by_pattern(directory, pattern)
    if not files:
        return None
    return [path.split('/')[-1].split('\\')[-1] for path in files]
def get_subdirs_by_pattern(directory='./', pattern='*'):
    """Glob immediate children of *directory* and keep only directories."""
    candidates = glob.glob('%s/%s' % (directory, pattern))
    return [c for c in candidates if os.path.isdir(c)]
def get_sub_name_for_path_or_file(fullname):
    """Final path component of *fullname*; accepts '/' or '\\' separators."""
    return fullname.replace('\\', '/').split('/')[-1]
def get_upper_dir_for_path_or_file(fullname):
    """Parent directory of *fullname* with '/' separators; '' when there is
    no separator at all."""
    parts = fullname.replace('\\', '/').split('/')
    return '/'.join(parts[:-1])
def get_files_by_pattern_r(directory='./', pattern='*'):
    """Recursively collect files under *directory* whose basenames match
    the fnmatch *pattern*."""
    matches = []
    for dirpath, _dirnames, filenames in os.walk(directory):
        matches.extend(os.path.join(dirpath, name)
                       for name in fnmatch.filter(filenames, pattern))
    return matches
def get_subdirs_r(directory='./', pattern='*'):
    """Recursively collect sub-directories of *directory* whose basenames
    match the fnmatch *pattern*."""
    matches = []
    for dirpath, dirnames, _files in os.walk(directory):
        matches.extend(os.path.join(dirpath, name)
                       for name in fnmatch.filter(dirnames, pattern))
    return matches
######################
def convert_tiledb_enum_file(inputfile='../../TileDB/tiledb/sm/c_api/tiledb_enum.h', outputfile='./tiledb_enum.h'):
    """Rewrite TileDB's X-macro enum header into plain C++ ``enum`` blocks.

    Each ``#ifdef TILEDB_FOO_ENUM`` opens ``enum tiledb_foo_t { ... }``,
    member macros ``TILEDB_FOO_ENUM(NAME) = VALUE,`` become
    ``TILEDB_NAME = VALUE,`` and every ``#endif`` closes the enum. All
    original lines are kept as trailing ``//`` comments.

    FIX: the output file is now managed with a context manager, so it is
    closed (and not left half-written and open) if processing raises.
    Side effect of the reordering: the output file is no longer created
    when the input file cannot be read.
    """
    with codecs.open(inputfile, 'r') as f:
        lines = f.readlines()
    with open(outputfile, 'w') as outfile:
        isenum_block = False
        for line in lines:
            line = line.replace('\n', '')
            if "#ifdef" in line and 'TILEDB_' in line and '_ENUM' in line:
                # "#ifdef TILEDB_FOO_ENUM" -> "enum tiledb_foo_t{"
                isenum_block = True
                items = line.split()
                typename = items[1].replace('ENUM', 't').lower()
                if typename == 'tiledb_object_type_t':
                    # C-API quirk: the object-type enum is tiledb_object_t
                    typename = 'tiledb_object_t'
                outfile.write('enum %s{ //%s\n' % (typename, line))
            elif '#endif' in line:
                outfile.write('};//%s\n' % (line))
                isenum_block = False
            elif isenum_block and ('(' in line) and (')' in line) and ('TILEDB_' in line) and ('_ENUM' in line):
                # "TILEDB_FOO_ENUM(NAME) = VALUE," -> " TILEDB_NAME = VALUE,"
                items = line.split('=')
                value_part = items[-1].replace('\n', '')
                name_part = items[0].split('(')[-1].split(')')[0]
                outfile.write(' TILEDB_%s = %s //%s\n' % (name_part, value_part, line))
            else:
                # everything else passes through unchanged
                outfile.write('%s\n' % (line))
    return
######
def get_default_cfginfo():
    """Build the default binding-generator configuration dict.

    Keys include the source dir, namespaces, output dir, and keyword lists
    that filter which files/methods/classes get pybind wrappers and which
    types are wrapped as pointers. Commented-out entries are kept as a
    record of previously-filtered keywords.
    """
    cfginfo = {"dir": "../cpp/src/tiledb/cxx_api", "namespaces": ["tiledb"], "outputdir": "../cpp/src/tiledb_pybind", "ignore_file_keywords": ["XTAppBase", "XTConfig", "XTTimer", "XTSingleton", "XTStartExit"]}
    # methods whose signatures mention any of these tokens are skipped
    ignore_method_keywords = ['compareBar', 'operator', 'std::thread', 'boost::array', 'boost::signals2', 'boost::unordered', 'std::unordered', 'tbb::concurrent_', 'sf::contfree_safe_ptr']
    ignore_method_keywords.extend(["emplace", "ConcurrentQueue"])
    # ignore_method_keywords.extend(['std::vector<StringMapPtr'])
    ignore_method_keywords.extend(['std::list', 'std::set', 'std::deque', 'std::map<int, int'])
    # ignore_method_keywords.extend(['static T GCD', 'static T RoundUp', 'T fromString', 'const T'])
    # ignore_method_keywords.extend(['google::protobuf::RepeatedField', 'google::protobuf::Map'])
    # ignore_method_keywords.extend(['default_instance', 'New(', 'Swap(', 'CopyFrom(', 'MergeFrom(', 'ivec('])
    ignore_method_keywords.extend(['time_t'])
    ##ignore_method_keywords.extend(['Ptr','_ptr'])
    # ignore_method_keywords.extend(['getApi()', 'md()', 'trd()'])
    # ignore_method_keywords.extend(['toRaw', 'fromRaw', 'strOfRaw', 'csvStrOfRaw', 'processOn', 'RawPtr'])
    ignore_method_keywords.extend(['FILE', 'ptr()', 'std::function', 'std::vector<tiledb::Dimension>', 'attributes()', 'std::map<std::string,tiledb::Attribute>', 'arrow::Array', 'arrow::Table', 'arrow::Schema'])  # for tiledb
    ignore_method_keywords.extend(['tiledb_array_t ', 'tiledb_buffer_t', 'tiledb_buffer_list_t', 'tiledb_config_t', 'tiledb_config_iter_t'])  # for tiledb
    ignore_method_keywords.extend(['tiledb_ctx_t', 'tiledb_error_t', 'tiledb_attribute_t', 'tiledb_array_schema_t', 'tiledb_dimension_t', 'tiledb_query_condition_t'])  # for tiledb
    ignore_method_keywords.extend(['tiledb_domain_t', 'tiledb_filter_t ', 'tiledb_filter_list_t', 'submit_async', 'tiledb_query_status_t'])  # ,'tiledb_query_t']) #for tiledb
    ignore_method_keywords.extend(['tiledb_vfs_t', 'tiledb_vfs_fh_t', 'const void', 'const void*', 'void *', 'std::pair<T,', 'ConfigIter', 'cell_num'])  # for tiledb
    cfginfo['ignore_method_keywords'] = ignore_method_keywords
    # classes matching ignore_ptr_keywords are never wrapped as pointers;
    # classes matching ptr_keywords always are
    ignore_ptr_keywords = ['Enum', 'Util', 'Comparator', 'Mgr', 'CThostFtdc']
    cfginfo['ignore_ptr_keywords'] = ignore_ptr_keywords
    ptr_keywords = ['MgrData', 'CfgData']
    cfginfo['ptr_keywords'] = ptr_keywords
    ignore_class_keywords = []
    cfginfo['ignore_class_keywords'] = ignore_class_keywords
    # override the outputdir set in the literal above
    cfginfo['outputdir'] = "./temp"
    cfginfo['include_str'] = '#include "common_include.h" \n\n'
    return cfginfo
def get_file_cfginfos_for_dir_cfginfo(dir_cfginfo):
    """Expand a directory-level config into one config dict per header file.

    Globs headers under ``dir_cfginfo['dir']`` using ``hfilepattern``,
    skips files matching ``ignore_file_keywords``, and for each remaining
    header returns a deep copy of the defaults overlaid with the
    directory config plus a ``filename`` entry.

    Returns:
        list of per-file config dicts; empty when no headers match.
        (BUG FIX: previously returned the empty string "" in that case,
        which only worked because callers iterate the result.)
    """
    result = []
    dirname = dir_cfginfo.get("dir", "./")
    default_cfginfo = get_default_cfginfo()
    hfilepattern = dir_cfginfo.get('hfilepattern', '*.h')
    hfiles = get_files_by_pattern(dirname, hfilepattern)
    if hfiles is None or len(hfiles) == 0:
        return []
    hfiles = [f.replace("\\", "/") for f in hfiles]
    for hfile in hfiles:
        if is_any_of_words_in_str(dir_cfginfo['ignore_file_keywords'], hfile):
            print('ignore file:%s' % (hfile))
            continue
        print('start to process %s' % (hfile))
        file_cfginfo = copy.deepcopy(default_cfginfo)
        # directory-level settings win over the defaults
        file_cfginfo.update(dir_cfginfo)
        file_cfginfo['filename'] = hfile
        result.append(file_cfginfo)
    return result
#########
def get_pybind_for_file_cfginfo(file_cfginfo):
dd={}
## base_cfginfo=get_base_cfginfo()
if file_cfginfo is None:
return dd
if not "filename" in file_cfginfo:
return dd
#
ignore_ptr_keywords=file_cfginfo.get('ignore_ptr_keywords',[])
ptr_keywords=file_cfginfo.get('ptr_keywords',[])
ignore_class_keywords=file_cfginfo.get('ignore_class_keywords',[])
ignore_method_keywords=file_cfginfo.get('ignore_method_keywords',[])
#
outputdir=file_cfginfo.get('outputdir','./temp')
filename=file_cfginfo['filename']
tempitems=filename.split('/')
full_filename=tempitems[-1]
tempitems=tempitems[-1].split('.')
tempitem=tempitems[0]
stem_filename=tempitem
protobuf_inttypes=["google::protobuf::int32","google::protobuf::uint32","google::protobuf::int64","google::protobuf::uint64"]
ns=['tiledb']
if 'namespaces' in file_cfginfo:
ns=file_cfginfo['namespaces']
ns0=''
if len(ns)>0:
ns0=ns[0]
include_str_hfile='#pragma once\n'
DEF_STR='%s_PYBIND_%s_H' % (ns0,stem_filename)
DEF_STR=DEF_STR.upper()
include_str_hfile='%s#ifndef %s\n' % (include_str_hfile,DEF_STR)
include_str_hfile='%s#define %s\n\n' % (include_str_hfile,DEF_STR)
include_str_hfile='%s%s' % (include_str_hfile,file_cfginfo.get("include_str",""))
include_str = '#include "%s"' % (full_filename)
include_str_hfile='%s\n%s\n' % (include_str_hfile,include_str)
cppheader=get_chp(filename) #parse head file
if cppheader is None:
linfo='can not parse hfile:%s' % (filename)
print(linfo)
return dd
if 'pb.h' in cppheader.headerFileName and 'Data' in cppheader.headerFileName:
tempitems=filename.split('/')
typedef_file='typedef_' + tempitems[-1]
# include_typedef_file_str='/'.join(tempitems)
include_str='%s\n#include "%s"\n' % (include_str,typedef_file)
include_str_hfile='%s\n#include "%s"\n' % (include_str_hfile, typedef_file)
enumtype_dict={}
for enumt in cppheader.enums:
if enumt['name'].endswith('_enumtype'):
tempname=enumt['name'][:-9]
enumtype_dict[tempname]=enumt
class_str_dict={}
class_str=''
enum_str=''
class_str_hfile=''
class_str_cppfile=''
enum_str_hfile=''
enum_str_cppfile=''
init_module_str=''
for e in cppheader.enums:
enum_str='%s\tpy::enum_<%s::%s>(m,"%s")\n' % (enum_str,e['namespace'],e['name'],e['name'])
enum_str_hfile='%svoid init_%s_%s(pybind11::module& m);\n' % (enum_str_hfile,e['namespace'],e['name'])
enum_str_cppfile='%svoid init_%s_%s(pybind11::module& m) {\n' % (enum_str_cppfile,e['namespace'],e['name'])
enum_str_cppfile='%s\tpybind11::enum_<%s::%s>(m,"%s")\n' % (enum_str_cppfile,e['namespace'],e['name'],e['name'])
init_module_str='%s\tinit_%s_%s(m);\n' % (init_module_str,e['namespace'],e['name'])
for ev in e['values']:
enum_name=ev['name']
enum_str='%s\t\t.value("%s", %s::%s)\n' % (enum_str, enum_name, e['namespace'], e['name'])
enum_str_cppfile='%s\t\t.value("%s", %s::%s)\n' % (enum_str_cppfile, enum_name, e['namespace'], ev['name'])
enum_str='%s\t\t.export_values();\n\n' % (enum_str)
enum_str_cppfile='%s\t\t.export_values();\n\n' % (enum_str_cppfile)
enum_str_cppfile='%s}\n\n' % (enum_str_cppfile)
for ck in cppheader.classes.keys():
c=cppheader.classes[ck]
if not c['namespace'] in ns:
linfo='ignore class namespace %s::%s' % (c['namespace'],c['name'])
print(linfo)
continue
if is_any_of_words_in_str(ignore_class_keywords,ck):
linfo='ignore class keywords %s::%s' % (c['namespace'],c['name'])
print(linfo)
continue
linfo='start to process class %s::%s' % (c['namespace'],c['name'])
if ck in enumtype_dict.keys():
# enum_str='%s\tpy::class_<%s> %s(m,"%s");\n\n' % (enum_str,ck,ck.lower(),ck)
# enum_str='%s\tpy::enum_<%s::%s>(%s,"%s")\n' % (enum_str,ck,ck+"_enumtype",ck.lower(),ck)
enum_str='%s\tpy::enum_<%s::%s>(m,"%s")\n' % (enum_str,c['namespace'],ck+"_enumtype",ck)
enum_str_hfile='%svoid init_%s_%s(pybind11::module& m);\n' % (enum_str_hfile,c['namespace'],ck)
enum_str_cppfile='%svoid init_%s_%s(pybind11::module& m) {\n' % (enum_str_cppfile,c['namespace'],ck)
enum_str_cppfile='%s\tpybind11::enum_<%s::%s>(m,"%s")\n' % (enum_str_cppfile,c['namespace'],ck+"_enumtype",ck)
init_module_str='%s\tinit_%s_%s(m);\n' % (init_module_str,c['namespace'],ck)
enumt=enumtype_dict[ck]
for enumvalue in enumt['values']:
enum_name=enumvalue['name']
# enum_str='%s\t\t.value("%s", %s::%s::%s)\n' % (enum_str,enum_name.replace(ck+"_enumtype_",""),ck,ck+"_enumtype",enumvalue['name'])
enum_str='%s\t\t.value("%s_%s", %s::%s)\n' % (enum_str,ck, enum_name.replace(ck+"_enumtype_",""), c['namespace'], enumvalue['name'])
enum_str_cppfile='%s\t\t.value("%s_%s", %s::%s)\n' % (enum_str_cppfile,ck, enum_name.replace(ck+"_enumtype_",""), c['namespace'], enumvalue['name'])
enum_str='%s\t\t.export_values();\n\n' % (enum_str)
enum_str_cppfile='%s\t\t.export_values();\n\n' % (enum_str_cppfile)
enum_str_cppfile='%s}\n\n' % (enum_str_cppfile)
continue #ignore class for enum type
if is_any_of_words_in_str(ignore_ptr_keywords,ck) or ck.endswith("Util") or ck.endswith("Type"):
class_str='%s\tpy::class_<%s::%s>(m,"%s")\n' % (class_str,c['namespace'],c['name'],c['name'])
class_str_hfile='%s\nvoid init_%s_%s(pybind11::module& m);\n' % (class_str_hfile,c['namespace'],ck)
class_str_cppfile='%s\nvoid init_%s_%s(pybind11::module& m) {\n' % (class_str_cppfile,c['namespace'],ck)
class_str_cppfile='%s\tpybind11::class_<%s::%s>(m,"%s")\n' % (class_str_cppfile,c['namespace'],c['name'],c['name'])
init_module_str='%s\tinit_%s_%s(m);\n' % (init_module_str,c['namespace'],ck)
elif ck.endswith("Mgr") or ck.endswith("Mask") or ck.endswith("Status") or ck.endswith("TypeHelper"):
class_str='%s\tpy::class_<%s::%s>(m,"%s")\n' % (class_str,c['namespace'],c['name'],c['name'])
class_str_hfile='%s\nvoid init_%s_%s(pybind11::module& m);\n' % (class_str_hfile,c['namespace'],ck)
class_str_cppfile='%s\nvoid init_%s_%s(pybind11::module& m) {\n' % (class_str_cppfile,c['namespace'],ck)
class_str_cppfile='%s\tpybind11::class_<%s::%s>(m,"%s")\n' % (class_str_cppfile,c['namespace'],c['name'],c['name'])
init_module_str='%s\tinit_%s_%s(m);\n' % (init_module_str,c['namespace'],ck)
else:
class_str='%s\tpy::class_<%s::%s, std::shared_ptr<%s::%s> >(m,"%s")\n' % (class_str,c['namespace'],c['name'],c['namespace'],c['name'],c['name'])#class_str='%s\tpy::class_<%s::%s, std::shared_ptr<%s::%s> >(m,"%s")\n' % (class_str,c['namespace'],c['name'],c['namespace'],c['name'],c['name'])
class_str_hfile='%s\nvoid init_%s_%s(pybind11::module& m);\n' % (class_str_hfile,c['namespace'],ck)
class_str_cppfile='%s\nvoid init_%s_%s(pybind11::module& m) {\n' % (class_str_cppfile,c['namespace'],ck)
class_str_cppfile='%s\tpybind11::class_<%s::%s, std::shared_ptr<%s::%s> >(m,"%s")\n' % (class_str_cppfile,c['namespace'],c['name'],c['namespace'],c['name'],c['name'])#class_str_cppfile='%s\tpybind11::class_<%s::%s, std::shared_ptr<%s::%s> >(m,"%s")\n' % (class_str_cppfile,c['namespace'],c['name'],c['namespace'],c['name'],c['name'])
init_module_str='%s\tinit_%s_%s(m);\n' % (init_module_str,c['namespace'],ck)
# class_str='%s\t\t.def(py::init<>())\n' % (class_str)
# class_str_cppfile='%s\t\t.def(pybind11::init<>())\n' % (class_str_cppfile)
cm_count={}
for cm in c['methods']['public']:
cmname=cm['name']
cm_count[cmname]=cm_count.get(cmname,0)+1
for cm in c['methods']['public']:
cmname=cm['name']
linestr_api=''
linestr_pb=''
linestr_call='('
namespace_prefix='%s::' % (c['namespace'])
return_type=cm['rtnType']
if "static" in return_type:
method_cast='static_cast<%s (*)(' % (return_type.replace("static",""))
else:
method_cast='(%s (%s::%s::*)(' % (return_type,c['namespace'],ck)
# return_type=CommonUtil.add_prefix_for_words_in_str(namespace_data_type_keywords,return_type,namespace_prefix)
if cm['virtual']:
linestr_api=linestr_api+"virtual "
linestr_pb=linestr_pb+"virtual "
linestr_api=linestr_api+ return_type #cm['rtnType']
linestr_api='\t%s %s(' % (linestr_api,cm['name'])
linestr_pb=linestr_pb + return_type #cm['rtnType']
linestr_pb='\t%s %s(' % (linestr_pb, cm['name'])
nParams=len(cm['parameters'])
pcount=0
for p in cm['parameters']:
if pcount>0:
linestr_api="%s," % (linestr_api)
linestr_pb="%s," % (linestr_pb)
linestr_call='%s,' % (linestr_call)
method_cast='%s,' % (method_cast)
param_type=p['type']
# param_type=CommonUtil.add_prefix_for_words_in_str(namespace_data_type_keywords,param_type,namespace_prefix)
linestr_api="%s %s %s" % (linestr_api,param_type,p['name'])
linestr_call='%s%s' % (linestr_call,p['name'])
method_cast='%s%s' % (method_cast,p['type'])
pcount=pcount+1
linestr_api='%s)' % (linestr_api)
linestr_api=linestr_api.replace("const::google::protobuf::Map", "const ::google::protobuf::Map")
linestr_pb='%s)' % (linestr_pb)
linestr_call='%s)' % (linestr_call)
if cm['const']:
if 'static' in return_type or ('static' in cm.keys() and cm['static']):
method_cast='%s) const >' % (method_cast)
else:
method_cast='%s) const)' % (method_cast)
else:
if 'static' in return_type or ('static' in cm.keys() and cm['static']):
method_cast='%s)>' % (method_cast)
else:
method_cast='%s))' % (method_cast)
######
nParams=len(cm['parameters'])
pcount=0
param_str=''
orig_param_str=''
for p in cm['parameters']:
if pcount==0:
orig_param_str=p['type']
else:
orig_param_str='%s,%s' % (orig_param_str,p['type'])
param_str='%s, py::arg("%s")' % (param_str,p['name'])
pcount=pcount+1
if cm['constructor']:
linestr_api=linestr_api.replace('void','')
linestr_pb=linestr_pb.replace('void','')
elif cm['destructor']:
linestr_api=linestr_api.replace('void','')
linestr_pb=linestr_pb.replace('void','')
if not '~' in linestr_api:
linestr_api=linestr_api.replace(c['name'], ('~%s' % (c['name'])) )
if not '~' in linestr_pb:
linestr_pb=linestr_pb.replace(c['name'], ('~%s' % (c['name']) ) )
if cmname.startswith('mutable_') or cmname.startswith('release_') or cmname.startswith('set_allocated_'):
ignore_line='//ignore %s\n' %(linestr_api) #cmname.startswith('has_') or cmname.startswith('clear_') or
class_str='%s%s' % (class_str,ignore_line)
class_str_cppfile='%s%s' % (class_str_cppfile,ignore_line)
elif cmname.startswith('set_') and len(cm['parameters'])==1 and (cm['parameters'][0]['type']=='const char *' or cm['parameters'][0]['type']=='char const *') :
ignore_line='//ignore %s\n' %(linestr_api)
class_str="%s%s" % (class_str,ignore_line)
class_str_cppfile="%s%s" % (class_str_cppfile,ignore_line)
elif cmname.startswith('set_') and len(cm['parameters'])==2 and (cm['parameters'][0]['type']=='const char *' or cm['parameters'][0]['type']=='char const *') and cm['parameters'][1]['type']=='size_t':
ignore_line='//ignore_constchar %s\n' %(linestr_api)
class_str="%s%s" % (class_str,ignore_line)
class_str_cppfile="%s%s" % (class_str_cppfile,ignore_line)
elif is_any_of_words_in_str(ignore_method_keywords, linestr_api): ## ('operator' in linestr_api): # and cm['constructor']:
ignore_line='//ignore_keywords %s\n' %(linestr_api)
class_str="%s%s" % (class_str,ignore_line)
class_str_cppfile="%s%s" % (class_str_cppfile,ignore_line)
elif c['namespace']=='tiledb' and 'Type' in linestr_api:
ignore_line='//ignore_Type %s\n' %(linestr_api)
class_str="%s%s" % (class_str,ignore_line)
class_str_cppfile="%s%s" % (class_str_cppfile,ignore_line)
elif cm['constructor']: #and 'const ' in linestr_api and c['name'] in linestr_api: #ignore copy constructor
if cm.get('deleted',False):
ignore_line='//ignore_constructor_deleted %s\n' %(linestr_api)
class_str="%s%s" % (class_str,ignore_line)
class_str_cppfile="%s%s" % (class_str_cppfile,ignore_line)
elif c['name'].endswith('Util') or c['name'].endswith('Mgr'):
ignore_line='//ignore_constructor_staticclass %s\n' %(linestr_api)
class_str="%s%s" % (class_str,ignore_line)
class_str_cppfile="%s%s" % (class_str_cppfile,ignore_line)
else:
class_str='%s\t\t.def(py::init<%s>())\n' % (class_str,orig_param_str) #class_str="%s%s" % (class_str,ignore_line)
class_str_cppfile='%s\t\t.def(py::init<%s>())\n' % (class_str_cppfile,orig_param_str)#class_str_cppfile="%s%s" % (class_str_cppfile,ignore_line)
elif cm['destructor']: #and 'const ' in linestr_api and c['name'] in linestr_api: #ignore copy constructor
ignore_line='//ignore_destructor %s\n' %(linestr_api)
class_str="%s%s" % (class_str,ignore_line)
class_str_cppfile="%s%s" % (class_str_cppfile,ignore_line)
# elif 'boost::shared_ptr' in cm['rtnType']:
# ignore_line='//ignore %s\n' %(linestr_api)
# class_str="%s%s" % (class_str,ignore_line)
elif ('typename' in str(cm.get('template',''))) or ('class' in str(cm.get('template',''))):
ignore_line='//ignore_templatefunction %s\n' %(linestr_api)
class_str="%s%s" % (class_str,ignore_line)
class_str_cppfile="%s%s" % (class_str_cppfile,ignore_line)
elif ("google::protobuf" in linestr_api and (not is_any_of_words_in_str(protobuf_inttypes,linestr_api)) ) or ( "::google::protobuf::Map" in linestr_api):
ignore_line='//ignore %s\n' %(linestr_api)
class_str="%s%s" % (class_str,ignore_line)
class_str_cppfile="%s%s" % (class_str_cppfile,ignore_line)
else:
if 'static' in cm['rtnType'] or ('static' in cm.keys() and cm['static']):
temp_param_str=orig_param_str
if len(temp_param_str)>0 and temp_param_str[0]==',':
temp_param_str=temp_param_str.replace(',','',1)
if cm_count.get(cmname,0) > 1: # cmname.startswith("set_") and nParams==1 and "string" in method_cast:
class_str='%s\t\t.def_static("%s", static_cast< %s (*)(%s) >(&%s::%s::%s))\n' % (class_str,cmname,cm['rtnType'],temp_param_str, c['namespace'],ck,cmname)#class_str='%s\t\t.def_static("%s", %s(&%s::%s::%s)%s)\n' % (class_str,cmname,method_cast, c['namespace'],ck,cmname,param_str)
class_str_cppfile='%s\t\t.def_static("%s", static_cast< %s (*)(%s) >(&%s::%s::%s))\n' % (class_str_cppfile,cmname,cm['rtnType'],temp_param_str, c['namespace'],ck,cmname)#class_str_cppfile='%s\t\t.def_static("%s", %s(&%s::%s::%s)%s)\n' % (class_str_cppfile,cmname,method_cast, c['namespace'],ck,cmname,param_str)
elif 'py::args' in linestr_api or 'py::kwargs' in linestr_api:
class_str='%s\t\t.def_static("%s", &%s::%s::%s)\n' % (class_str,cmname,c['namespace'],ck,cmname )
class_str_cppfile='%s\t\t.def_static("%s", &%s::%s::%s)\n' % (class_str_cppfile,cmname,c['namespace'],ck,cmname )
else:
class_str='%s\t\t.def_static("%s", &%s::%s::%s%s)\n' % (class_str,cmname,c['namespace'],ck,cmname,param_str)
class_str_cppfile='%s\t\t.def_static("%s", &%s::%s::%s%s)\n' % (class_str_cppfile,cmname,c['namespace'],ck,cmname,param_str)
else:
if cm_count.get(cmname,0) > 1: # cmname.startswith("set_") and nParams==1 and "string" in method_cast:
class_str='%s\t\t.def("%s", %s(&%s::%s::%s)%s)\n' % (class_str,cmname,method_cast,c['namespace'], ck,cmname,param_str)
class_str_cppfile='%s\t\t.def("%s", %s(&%s::%s::%s)%s)\n' % (class_str_cppfile,cmname,method_cast,c['namespace'], ck,cmname,param_str)
elif 'py::args' in linestr_api or 'py::kwargs' in linestr_api:
class_str='%s\t\t.def("%s", &%s::%s::%s)\n' % (class_str,cmname,c['namespace'],ck,cmname )
class_str_cppfile='%s\t\t.def("%s", &%s::%s::%s)\n' % (class_str_cppfile,cmname,c['namespace'],ck,cmname )
else:
class_str='%s\t\t.def("%s", &%s::%s::%s,%s)\n' % (class_str,cmname,c['namespace'],ck,cmname,param_str)
class_str_cppfile='%s\t\t.def("%s", &%s::%s::%s%s)\n' % (class_str_cppfile,cmname,c['namespace'],ck,cmname,param_str)
class_str='%s\t\t;\n\n' % (class_str)
class_str_cppfile='%s\t\t;\n\n' % (class_str_cppfile)
class_str_cppfile='%s}\n\n' % (class_str_cppfile)
##for ck
| |
# This module contains functions required to produce the figures associated with Formulation_I in
# Wild et al. (in review)
from __future__ import division # This ensures result of quotient of two integers will be a float, not an integer.
import sys
import pysedsim
import numpy as np
import processing_reference_set
from data_processing import Import_Simulation_Output
from data_processing import Total_Storage_Capacity
from pysedsim_plotting import Single_Time_Series_Value_Plotting
from pysedsim_plotting import Probability_Plot
from matplotlib import pyplot
import copy
import matplotlib.lines as mlines
# Import reference set files from formulation 2, so that the same color scheme and objective can be used for the
# colors of the rule curves from formulation 1.
def determine_rule_curve_colors(formulation_name, base_formulation_name, color_obj_num, color_obj_num_base,
                                policies=None, main_file_loc=None):
    '''
    Determines color array to use for plotting rule curves (formulation I) or policies (formulation II or III),
    based upon a colormap whose range is taken from the reference set of a base formulation.

    :param formulation_name: formulation being plotted. required, string, options: 'formulation_1',
        'formulation_2', 'formulation_3'
    :param base_formulation_name: formulation whose reference set defines the base colormap range. required, string.
    :param color_obj_num: objective number (column number) in the plotted formulation's no-decision-variable
        reference set file used to color each policy (used for formulations 2/3 only).
    :param color_obj_num_base: objective number (column number) in the base formulation's no-decision-variable
        reference set file that fixes the colormap's min/max.
    :param policies: optional, row numbers (policies) from formulation_name's reference set for which colors
        should be returned (required for formulations 2/3).
    :param main_file_loc: optional, simulation output location forwarded to formulation_I_histogram() when
        formulation_name == 'formulation_1'.
    :return: color_values: array of RGBA colors, one per policy/rule curve.
    '''
    # Load the base formulation's reference set; its objective range normalizes the colormap.
    # NOTE(review): hard-coded absolute Windows path -- only valid on the original author's machine.
    reference_set_path_no_vars = r'E:\Publications in prep\Paper 2\Results\Output_Storage\Sambor_SA_' + \
                                 base_formulation_name + r'\sets\pysedsim_ref_set_no_vars.ref'
    ref_set_no_vars = np.loadtxt(reference_set_path_no_vars)  # base reference set (supplies objective range for coloring)
    ref_set_no_vars = (-1)*ref_set_no_vars  # sign flip; presumably objectives stored negated for minimization -- verify
    max_q = np.max(ref_set_no_vars[:,color_obj_num_base])  # max of the coloring objective in the base set
    min_q = np.min(ref_set_no_vars[:,color_obj_num_base])  # min of the coloring objective in the base set
    if formulation_name == 'formulation_1':
        # Color the three fixed rule-curve policies by their simulated mean annual bypass flow.
        flow_val_rule_curves = np.zeros(3)  # Array to store flow values
        [Time_Series_Import_Dictionary, RETURNED_master, df3] = formulation_I_histogram(produce_plot = 'No',
                                                                                        main_file_loc = main_file_loc)
        # NOTE(review): resample('A', how='mean') is the legacy pandas API (removed after the
        # 0.18 deprecation cycle); this module appears pinned to an old pandas -- confirm before upgrading.
        flow_val_rule_curves[0] = Time_Series_Import_Dictionary['rule_curve_1_sedmgmt_stochastic']['Bypass Channel 1'][
            'Q_out'].resample('A', how='mean').mean().mean()  # Mean annual flow value (m3/s), rule curve 1
        flow_val_rule_curves[1] = Time_Series_Import_Dictionary['rule_curve_2_sedmgmt_stochastic']['Bypass Channel 1'][
            'Q_out'].resample('A', how='mean').mean().mean()  # Mean annual flow value (m3/s), rule curve 2
        flow_val_rule_curves[2] = Time_Series_Import_Dictionary['rule_curve_3_sedmgmt_stochastic']['Bypass Channel 1'][
            'Q_out'].resample('A', how='mean').mean().mean()  # Mean annual flow value (m3/s), rule curve 3
        norm_array = [((flow_val_rule_curves[i]-min_q)/(max_q-min_q)) for i in range(len(flow_val_rule_curves))]  # normalize to [0,1] over base range
    elif formulation_name in ['formulation_2', 'formulation_3']:
        # Color the requested policies (rows) by their own objective value, normalized over the base range.
        reference_set_path_no_vars = r'E:\Publications in prep\Paper 2\Results\Output_Storage\Sambor_SA_' + \
                                     formulation_name + r'\sets\pysedsim_ref_set_no_vars.ref'
        ref_set_no_vars = np.loadtxt(reference_set_path_no_vars)
        ref_set_no_vars = (-1)*ref_set_no_vars
        norm_array = [((ref_set_no_vars[i,color_obj_num]-min_q)/(max_q-min_q)) for i in policies]  # normalize selected rows
    # NOTE(review): if formulation_name is none of the three expected values, norm_array is
    # undefined here and the cmap() call below raises NameError.
    cmap = pyplot.cm.get_cmap("jet_r")  # Use same color map used in other cases
    color_values = cmap(norm_array)  # Array storing line color values for the policies
    return color_values
def formulation_I_histogram(produce_plot = 'Yes', main_file_loc=None, var_sub_list=None):
# As this is simulation only (no optimization), we directly call the Probability_Plot() function in
# pysedsim_plotting, rather than using the Reevaluation() function in the processing_reference_set.py module.
# Specify scenarios (i.e., names of simulations corresponding to folders for which simulation outputs exist)
Sims_to_Import_1 = ["rule_curve_1_nosedmgmt_deterministic", "rule_curve_1_nosedmgmt_stochastic",
"rule_curve_2_nosedmgmt_deterministic", "rule_curve_2_nosedmgmt_stochastic",
"rule_curve_3_nosedmgmt_deterministic", "rule_curve_3_nosedmgmt_stochastic"]
Sims_to_Import_1A = ["rule_curve_1_sedmgmt_deterministic", "rule_curve_1_sedmgmt_stochastic",
"rule_curve_2_sedmgmt_deterministic", "rule_curve_2_sedmgmt_stochastic",
"rule_curve_3_sedmgmt_deterministic", "rule_curve_3_sedmgmt_stochastic"]
Sims_to_Import_2 = ["rule_curve_1_nosedmgmt_deterministic", "rule_curve_1_nosedmgmt_stochastic",
"rule_curve_2_nosedmgmt_deterministic", "rule_curve_2_nosedmgmt_stochastic",
"rule_curve_3_nosedmgmt_deterministic", "rule_curve_3_nosedmgmt_stochastic",
"rule_curve_1_sedmgmt_deterministic", "rule_curve_1_sedmgmt_stochastic",
"rule_curve_2_sedmgmt_deterministic", "rule_curve_2_sedmgmt_stochastic",
"rule_curve_3_sedmgmt_deterministic", "rule_curve_3_sedmgmt_stochastic"]
#Sims_to_Import = [[Sims_to_Import_1, Sims_to_Import_1, Sims_to_Import_1],
# [Sims_to_Import_1, Sims_to_Import_1A, Sims_to_Import_2]]
Sims_to_Import = [[Sims_to_Import_1, Sims_to_Import_1, Sims_to_Import_1],
[Sims_to_Import_1, Sims_to_Import_1, Sims_to_Import_1],
[Sims_to_Import_1A, Sims_to_Import_1A, Sims_to_Import_1A]
]
# System elements (i.e., reservoir, channel and junction names) for which data should be imported for the specified
# simulation scenarios.
Locations_to_Import_1 = {
"rule_curve_1_nosedmgmt_deterministic": ["Sambor SA"],
"rule_curve_1_nosedmgmt_stochastic": ["Sambor SA"],
"rule_curve_2_nosedmgmt_deterministic": ["Sambor SA"],
"rule_curve_2_nosedmgmt_stochastic": ["Sambor SA"],
"rule_curve_3_nosedmgmt_deterministic": ["Sambor SA"],
"rule_curve_3_nosedmgmt_stochastic": ["Sambor SA"]
}
Locations_to_Import_1A = {
"rule_curve_1_sedmgmt_deterministic": ["Sambor SA"],
"rule_curve_1_sedmgmt_stochastic": ["Sambor SA"],
"rule_curve_2_sedmgmt_deterministic": ["Sambor SA"],
"rule_curve_2_sedmgmt_stochastic": ["Sambor SA"],
"rule_curve_3_sedmgmt_deterministic": ["Sambor SA"],
"rule_curve_3_sedmgmt_stochastic": ["Sambor SA"]
}
Locations_to_Import_2 = {
"rule_curve_1_nosedmgmt_deterministic": ["Bypass Channel 1"],
"rule_curve_1_nosedmgmt_stochastic": ["Bypass Channel 1"],
"rule_curve_2_nosedmgmt_deterministic": ["Bypass Channel 1"],
"rule_curve_2_nosedmgmt_stochastic": ["Bypass Channel 1"],
"rule_curve_3_nosedmgmt_deterministic": ["Bypass Channel 1"],
"rule_curve_3_nosedmgmt_stochastic": ["Bypass Channel 1"]
}
Locations_to_Import_3 = {
"rule_curve_1_nosedmgmt_deterministic": ["Sambor SA"],
"rule_curve_1_nosedmgmt_stochastic": ["Sambor SA"],
"rule_curve_2_nosedmgmt_deterministic": ["Sambor SA"],
"rule_curve_2_nosedmgmt_stochastic": ["Sambor SA"],
"rule_curve_3_nosedmgmt_deterministic": ["Sambor SA"],
"rule_curve_3_nosedmgmt_stochastic": ["Sambor SA"],
"rule_curve_1_sedmgmt_deterministic": ["Sambor SA"],
"rule_curve_1_sedmgmt_stochastic": ["Sambor SA"],
"rule_curve_2_sedmgmt_deterministic": ["Sambor SA"],
"rule_curve_2_sedmgmt_stochastic": ["Sambor SA"],
"rule_curve_3_sedmgmt_deterministic": ["Sambor SA"],
"rule_curve_3_sedmgmt_stochastic": ["Sambor SA"]
}
Locations_to_Import_4 = {
"rule_curve_1_nosedmgmt_deterministic": ["Junction 4"],
"rule_curve_1_nosedmgmt_stochastic": ["Junction 4"],
"rule_curve_2_nosedmgmt_deterministic": ["Junction 4"],
"rule_curve_2_nosedmgmt_stochastic": ["Junction 4"],
"rule_curve_3_nosedmgmt_deterministic": ["Junction 4"],
"rule_curve_3_nosedmgmt_stochastic": ["Junction 4"],
"rule_curve_1_sedmgmt_deterministic": ["Junction 4"],
"rule_curve_1_sedmgmt_stochastic": ["Junction 4"],
"rule_curve_2_sedmgmt_deterministic": ["Junction 4"],
"rule_curve_2_sedmgmt_stochastic": ["Junction 4"],
"rule_curve_3_sedmgmt_deterministic": ["Junction 4"],
"rule_curve_3_sedmgmt_stochastic": ["Junction 4"]
}
Locations_to_Import_all = {
"rule_curve_1_nosedmgmt_deterministic": ["Sambor SA", "Junction 4", 'Bypass Channel 1'],
"rule_curve_1_nosedmgmt_stochastic": ["Sambor SA", "Junction 4", 'Bypass Channel 1'],
"rule_curve_2_nosedmgmt_deterministic": ["Sambor SA", "Junction 4", 'Bypass Channel 1'],
"rule_curve_2_nosedmgmt_stochastic": ["Sambor SA", "Junction 4", 'Bypass Channel 1'],
"rule_curve_3_nosedmgmt_deterministic": ["Sambor SA", "Junction 4", 'Bypass Channel 1'],
"rule_curve_3_nosedmgmt_stochastic": ["Sambor SA", "Junction 4", 'Bypass Channel 1'],
"rule_curve_1_sedmgmt_deterministic": ["Sambor SA", "Junction 4", 'Bypass Channel 1'],
"rule_curve_1_sedmgmt_stochastic": ["Sambor SA", "Junction 4", 'Bypass Channel 1'],
"rule_curve_2_sedmgmt_deterministic": ["Sambor SA", "Junction 4", 'Bypass Channel 1'],
"rule_curve_2_sedmgmt_stochastic": ["Sambor SA", "Junction 4", 'Bypass Channel 1'],
"rule_curve_3_sedmgmt_deterministic": ["Sambor SA", "Junction 4", 'Bypass Channel 1'],
"rule_curve_3_sedmgmt_stochastic": ["Sambor SA", "Junction 4", 'Bypass Channel 1']
}
Locations_to_Import = [[Locations_to_Import_1, Locations_to_Import_1, Locations_to_Import_2],
[Locations_to_Import_1, Locations_to_Import_1, Locations_to_Import_1],
[Locations_to_Import_1A, Locations_to_Import_1A, Locations_to_Import_1A]
] # _4
# Time series variables to be imported for the scenarios and system elements specified above.
if var_sub_list is None:
# Import default variable names
var_sub_list = ['Hydropower_avg_MWH', 'larv_surv', 'larv_pass', 'Q_out', 'Settled_mass', 'Settled_mass',
'SS_W_out', 'res_flushed_load', 'larv_mass_out_surv_total'] # res_flushed_load
else:
var_sub_list = var_sub_list
# Import all relevant data for all scenarios, system locations and variables listed above
[Time_Series_Import_Dictionary, Num_Realizations, Num_Years, TSID_key_list] = Import_Simulation_Output(Sims_to_Import_2,
Locations_to_Import_all,
var_sub_list,
main_file_loc)
# Loop through scenarios, locations, and variables and replace the deterministic ones with a single value so they can
# be plotted this way.
replace_deterministic_with_mean = 'Yes'
scenario_sub_list = [
'rule_curve_1_nosedmgmt_deterministic',
'rule_curve_1_sedmgmt_deterministic',
'rule_curve_2_nosedmgmt_deterministic',
'rule_curve_2_sedmgmt_deterministic',
'rule_curve_3_nosedmgmt_deterministic',
'rule_curve_3_sedmgmt_deterministic'
]
# Create a copy of Time_Series_Import_Dictionary to manipulate
tsid_copy = copy.deepcopy(Time_Series_Import_Dictionary)
tsid_copy_2 = copy.deepcopy(Time_Series_Import_Dictionary)
month_num_list = ['6'] #, '6', '7', '8'
month_num_list_ints = [int(i) for i in month_num_list]
if replace_deterministic_with_mean == 'Yes':
for scenario in scenario_sub_list:
for loc_key in tsid_copy[scenario].keys():
for var_key in tsid_copy[scenario][loc_key]:
# Replace all time series values with the mean value
if var_key in ['SS_W_out']:
i=int(month_num_list[0])-1
for mon in month_num_list:
i+=1
tsid_copy_2[scenario][loc_key][var_key]['Month'] = tsid_copy_2[scenario][loc_key][var_key][
'Realization1'].index.month
tsid_copy_2[scenario][loc_key][var_key][str(i)] = tsid_copy_2[scenario][
loc_key][var_key]['Realization1'][tsid_copy_2[scenario][loc_key][var_key].index.month==i]
mean_value = tsid_copy_2[scenario][loc_key][var_key][month_num_list].mean(axis=1).mean()
tsid_copy[scenario][loc_key][var_key]['Realization1'] = mean_value #put mean daily value in
# tsid_copy_2[scenario][loc_key][var_key]['mon_val'][tsid_copy_2[scenario][loc_key][var_key][
# 'mon_val'].index.month==i] = \
# tsid_copy_2[scenario][loc_key][var_key]['Realization1'][tsid_copy_2[scenario][loc_key][
# var_key][
#
# 'Realization1'].index.month==i].values
# tsid_copy_2[scenario][loc_key][var_key].index.month==i]['Realization1']
#tsid_copy_2[scenario][loc_key][var_key]['Realization1'].loc[] = tsid_copy_2[scenario][loc_key][
# var_key][
# tsid_copy_2[scenario][loc_key][var_key].index.month==i]['Realization1']
#tsid_copy_2[scenario][loc_key][var_key][
#'Realization1'].index.month
#tsid_copy_2[scenario][loc_key][var_key][mon] [tsid_copy_2.index.month==i].values
# Sediment load discharge is being computed monthly, so the average daily value being placed
# needs to be handled differently
#mean_5 =
#tsid_copy[scenario][loc_key][var_key].loc[
# (tsid_copy[scenario][loc_key][var_key].index.month == 2) & (
# tsid_copy[scenario][loc_key][var_key].index.day == 29)] = 0
#month_5_mean = tsid_copy[scenario][loc_key][var_key]['Realization1'].resample
#month_1_mean = tsid_copy[scenario][loc_key][var_key]['Realization1'].resample
#tsid_copy[scenario][loc_key][var_key]['Realization1'] = tsid_copy[scenario][loc_key][var_key][
# 'Realization1'].resample('M', how=sum)
else:
tsid_copy[scenario][loc_key][var_key]['Realization1'] = tsid_copy[scenario][loc_key][var_key][
'Realization1'].mean()
# Set any values on day 29 as == 0 for any variables that involve summing (e.g., energy)
if var_key in ['Hydropower_avg_MWH']:
tsid_copy[scenario][loc_key][var_key].loc[
(tsid_copy[scenario][loc_key][var_key].index.month == 2) & (
tsid_copy[scenario][loc_key][var_key].index.day == 29)] = 0
if produce_plot == 'Yes':
# Plot PDFs of performance of the four rule curve policies across six performance measures.
# Top 3 PDFs will be un-related to sediment so will show rule curves 1 and 2; bottom 3 will show rule curves 1-4
figure_storage_loc = r'E:\Publications in prep\Paper 2\Results\Figures and Tables\Figure Formulation 1 histogram'
num_pm_plots = [3, 3, 3] # number of performance measures plots to create. Each will be saved as | |
#!/usr/bin/env python
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from numpy import *
import pickle
import sys,os,string
from astropy.io import ascii
from myplotlib import PanelPlot
import config
import STANstats
import get_data
import sigfig
try:
import corner
except:
corner = None
def MAD(a):
    '''Return the median absolute deviation of *a*, scaled by 1.48
    (the factor that makes it comparable to a Gaussian sigma).'''
    center = median(a)
    return 1.48 * median(absolute(a - center))
def RMS(a):
    '''Return the root-mean-square deviation of *a* about its median.'''
    deviations = a - median(a)
    return sqrt(mean(power(deviations, 2)))
def tomag(flux, eflux, zp):
    '''Convert a flux and its uncertainty to a magnitude and magnitude error.

    Uses zero point *zp*; the error is propagated with the usual
    1.087 ~= 2.5/ln(10) factor.  Returns (mag, mag_error).'''
    magnitude = -2.5 * log10(flux) + zp
    mag_error = eflux / flux * 1.087
    return magnitude, mag_error
def toflux(mag,emag,zp):
    '''Convert a magnitude and its uncertainty back to flux with zero-point zp.'''
    f = power(10, -0.4*(mag - zp))
    # Inverse of the linearized error used in tomag (1.087 ~ 2.5/ln(10))
    ef = f*emag/1.087
    return f, ef
# --- Setup: load configuration, MCMC chains, and input data ----------------
cfg = config.config(sys.argv[1])
# NOTE(review): pickle.load on an arbitrary file is unsafe for untrusted
# input; this assumes the sampler output file is trusted.
with open(cfg.sampler.output) as f:
    d = pickle.load(f)
c = STANstats.STANchains(chains=d['samples'], flatnames=d['flatnames'])
if not cfg.model.NGauss:
    cfg.model.NGauss = 1
# MCMC parameters: inject the posterior median and std-dev of every sampled
# parameter into the module namespace (locals() is globals() at module
# level, which is why these assignments take effect).
for var in c.params:
    locals()[var] = c.median(var)
    locals()['e_'+var] = c.std(var)
# Data from pickle file
for var in d['data']:
    locals()[var] = d['data'][var]
# If the model worked in flux space, keep the fluxes and convert the arrays
# to magnitudes in-place.  Zero-points: 25.0 (SN hosts), 23.0 (N4258),
# 12.0 (LMC), 3.0 (MW) -- presumably matching the sampler's flux scaling;
# TODO confirm against the model code.
if not cfg.model.in_mag:
    flux,e_flux = mag,e_mag
    mag,e_mag = tomag(mag, e_mag, 25.0)
    flux4258,e_flux4258 = mag4258, e_mag4258
    mag4258,e_mag4258 = tomag(mag4258, e_mag4258, 23.0)
    fluxLMC,e_fluxLMC = magLMC,e_magLMC
    magLMC,e_magLMC = tomag(magLMC, e_magLMC, 12.0)
    fluxMW,e_fluxMW = magMW, e_magMW
    magMW,e_magMW = tomag(magMW, e_magMW, 3.0)
# Accumulators for the per-host median-residual summary plots
w_P = []
w_VI = []
w_OH = []
w_res = []
s_res = []
w_labs = []
# Running axis limits, widened as each data set is plotted
Pmin,Pmax = inf,-inf
VImin,VImax = inf,-inf
OHmin,OHmax = inf,-inf
fig1 = PanelPlot(1,2, pwidths=[1], pheights=[0.2, 0.8], figsize=(10,6))
fig2 = PanelPlot(1,2, pwidths=[1], pheights=[0.2, 0.8], figsize=(10,6))
fig3 = PanelPlot(1,2, pwidths=[1], pheights=[0.2, 0.8], figsize=(10,6))
aresids = []
alabels = []
if cfg.model.use_MW:
if 'betaVI_MW' not in c.params:
betaVI_MW = betaVI
e_betaVI_MW = e_betaVI
dist = -5*log10(pi_true) - 5
model = dist + betaVI*VI_MW + betaP*P_MW + betaOH*(OH_MW-9.5) + M
resids = magMW - model
if cfg.model.in_mag:
aresids.append(resids)
else:
r = fluxMW - toflux(model, 0, 3.0)[0]
aresids.append(r/RMS(r))
alabels.append('MW')
w_res.append(median(resids))
s_res.append(std(resids))
OH_MW = array([OH_MW]*len(VI_MW))
w_P.append(median(P_MW))
w_VI.append(median(VI_MW))
w_OH.append(median(OH_MW))
w_labs.append('MW')
fig1.axes[0].plot(P_MW, resids, 'o', color='blue')
fig2.axes[0].plot(VI_MW,resids, 'o', color='blue')
fig3.axes[0].plot([OH_MW]*len(VI_MW),resids, 'o', color='blue')
fig1.axes[1].plot(P_MW, magMW - dist - betaVI_MW*VI_MW - betaOH*(OH_MW-9.5),
'o', color='blue', label='MW')
fig2.axes[1].plot(VI_MW, magMW - dist - betaP*P_MW - betaOH*(OH_MW-9.5),
'o', color='blue', label='MW')
fig3.axes[1].plot(OH_MW, magMW - dist - betaP*P_MW - betaVI_MW*VI_MW,
'o', color='blue', label='MW')
fig1.axes[0].axhline(eps_MW, linestyle='--', color='blue')
fig1.axes[0].axhline(-eps_MW, linestyle='--', color='blue')
Pmin = min(Pmin, P_MW.min())
Pmax = max(Pmax, P_MW.max())
VImin = min(VImin, VI_MW.min())
VImax = max(VImax, VI_MW.max())
OHmin = min(OHmin, OH_MW.min())
OHmax = max(OHmax, OH_MW.max())
if cfg.model.use_LMC:
if 'betaVI_LMC' not in c.params:
betaVI_LMC = betaVI
e_betaVI_LMC = e_betaVI
model = DM_LMC + betaVI_LMC*VI_LMC + betaP*P_LMC + \
betaOH*(OH_LMC-9.5) + M
resids = magLMC - model
w_res.append(median(resids))
s_res.append(std((resids)))
if cfg.model.in_mag:
aresids.append(resids)
else:
r = fluxLMC - toflux(model,0.0, 12.0)[0]
aresids.append(r/RMS(r))
alabels.append('LMC')
OH_LMC = array([OH_LMC]*len(P_LMC))
w_P.append(median(P_LMC))
w_VI.append(median(VI_LMC))
w_OH.append(median(OH_LMC))
w_labs.append('LMC')
fig1.axes[0].plot(P_LMC, resids, 's', color='k')
fig2.axes[0].plot(VI_LMC, resids, 's', color='k')
fig3.axes[0].plot(OH_LMC, resids, 's', color='k')
fig1.axes[1].plot(P_LMC, resids + betaP*P_LMC + M, 's', color='k',
label='LMC')
fig2.axes[1].plot(VI_LMC, resids + betaVI_LMC*VI_LMC + M, 's', color='k',
label='LMC')
fig3.axes[1].plot(OH_LMC, resids + betaOH*(OH_LMC-9.5) + M, 's', color='k',
label='LMC')
fig1.axes[0].axhline(eps_LMC, linestyle='--', color='k')
fig1.axes[0].axhline(-eps_LMC, linestyle='--', color='k')
Pmin = min(Pmin, P_LMC.min())
Pmax = max(Pmax, P_LMC.max())
VImin = min(VImin, VI_LMC.min())
VImax = max(VImax, VI_LMC.max())
OHmin = min(OHmin, OH_LMC.min())
OHmax = max(OHmax, OH_LMC.max())
if cfg.model.use_4258:
if 'betaVI_4258' not in c.params:
betaVI_4258 = betaVI
e_betaVI_4258 = e_betaVI
model = DM_4258 + betaVI_4258*VI_4258 + betaP*P_4258 + \
betaOH*(OH_4258-9.5) + M
resids = mag4258 - model
if cfg.model.in_mag:
aresids.append(resids)
else:
r = flux4258 - toflux(model, 0.0, 23.0)[0]
aresids.append(r/RMS(r))
alabels.append('N4258')
w_res.append(median(resids))
s_res.append(std((resids)))
w_P.append(median(P_4258))
w_VI.append(median(VI_4258))
w_OH.append(median(OH_4258))
w_labs.append('4258')
fig1.axes[0].plot(P_4258, resids, '^', color='red')
fig2.axes[0].plot(VI_4258, resids, '^', color='red')
fig3.axes[0].plot(OH_4258, resids, '^', color='red')
fig1.axes[1].plot(P_4258, resids + betaP*P_4258 + M, '^', color='red',
label='4258')
fig2.axes[1].plot(VI_4258, resids + betaVI_4258*VI_4258+M, '^', color='red',
label='4258')
fig3.axes[1].plot(OH_4258, resids + betaOH*(OH_4258-9.5)+M, '^', color='red',
label='4258')
if cfg.model.NGauss > 1:
#mid = argmax(c.median('theta_4258'))
mid = argmax(c.median('theta'))
#fig1.axes[0].axhline(eps_4258[mid], linestyle='--', color='red')
#fig1.axes[0].axhline(-eps_4258[mid], linestyle='--', color='red')
fig1.axes[0].axhline(eps[mid], linestyle='--', color='red')
fig1.axes[0].axhline(-eps[mid], linestyle='--', color='red')
else:
#fig1.axes[0].axhline(eps_4258, linestyle='--', color='red')
#fig1.axes[0].axhline(-eps_4258, linestyle='--', color='red')
fig1.axes[0].axhline(eps, linestyle='--', color='red')
fig1.axes[0].axhline(-eps, linestyle='--', color='red')
Pmin = min(Pmin, P.min())
Pmax = max(Pmax, P.max())
VImin = min(VImin, VI_4258.min())
VImax = max(VImax, VI_4258.max())
OHmin = min(OHmin, OH_4258.min())
OHmax = max(OHmax, OH_4258.max())
#xx1 = array([Pmin,Pmax])
xx1 = linspace(Pmin,Pmax, 100)
xx2 = array([VImin,VImax])
xx3 = array([OHmin,OHmax])
fig1.axes[1].plot(xx1, M + betaP*xx1, '-', color='k')
fig3.axes[1].plot(xx3, M + betaOH*(xx3-9.5), '-', color='k')
if cfg.model.use_MW:
fig2.axes[1].plot(xx2, M + betaVI_MW*xx2, '-', color='blue')
if cfg.model.use_LMC:
fig2.axes[1].plot(xx2, M + betaVI_LMC*xx2, '-', color='k')
if cfg.model.use_4258:
fig2.axes[1].plot(xx2, M + betaVI_4258*xx2, '-', color='red')
fig1.axes[0].axhline(0, linestyle='-', color='k')
fig2.axes[0].axhline(0, linestyle='-', color='k')
fig3.axes[0].axhline(0, linestyle='-', color='k')
fig1.axes[0].set_xlabel(r'$\log_{10}\left(P\right)$')
fig2.axes[0].set_xlabel('$V-I$')
fig3.axes[0].set_xlabel('$[O/H]$')
fig1.axes[0].set_ylabel('resids')
fig1.axes[1].set_ylabel('corrected mag')
fig2.axes[0].set_ylabel('resids')
fig2.axes[1].set_ylabel('corrected mag')
fig3.axes[0].set_ylabel('resids')
fig3.axes[1].set_ylabel('corrected mag')
fig1.axes[1].legend()
fig2.axes[1].legend()
fig3.axes[1].legend()
plt.draw()
fig1.set_limits()
fig1.draw()
fig2.set_limits()
fig2.draw()
fig3.set_limits()
fig3.draw()
fig1.fig.savefig('anchors_P.pdf')
fig2.fig.savefig('anchors_VI.pdf')
fig3.fig.savefig('anchors_OH.pdf')
plt.close(fig1.fig)
plt.close(fig2.fig)
plt.close(fig3.fig)
symbs = ['o','s','^','d','p','v']*5
cols = ['k']*6+['red']*6+['blue']*6+['green']*6+['orange']*6
Pmin,Pmax = inf,-inf
VImin,VImax = inf,-inf
OHmin,OHmax = inf,-inf
fig1 = PanelPlot(1,2, pwidths=[1], pheights=[0.2, 0.8], figsize=(10,6))
fig2 = PanelPlot(1,2, pwidths=[1], pheights=[0.2, 0.8], figsize=(10,6))
fig3 = PanelPlot(1,2, pwidths=[1], pheights=[0.2, 0.8], figsize=(10,6))
if len(shape(betaVI)) == 0:
betaVI = ones((S,))*betaVI
e_betaVI = ones((S,))*e_betaVI
sresids = []
for i in range(S):
figi = PanelPlot(1,2, pwidths=[1], pheights=[0.2,0.8], figsize=(10,6))
gids = equal(ID, i+1)
model = DM[i] + betaVI[i]*VI[gids] + betaP*P[gids] + \
betaOH*(OH[gids]-9.5) + M
resids = mag[gids] - model
if cfg.model.in_mag:
sresids.append(resids)
else:
r = flux[gids] - toflux(model, 0.0, 25.0)[0]
sresids.append(r/RMS(r))
w_res.append(median(resids))
s_res.append(std((resids)))
w_P.append(median(P[gids]))
w_VI.append(median(VI[gids]))
w_OH.append(median(OH[gids]))
w_labs.append(cephlist[i])
fig1.axes[0].plot(P[gids],resids, symbs[i], color=cols[i])
figi.axes[0].plot(P[gids],resids, 'o', color='k')
fig2.axes[0].plot(VI[gids],resids, symbs[i], color=cols[i])
fig3.axes[0].plot(OH[gids],resids, symbs[i], color=cols[i])
fig1.axes[1].plot(P[gids], resids + betaP*P[gids] + M, symbs[i],
color=cols[i], label=cephlist[i])
figi.axes[1].plot(P[gids], resids + betaP*P[gids] + M, 'o',
color='k', label=cephlist[i])
reals = c.get_trace('M', merge=True)[newaxis,:,0] + \
(c.get_trace('DM', merge=True)[newaxis,:,i]-DM[i]) + \
c.get_trace('betaP', merge=True)[newaxis,:,0]*xx1[:,newaxis]
mreals = median(reals, axis=1)
sreals = std(reals, axis=1)
fig2.axes[1].plot(VI[gids], resids + betaVI[i]*VI[gids] + M, symbs[i],
color=cols[i], label=cephlist[i])
fig3.axes[1].plot(OH[gids], resids + betaOH*(OH[gids]-9.5) + M, symbs[i],
color=cols[i], label=cephlist[i])
figi.axes[0].axhline(0, linestyle='-', color='red')
figi.axes[0].plot(xx1, sreals, '--', color='red')
figi.axes[0].plot(xx1, -sreals, '--', color='red')
#figi.axes[0].axhline(eps, linestyle='--', color='red')
#figi.axes[1].plot(xx1, M + betaP*xx1, '-', color='red')
figi.axes[1].plot(xx1, mreals, '-', color='red')
figi.axes[1].plot(xx1, mreals+sreals, '--', color='red')
figi.axes[1].plot(xx1, mreals-sreals, '--', color='red')
figi.axes[0].set_xlabel(r'$\log_{10}\left(P\right)$')
figi.axes[0].set_ylabel('resids')
figi.axes[1].set_ylabel('corrected mag')
figi.axes[1].legend(fontsize=8)
figi.set_limits()
figi.draw()
figi.fig.savefig('SN_hosts_P_%s.pdf' % cephlist[i])
plt.close(figi.fig)
Pmin = min(Pmin, P.min())
Pmax = max(Pmax, P.max())
VImin = min(VImin, VI.min())
VImax = max(VImax, VI.max())
OHmin = min(OHmin, OH.min())
OHmax = max(OHmax, OH.max())
xx1 = array([Pmin,Pmax])
xx2 = array([VImin,VImax])
xx3 = array([OHmin,OHmax])
if 'm_betaVI' not in d:
m_betaVI = mean(betaVI)
fig1.axes[1].plot(xx1, M + betaP*xx1, '-', color='k')
fig2.axes[1].plot(xx2, M + m_betaVI*xx2, '-', color='k')
fig3.axes[1].plot(xx3, M + betaOH*(xx3-9.5), '-', color='k')
fig1.axes[0].axhline(0, linestyle='-', color='k')
fig2.axes[0].axhline(0, linestyle='-', color='k')
fig2.axes[0].axhline(0, linestyle='-', color='k')
if cfg.model.NGauss > 1:
aeps = eps[argmax(c.median('theta'))]
else:
aeps = eps
fig1.axes[0].axhline(aeps, linestyle='--', color='k')
fig1.axes[0].axhline(-aeps, linestyle='--', color='k')
fig2.axes[0].axhline(aeps, linestyle='--', color='k')
fig2.axes[0].axhline(-aeps, linestyle='--', color='k')
fig3.axes[0].axhline(aeps, linestyle='--', color='k')
fig3.axes[0].axhline(-aeps, linestyle='--', color='k')
fig1.axes[0].set_xlabel(r'$\log_{10}\left(P\right)$')
fig2.axes[0].set_xlabel('$V-I$')
fig3.axes[0].set_xlabel('$[O/H]$')
fig1.axes[0].set_ylabel('resids')
fig1.axes[1].set_ylabel('corrected mag')
fig2.axes[0].set_ylabel('resids')
fig2.axes[1].set_ylabel('corrected mag')
fig3.axes[0].set_ylabel('resids')
fig3.axes[1].set_ylabel('corrected mag')
fig1.axes[1].legend(fontsize=8)
fig2.axes[1].legend(fontsize=8)
fig3.axes[1].legend(fontsize=8)
plt.draw()
fig1.set_limits()
fig1.draw()
fig2.set_limits()
fig2.draw()
fig3.set_limits()
fig3.draw()
fig1.fig.savefig('SN_hosts_P.pdf')
fig2.fig.savefig('SN_hosts_VI.pdf')
fig3.fig.savefig('SN_hosts_OH.pdf')
plt.close(fig1.fig)
plt.close(fig2.fig)
plt.close(fig3.fig)
# Residual histogram
sresids = concatenate(sresids)
aresids.append(sresids)
alabels.append('SN Hosts')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(aresids, label=alabels, histtype='step', stacked=True, normed=True,
bins=100, linewidth=2)
ax.set_xlabel('Model residuals', fontsize=16)
ax.set_xlim(-5,5)
ax.legend()
plt.tight_layout()
fig.savefig('resids_hist.pdf')
fig3 = plt.figure() # to told weighted average of residuals
fig4 = plt.figure() # to told weighted average of residuals
fig5 = plt.figure() # to told weighted average of residuals
ax3 = fig3.add_subplot(111)
ax4 = fig4.add_subplot(111)
ax5 = fig5.add_subplot(111)
for i in range(len(w_res)):
ax3.errorbar([w_P[i]], [w_res[i]], fmt=symbs[i], color=cols[i],
label=w_labs[i], yerr=s_res[i], capsize=0, ms=10)
ax4.errorbar([w_VI[i]], [w_res[i]], fmt=symbs[i], color=cols[i],
label=w_labs[i], yerr=s_res[i], capsize=0, ms=10)
ax5.errorbar([w_OH[i]], [w_res[i]], fmt=symbs[i], color=cols[i],
label=w_labs[i], yerr=s_res[i], capsize=0, ms=10)
lgd1 = ax3.legend(fontsize=8, loc=3, ncol=4, bbox_to_anchor=(0.,1.02,1.,0.102),
mode='expand')
lgd2 = ax4.legend(fontsize=8, loc=3, ncol=4, bbox_to_anchor=(0.,1.02,1.,0.102),
mode='expand')
lgd3 = ax5.legend(fontsize=8, loc=3, ncol=4, bbox_to_anchor=(0.,1.02,1.,0.102),
mode='expand')
ax3.set_xlabel(r'$\log_{10}\left(P\right)$')
ax4.set_xlabel(r'$V-I$')
ax5.set_xlabel(r'$[O/H]$')
ax3.set_ylabel(r'median residuals')
ax4.set_ylabel(r'median residuals')
ax5.set_ylabel(r'median residuals')
fig3.savefig('ceph_res_comb_P.pdf', bbox_extra_artists=(lgd1,), bbox_inches='tight')
fig4.savefig('ceph_res_comb_VI.pdf', bbox_extra_artists=(lgd2,), bbox_inches='tight')
fig5.savefig('ceph_res_comb_OH.pdf', bbox_extra_artists=(lgd3,), bbox_inches='tight')
plt.close(fig3)
plt.close(fig4)
plt.close(fig5)
# Now, let's do some triangle plots and output the parameters of interest.
# Cepheid parameters:
if corner is not None:
tp1 = c.triangle_plot(['M','betaP','betaVI','betaOH'])
tp1.savefig('Ceph_triangle.pdf')
else:
print "Warning: corner is not installed, so no triangle plots. To install:"
print "pip install corner"
# Now we output some tables.
fout = open('results_table.txt','w')
fout.write("Cepheids\n")
fout.write("--------\n")
fout.write('M: %s +/- %s\n' % sigfig.round_sig_error(M,e_M,2))
fout.write('betaP: %s +/- %s\n' % sigfig.round_sig_error(betaP,e_betaP,2))
fout.write('betaOH: %s +/- %s\n' % sigfig.round_sig_error(betaOH,e_betaOH,2))
hosts = []
headers = ['Host','DM','betaVI']
headers += ['eps%d' % i for i in range(cfg.model.NGauss)]
cols = [[],[]]
ecols = [[],[]]
for i in range(cfg.model.NGauss):
cols.append([])
ecols.append([])
if cfg.model.use_MW:
hosts.append('MW')
cols[0].append(-1); ecols[0].append(-1)
cols[1].append(betaVI_MW); ecols[1].append(e_betaVI_MW)
cols[2].append(eps_MW); ecols[2].append(e_eps_MW)
for i in range(1,cfg.model.NGauss):
cols[i+2].append(-1)
ecols[i+2].append(-1)
if cfg.model.use_LMC:
hosts.append('LMC')
cols[0].append(DM_LMC); ecols[0].append(e_DM_LMC)
cols[1].append(betaVI_LMC); ecols[1].append(e_betaVI_LMC)
cols[2].append(eps_LMC); ecols[2].append(e_eps_LMC)
for i in range(1,cfg.model.NGauss):
cols[i+2].append(-1)
ecols[i+2].append(-1)
if cfg.model.use_4258:
hosts.append('4258')
cols[0].append(DM_4258); ecols[0].append(e_DM_4258)
cols[1].append(betaVI_4258); ecols[1].append(e_betaVI_4258)
if cfg.model.NGauss > 1:
for i in range(cfg.model.NGauss):
#cols[2+i].append(eps_4258[i]);
#ecols[2+i].append(e_eps_4258[i])
cols[2+i].append(eps[i]);
ecols[2+i].append(e_eps[i])
else:
#cols[2].append(eps_4258);
#ecols[2].append(e_eps_4258)
cols[2].append(eps);
ecols[2].append(e_eps)
hosts += cephlist
cols[0] = concatenate([cols[0], DM]); ecols[0] = concatenate([ecols[0], e_DM])
cols[1] = concatenate([cols[1], betaVI]); ecols[1] = concatenate([ecols[1],
e_betaVI])
if cfg.model.NGauss > 1:
for i in range(cfg.model.NGauss):
cols[2+i] = concatenate([cols[2+i], [eps[i]]*S])
ecols[2+i] = concatenate([ecols[2+i], [e_eps[i]]*S])
else:
cols[2] = concatenate([cols[2], [eps]*S])
ecols[2] = concatenate([ecols[2], [e_eps]*S])
lines = sigfig.format_table(cols=cols, errors=ecols, n=2, headers=headers,
labels=hosts)
[fout.write(line+"\n") for line in lines]
fout.close()
# Final covariance matrix. We need to be a bit more robust here
DMs = c.get_trace('DM', merge=True)
devs = absolute(DMs - c.median('DM', merge=True)[newaxis,:])
# Do a 5-sigma clip to get rid of really deviant points for NGC 4424
gids = less(devs, 5*1.4826*c.mad('DM', merge=True)[newaxis,:])
gids = product(gids, axis=1).astype(bool)
C = cov(DMs[gids,:].T)
DMs = c.median('DM')
eDMs = c.std('DM')
f = open('DM_cov.dat','w')
[f.write(ceph+" ") for ceph in cephlist]
f.write('\n')
[f.write("%f " % DM) for DM in DMs]
f.write('\n')
for i in range(C.shape[0]):
for j in range(C.shape[1]):
f.write("%f " % C[i,j])
f.write('\n')
# Now make a nice figure
fig = plt.figure()
ax = fig.add_subplot(111)
sids | |
and b}
self.paths = self.set_height()
self.data_dict = {a: b for a, b in zip(pref.menu_e, self.paths)}
self.plot_dict = {a:b for a,b in self.data_dict.items() if self.plot_path[a] and b}
def _verify_temp(self,value):
    """Validate *value* as a Celsius temperature and convert it to Kelvin.

    Returns the temperature in Kelvin on success.  Returns None (and
    clears self.span_worthy, appending a message to self.msg) when the
    value is unparseable or at/below absolute zero.
    """
    # Normalize a decimal comma to a dot ONCE and parse that string.
    # Bug fix: the original validated value.replace(",", ".") but then
    # called float(value), so comma-decimal input such as "25,5" passed
    # validation and crashed with an uncaught ValueError.
    normalized = value.replace(",", ".")
    if not is_str_float(normalized):
        self.span_worthy = False
        text = "Unrecognized temperature: {}\n"
        self.msg.append(text.format(str(value)))
        return None
    celsius = float(normalized)
    if celsius <= -273.15:
        # Physically impossible temperature (at or below absolute zero)
        self.span_worthy = False
        text = "Temperature should not be smaller than or equal to {} °C\n"
        self.msg.append(text.format(self.commafy("-273.15")))
        return None
    t = celsius + 273.15
    text = "Temperature is set to {} K\n"
    self.msg.append(text.format(self.commafy("{:.2f}".format(t))))
    return t
def commafy(self,item):
    """Render *item* using the configured decimal separator (comma or dot)."""
    text = str(item)
    if self.comma:
        return text.replace(".", ",")
    return text.replace(",", ".")
@functools.lru_cache(maxsize=1)
def max_value(self):
    """Largest value in column e_source across all non-empty paths (cached)."""
    per_path_maxima = (max(point[self.e_source] for point in path)
                       for path in self.raw_crt_dict.values() if path)
    return max(per_path_maxima)
@functools.lru_cache(maxsize=1)
def min_value(self):
    """Smallest value in column e_source across all non-empty paths (cached)."""
    per_path_minima = (min(point[self.e_source] for point in path)
                       for path in self.raw_crt_dict.values() if path)
    return min(per_path_minima)
@functools.lru_cache(maxsize=1)
def delta_value(self):
    """Spread between the extreme e_source values of the data set (cached)."""
    return self.max_value() - self.min_value()
@functools.lru_cache(maxsize=1)
def n_col(self):
    """Highest column index used by any plotted path; 0 when nothing is plotted."""
    try:
        return max(max(point[0] for point in path)
                   for path in self.plot_dict.values() if path)
    except ValueError:
        # max() over an empty sequence: no plotted structures at all
        return 0
@functools.lru_cache(maxsize=1)
def set_height(self):
    """Append a pixel height to every structure of every path (cached).

    Heights interpolate linearly between bottom_height (lowest energy)
    and top_height (highest energy); a flat data set (delta_value == 0)
    falls back to the vertical midpoint.
    """
    span = self.bottom_height - self.top_height
    fallback = int(self.top_height/2 + self.bottom_height/2)
    decorated_paths = []
    for path_data in self.raw_crt:
        decorated = []
        for structure in path_data:
            try:
                fraction = 1 - (structure[self.e_source] - self.min_value()) / self.delta_value()
                pixel = int(round(abs(fraction*span + self.top_height)))
            except ZeroDivisionError:
                pixel = fallback
            decorated.append([*structure, pixel])
        decorated_paths.append(decorated)
    return decorated_paths
def set_single_height(self,value):
    """Pixel height for one energy *value* (used for grid labels), offset by 50."""
    try:
        fraction = 1 - (value - self.min_value()) / self.delta_value()
        return int(fraction*(self.bottom_height - self.top_height) + self.top_height + 50)
    except ZeroDivisionError:
        # Flat data set: place at the vertical midpoint
        return int(self.top_height/2 + self.bottom_height/2)
def set_horizontal(self,mult,add):
    """X pixel coordinate for column *mult* with an extra offset of *add*."""
    column_offset = (float(mult) - 1) * int(self.x_space)
    return int(column_offset + int(self.x_start_offset) + int(add) + 80)
def char_care(self,text):
    """Escape any non-ASCII characters as XML numeric character references."""
    escaped = str(text).encode("ascii", "xmlcharrefreplace")
    return escaped.decode("utf-8")
def is_avail(self,name,path_a,num_a,path_b,num_b,ignore_same=False):
    """Return True when a link between (path_a, num_a) and (path_b, num_b) is drawable.

    Both endpoints must exist in self.data_dict and both paths must be
    enabled in self.plot_path.  Same-column links are rejected (unless
    ignore_same, and never when both column and path coincide), with an
    explanatory message appended to self.msg.  Unknown paths or
    non-numeric columns simply return False.
    """
    try:
        first = int(num_a) in [a[0] for a in self.data_dict[path_a]]
        second = int(num_b) in [a[0] for a in self.data_dict[path_b]]
        if int(num_a) == int(num_b) and path_a == path_b:
            # Fixed message typo: "conect" -> "connect"
            text = f"{name}: Cannot connect items on same column and path"
            self.msg.append(text)
            return False
        elif int(num_a) == int(num_b) and not ignore_same:
            # Fixed message typo: "conect" -> "connect"
            text = f"{name}: Cannot connect items on same column"
            self.msg.append(text)
            return False
        elif not self.plot_path[path_a]:
            return False
        elif not self.plot_path[path_b]:
            return False
        return first and second
    except KeyError:
        # Unknown path name in data_dict/plot_path
        return False
    except ValueError:
        # num_a/num_b not convertible to int
        return False
@functools.lru_cache(maxsize=1)
def graph_frame(self):
    """Emit the outer SVG frame (root element, axes, arrows, titles) into self.svg_code.

    A fixed list of SVG template strings is prepared, the placeholders are
    filled in, and the subset matching the self.frame style ("Borderless",
    "Y only", "XY", "X only", "Frame" or "X,Y") is appended, in order.
    """
    # Right-hand pixel edge of the plot, derived from the widest column.
    horizontal_end = self.set_horizontal(self.n_col(), self.x_end_offset)
    a = [
        '<svg width="{0}" viewBox="30 0 {0} 500" height="500" xmlns="http://www.w3.org/2000/svg">', #0 FRAME
        ' <line x1="100" y1="35" x2="100" y2="475" stroke="black" stroke-width="2"/>', #1 FULL VERTICAL LEFT LINE
        ' <line x1="100" y1="475" x2="{}" y2="475" stroke="black" stroke-width="2"/>', #2 FULL BOTTOM HORIZONTAL LINE
        ' <text x="{}" y="20" font-size="22" text-anchor="middle" fill="black">{}</text>', #3 MAIN TITLE
        ' <text x="-250" y="55" font-size="22" {} text-anchor="middle" fill="black">{}</text>', #4 Y AXIS TITLE
        ' <text x="{}" y="495" font-size="22" text-anchor="middle" fill="black">{}</text>', #5 X AXIS TITLE
        ' <polygon points="100,25 95,35 105,35" style="fill:black;stroke:black;stroke-width:1"/>', #6 TOP LEFT Y ARROW
        ' <polygon points="{0},475 {1},470 {1},480" style="fill:black;stroke:black;stroke-width:1"/>', #7 BOTTOM RIGHT X ARROW
        ' <polygon points="100,485 95,475 105,475" style="fill:black;stroke:black;stroke-width:1"/>', #8 BOTTOM RIGHT Y ARROW
        ' <line x1="150" y1="475" x2="{}" y2="475" stroke="black" stroke-width="2"/>', #9 PARTIAL BOTTOM HORIZONTAL LINE
        ' <polygon points="140,475 150,470 150,480" style="fill:black;stroke:black;stroke-width:1"/>',# 10 BOTTOM LEFT X ARROW (PARTIAL)
        ' <polygon points="90,475 100,470 100,480" style="fill:black;stroke:black;stroke-width:1"/>',# 11 BOTTOM LEFT X ARROW (FULL)
        ' <line x1="{0}" y1="35" x2="{0}" y2="475" stroke="black" stroke-width="2"/>',# 12 FULL VERTICAL RIGHT LINE
        ' <line x1="100" y1="35" x2="{}" y2="35" stroke="black" stroke-width="2"/>' #13 FULL TOP HORIZONTAL LINE
    ]
    # Fill in the geometry/title placeholders.  Templates 6, 8, 10 and 11
    # take no arguments; the no-op self-assignments keep the index
    # bookkeeping explicit and aligned with the comments above.
    a[0] = a[0].format(horizontal_end + 75)
    a[2] = a[2].format(horizontal_end + 55)
    a[3] = a[3].format(self.set_horizontal(self.n_col()/2,55), self.char_care(self.main_title))
    a[4] = a[4].format('transform="rotate(-90)"', self.char_care(self.y_title))
    a[5] = a[5].format(self.set_horizontal(self.n_col()/2,55), self.char_care(self.x_title))
    a[6] = a[6]
    a[7] = a[7].format(horizontal_end + 65,horizontal_end + 55)
    a[8] = a[8]
    a[9] = a[9].format(horizontal_end + 55)
    a[10] = a[10]
    a[11] = a[11]
    a[12] = a[12].format(horizontal_end + 55)
    a[13] = a[13].format(horizontal_end + 55)
    # Frame style -> which template indices are emitted (order matters:
    # the SVG root element, index 0, always comes first).
    code = {"Borderless":[a[n] for n in (0,3)],
            "Y only":[a[n] for n in (0,1,3,4,6,8)],
            "XY":[a[n] for n in (0,1,2,3,4,5,6,7)],
            "X only":[a[n] for n in (0,2,5,7,11)],
            "Frame":[a[n] for n in (0,1,2,3,4,5,12,13)],
            "X,Y":[a[n] for n in (0,1,3,4,5,6,7,8,9,10)]
            }
    self.svg_code.extend(code[self.frame])
@functools.lru_cache(maxsize=1)
def graph_grid(self):
    """Emit y-axis tick marks and numeric labels into self.svg_code.

    Skipped entirely for frame styles without a y axis and for flat data
    sets.  Tick spacing is the data range / 10 rounded to one significant
    figure; up to 60 candidate ticks are generated downward from above the
    maximum, and only those landing inside the 50..465 pixel band are drawn.
    """
    if self.frame == "Borderless" or self.frame == "X only": return
    if self.delta_value() == 0: return
    # "{:.0E}" rounds to one significant figure, giving a "nice" step size.
    step_size = float("{:.0E}".format(abs(self.delta_value())/10))
    max_e = float("{:.0E}".format(abs(2*self.delta_value()+self.max_value())))
    if step_size == 0: return
    steps = [max_e-step_size*n for n in range(60)]
    for item in steps:
        value = int(self.set_single_height(item))
        # Only label ticks that fall inside the visible plot band.
        if 50 < value < 465:
            # Format the label with the configured number of decimals,
            # then apply the decimal-separator preference.
            item = {
                "0":"{:.0f}".format(item),
                "1":"{:.1f}".format(item),
                "2":"{:.02f}".format(item)
            }[self.grid_decimal]
            item = self.commafy(item)
            c = [
                ' <line x1="100" y1="{0}" x2="105" y2="{0}" stroke="black" stroke-width="2"/>',
                ' <text x="80" y="{}" text-anchor="middle" fill="black">{}</text>']
            c[0] = c[0].format(value)
            c[1] = c[1].format(value,item)
            self.svg_code.extend(c)
@functools.lru_cache(maxsize=1)
def graph_crt_points(self):
    """Draw every structure of every plotted path into self.svg_code.

    For each structure this emits a horizontal level bar plus up to three
    text labels (name, and the g/h energy labels), and a connecting line
    from the previous structure of the same path.
    Assumes each item is laid out as: item[0] column index, item[1] the
    structure kind (``"TS"`` marks a transition state), item[2] display
    name, item[-2] a placement key into pref.placement, item[-1] the pixel
    height appended by set_height -- TODO confirm against the builder.
    """
    for key,value in self.data_dict.items():
        if not self.plot_path[key]: continue
        # Per-path style options: [0] for the level bar, [1] for connectors.
        opt_cri = self.path_options[key][0]
        opt_con = self.path_options[key][1]
        if not len(value) == len(set([a[0] for a in value])):
            text = "WARNING: Two or more structures are occupying the same block lane!"
            self.msg.append(text)
        l_c = [0, 0, 0] # last collumn
        for idx, item in enumerate(value):
            # Level-bar endpoints: left x, y (shared by both ends), right x.
            c_p = [self.set_horizontal(item[0], self.wide[0]),
                   int(round(item[-1] + 50)),
                   self.set_horizontal(item[0], self.wide[1])]
            # [x1,y1,x2], y1=y2
            a = [
                ' <line x1="{}" y1="{}" x2="{}" y2="{}" stroke="{}" stroke-width="{}" {}/>',
                ' <text x="{}" y="{}" text-anchor="middle" fill="{}">{}{}</text>',
                ' <text x="{}" y="{}" text-anchor="middle" fill="{}">{}</text>',
                ' <text x="{}" y="{}" text-anchor="middle" fill="{}">{}</text>']
            # Dash style token and label vertical offsets from the pref module.
            x = pref.svg_repl[opt_cri[-1]]
            z = pref.placement[item[-2]][0 if self.plot_np else 1]
            # Which column is the "g" label vs the "h" label swaps with the
            # energy source column (presumably columns 3/4 hold the two
            # energy kinds -- TODO confirm).
            trick_g = "g" if self.e_source == 3 else "h"
            trick_h = "h" if self.e_source == 3 else "g"
            # Parse a possibly comma-decimal string (or a number) to float.
            item_rep = lambda x: float(item[x].replace(",",".") if type(item[x]) is str else item[x])
            digit_rounding = lambda x: {"0": "{:.0f}".format(item_rep(x)),"1": "{:.1f}".format(item_rep(x)),"2": "{:.2f}".format(item_rep(x))}[self.e_decimal]
            g = self.g_h_labels[trick_g][0] + self.commafy(digit_rounding(self.e_source)) + self.g_h_labels[trick_g][-1]
            h = self.g_h_labels[trick_h][0] + self.commafy(digit_rounding(self.e_complement) if is_str_float(item[self.e_complement].replace(",",".")) else item[self.e_complement]) + self.g_h_labels[trick_h][-1]
            # Optional double-dagger superscript marking transition states.
            ts_dict = {
                " " : "",
                "‡ (big)":'<tspan dy="-7" font-family="arial" font-size=".7em">{}</tspan>'.format(self.char_care("‡")),
                "‡ (small)":'<tspan dy="-7" font-family="monospace" font-size=".7em">{}</tspan>'.format(self.char_care("‡"))
            }
            ts_mark = ts_dict[self.ts_mark] if item[1] == "TS" else ""
            a[0] = a[0].format(c_p[0], c_p[1], c_p[2], c_p[1], opt_cri[0],opt_cri[1],x)
            a[1] = a[1].format(int((c_p[0] + c_p[2])/2), c_p[1] + z[0], opt_cri[0],self.char_care(item[2]),ts_mark)
            a[2] = a[2].format(int((c_p[0] + c_p[2])/2), c_p[1] + z[1],opt_cri[0],self.char_care(g))
            a[3] = a[3].format(int((c_p[0] + c_p[2])/2), c_p[1] + z[2],opt_cri[0],self.char_care(h))
            # The second energy label (a[3]) is only drawn when plot_np is set.
            self.svg_code.extend(a if self.plot_np else a[:-1])
            if not idx == 0:
                # Connect the right end of the previous bar to the left end
                # of this one, using the path's connector style.
                b = ' <line x1="{}" y1="{}" x2="{}" y2="{}" stroke="{}" stroke-width="{}" {}/>'
                x = pref.svg_repl[opt_con[-1]]
                b = b.format(l_c[2], l_c[1], c_p[0], c_p[1], opt_con[0],opt_con[1],x)
                self.svg_code.append(b)
            l_c = c_p
@functools.lru_cache(maxsize=1)
def graph_connectors(self):
    """Draw the user-defined cross-path connector lines into self.svg_code.

    Each row of self.conectors is presumably laid out as
    [path_a, col_a, path_b, col_b, color, width, dash-key] -- TODO confirm.
    Rows whose endpoints fail is_avail() are skipped (is_avail logs why).
    """
    # self.plot[-1] acts as the master on/off switch for connectors.
    if not self.plot[-1] == 1: return
    for idx,i in enumerate(self.conectors):
        if not self.is_avail(f"Connector {idx+1}",*i[0:4]):continue
        i[1] = int(i[1])
        i[3] = int(i[3])
        # Resolve each endpoint to its structure record in data_dict.
        start = next(n for n in self.data_dict[i[0]] if n[0] == i[1])
        end = next(n for n in self.data_dict[i[2]] if n[0] == i[3])
        con = [start,end]
        # Always draw left-to-right; equal columns draw nothing.
        if con[0][0] > con[1][0]: con.reverse()
        if con[0][0] < con[1][0]:
            x = pref.svg_repl[i[6]]
            a = ' <line x1="{}" y1="{}" x2="{}" y2="{}" stroke="{}" stroke-width="{}" {}/>'
            # From the right edge of the left structure's bar to the left
            # edge of the right structure's bar (+50 matches the y offset
            # used for the bars themselves).
            a = a.format(self.set_horizontal(con[0][0], self.wide[1]),
                         con[0][-1] + 50,
                         self.set_horizontal(con[1][0], self.wide[0]),
                         con[1][-1] + 50, i[4], i[5],x)
            self.svg_code.append(a)
@functools.lru_cache(maxsize=1)
def graph_comparers(self):
if not self.plot[-2] == 1: return
for i,a in enumerate(self.comparers):
if not self.is_avail(f"Comparer {i+1}",a["A"],a["1"],a["B"],a["2"],ignore_same=True): continue
start = next(n for n in self.data_dict[a["A"]] if n[0] == int(a["1"]))
end = next(n for n in self.data_dict[a["B"]] if n[0] == int(a["2"]))
com = [start,end]
ordered = com[0][0] < com[1][0]
if not ordered:
com = [end,start]
if a["S4"] == "reverse":
com.reverse()
y1 = com[0][6] + 50
y2 = com[1][6] + 50
color = a["S1"]
width = a["S2"]
dashed = pref.svg_repl[a["S3"]]
excess_y = 8 if com[0][-1] < com[1][-1] else -8
excess_yb = 2 if com[0][-1] < com[1][-1] else -2
text_pos = {
"left":[-5,"-90"],
"right":[15, "-90"],
"fliped_left": [-15, "90"],
"fliped_right": [5, "90"]
}[a["S6"]]
ordered = com[0][0] < com[1][0]
digit_rounding = {"0": "{:.0f}", "1": "{:.1f}", "2": "{:.2f}"}[self.e_decimal]
label = self.commafy(digit_rounding.format(abs(com[0][self.e_source] - com[1][self.e_source])))
protruded = ["p_left", "xp_left", "p_right", "xp_right", "average"]
x2_mod = 5
if com[0][0] == com[1][0]:
excess_x = -10 if a["S5"] in ["p_left","xp_left"] else 10
x1 = {
"left":self.set_horizontal(com[1][0],self.wide[0]),
"right":self.set_horizontal(com[1][0],self.wide[1]),
"midle":self.set_horizontal(com[1][0],int(sum(self.wide)/2)),
"p_left":self.set_horizontal(com[1][0],self.wide[0]),
"xp_left": self.set_horizontal(com[1][0], self.wide[0]),
"p_right": self.set_horizontal(com[1][0], self.wide[1]),
"xp_right": self.set_horizontal(com[1][0], self.wide[1]),
"average": self.set_horizontal((com[1][0]+com[0][0])/2, int(sum(self.wide)/2))
}[a["S5"]]
x2 = {
"left":self.set_horizontal(com[1][0],self.wide[0]+x2_mod),
"right":self.set_horizontal(com[1][0],self.wide[1]-x2_mod),
"midle":self.set_horizontal(com[1][0],int(sum(self.wide)/2)),
"p_left":self.set_horizontal(com[1][0],self.wide[0]-2*x2_mod),
"xp_left": self.set_horizontal(com[1][0], self.wide[0] - 4*x2_mod),
"p_right": self.set_horizontal(com[1][0], self.wide[1] + 2*x2_mod),
"xp_right": self.set_horizontal(com[1][0], self.wide[1] + 4*x2_mod),
"average": self.set_horizontal((com[1][0]+com[0][0])/2, int(sum(self.wide)/2))
}[a["S5"]]
label = self.commafy(digit_rounding.format(abs(com[0][self.e_source] - com[1][self.e_source])))
b = [
' <line x1="{}" y1="{}" x2="{}" y2="{}" stroke="{}" stroke-width="{}" {}/>', # HORIZONTAL
' <line x1="{}" y1="{}" x2="{}" y2="{}" stroke="{}" stroke-width="{}" />', # VERTICAL
' <polygon points="{},{} {},{} {},{}" style="fill:{};stroke:{};stroke-width:1"/>', # TOP Y ARROW
' <polygon points="{},{} {},{} {},{}" style="fill:{};stroke:{};stroke-width:1"/>', # BOTTOM Y ARROW
' <text x="{}" y="{}" font-size="16" {} text-anchor="middle" fill="{}">{}</text>', # Y label
' <line x1="{}" y1="{}" x2="{}" y2="{}" stroke="{}" stroke-width="{}" {}/>' # HORIZONTAL SHORT
]
b[0] = b[0].format(x1, y1, x2 + excess_x, y1, color, width, dashed)
b[1] = b[1].format(x2, y1 + excess_y, x2, y2 - excess_y, color, width)
b[2] = b[2].format(x2, y1 + excess_yb, x2 - 4, y1 + excess_y, x2 + 4, y1 + excess_y, color, color)
b[3] = b[3].format(x2, y2 - excess_yb, x2 - 4, y2 - excess_y, x2 + 4, y2 - excess_y, color, color)
b[4] = b[4].format(x2 + text_pos[0], int((y2 + y1) / 2),
f'transform="rotate({text_pos[1]},{x2 + text_pos[0]},{int((y2 + y1) / 2)})"',
color,
self.char_care(label))
b[5] = b[5].format(x1, y2, x2 + excess_x, y2, color, width, dashed)
protruded = ["p_left", "xp_left", "p_right", "xp_right", "average"]
self.svg_code.extend(b if a["S5"] in protruded else b[1:5])
else:
excess_x = 10 if ordered else -10
x1 = self.set_horizontal(com[0][0],self.wide[1] if ordered else self.wide[0])
x2 = {
"left":self.set_horizontal(com[1][0],self.wide[0]+x2_mod),
"right":self.set_horizontal(com[1][0],self.wide[1]-x2_mod),
"midle":self.set_horizontal(com[1][0],int(sum(self.wide)/2)),
"p_left":self.set_horizontal(com[1][0],self.wide[0]-2*x2_mod),
"xp_left": self.set_horizontal(com[1][0], self.wide[0] - 4*x2_mod),
"p_right": self.set_horizontal(com[1][0], self.wide[1] + 2*x2_mod),
"xp_right": self.set_horizontal(com[1][0], self.wide[1] + 4*x2_mod),
"average": self.set_horizontal((com[1][0]+com[0][0])/2, int(sum(self.wide)/2))
}[a["S5"]]
x3 = {
"left":None,
"right":None,
"midle":None,
"p_left":self.set_horizontal(com[1][0],self.wide[0]),
"xp_left": self.set_horizontal(com[1][0], self.wide[0]),
"p_right": self.set_horizontal(com[1][0], self.wide[1]),
"xp_right": self.set_horizontal(com[1][0], self.wide[1]),
"average": self.set_horizontal(com[1][0],self.wide[1] if a["S4"] == "reverse" else self.wide[0])
}[a["S5"]]
x4 = {
"left":None,
"right":None,
"midle":None,
"p_left":x2+excess_x if a["S4"] == "reverse" else x2-excess_x,
"xp_left": x2+excess_x if a["S4"] == "reverse" else x2-excess_x,
"p_right":x2-excess_x if a["S4"] == "reverse" else x2+excess_x,
"xp_right": x2-excess_x if a["S4"] == "reverse" else x2+excess_x,
"average": x2-excess_x if a["S4"] == "reverse" else x2-excess_x
}[a["S5"]]
b = [
' <line x1="{}" y1="{}" x2="{}" y2="{}" stroke="{}" stroke-width="{}" {}/>', # HORIZONTAL
' <line x1="{}" y1="{}" x2="{}" y2="{}" stroke="{}" stroke-width="{}" />', # VERTICAL
' <polygon points="{},{} {},{} {},{}" style="fill:{};stroke:{};stroke-width:1"/>',# TOP Y ARROW
' <polygon points="{},{} {},{} {},{}" style="fill:{};stroke:{};stroke-width:1"/>',# BOTTOM Y ARROW
' <text x="{}" y="{}" font-size="16" {} text-anchor="middle" fill="{}">{}</text>',#Y label
' <line x1="{}" y1="{}" x2="{}" y2="{}" stroke="{}" stroke-width="{}" {}/>' # HORIZONTAL SHORT
]
b[0] = b[0].format(x1,y1,x2+excess_x,y1,color,width,dashed)
b[1] = b[1].format(x2,y1+excess_y,x2,y2-excess_y,color,width)
b[2] = b[2].format(x2,y1+excess_yb,x2-4,y1+excess_y,x2+4,y1+excess_y,color,color)
b[3] = b[3].format(x2,y2-excess_yb,x2-4,y2-excess_y,x2+4,y2-excess_y,color,color)
b[4] = b[4].format(x2+text_pos[0],int((y2+y1)/2),f'transform="rotate({text_pos[1]},{x2+text_pos[0]},{int((y2+y1)/2)})"',color,self.char_care(label))
b[5] = b[5].format(x4, | |
None, lambda self: None
) # default
"""Indicates whether the event is being cancelled.
When the event is cancellable, set the property to True to cancel it.
Get: Cancel(self: RevitEventArgs) -> bool
Set: Cancel(self: RevitEventArgs) = value
"""
Cancellable = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Indicates whether an event may be cancelled by an event delegate.
Get: Cancellable(self: RevitEventArgs) -> bool
"""
class PostEventArgs(RevitEventArgs):
    """ The class is used as a base class for arguments of any post-event. """
    # Auto-generated CLR stub: the property triple is a placeholder; the
    # real getter is provided by the Revit API runtime.
    Status = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Indicates whether the action associated with this event succeeded, failed, or was cancelled (by an APIevent handler).
    Get: Status(self: PostEventArgs) -> EventStatus
    """
class PostDocEventArgs(PostEventArgs):
    """ The class is used as base class for arguments of any post-event that is associated to a particular Document. """
    # Auto-generated CLR stub: placeholder property; real accessor is
    # supplied by the .NET runtime.
    Document = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """The document associated with the event.
    Get: Document(self: PostDocEventArgs) -> Document
    """
class PreEventArgs(RevitEventArgs):
    """ The class is used as a base class for the arguments for any pre-event. """
    # Marker base class: pre-events add no members beyond RevitEventArgs.
class PreDocEventArgs(PreEventArgs):
    """ The class is used as base class for the arguments of any pre-event arguments that is associated to a particular Document. """
    # Auto-generated CLR stub: placeholder property; real accessor is
    # supplied by the .NET runtime.
    Document = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """The document associated with the event.
    Get: Document(self: PreDocEventArgs) -> Document
    """
class ProgressChangedEventArgs(RevitAPISingleEventArgs, IDisposable):
    """ The event arguments used by the ProgressChanged event. """
    # Auto-generated CLR stub: method bodies are placeholders; the docstrings
    # carry the real .NET signatures.
    def Cancel(self):
        """
        Cancel(self: ProgressChangedEventArgs)
        Requests to cancel the progress bar's operation.
        """
        pass
    def Dispose(self):
        """ Dispose(self: RevitAPIEventArgs, A_0: bool) """
        pass
    def ReleaseUnmanagedResources(self, *args): # cannot find CLR method
        """ ReleaseUnmanagedResources(self: RevitAPIEventArgs, disposing: bool) """
        pass
    def __enter__(self, *args): # cannot find CLR method
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self, *args): # cannot find CLR method
        """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
        pass
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    # The properties below are placeholders; real getters come from the CLR.
    Caption = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """The text from the progress bar caption that describes the operation in progress
    Get: Caption(self: ProgressChangedEventArgs) -> str
    """
    LowerRange = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Lower part of progress bar range - always zero
    Get: LowerRange(self: ProgressChangedEventArgs) -> int
    """
    Position = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Progress bar position - value is always between zero and upperRange and is incremented by one with each event of stage "PositionChanged"
    Get: Position(self: ProgressChangedEventArgs) -> int
    """
    Stage = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """The current stage of the progress bar
    Get: Stage(self: ProgressChangedEventArgs) -> ProgressStage
    """
    UpperRange = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """Upper part of progress bar range - will be any non-zero number
    Get: UpperRange(self: ProgressChangedEventArgs) -> int
    """
class ProgressStage(Enum, IComparable, IFormattable, IConvertible):
    """
    The associated action of a ProgressChanged event
    enum ProgressStage, values: CaptionChanged (3), Finished (5), PositionChanged (2), RangeChanged (1), Started (0), Unchanged (4)
    """
    # Auto-generated CLR enum stub: the dunders are placeholders and the
    # member values below are populated by the .NET runtime at import time.
    def __eq__(self, *args): # cannot find CLR method
        """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
        pass
    def __format__(self, *args): # cannot find CLR method
        """ __format__(formattable: IFormattable, format: str) -> str """
        pass
    def __ge__(self, *args): # cannot find CLR method
        pass
    def __gt__(self, *args): # cannot find CLR method
        pass
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self, *args): # cannot find CLR method
        pass
    def __lt__(self, *args): # cannot find CLR method
        pass
    def __ne__(self, *args): # cannot find CLR method
        pass
    def __reduce_ex__(self, *args): # cannot find CLR method
        pass
    def __str__(self, *args): # cannot find CLR method
        pass
    # Placeholder members; the integer values are listed in the class docstring.
    CaptionChanged = None
    Finished = None
    PositionChanged = None
    RangeChanged = None
    Started = None
    Unchanged = None
    value__ = None
class RevitAPIEventStatus(Enum, IComparable, IFormattable, IConvertible):
    """
    Describes the status of an action which triggered a post event.
    enum RevitAPIEventStatus, values: Cancelled (1), Failed (-1), Succeeded (0)
    """
    # Auto-generated CLR enum stub: the dunders are placeholders and the
    # member values below are populated by the .NET runtime at import time.
    def __eq__(self, *args): # cannot find CLR method
        """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
        pass
    def __format__(self, *args): # cannot find CLR method
        """ __format__(formattable: IFormattable, format: str) -> str """
        pass
    def __ge__(self, *args): # cannot find CLR method
        pass
    def __gt__(self, *args): # cannot find CLR method
        pass
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self, *args): # cannot find CLR method
        pass
    def __lt__(self, *args): # cannot find CLR method
        pass
    def __ne__(self, *args): # cannot find CLR method
        pass
    def __reduce_ex__(self, *args): # cannot find CLR method
        pass
    def __str__(self, *args): # cannot find CLR method
        pass
    # Placeholder members; the integer values are listed in the class docstring.
    Cancelled = None
    Failed = None
    Succeeded = None
    value__ = None
class UndoOperation(Enum, IComparable, IFormattable, IConvertible):
    """
    The operation associated with DocumentChanged event
    enum UndoOperation, values: TransactionCommitted (0), TransactionGroupRolledBack (2), TransactionRedone (4), TransactionRolledBack (1), TransactionUndone (3)
    """
    # Auto-generated CLR enum stub: the dunders are placeholders and the
    # member values below are populated by the .NET runtime at import time.
    def __eq__(self, *args): # cannot find CLR method
        """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
        pass
    def __format__(self, *args): # cannot find CLR method
        """ __format__(formattable: IFormattable, format: str) -> str """
        pass
    def __ge__(self, *args): # cannot find CLR method
        pass
    def __gt__(self, *args): # cannot find CLR method
        pass
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self, *args): # cannot find CLR method
        pass
    def __lt__(self, *args): # cannot find CLR method
        pass
    def __ne__(self, *args): # cannot find CLR method
        pass
    def __reduce_ex__(self, *args): # cannot find CLR method
        pass
    def __str__(self, *args): # cannot find CLR method
        pass
    # Placeholder members; the integer values are listed in the class docstring.
    TransactionCommitted = None
    TransactionGroupRolledBack = None
    TransactionRedone = None
    TransactionRolledBack = None
    TransactionUndone = None
    value__ = None
class ViewPrintedEventArgs(RevitAPIPostDocEventArgs, IDisposable):
    """ The event arguments used by the ViewPrinted event. """
    # Auto-generated CLR stub: bodies are placeholders; docstrings carry the
    # real .NET signatures.
    def Dispose(self):
        """ Dispose(self: RevitAPIEventArgs, A_0: bool) """
        pass
    def ReleaseUnmanagedResources(self, *args): # cannot find CLR method
        """ ReleaseUnmanagedResources(self: RevitAPIEventArgs, disposing: bool) """
        pass
    def __enter__(self, *args): # cannot find CLR method
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self, *args): # cannot find CLR method
        """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
        pass
    def __init__(self, *args): # cannot find CLR method
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    # The properties below are placeholders; real getters come from the CLR.
    Index = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """The index of the view being printed out of the set of all views being printed.
    Get: Index(self: ViewPrintedEventArgs) -> int
    """
    TotalViews = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """The number of all views being printed.
    Get: TotalViews(self: ViewPrintedEventArgs) -> int
    """
    View = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    ) # default
    """The view that was printed.
    Get: View(self: ViewPrintedEventArgs) -> View
    """
class ViewPrintingEventArgs(RevitAPIPreDocEventArgs, IDisposable):
""" The event arguments used by the ViewPrinting event. """
def Dispose(self):
""" Dispose(self: RevitAPIEventArgs, A_0: bool) """
pass
def GetSettings(self):
"""
GetSettings(self: ViewPrintingEventArgs) -> IPrintSetting
Get the print settings of the active printing session.
"""
pass
def ReleaseUnmanagedResources(self, *args): # cannot find CLR method
""" ReleaseUnmanagedResources(self: RevitAPIEventArgs, disposing: bool) """
pass
def __enter__(self, *args): # cannot find CLR method
""" | |
<filename>tiktalik_cli/command/instance.py
"""Module tiktalik_cli.command.instance"""
# Copyright (c) 2013 Techstorage sp. z o.o.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from tiktalik.computing.objects import Instance
from tiktalik.error import TiktalikAPIError
from .command import ComputingCommand, CommandError
from . import util
class ListInstances(ComputingCommand):
    """CLI command: list all instances, with optional detail flags."""

    @classmethod
    def add_parser(cls, parser, subparser):
        """Register the `list` subcommand and its flags; return its name."""
        p = subparser.add_parser("list", description="List instances", parents=[parser])
        p.add_argument(
            "-a",
            dest="actions",
            action="store_true",
            help="Fetch recent actions for each instance",
        )
        p.add_argument(
            "-c",
            dest="cost",
            action="store_true",
            help="Fetch current hourly cost for each instance",
        )
        p.add_argument(
            "-i",
            dest="vpsimage",
            action="store_true",
            help="Fetch VPS Image details for each instance",
        )
        p.add_argument(
            "-v",
            dest="verbose",
            action="store_true",
            help="Print extra information (flags -a, -i, -t yield more details)",
        )
        return "list"

    def execute(self):
        """Fetch all instances and print them sorted by hostname."""
        all_instances = Instance.list_all(
            self.conn,
            actions=self.args.actions,
            cost=self.args.cost,
            vpsimage=self.args.vpsimage,
        )
        for inst in sorted(all_instances, key=lambda i: i.hostname):
            if self.args.verbose:
                # Verbose mode delegates formatting to the shared helper.
                util.print_instance(inst, verbose=bool(self.args.verbose))
            else:
                state = "Running" if inst.running else "Not running"
                print("%s (%s) - %s" % (inst.hostname, inst.uuid, state))
class CreateInstance(ComputingCommand):
    """CLI command: create a new instance from an image, size and hostname."""

    @classmethod
    def add_parser(cls, parent, subparser):
        """Register the `create-instance` subcommand; return its name."""
        p = subparser.add_parser(
            "create-instance", description="Create a new instance.", parents=[parent]
        )
        p.add_argument("image_uuid", help="UUID of a VPS Image that should be used.")
        p.add_argument(
            "size",
            help='Instance size (in units). Allowed values: 0.25, 0.5, 1 - 15, "cpuhog", "cpuhog4", "1s", "2s", "4s", "8s" and "16s".',
        )
        p.add_argument("hostname", help="Hostname set at installation time.")
        p.add_argument(
            "-n",
            dest="networks",
            metavar="NETWORK_NAME",
            action="append",
            help="Attach these networks to the new instance. Use the list-networks command to list available networks.",
        )
        p.add_argument(
            "-b",
            dest="batch_mode",
            action="store_true",
            help="Batch mode. Don't confirm the operation.",
        )
        p.add_argument(
            "-d",
            dest="disk_size_gb",
            action="store",
            type=int,
            help="For standard instances must set disk size in GB.",
        )
        return "create-instance"

    def execute(self):
        """Validate networks/image/size, optionally confirm, then create.

        Raises CommandError on any invalid argument or missing resource.
        """
        if not self.args.networks:
            raise CommandError("You must specify at least one network")

        networks = self.conn.list_networks()
        # Mapping of network names->uuids
        networks = dict((net.name, net.uuid) for net in networks)

        # Make sure all requested networks exist
        diff = set(self.args.networks) - set(networks.keys())
        if diff:
            raise CommandError(
                "Some of the networks you specified don't exist: %s" % ", ".join(diff)
            )

        # List of network uuids instead of names
        networks = [networks[n] for n in self.args.networks]

        # Validate image existence
        try:
            self.conn.get_image(self.args.image_uuid)
        except TiktalikAPIError as ex:
            if ex.http_status == 404:
                raise CommandError("Image %s not found" % self.args.image_uuid)
            # Bug fix: previously any non-404 API error was silently
            # swallowed here; re-raise so real failures surface.
            raise

        size = self._parse_instance_size(self.args.size)

        # Standard ("*s") instances require an explicit disk size.
        disk_size_gb = None
        if size.endswith("s"):
            if not self.args.disk_size_gb:
                raise CommandError("Disk size not set, see -d param.")
            disk_size_gb = self.args.disk_size_gb

        if not self.args.batch_mode:
            self.yesno(
                "Creating new instance with these parameters:\n"
                "Image UUID: %s\nSize: %s\nHostname: %s\nNetworks: %s\n"
                "Is this OK?"
                % (
                    self.args.image_uuid,
                    size,
                    self.args.hostname,
                    ", ".join(self.args.networks),
                )
            )

        # Bug fix: the return value was never used, and the old
        # print(("Instance", hostname, "...")) printed a tuple repr.
        self.conn.create_instance(
            self.args.hostname,
            size,
            self.args.image_uuid,
            networks,
            disk_size_gb=disk_size_gb,
        )
        print("Instance %s is now being installed." % self.args.hostname)

    def _parse_instance_size(self, sz):
        """
        Parse instance size passed as string and validate it.
        Valid values are: 0.25, 0.5, integers 1-15, "cpuhog", "cpuhog4", "1s", "2s", "4s", "8s" and "16s".
        Raise CommandError if `sz` is not a valid size.
        """
        if sz in ("cpuhog", "cpuhog4", "1s", "2s", "4s", "8s", "16s"):
            return sz
        try:
            sz = float(sz)
            if sz < 1:
                # Only the two fractional sizes are allowed below 1 unit.
                if sz <= 0 or sz not in (0.25, 0.5):
                    sz = None
            else:
                sz = int(sz)
                if sz > 15:
                    sz = None
        except ValueError:
            sz = None
        if sz is None:
            raise CommandError(
                'Instance size must be one of 0.25, 0.5, integral value 1-15, "1s", "2s", "4s", "8s", "16s", "cpuhog" or "cpuhog4".'
            )
        return str(sz)
class InstanceCommand(ComputingCommand):
    """
    Base class for commands that operate on existing instances, eg. start, stop, info.
    """

    @classmethod
    def add_common_arguments(cls, parser):
        # The three selectors are mutually exclusive: positional prefix,
        # full hostname (-n), or full UUID (-u).
        group = parser.add_mutually_exclusive_group()
        group.add_argument(
            "instance",
            nargs="?",
            default="",
            help="Instance hostname prefix, or instance UUID prefix.",
        )
        group.add_argument(
            "-n",
            dest="hostname",
            help="Instance full hostname. Use -u or `instance' argument if the hostname is not unique.",
        )
        group.add_argument(
            "-u",
            dest="uuid",
            help="Instance full UUID. Can be specified instead of name (-n) or `instance' argument.",
        )

    def _instance_from_args(self, actions=False, vpsimage=False, cost=False):
        # Resolve the command-line selectors to exactly one Instance, or
        # raise CommandError (zero matches, ambiguous matches, or API 404).
        # The boolean flags request extra details on the returned instance.
        if self.args.hostname and self.args.uuid:
            # NOTE(review): argparse's mutually exclusive group should make
            # this unreachable; kept as a defensive check.
            raise CommandError(
                "Both hostname and UUID can't be specified. Decide on one!"
            )
        if not self.args.hostname and not self.args.uuid and not self.args.instance:
            raise CommandError(
                "Either `instance' argument, hostname (-n) or UUID (-u) must be provided."
            )
        try:
            # select by hostname/uuid prefix
            if self.args.instance:
                instances = []
                for i in Instance.list_all(self.conn):
                    if i.hostname.startswith(self.args.instance):
                        instances.append(i)
                    elif i.uuid.startswith(self.args.instance):
                        instances.append(i)
                if not instances:
                    raise CommandError("There is no such instance.")
                if len(instances) > 1:
                    print("Matched more than one instance:")
                    for i in instances:
                        print((" - %s (%s)" % (i.hostname, i.uuid)))
                    raise CommandError("Select exactly one instance.")
                # Re-fetch by UUID so the optional detail flags are honored.
                instance = Instance.get_by_uuid(
                    self.conn, instances[0].uuid, actions, vpsimage, cost
                )
            # select by full hostname only:
            elif self.args.hostname:
                instances = Instance.get_by_hostname(
                    self.conn, self.args.hostname, actions, vpsimage, cost
                )
                if len(instances) > 1:
                    msg = ", ".join(i.uuid for i in instances)
                    raise CommandError(
                        "There are %s instances matching hostname %s: %s"
                        % (len(instances), self.args.hostname, msg)
                    )
                instance = instances[0]
            # select by full uuid only:
            else:
                instance = Instance.get_by_uuid(
                    self.conn, self.args.uuid, actions, vpsimage, cost
                )
            return instance
        except TiktalikAPIError as ex:
            if ex.http_status == 404:
                raise CommandError("No such instance.")
            # Non-404 errors are real API failures; propagate them.
            raise
class StartInstance(InstanceCommand):
    """CLI command: start an existing instance."""

    @classmethod
    def add_parser(cls, parent, subparser):
        """Register the `start` subcommand; return its name."""
        p = subparser.add_parser(
            "start",
            description="Start an instance. Either name or UUID must be specified.",
            parents=[parent],
        )
        InstanceCommand.add_common_arguments(p)
        return "start"

    def execute(self):
        """Resolve the target instance and request a start."""
        target = self._instance_from_args()
        target.start()
        print(
            "Instance %s (%s) is now being started" % (target.hostname, target.uuid)
        )
class StopInstance(InstanceCommand):
    """CLI command: stop an instance, gracefully (ACPI) or forcefully (-f)."""

    @classmethod
    def add_parser(cls, parent, subparser):
        """Register the `stop` subcommand and its -f flag; return its name."""
        p = subparser.add_parser(
            "stop",
            description="Stop an instance. Send ACPI Shutdown signal (default), or stop forcefully (see -f argument)",
            parents=[parent],
        )
        InstanceCommand.add_common_arguments(p)
        p.add_argument(
            "-f",
            dest="force",
            action="store_true",
            help="Stop the instance forcefully. Do not send gently ACPI Shutdown signal, instead cut the power off.",
        )
        return "stop"

    def execute(self):
        """Resolve the target instance and request a (possibly forced) stop."""
        instance = self._instance_from_args()
        if self.args.force:
            instance.force_stop()
        else:
            instance.stop()
        # Typo fix in the user-facing message: "forcefuly" -> "forcefully".
        print(
            "Instance %s (%s) is now being stopped%s"
            % (
                instance.hostname,
                instance.uuid,
                " forcefully" if self.args.force else "",
            )
        )
class BackupInstance(InstanceCommand):
    """CLI command: back up a stopped instance, optionally naming the backup."""

    @classmethod
    def add_parser(cls, parent, subparser):
        """Register the `backup` subcommand; return its name."""
        p = subparser.add_parser(
            "backup",
            description="Backup an instance. Either name or UUID must be specified.",
            parents=[parent],
        )
        InstanceCommand.add_common_arguments(p)
        p.add_argument(
            "--set_name", type=str, help="Set name of your backup",
        )
        return "backup"

    def execute(self):
        """Refuse to back up a running instance, then start the backup."""
        target = self._instance_from_args()
        if target.running:
            raise CommandError(
                "Instance is running. Please stop it before starting backup."
            )
        # Pass the backup name only when the user supplied one.
        if self.args.set_name:
            target.backup(self.args.set_name)
        else:
            target.backup()
        print(
            "Instance %s (%s) is now being backed up" % (target.hostname, target.uuid)
        )
class DeleteInstance(InstanceCommand):
    """CLI command: permanently remove an instance."""

    @classmethod
    def add_parser(cls, parent, subparser):
        """Register the `delete-instance` subcommand; return its name."""
        p = subparser.add_parser(
            "delete-instance",
            description="Permanently remove an instance. Either name or UUID must be specified.",
            parents=[parent],
        )
        InstanceCommand.add_common_arguments(p)
        return "delete-instance"

    def execute(self):
        """Resolve the target instance and request its deletion."""
        target = self._instance_from_args()
        self.conn.delete_instance(target.uuid)
        print(
            "Instance %s (%s) is now being removed" % (target.hostname, target.uuid)
        )
class InstanceInfo(InstanceCommand):
    """CLI command: display information about one instance."""

    @classmethod
    def add_parser(cls, parent, subparser):
        """Register the `info` subcommand; return its name."""
        p = subparser.add_parser(
            "info", description="Display instance information.", parents=[parent]
        )
        p.add_argument(
            "-a",
            "--all",
            dest="verbose",
            action="store_true",
            help="Print all extra information.",
        )
        InstanceCommand.add_common_arguments(p)
        return "info"

    def execute(self):
        """Fetch the instance (with full details when verbose) and print it."""
        verbose = self.args.verbose
        if verbose:
            target = self._instance_from_args(actions=True, vpsimage=True, cost=True)
        else:
            target = self._instance_from_args()
        target.load_block_devices()
        util.print_instance(target, verbose)
class AddInterface(InstanceCommand):
    """CLI command: attach a new network interface to an instance."""

    @classmethod
    def add_parser(cls, parent, subparser):
        """Register the `add-interface` subcommand; return its name."""
        p = subparser.add_parser(
            "add-interface",
            description="Add a new interface to an instance.",
            parents=[parent],
        )
        p.add_argument(
            "network_uuid",
            help="UUID of a Network that should be attached to this interface",
        )
        p.add_argument("seq", help="Seq number of the interface, eg. 2 maps to eth2")
        InstanceCommand.add_common_arguments(p)
        return "add-interface"

    def execute(self):
        """Resolve the target instance and attach the requested interface."""
        target = self._instance_from_args()
        target.add_interface(self.args.network_uuid, self.args.seq)
class RemoveInterface(InstanceCommand):
@classmethod
| |
# -*- coding: utf-8 -*-
"""
Redis 操作类
Redis是完全开源免费的,遵守BSD协议,是一个高性能的key-value数据库。
Redis支持数据的持久化。
Redis支持简单的key-value类型,还提供list,set,zset,hash等数据结构的存储。
Redis支持数据的备份,即master-slave模式的数据备份。
Redis读的速度是110000次/s,写的速度是81000次/s
Redis的所有操作都是原子性的,同时Redis还支持对几个操作全并后的原子性执行。
"""
import functools
from redis import StrictRedis
def wraps_set_expire(func):
    """Decorator: refresh the default TTL on a key after a successful call.

    The wrapped method must take the key name as its first positional
    argument; ``self.set_expire`` is invoked only when the call returned a
    non-None result (i.e. it actually did work).
    """
    @functools.wraps(func)
    def _inner(self, keyname, *args, **kwargs):
        outcome = func(self, keyname, *args, **kwargs)
        if outcome is not None:
            self.set_expire(keyname)
        return outcome
    return _inner
class LibRedis:
    # Prefix prepended to every key (namespacing); overridable via ``prefix``.
    key_prefix = 'RAWE_'
    # Underlying StrictRedis connection object (None until __init__ runs).
    obj_redis = None
    # Default key TTL in seconds (3 days), applied by wraps_set_expire.
    DEFAULT_EXPIRE = 259200

    def __init__(self, host='127.0.0.1', port=6379, db=0, prefix=None, charset='utf-8'):
        """Bind this helper to one StrictRedis connection.

        NOTE(review): returning None from __init__ does not abort
        construction; a falsy host/port still yields an instance whose
        obj_redis is None. Kept for backward compatibility.
        """
        if not host or not port:
            return None
        if prefix:
            self.key_prefix = prefix.strip()
        # Bug fix: honor the caller-supplied charset instead of the
        # previously hard-coded 'utf-8'.
        self.obj_redis = StrictRedis(
            host=host, port=port, db=db, charset=charset)
def key_make(self, keyname=None):
"""
处理所有key,增加前缀
如果实例化时没有设置,则使用默认前缀
"""
if not keyname:
return None
return self.key_prefix + str(keyname).strip()
def set_expire(self, keyname=None):
"""
设置key的过期时间,装饰器调用
"""
if not keyname:
return None
return self.obj_redis.expire(self.key_make(keyname), self.DEFAULT_EXPIRE)
# --------------------------------------------------------
# String
# --------------------------------------------------------
@wraps_set_expire
def set(self, keyname=None, value=None):
"""
设置指定 key 的值。
如果 key 已经存储其他值, SET 就覆写旧值,且无视类型。
return:
设置操作成功完成时,才返回 OK
"""
if not keyname or value is None:
return None
keyname = self.key_make(keyname.strip())
if isinstance(value, str):
value = value.strip()
return self.obj_redis.set(keyname, value)
def get(self, keyname=None):
"""
获取指定 key 的值。
return:
key 的值
如果 key 不存在,返回 nil。
如果key 储存的值不是字符串类型,返回一个错误。
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
result = self.obj_redis.get(keyname)
return None if not result else bytes.decode(result)
def delete(self, keyname=None):
"""
删除已存在的键。不存在的 key 会被忽略
return:
被删除 key 的数量
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
return self.obj_redis.delete(keyname)
@wraps_set_expire
def append(self, keyname=None, value=None):
"""
为指定的 keyname 追加值
如果 keyname 已经存在并且是一个字符串,
APPEND 命令将 value 追加到 keyname 原来的值的末尾。
如果 keyname 不存在,
APPEND 就简单地将给定 keyname 设为 value ,就像执行 SET keyname value 一样
return:
追加指定值之后, keyname 中字符串的长度
"""
if not keyname or value is None:
return None
keyname = self.key_make(keyname.strip())
if isinstance(value, str):
value = value.strip()
else:
value = str(value)
return self.obj_redis.append(keyname, value)
@wraps_set_expire
def incr(self, keyname=None, expire=None):
"""
将 keyname 中储存的数字值增一。
如果 keyname 不存在,那么 key 的值会先被初始化为 0 ,然后再执行 INCR 操作。
如果值包含错误的类型,或字符串类型的值不能表示为数字,那么返回一个错误。
本操作的值限制在 64 位(bit)有符号数字表示之内。
return:
执行 INCR 命令之后 key 的值
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
return self.obj_redis.incr(keyname, 1)
@wraps_set_expire
def incrBy(self, keyname=None, amount=1):
"""
将 keyname 中储存的数字加上指定的增量值。
如果 keyname 不存在,那么 key 的值会先被初始化为 0 ,然后再执行 INCRBY 命令
如果值包含错误的类型,或字符串类型的值不能表示为数字,那么返回一个错误。
本操作的值限制在 64 位(bit)有符号数字表示之内。
return:
加上指定的增量值之后, key 的值
"""
if not keyname or not amount:
return None
keyname = self.key_make(keyname.strip())
if isinstance(amount, int):
amount = max(0, amount)
else:
amount = 1
return self.obj_redis.incrby(keyname, amount)
@wraps_set_expire
def decr(self, keyname=None):
"""
将 key 中储存的数字值减一。
如果 key 不存在,那么 key 的值会先被初始化为 0 ,然后再执行 DECR 操作。
如果值包含错误的类型,或字符串类型的值不能表示为数字,那么返回一个错误。
本操作的值限制在 64 位(bit)有符号数字表示之内。
return:
执行命令之后 key 的值
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
return self.obj_redis.decr(keyname, 1)
@wraps_set_expire
def decrBy(self, keyname=None, amount=1):
"""
将 keyname 所储存的值减去指定的减量值。
如果 keyname 不存在,那么 key 的值会先被初始化为 0 ,然后再执行 DECRBY 操作。
如果值包含错误的类型,或字符串类型的值不能表示为数字,那么返回一个错误。
本操作的值限制在 64 位(bit)有符号数字表示之内
"""
if not keyname or not amount:
return None
keyname = self.key_make(keyname.strip())
amount = int(amount)
return self.obj_redis.decr(keyname, amount)
# --------------------------------------------------------
# Hash 哈希
# 一个string类型的field和value的映射表,hash特别适合用于存储对象
# 每个 hash 可以存储 232 - 1 键值对(40多亿)
# --------------------------------------------------------
@wraps_set_expire
def hSet(self, keyname=None, key=None, value=None):
"""
从哈希名为keyname中添加key1->value1 将哈希表key中的域field的值设为value。-ok -ok
如果key不存在,一个新的哈希表被创建并进行hset操作。
如果域field已经存在于哈希表中,旧值将被覆盖。
错误则 返回 FALSE
如果字段是哈希表中的一个新建字段,并且值设置成功,返回 1 。
如果哈希表中域字段已经存在且旧值已被新值覆盖,返回 0 。
"""
if not keyname or not key or value is None:
return None
keyname = self.key_make(keyname.strip())
key = key.strip()
return self.obj_redis.hset(keyname, key, value)
@wraps_set_expire
def hGet(self, keyname=None, key=None):
"""
获取存储在哈希表中指定字段的值
返回给定字段的值。如果给定的字段或 key 不存在时,返回 None
"""
if not keyname or not key:
return None
keyname = self.key_make(keyname.strip())
key = key.strip()
result = self.obj_redis.hget(keyname, key)
if not result:
return None
# bytes to str
return bytes.decode(result)
@wraps_set_expire
def hLen(self, keyname=None):
"""
获取哈希表中字段的数量
哈希表中字段的数量。 当 keyname 不存在时,返回 0
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
return self.obj_redis.hlen(keyname)
@wraps_set_expire
def hKeys(self, keyname=None):
"""
获取哈希表中的所有域(field)
包含哈希表中所有域(field)列表。
当 key 不存在时,返回一个空列表
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
result = self.obj_redis.hkeys(keyname)
if not result:
return None
# bytes to str
ret_list = list()
for v in result:
ret_list.append(bytes.decode(v))
return ret_list
@wraps_set_expire
def hVals(self, keyname=None):
"""
哈希表所有域(field)的值
包含哈希表中所有域(field)值的列表。
当 key 不存在时,返回一个空表
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
result = self.obj_redis.hvals(keyname)
if not result:
return None
# bytes to str
ret_list = list()
for v in result:
ret_list.append(bytes.decode(v))
return ret_list
@wraps_set_expire
def hGetAll(self, keyname=None):
"""
获取在哈希表中指定 keyname 的所有字段和值
返回哈希表中,所有的字段和值
在返回值里,紧跟每个字段名(field name)之后是字段的值(value),
所以返回值的长度是哈希表大小的两倍。
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
result = self.obj_redis.hgetall(keyname)
if not result:
return None
# bytes to str
ret_dict = dict()
for k, v in result.items():
ret_dict[bytes.decode(k)] = bytes.decode(v)
return ret_dict
def hExists(self, keyname=None, key=None):
"""
查看哈希表 keyname 中,是否存在键名为key的字段
ashname含有给定字段key,返回 True。
keyname不存在 或 key 不存在,返回 False
"""
if not keyname or key is None:
return None
keyname = self.key_make(keyname.strip())
return self.obj_redis.hexists(keyname, key)
def hDel(self, keyname=None, *keys):
"""
删除哈希表 key 中的一个或多个指定字段,不存在的字段将被忽略
返回值
被成功删除字段的数量,不包括被忽略的字段
keyname 或 key 不存在则返回 0
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
return self.obj_redis.hdel(keyname, *keys)
# --------------------------------------------------------
# List 列表, 左(Left)为头部,右(Right)为尾部
# 一个列表最多可以包含 232 - 1 个元素 (4294967295, 每个列表超过40亿个元素)
# --------------------------------------------------------
@wraps_set_expire
def lPush(self, keyname=None, *values):
"""
将一个或多个值插入到列表头部, 返回操作后列表的长度。
如果 key 不存在,一个空列表会被创建并执行 LPUSH 操作。
当 key 存在但不是列表类型时,返回一个错误。
注意:在Redis 2.4版本以前的 LPUSH 命令,都只接受单个 value 值
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
return self.obj_redis.lpush(keyname, *values)
@wraps_set_expire
def lPop(self, keyname=None):
"""
弹出队列头部元素,移除并返回列表的第一个元素。
返回列表的第一个元素。 当列表 key 不存在时,返回 None
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
return self.obj_redis.lpop(keyname)
@wraps_set_expire
def rPush(self, keyname=None, *values):
"""
将一个或多个值插入到列表的尾部(最右边), 返回操作后列表的长度。
如果列表不存在,一个空列表会被创建并执行 RPUSH 操作。
当列表存在但不是列表类型时,返回一个错误。
注意:在 Redis 2.4 版本以前的 RPUSH 命令,都只接受单个 value 值。
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
return self.obj_redis.rpush(keyname, *values)
@wraps_set_expire
def rPop(self, keyname=None):
"""
移除并获取列表最后一个元素
返回列表的最后一个元素。 当列表不存在时,返回 None
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
result = self.obj_redis.rpop(keyname)
if not result:
return None
# bytes to str
return bytes.decode(result)
@wraps_set_expire
def lLen(self, keyname=None):
"""
获取列表长度
如果列表 key 不存在,则 key 被解释为一个空列表,返回 0
如果 key 不是列表类型,返回一个错误
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
return self.obj_redis.llen(keyname)
@wraps_set_expire
def lTrim(self, keyname=None, start=0, end=-1):
"""
让列表只保留指定区间内的元素,不在指定区间之内的元素都将被删除
下标 0 表示列表的第一个元素,1 表示列表的第二个元素
-1 表示列表的最后一个元素,-2 表示列表的倒数第二个元素
返回 True
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
return self.obj_redis.ltrim(keyname, start, end)
@wraps_set_expire
def lGetRange(self, keyname=None, start=0, end=-1):
"""
返回列表中指定区间内的元素,区间以偏移量 START 和 END 指定
下标 0 表示列表的第一个元素,以 1 表示列表的第二个元素
-1 表示列表的最后一个元素, -2 表示列表的倒数第二个元素
返回一个列表,包含指定区间内的元素
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
result = self.obj_redis.lrange(keyname, start, end)
if not result:
return None
# bytes to str
ret_list = list()
for v in result:
ret_list.append(bytes.decode(v))
return ret_list
@wraps_set_expire
def lRemove(self, keyname=None, value=None, count=1):
"""
根据参数 COUNT 的值,移除列表中与参数 VALUE 相等的元素。
COUNT 的值可以是以下几种:
count > 0 : 从表头开始向表尾搜索,移除与 VALUE 相等的元素,数量为 COUNT 。
count < 0 : 从表尾开始向表头搜索,移除与 VALUE 相等的元素,数量为 COUNT 的绝对值。
count = 0 : 移除表中所有与 VALUE 相等的值。
返回被移除元素的数量。 列表或元素不存在时返回 0
"""
if not keyname or value is None:
return None
keyname = self.key_make(keyname.strip())
return self.obj_redis.lrem(keyname, count, value)
# --------------------------------------------------------
# Set 无序集合
# Set 是 String 类型的无序集合。集合成员是唯一的。
# 集合是通过哈希表实现的,所以添加,删除,查找的复杂度都是 O(1)
# 集合中最大的成员数为 232 - 1 (4294967295, 每个集合可存储40多亿个成员)
# --------------------------------------------------------
@wraps_set_expire
def sAdd(self, keyname=None, *values):
"""
将一个或多个成员元素加入到集合中,已经存在于集合的成员元素将被忽略。
假如集合 key 不存在,则创建一个只包含添加的元素作成员的集合。
当集合 key 不是集合类型时,返回一个错误。
注意:在Redis2.4版本以前, SADD 只接受单个成员值。
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
return self.obj_redis.sadd(keyname, *values)
@wraps_set_expire
def sCard(self, keyname=None):
"""
获取集合key中元素的数量
集合的数量。 当集合 key 不存在时,返回 0
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
return self.obj_redis.scard(keyname)
def sDiff(self, keyname=None, *keys):
"""
差集
返回所给key列表想减后的集合,相当于求差集
不存在的集合 key 将视为空集。
请注意顺序是前面的集合,减去后面的集合,求差集
返回包含差集成员的列表
"""
if not keyname:
return None
other_keys = list()
for k in keys:
other_keys.append(self.key_make(k))
result = self.obj_redis.sdiff(keyname, *other_keys)
if not result:
return None
# bytes to str
ret_set = set()
for v in result:
ret_set.add(bytes.decode(v))
return ret_set
@wraps_set_expire
def sDiffStore(self, store_key=None, key=None, *keys):
"""
差集并存储
给定所有集合的差集并存储在 store_key 中
将给定集合之间的差集存储在指定的集合中。
如果指定的集合 key 已存在,则会被覆盖
返回store_key结果集中的元素数量
"""
if not store_key or not key:
return None
store_key = self.key_make(store_key.strip())
key = self.key_make(key.strip())
other_keys = list()
for k in keys:
other_keys.append(self.key_make(k))
return self.obj_redis.sdiffstore(store_key, key, *other_keys)
def sInter(self, keyname=None, *keys):
"""
交集
返回给定所有给定集合的交集。 不存在的集合 key 被视为空集。
当给定集合当中有一个空集或key不存在时,结果也为空集(根据集合运算定律)。
"""
if not keyname:
return None
keyname = self.key_make(keyname.strip())
other_keys = list()
for k in keys:
other_keys.append(self.key_make(k))
result = self.obj_redis.sinter(keyname, *other_keys)
if not result:
return None
# | |
# Repository: picsag/NLP
#!/usr/bin/env python
# coding: utf-8
# # Assignment 3: Question Answering
#
# Welcome to this week's assignment of course 4. In this you will explore question answering. You will implement the "Text-to-Text Transfer Transformer" (better known as T5). Since you implemented transformers from scratch last week you will now be able to use them.
#
# <img src = "images/qa.png">
#
# ## Important Note on Submission to the AutoGrader
#
# Before submitting your assignment to the AutoGrader, please make sure you are not doing the following:
#
# 1. You have not added any _extra_ `print` statement(s) in the assignment.
# 2. You have not added any _extra_ code cell(s) in the assignment.
# 3. You have not changed any of the function parameters.
# 4. You are not using any global variables inside your graded exercises. Unless specifically instructed to do so, please refrain from it and use the local variables instead.
# 5. You are not changing the assignment code where it is not required, like creating _extra_ variables.
#
# If you do any of the following, you will get something like, `Grader not found` (or similarly unexpected) error upon submitting your assignment. Before asking for help/debugging the errors in your assignment, check for these first. If this is the case, and you don't remember the changes you have made, you can get a fresh copy of the assignment by following these [instructions](https://www.coursera.org/learn/attention-models-in-nlp/supplement/shBOS/how-to-refresh-your-workspace).
# ## Outline
#
# - [Overview](#0)
# - [Part 0: Importing the Packages](#0)
# - [Part 1: C4 Dataset](#1)
# - [1.1 Pre-Training Objective](#1.1)
# - [1.2 Process C4](#1.2)
# - [1.2.1 Decode to natural language](#1.2.1)
# - [1.3 Tokenizing and Masking](#1.3)
# - [Exercise 01](#ex01)
# - [1.4 Creating the Pairs](#1.4)
# - [Part 2: Transfomer](#2)
# - [2.1 Transformer Encoder](#2.1)
# - [2.1.1 The Feedforward Block](#2.1.1)
# - [Exercise 02](#ex02)
# - [2.1.2 The Encoder Block](#2.1.2)
# - [Exercise 03](#ex03)
# - [2.1.3 The Transformer Encoder](#2.1.3)
# - [Exercise 04](#ex04)
# <a name='0'></a>
# ### Overview
#
# This assignment will be different from the two previous ones. Due to memory and time constraints of this environment you will not be able to train a model and use it for inference. Instead you will create the necessary building blocks for the transformer encoder model and will use a pretrained version of the same model in two ungraded labs after this assignment.
#
# After completing these 3 (1 graded and 2 ungraded) labs you will:
# * Implement the code neccesary for Bidirectional Encoder Representation from Transformer (BERT).
# * Understand how the C4 dataset is structured.
# * Use a pretrained model for inference.
# * Understand how the "Text-to-Text Transfer Transformer" or T5 model works.
# <a name='0'></a>
# # Part 0: Importing the Packages
# In[1]:
import ast
import pprint
import string
import textwrap
import itertools
import numpy as np
import w3_tests
import trax
from trax import layers as tl
import trax.supervised.trainer_lib
import trax.models
import trax.optimizers
from trax.supervised import decoding
# Will come handy later.
wrapper = textwrap.TextWrapper(width=70)
# Set random seed
np.random.seed(42)
# <a name='1'></a>
# ## Part 1: C4 Dataset
#
# The [C4](https://www.tensorflow.org/datasets/catalog/c4) is a huge data set. For the purpose of this assignment you will use a few examples out of it which are present in `data.txt`. C4 is based on the [common crawl](https://commoncrawl.org/) project. Feel free to read more on their website.
#
# Run the cell below to see how the examples look like.
# In[2]:
# load example jsons
example_jsons = list(map(ast.literal_eval, open('data/data.txt')))
# In[3]:
# Printing the examples to see how the data looks like
for i in range(5):
print(f'example number {i+1}: \n\n{example_jsons[i]} \n')
# Notice the `b` before each string? This means that this data comes as bytes rather than strings. Strings are actually lists of bytes so for the rest of the assignments the name `strings` will be used to describe the data.
#
# To check this run the following cell:
# In[4]:
type(example_jsons[0].get('text'))
# <a name='1.1'></a>
# ### 1.1 Pre-Training Objective
#
# **Note:** The word "mask" will be used throughout this assignment in context of hiding/removing word(s)
#
# You will be implementing the BERT loss as shown in the following image.
#
# <img src = "images/loss.png" width="600" height = "400">
#
# Assume you have the following text: <span style = "color:blue"> **Thank you <span style = "color:red">for inviting </span> me to your party <span style = "color:red">last</span> week** </span>
#
#
# Now as input you will mask the words in red in the text:
#
# <span style = "color:blue"> **Input:**</span> Thank you **X** me to your party **Y** week.
#
# <span style = "color:blue">**Output:**</span> The model should predict the words(s) for **X** and **Y**.
#
# **Z** is used to represent the end.
# <a name='1.2'></a>
# ### 1.2 Process C4
#
# C4 only has the plain string `text` field, so you will tokenize and have `inputs` and `targets` out of it for supervised learning. Given your inputs, the goal is to predict the targets during training.
#
# You will now take the `text` and convert it to `inputs` and `targets`.
# In[5]:
# Grab text field from dictionary
natural_language_texts = [example_json['text'] for example_json in example_jsons]
# In[6]:
# First text example
natural_language_texts[4]
# <a name='1.2.1'></a>
# #### 1.2.1 Decode to natural language
#
# The following functions will help you `detokenize` and`tokenize` the text data.
#
# The `sentencepiece` vocabulary was used to convert from text to ids. This vocabulary file is loaded and used in these helper functions.
#
# `natural_language_texts` has the text from the examples we gave you.
#
# Run the cells below to see what is going on.
# In[7]:
# Special tokens
PAD, EOS, UNK = 0, 1, 2
def detokenize(np_array):
    """Convert an array of token ids back to text via the sentencepiece vocab."""
    vocab_config = dict(
        vocab_type='sentencepiece',
        vocab_file='sentencepiece.model',
        vocab_dir='./models',
    )
    return trax.data.detokenize(np_array, **vocab_config)
def tokenize(s):
    """Tokenize a single string into token ids via the sentencepiece vocab."""
    # trax.data.tokenize operates on streams, so feed it a one-element
    # iterator and pull the single tokenized result back out with next().
    token_stream = trax.data.tokenize(
        iter([s]),
        vocab_type='sentencepiece',
        vocab_file='sentencepiece.model',
        vocab_dir='./models')
    return next(token_stream)
# In[8]:
# printing the encoding of each word to see how subwords are tokenized
tokenized_text = [(tokenize(word).tolist(), word) for word in natural_language_texts[0].split()]
print(tokenized_text, '\n')
# In[9]:
# We can see that detokenize successfully undoes the tokenization
print(f"tokenized: {tokenize('Beginners')}\ndetokenized: {detokenize(tokenize('Beginners'))}")
# As you can see above, you were able to take a piece of string and tokenize it.
#
# Now you will create `input` and `target` pairs that will allow you to train your model. T5 uses the ids at the end of the vocab file as sentinels. For example, it will replace:
# - `vocab_size - 1` by `<Z>`
# - `vocab_size - 2` by `<Y>`
# - and so forth.
#
# It assigns every word a `chr`.
#
# The `pretty_decode` function below, which you will use in a bit, helps in handling the type when decoding. Take a look and try to understand what the function is doing.
#
#
# Notice that:
# ```python
# string.ascii_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
# ```
#
# **NOTE:** Targets may have more than the 52 sentinels we replace, but this is just to give you an idea of things.
# In[10]:
vocab_size = trax.data.vocab_size(
vocab_type='sentencepiece',
vocab_file='sentencepiece.model',
vocab_dir='./models')
def get_sentinels(vocab_size=32000, display=False):
    """Map the decoded text of each sentinel id to a ``<letter>`` marker.

    T5 uses the last ids of the vocabulary as sentinels: ``vocab_size - 1``
    maps to ``<Z>``, ``vocab_size - 2`` to ``<Y>``, and so on down through
    the ASCII letters.
    """
    sentinels = {}
    for offset, letter in enumerate(reversed(string.ascii_letters), 1):
        token_text = detokenize([vocab_size - offset])
        # Sentinels, ex: <Z> - <a>
        sentinels[token_text] = f'<{letter}>'
        if display:
            print(f'The sentinel is <{letter}> and the decoded token is:', token_text)
    return sentinels
# In[11]:
sentinels = get_sentinels(vocab_size, display=True)
# In[12]:
def pretty_decode(encoded_str_list, sentinels):
    """Replace sentinel token text with ``<letter>`` markers for readability.

    Accepts either an already-decoded string (or bytes) or a sequence of
    token ids, which is detokenized first and then prettified.
    """
    if not isinstance(encoded_str_list, (str, bytes)):
        # Token ids: decode to text, then prettify recursively.
        return pretty_decode(detokenize(encoded_str_list), sentinels)
    text = encoded_str_list
    for token_text, marker in sentinels.items():
        text = text.replace(token_text, marker)
    return text
# In[13]:
pretty_decode("I want to dress up as an Intellectual this halloween.", sentinels)
# The functions above make your `inputs` and `targets` more readable. For example, you might see something like this once you implement the masking function below.
#
# - <span style="color:red"> Input sentence: </span> Younes and Lukasz were working together in the lab yesterday after lunch.
# - <span style="color:red">Input: </span> Younes and Lukasz **Z** together in the **Y** yesterday after lunch.
# - <span style="color:red">Target: </span> **Z** were working **Y** lab.
# | |
dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
result['cpf_list'] = []
if self.cpf_list is not None:
for k in self.cpf_list:
result['cpf_list'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
    """Populate this response from a plain dict, overwriting set fields only.

    ``cpf_list`` entries are materialized as ``TdmCpfProvinceVO`` models.
    Returns self for chaining.
    """
    m = m or dict()
    for field in ('req_msg_id', 'result_code', 'result_msg'):
        if m.get(field) is not None:
            setattr(self, field, m.get(field))
    self.cpf_list = []
    if m.get('cpf_list') is not None:
        for item in m.get('cpf_list'):
            self.cpf_list.append(TdmCpfProvinceVO().from_map(item))
    return self
class CreateCpfVerifyRequest(TeaModel):
    """Request model for starting a CPF (trusted real-person) verification."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        user_id: str = None,
        user_name: str = None,
        meta_info: str = None,
    ):
        # Authorization token used in OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # User's ID-card number (only ID cards are currently supported)
        self.user_id = user_id
        # User's name
        self.user_name = user_name
        # Environment payload, obtained through the client SDK
        self.meta_info = meta_info

    def validate(self):
        # user_id, user_name and meta_info are all mandatory.
        for field in ('user_id', 'user_name', 'meta_info'):
            self.validate_required(getattr(self, field), field)

    def to_map(self):
        """Serialize set fields to a dict keyed by the wire-format names."""
        result = dict()
        for field in ('auth_token', 'product_instance_id', 'user_id',
                      'user_name', 'meta_info'):
            value = getattr(self, field)
            if value is not None:
                result[field] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields present in ``m``; returns self for chaining."""
        m = m or dict()
        for field in ('auth_token', 'product_instance_id', 'user_id',
                      'user_name', 'meta_info'):
            if m.get(field) is not None:
                setattr(self, field, m.get(field))
        return self
class CreateCpfVerifyResponse(TeaModel):
    """Response model for starting a CPF verification."""

    # Serialized fields, in wire order.
    _SIMPLE_FIELDS = None  # computed inline; kept simple for generated code

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        certify_id: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded
        self.result_code = result_code
        # Textual description of any error
        self.result_msg = result_msg
        # Unique identifier of the trusted real-person verification
        self.certify_id = certify_id

    def validate(self):
        pass

    def to_map(self):
        """Serialize set fields to a dict keyed by the wire-format names."""
        result = dict()
        for field in ('req_msg_id', 'result_code', 'result_msg', 'certify_id'):
            value = getattr(self, field)
            if value is not None:
                result[field] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields present in ``m``; returns self for chaining."""
        m = m or dict()
        for field in ('req_msg_id', 'result_code', 'result_msg', 'certify_id'):
            if m.get(field) is not None:
                setattr(self, field, m.get(field))
        return self
class QueryCpfVerifyRequest(TeaModel):
    """Request model for querying the result of a CPF verification."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        certify_id: str = None,
    ):
        # Authorization token used in OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Unique identifier of the trusted real-person verification
        self.certify_id = certify_id

    def validate(self):
        # Only certify_id is mandatory.
        self.validate_required(self.certify_id, 'certify_id')

    def to_map(self):
        """Serialize set fields to a dict keyed by the wire-format names."""
        result = dict()
        for field in ('auth_token', 'product_instance_id', 'certify_id'):
            value = getattr(self, field)
            if value is not None:
                result[field] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields present in ``m``; returns self for chaining."""
        m = m or dict()
        for field in ('auth_token', 'product_instance_id', 'certify_id'):
            if m.get(field) is not None:
                setattr(self, field, m.get(field))
        return self
class QueryCpfVerifyResponse(TeaModel):
    """Response model for querying a CPF verification result."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        verify_vo: TdmVerifyLogVO = None,
    ):
        # Unique request id, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded
        self.result_code = result_code
        # Textual description of any error
        self.result_msg = result_msg
        # Identity-verification result
        self.verify_vo = verify_vo

    def validate(self):
        if self.verify_vo:
            self.verify_vo.validate()

    def to_map(self):
        """Serialize set fields; the nested model is serialized recursively."""
        result = dict()
        for field in ('req_msg_id', 'result_code', 'result_msg'):
            value = getattr(self, field)
            if value is not None:
                result[field] = value
        if self.verify_vo is not None:
            result['verify_vo'] = self.verify_vo.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate fields present in ``m``; returns self for chaining."""
        m = m or dict()
        for field in ('req_msg_id', 'result_code', 'result_msg'):
            if m.get(field) is not None:
                setattr(self, field, m.get(field))
        if m.get('verify_vo') is not None:
            self.verify_vo = TdmVerifyLogVO().from_map(m['verify_vo'])
        return self
class RecognizeCpfAuthRequest(TeaModel):
    """Request model for recognizing/recording a CPF data authorization."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        request_id: str = None,
        data_owner_identity: str = None,
        data_owner_name: str = None,
        authorized_identity: str = None,
        authorized_name: str = None,
        target_code: str = None,
        auth_agreement: AuthAgreement = None,
        certification_info: CertificationInfo = None,
        content: str = None,
    ):
        # Authorization token used in OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Business serial number
        self.request_id = request_id
        # Data owner's ID-card number
        self.data_owner_identity = data_owner_identity
        # Data owner's name
        self.data_owner_name = data_owner_name
        # Authorized organization id
        self.authorized_identity = authorized_identity
        # Authorized organization name
        self.authorized_name = authorized_name
        # Authorization target code
        self.target_code = target_code
        # Authorization agreement
        self.auth_agreement = auth_agreement
        # Identity-verification information
        self.certification_info = certification_info
        # Extension field
        self.content = content

    def validate(self):
        # Mandatory plain fields.
        for field in ('request_id', 'data_owner_identity',
                      'authorized_identity', 'target_code'):
            self.validate_required(getattr(self, field), field)
        # Mandatory nested models, validated recursively when present.
        self.validate_required(self.auth_agreement, 'auth_agreement')
        if self.auth_agreement:
            self.auth_agreement.validate()
        self.validate_required(self.certification_info, 'certification_info')
        if self.certification_info:
            self.certification_info.validate()

    def to_map(self):
        """Serialize set fields; nested models serialize recursively.

        Key order matches the generated wire format: plain fields, then the
        two nested models, then the trailing ``content`` extension field.
        """
        result = dict()
        for field in ('auth_token', 'product_instance_id', 'request_id',
                      'data_owner_identity', 'data_owner_name',
                      'authorized_identity', 'authorized_name', 'target_code'):
            value = getattr(self, field)
            if value is not None:
                result[field] = value
        if self.auth_agreement is not None:
            result['auth_agreement'] = self.auth_agreement.to_map()
        if self.certification_info is not None:
            result['certification_info'] = self.certification_info.to_map()
        if self.content is not None:
            result['content'] = self.content
        return result

    def from_map(self, m: dict = None):
        """Populate fields present in ``m``; returns self for chaining."""
        m = m or dict()
        for field in ('auth_token', 'product_instance_id', 'request_id',
                      'data_owner_identity', 'data_owner_name',
                      'authorized_identity', 'authorized_name', 'target_code',
                      'content'):
            if m.get(field) is not None:
                setattr(self, field, m.get(field))
        if m.get('auth_agreement') is not None:
            self.auth_agreement = AuthAgreement().from_map(m['auth_agreement'])
        if m.get('certification_info') is not None:
            self.certification_info = CertificationInfo().from_map(m['certification_info'])
        return self
class RecognizeCpfAuthResponse(TeaModel):
    """Response model for a CPF authorization recognition call."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        auth_code: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally means the call succeeded
        self.result_code = result_code
        # Textual description of any error
        self.result_msg = result_msg
        # Authorization code
        self.auth_code = auth_code

    def validate(self):
        pass

    def to_map(self):
        """Serialize set fields to a dict keyed by the wire-format names."""
        result = dict()
        for field in ('req_msg_id', 'result_code', 'result_msg', 'auth_code'):
            value = getattr(self, field)
            if value is not None:
                result[field] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields present in ``m``; returns self for chaining."""
        m = m or dict()
        for field in ('req_msg_id', 'result_code', 'result_msg', 'auth_code'):
            if m.get(field) is not None:
                setattr(self, field, m.get(field))
        return self
class ExecAuthRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
authorized_identity: str = None,
authorized_platform_identity: str = None,
auth_agreement: AuthAgreement = None,
content: str = None,
data_owner_identity: str = None,
request_id: str = None,
target_code: str = None,
certification_type: str = None,
certification_info: str = None,
):
# OAuth模式下的授权token
self.auth_token = auth_token
self.product_instance_id = product_instance_id
# 被授权租户身份ID:
#
# 身份证号/统一社会组织机构信用码
self.authorized_identity = authorized_identity
# 分配code
#
#
self.authorized_platform_identity = authorized_platform_identity
# 授权协议
self.auth_agreement = auth_agreement
# 扩展字段,目前是一个json串,传入相关附加信息,如果对应的附加信息key不需要,可以不传,
#
# key对应的value数据字典由平台提供
self.content = content
# 授权租户身份ID:
# 身份证号/统一社会组织机构信用码
#
#
self.data_owner_identity = data_owner_identity
# 请求流水号(64位 由平台方定义)_
# 幂等标示
self.request_id = request_id
# 标的物
#
#
self.target_code = target_code
# 核身产品类型
self.certification_type = certification_type
# 核身信息
#
#
self.certification_info = certification_info
def validate(self):
self.validate_required(self.authorized_identity, 'authorized_identity')
self.validate_required(self.authorized_platform_identity, 'authorized_platform_identity')
self.validate_required(self.auth_agreement, 'auth_agreement')
if self.auth_agreement:
self.auth_agreement.validate()
self.validate_required(self.content, 'content')
self.validate_required(self.data_owner_identity, 'data_owner_identity')
self.validate_required(self.request_id, 'request_id')
self.validate_required(self.target_code, 'target_code')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_instance_id is not None:
result['product_instance_id'] = self.product_instance_id
if self.authorized_identity is not None:
result['authorized_identity'] = self.authorized_identity
if self.authorized_platform_identity is not None:
result['authorized_platform_identity'] = self.authorized_platform_identity
if self.auth_agreement is not None:
result['auth_agreement'] = self.auth_agreement.to_map()
if self.content is not None:
result['content'] = self.content
if self.data_owner_identity is not None:
result['data_owner_identity'] = | |
may have less dimension labels.
"""
return _process('filter_temporal', data=data, extent=extent, dimension=dimension)
def first(data, ignore_nodata=UNSET) -> ProcessBuilder:
    """Return the first element of an array.

    :param data: An array with elements of any data type.
    :param ignore_nodata: Skip no-data values (the default). When set to
        `false`, a leading no-data value makes the result `null`.
    :return: The first element of the input array.
    """
    arguments = {'data': data, 'ignore_nodata': ignore_nodata}
    return _process('first', **arguments)
def floor(x) -> ProcessBuilder:
    """Round a fractional number down.

    :param x: A number to round down.
    :return: The number rounded down.
    """
    arguments = {'x': x}
    return _process('floor', **arguments)
def gt(x, y) -> ProcessBuilder:
    """Strict greater-than comparison.

    :param x: First operand.
    :param y: Second operand.
    :return: `true` when `x` is strictly greater than `y`, `null` when any
        operand is `null`, otherwise `false`.
    """
    arguments = {'x': x, 'y': y}
    return _process('gt', **arguments)
def gte(x, y) -> ProcessBuilder:
    """Greater-than-or-equal comparison.

    :param x: First operand.
    :param y: Second operand.
    :return: `true` when `x` is greater than or equal to `y`, `null` when
        any operand is `null`, otherwise `false`.
    """
    arguments = {'x': x, 'y': y}
    return _process('gte', **arguments)
def if_(value, accept, reject=UNSET) -> ProcessBuilder:
    """If-then-else conditional.

    :param value: A boolean value.
    :param accept: Returned when the boolean value is `true`.
    :param reject: Returned when the boolean value is **not** `true`.
        Defaults to `null`.
    :return: Either ``accept`` or ``reject`` depending on ``value``.
    """
    arguments = {'value': value, 'accept': accept, 'reject': reject}
    return _process('if', **arguments)
def int(x) -> ProcessBuilder:
    """Integer part of a number.

    NOTE: this deliberately shadows the builtin ``int`` — the name mirrors
    the openEO process id this wrapper is generated for.

    :param x: A number.
    :return: The integer part of the number.
    """
    arguments = {'x': x}
    return _process('int', **arguments)
def is_infinite(x) -> ProcessBuilder:
    """Check whether a value is an infinite number.

    :param x: The data to check.
    :return: `true` when the data is an infinite number, otherwise `false`.
    """
    arguments = {'x': x}
    return _process('is_infinite', **arguments)
def is_nan(x) -> ProcessBuilder:
    """Check whether a value is not a number (NaN).

    :param x: The data to check.
    :return: `true` when the data is not a number, otherwise `false`.
    """
    arguments = {'x': x}
    return _process('is_nan', **arguments)
def is_nodata(x) -> ProcessBuilder:
    """Check whether a value is a no-data value.

    :param x: The data to check.
    :return: `true` when the data is a no-data value, otherwise `false`.
    """
    arguments = {'x': x}
    return _process('is_nodata', **arguments)
def is_valid(x) -> ProcessBuilder:
    """Check whether a value is valid data.

    :param x: The data to check.
    :return: `true` when the data is valid, otherwise `false`.
    """
    arguments = {'x': x}
    return _process('is_valid', **arguments)
def last(data, ignore_nodata=UNSET) -> ProcessBuilder:
    """Return the last element of an array.

    :param data: An array with elements of any data type.
    :param ignore_nodata: Skip no-data values (the default). When set to
        `false`, a trailing no-data value makes the result `null`.
    :return: The last element of the input array.
    """
    arguments = {'data': data, 'ignore_nodata': ignore_nodata}
    return _process('last', **arguments)
def linear_scale_range(x, inputMin, inputMax, outputMin=UNSET, outputMax=UNSET) -> ProcessBuilder:
    """Linearly transform a number from one range into another.

    :param x: A number to transform; it gets clipped to the bounds given by
        ``inputMin`` and ``inputMax``.
    :param inputMin: Minimum value the input can obtain.
    :param inputMax: Maximum value the input can obtain.
    :param outputMin: Minimum value of the desired output range.
    :param outputMax: Maximum value of the desired output range.
    :return: The transformed number.
    """
    arguments = {
        'x': x,
        'inputMin': inputMin,
        'inputMax': inputMax,
        'outputMin': outputMin,
        'outputMax': outputMax,
    }
    return _process('linear_scale_range', **arguments)
def ln(x) -> ProcessBuilder:
    """Natural logarithm.

    :param x: A number to compute the natural logarithm for.
    :return: The computed natural logarithm.
    """
    arguments = {'x': x}
    return _process('ln', **arguments)
def load_collection(id, spatial_extent, temporal_extent, bands=UNSET, properties=UNSET) -> ProcessBuilder:
    """Load a collection as a data cube, optionally restricted in space,
    time, bands and metadata properties.

    :param id: The collection id.
    :param spatial_extent: Bounding box or GeoJSON polygons limiting the
        loaded data; a pixel is kept when its center intersects the extent
        (Simple Features semantics, OGC). Accepted GeoJSON feature types:
        a `Polygon`/`MultiPolygon` geometry, a `Feature` with such a
        geometry, a `FeatureCollection` with at least one such `Feature`,
        or a `GeometryCollection` of such geometries (discouraged for
        interoperability). Pass `null` for no spatial limit — beware with
        large datasets; prefer this parameter over chaining
        ``filter_bbox()``/``filter_spatial()`` after an unbounded load.
    :param temporal_extent: Left-closed temporal interval applied to all
        temporal dimensions, given as a two-element array: start (included)
        and end (excluded), as RFC 3339 strings. One boundary may be `null`
        for an open interval, but never both; pass `null` for no temporal
        limit — again, prefer this over ``filter_temporal()`` on unbounded
        data.
    :param bands: Band names to keep (applies to all dimensions of type
        `bands`). Either the unique band name (`name`) or a common band
        name (`common_name`) may be given; unique names win on conflict.
        The array order defines the band order in the cube; all bands
        matching a common name are kept in their original order. Prefer
        this over ``filter_bands()`` after an unbounded load.
    :param properties: Metadata-property conditions (AND-combined) as
        key/value pairs, where each value is a user-defined process
        evaluated against the collection metadata.
    :return: A data cube whose dimensions and dimension properties (name,
        type, labels, reference system, resolution) follow the collection
        metadata, with labels restricted as requested.
    """
    arguments = {
        'id': id,
        'spatial_extent': spatial_extent,
        'temporal_extent': temporal_extent,
        'bands': bands,
        'properties': properties,
    }
    return _process('load_collection', **arguments)
def load_result(id) -> ProcessBuilder:
    """Load the results of a batch job as a data cube.

    :param id: The id of a batch job with results.
    :return: A data cube for further processing.
    """
    arguments = {'id': id}
    return _process('load_result', **arguments)
# NOTE: a second, byte-identical definition of `load_result` was removed
# here. It immediately shadowed the definition directly above; rebinding
# the same name to an identical function had no effect and only confused
# readers (likely a code-generation duplication).
def load_uploaded_files(paths, format, options=UNSET) -> ProcessBuilder:
"""
Load files from the user workspace
:param paths: The files to read. Folders can't be specified, instead specify all files. An error is thrown
if a file can't be read.
:param format: The file format to read from. It must be one of the values that the server reports as
supported input file formats, which usually correspond to the short GDAL/OGR codes. If the format is not
suitable for loading the data, a `FormatUnsuitable` exception will be thrown. This parameter is *case
insensitive*.
:param options: The file format parameters to be used to read the files. Must correspond to the parameters
that the server reports as supported parameters for the chosen `format`. The parameter names and valid
values usually correspond to the GDAL/OGR format options.
:return: A data cube | |
# PAW_lib.py
"""PAW_lib.py: Written by <NAME> and <NAME>, OHSU.
Library of support functions and classes for PAW pipeline programs.
The MIT License (MIT)
Copyright (c) 2017 <NAME> and OHSU
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Direct questions to:
Technology & Research Collaborations, Oregon Health & Science University,
Ph: 503-494-8200, FAX: 503-494-4729, Email: <EMAIL>.
"""
# Updated for Python 3, Aug. 2017 -PW
# added a sample name parsing class, -PW 10/21/2017
# recoded older routines like "amino_acid_count" for better Comet support. -PW 10/27/2017
# np.linspace needed an integer for last argument (float conversion in np was failing sometimes) -PW 20200201
# added suppport for sample mapping from files in PAWShell -PW 20201005
import os
import sys
import glob
import fnmatch
import gzip
import time
import re
import copy
from pprint import pprint
from collections import OrderedDict
import tkinter
from tkinter import filedialog
from tkinter import messagebox
import pandas as pd
import numpy as np
MIN_PEP_LEN = 7
# this is only used in a debugging block
##import matplotlib.pyplot as pyplot # this seems incompatible with standard IDLE (OK with Anaconda)
###################### standard dialog boxes ###########################
# updated from fasta_lib.py -PW 9/16/2017
def get_folder(default_location, title_string=None):
    """Dialog box to browse to a folder. Returns the selected folder path.

    Usage: full_folder_name = get_folder(default_location, [title]),
        where "default_location" is a starting folder location,
        "title" is an optional message to list in the dialog box,
        and "full_folder_name" is the complete selected folder name.
    Written by <NAME>, 2008, 2016
    """
    # set up GUI elements (hidden root window that owns the dialog)
    root = tkinter.Tk()
    root.withdraw()
    try:
        root.tk.call('console', 'hide')
    except tkinter.TclError:
        # the Tcl 'console' command exists only on some platforms; a bare
        # except here also swallowed KeyboardInterrupt/SystemExit (bug fix)
        pass
    # set default title string and location if not passed
    if title_string is None:
        title_string = 'Select a folder with desired files/dirs'
    if not default_location:
        default_location = os.getcwd()
    # create dialog box for folder selection
    root.update()   # helps make sure dialog box goes away after selection
    full_folder_name = filedialog.askdirectory(parent=root, initialdir=default_location,
                                               title=title_string, mustexist=True)
    root.destroy()  # release the hidden Tk root so repeated calls do not leak windows
    # return full folder name
    return full_folder_name
def get_file(default_location, ext_list, title_string=None):
    """Dialog box to browse to a single file. Returns the full file name.

    Usage: full_file_name = get_file(default_location, ext_list, [title]),
        where "default_location" is a starting folder location,
        ext_list is a list of (label, pattern) tuples,
        e.g. ext_list = [('Text files', '*.txt')],
        "title" is an optional message to list in the dialog box, and
        "full_file_name" is the complete name of the selected file.
    Written by <NAME>, OHSU, 2008, 2016.
    """
    # set up GUI elements (hidden root window that owns the dialog)
    root = tkinter.Tk()
    root.withdraw()
    try:
        root.tk.call('console', 'hide')
    except tkinter.TclError:
        # the Tcl 'console' command exists only on some platforms; a bare
        # except here also swallowed KeyboardInterrupt/SystemExit (bug fix)
        pass
    # set default title string and ext list if not passed
    if title_string is None:
        title_string = 'Select a single FILE'
    if not ext_list:
        ext_list = [('All files', '*.*')]
    if not default_location:
        default_location = os.getcwd()
    # create dialog box for file selection
    root.update()   # helps make sure dialog box goes away after selection
    filename = filedialog.askopenfilename(parent=root, initialdir=default_location,
                                          filetypes=ext_list, title=title_string)
    root.destroy()  # release the hidden Tk root so repeated calls do not leak windows
    # return full filename
    return filename
def save_file(default_location, ext_list, default_file='', title_string=None):
    """Dialog box to save a file. Returns the full name of the desired file.

    Usage: full_file_name = save_file(def_loc, ext_list, [def_file], [title]),
        where "def_loc" is a starting folder location,
        ext_list is a list of (label, pattern) tuples,
        e.g. ext_list = [('Text files', '*.txt')],
        "def_file" is an optional default filename,
        "title" is an optional message to list in dialog box, and
        "full_file_name" is the complete name of the desired file.
    Written by <NAME>, OHSU, 2009, 2016.
    """
    # set up GUI elements (hidden root window that owns the dialog)
    root = tkinter.Tk()
    root.withdraw()
    try:
        root.tk.call('console', 'hide')
    except tkinter.TclError:
        # the Tcl 'console' command exists only on some platforms; a bare
        # except here also swallowed KeyboardInterrupt/SystemExit (bug fix)
        pass
    # set default title string if not passed
    if title_string is None:
        title_string = 'Select a single FILE'
    if not ext_list:
        ext_list = [('All files', '*.*')]
    if not default_location:
        default_location = os.getcwd()
    # create dialog box for file selection
    root.update()   # helps make sure dialog box goes away after selection
    filename = filedialog.asksaveasfilename(parent=root, initialdir=default_location,
                                            initialfile=default_file, filetypes=ext_list,
                                            title=title_string)
    root.destroy()  # release the hidden Tk root so repeated calls do not leak windows
    # return full filename
    return filename
def get_files(default_location, ext_list, title_string=None):
    """Dialog box to browse for files. Returns a tuple of file names.

    Usage: file_name_list = get_files(default_location, ext_list, [title]),
        where "default_location" is a starting folder location,
        ext_list is a list of (label, pattern) tuples,
        e.g. ext_list = [('Text files', '*.txt')],
        "title" is an optional message to list in the dialog box, and
        "file_name_list" is a tuple of file name(s).
    Written by <NAME>, OHSU, 2010, 2016.
    """
    # set up GUI elements (hidden root window that owns the dialog)
    root = tkinter.Tk()
    root.withdraw()
    try:
        # hide the Tcl console where it exists, consistent with the other
        # dialog helpers in this module (get_folder/get_file/save_file)
        root.tk.call('console', 'hide')
    except tkinter.TclError:
        pass
    # set default title string if not passed
    if title_string is None:
        title_string = 'Select one or more FILE(s)'
    if not ext_list:
        ext_list = [('All files', '*.*')]
    if not default_location:
        default_location = os.getcwd()
    # create dialog box for file selection
    root.update()   # helps make sure dialog box goes away after selection
    filenames = filedialog.askopenfilenames(parent=root, initialdir=default_location,
                                            filetypes=ext_list, multiple=True,
                                            title=title_string)
    root.destroy()  # release the hidden Tk root so repeated calls do not leak windows
    return filenames
def get_string(title, prompt='Enter a string', initial=''):
    """Prompt the user for a string with a simple entry dialog.

    title: dialog window title
    prompt: message shown next to the entry box
    initial: pre-filled entry value
    """
    from tkinter.simpledialog import askstring as _askstring
    return _askstring(title, prompt, initialvalue=initial)
def get_yesno(title, message='Answer yes or no', **options):
    """Ask a yes/no question in a message box; yes -> True, no -> False."""
    # messagebox is imported at the top of this module
    return messagebox.askyesno(title, message, **options)
################## some support functions/classes for PAW pipeline use #####################
# updated for 2017 Comet compatibility (new mod formats) -PW 10/27/2017
class Peptide:
"""An object for Comet peptide strings.
"""
    def __init__(self, sequence, delim='.', enzyme='Trypsin'):
        """Parse one SEQUEST/Comet peptide string and compute its properties.

        sequence: peptide string, usually "X.PEPTIDE.X" with bounding residues
        delim: character separating the bounding residues from the sequence
        enzyme: protease name passed to compute_net for the termini count
        """
        self.full_seq = sequence        # original string
        self.enzyme = enzyme            # protease used for the NET computation
        self.prefix = None              # preceding residue string
        self.seq = None                 # actual peptide sequence
        self.suffix = None              # following residue string
        self.base_seq = None            # actual peptide sequence without any mods
        self.net = None                 # number of enzymatic termini (given enzyme)
        self.length = None              # number of amino acids in base sequence
        # compile a couple of regex; these MUST be set before split_peptide is
        # called below, because split_peptide uses them to strip modifications
        self.new_mods = re.compile(r'\[[-+]?([0-9]+(\.[0-9]*)?|\.[0-9]+)\]')
        self.old_mods = re.compile(r'[*#@^~$%!+nc\[\]\{\}\(\)]')
        # load attributes
        self.split_peptide(delim)
        self.compute_net(enzyme)
def split_peptide(self, delim):
"""This splits SEQUEST/Comet peptide strings into prefix, sequence, and suffix.
Computes some things and sets some attributes; supports the new bracketed
floating point modification format (Comet 2017 and newer).
"""
# this removes new Comet modification notation (bracketed floating points)
base_seq = self.new_mods.sub('', self.full_seq)
# probably have bounding residues delimited by periods
items = base_seq.split(delim)
if len(items) == 3:
self.prefix, middle, self.suffix = items
self.seq = self.full_seq[len(self.prefix) + 1: -(len(self.suffix) + 1)]
elif len(items) == 1:
self.prefix, self.suffix = 'X', 'X'
middle = items[0]
self.seq = self.full_seq
else:
print('WARNING: malformed peptide string:', self.full_seq)
# remove older style modification symbols: *#@^~$%!+[](){} and 'n', 'c'
self.base_seq = self.old_mods.sub('', middle)
self.length = len(self.base_seq)
return
def _N_side_cleavage(self, prefix, prefix_pattern, nterm, nterm_pattern, suffix, suffix_pattern):
"""Computes number of termini constent with protease cleavage for N-terminal side cutters."""
self.net = 0
if (prefix in prefix_pattern) or (nterm in nterm_pattern):
self.net += 1
if suffix in suffix_pattern:
self.net += 1
def _C_side_cleavage(self, prefix, prefix_pattern, cterm, cterm_pattern, suffix, suffix_pattern, noP=True):
"""Computes number of termini constent with protease cleavage for C-terminal side cutters."""
self.net = 0
ct_okay = False
if prefix in prefix_pattern:
self.net += 1
if (cterm in cterm_pattern) or (suffix in suffix_pattern):
self.net += 1
ct_okay = True
if noP and (suffix == 'P') and (self.net > 0) and ct_okay: # trypsin strict
self.net -= 1
def compute_net(self, enzyme):
"""Figures out the number of peptide termini consistent with the enzyme cleavage.
Written by <NAME>, OHSU, 2008, rewritten 2017.
"""
# valid amino acid characters
amino_acids = set(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', | |
import sys
import typing
import bpy_types
class PhysicButtonsPanel:
    """Auto-generated stub of the shared physics-panel mixin.

    All attributes are ``None`` placeholders and the method body is ``pass``:
    this module mirrors only the API surface (presumably for IDE completion /
    static analysis — confirm against the stub generator). Real values are
    provided by Blender's ``bpy`` runtime.
    """
    bl_context = None
    ''' '''
    bl_region_type = None
    ''' '''
    bl_space_type = None
    ''' '''

    def poll(self, context):
        '''
        '''
        pass
class PHYSICS_PT_softbody(PhysicButtonsPanel, bpy_types.Panel,
                          bpy_types._GenericUI):
    """Auto-generated stub of the ``PHYSICS_PT_softbody`` panel class.

    Every attribute is a ``None`` placeholder and every method body is
    ``pass``: this stub only documents the API surface inherited from
    ``bpy_types.Panel`` / ``bpy_types._GenericUI``; the real implementation
    lives in Blender's runtime.
    """
    # --- class-level placeholders (populated by Blender's RNA at runtime — TODO confirm) ---
    COMPAT_ENGINES = None
    ''' '''
    bl_context = None
    ''' '''
    bl_label = None
    ''' '''
    bl_region_type = None
    ''' '''
    bl_rna = None
    ''' '''
    bl_space_type = None
    ''' '''
    id_data = None
    ''' '''

    # --- stub methods mirroring the bpy_types.Panel / _GenericUI API ---
    def append(self, draw_func):
        '''
        '''
        pass

    def as_pointer(self):
        '''
        '''
        pass

    def bl_rna_get_subclass(self):
        '''
        '''
        pass

    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass

    def draw(self, context):
        '''
        '''
        pass

    def driver_add(self):
        '''
        '''
        pass

    def driver_remove(self):
        '''
        '''
        pass

    def get(self):
        '''
        '''
        pass

    def is_extended(self):
        '''
        '''
        pass

    def is_property_hidden(self):
        '''
        '''
        pass

    def is_property_overridable_library(self):
        '''
        '''
        pass

    def is_property_readonly(self):
        '''
        '''
        pass

    def is_property_set(self):
        '''
        '''
        pass

    def items(self):
        '''
        '''
        pass

    def keyframe_delete(self):
        '''
        '''
        pass

    def keyframe_insert(self):
        '''
        '''
        pass

    def keys(self):
        '''
        '''
        pass

    def path_from_id(self):
        '''
        '''
        pass

    def path_resolve(self):
        '''
        '''
        pass

    def poll(self, context):
        '''
        '''
        pass

    def pop(self):
        '''
        '''
        pass

    def prepend(self, draw_func):
        '''
        '''
        pass

    def property_overridable_library_set(self):
        '''
        '''
        pass

    def property_unset(self):
        '''
        '''
        pass

    def remove(self, draw_func):
        '''
        '''
        pass

    def type_recast(self):
        '''
        '''
        pass

    def values(self):
        '''
        '''
        pass
class PHYSICS_PT_softbody_cache(PhysicButtonsPanel, bpy_types.Panel,
                                bpy_types._GenericUI):
    """Auto-generated stub of the ``PHYSICS_PT_softbody_cache`` sub-panel.

    Every attribute is a ``None`` placeholder and every method body is
    ``pass``: this stub only documents the API surface inherited from
    ``bpy_types.Panel`` / ``bpy_types._GenericUI``; the real implementation
    lives in Blender's runtime.
    """
    # --- class-level placeholders (populated by Blender's RNA at runtime — TODO confirm) ---
    COMPAT_ENGINES = None
    ''' '''
    bl_context = None
    ''' '''
    bl_label = None
    ''' '''
    bl_options = None
    ''' '''
    bl_parent_id = None
    ''' '''
    bl_region_type = None
    ''' '''
    bl_rna = None
    ''' '''
    bl_space_type = None
    ''' '''
    id_data = None
    ''' '''

    # --- stub methods mirroring the bpy_types.Panel / _GenericUI API ---
    def append(self, draw_func):
        '''
        '''
        pass

    def as_pointer(self):
        '''
        '''
        pass

    def bl_rna_get_subclass(self):
        '''
        '''
        pass

    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass

    def draw(self, context):
        '''
        '''
        pass

    def driver_add(self):
        '''
        '''
        pass

    def driver_remove(self):
        '''
        '''
        pass

    def get(self):
        '''
        '''
        pass

    def is_extended(self):
        '''
        '''
        pass

    def is_property_hidden(self):
        '''
        '''
        pass

    def is_property_overridable_library(self):
        '''
        '''
        pass

    def is_property_readonly(self):
        '''
        '''
        pass

    def is_property_set(self):
        '''
        '''
        pass

    def items(self):
        '''
        '''
        pass

    def keyframe_delete(self):
        '''
        '''
        pass

    def keyframe_insert(self):
        '''
        '''
        pass

    def keys(self):
        '''
        '''
        pass

    def path_from_id(self):
        '''
        '''
        pass

    def path_resolve(self):
        '''
        '''
        pass

    def poll(self, context):
        '''
        '''
        pass

    def pop(self):
        '''
        '''
        pass

    def prepend(self, draw_func):
        '''
        '''
        pass

    def property_overridable_library_set(self):
        '''
        '''
        pass

    def property_unset(self):
        '''
        '''
        pass

    def remove(self, draw_func):
        '''
        '''
        pass

    def type_recast(self):
        '''
        '''
        pass

    def values(self):
        '''
        '''
        pass
class PHYSICS_PT_softbody_collision(PhysicButtonsPanel, bpy_types.Panel,
                                    bpy_types._GenericUI):
    """Auto-generated stub of the ``PHYSICS_PT_softbody_collision`` sub-panel.

    Every attribute is a ``None`` placeholder and every method body is
    ``pass``: this stub only documents the API surface inherited from
    ``bpy_types.Panel`` / ``bpy_types._GenericUI`` (including the extra
    ``draw_header`` hook); the real implementation lives in Blender's runtime.
    """
    # --- class-level placeholders (populated by Blender's RNA at runtime — TODO confirm) ---
    COMPAT_ENGINES = None
    ''' '''
    bl_context = None
    ''' '''
    bl_label = None
    ''' '''
    bl_options = None
    ''' '''
    bl_parent_id = None
    ''' '''
    bl_region_type = None
    ''' '''
    bl_rna = None
    ''' '''
    bl_space_type = None
    ''' '''
    id_data = None
    ''' '''

    # --- stub methods mirroring the bpy_types.Panel / _GenericUI API ---
    def append(self, draw_func):
        '''
        '''
        pass

    def as_pointer(self):
        '''
        '''
        pass

    def bl_rna_get_subclass(self):
        '''
        '''
        pass

    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass

    def draw(self, context):
        '''
        '''
        pass

    def draw_header(self, context):
        '''
        '''
        pass

    def driver_add(self):
        '''
        '''
        pass

    def driver_remove(self):
        '''
        '''
        pass

    def get(self):
        '''
        '''
        pass

    def is_extended(self):
        '''
        '''
        pass

    def is_property_hidden(self):
        '''
        '''
        pass

    def is_property_overridable_library(self):
        '''
        '''
        pass

    def is_property_readonly(self):
        '''
        '''
        pass

    def is_property_set(self):
        '''
        '''
        pass

    def items(self):
        '''
        '''
        pass

    def keyframe_delete(self):
        '''
        '''
        pass

    def keyframe_insert(self):
        '''
        '''
        pass

    def keys(self):
        '''
        '''
        pass

    def path_from_id(self):
        '''
        '''
        pass

    def path_resolve(self):
        '''
        '''
        pass

    def poll(self, context):
        '''
        '''
        pass

    def pop(self):
        '''
        '''
        pass

    def prepend(self, draw_func):
        '''
        '''
        pass

    def property_overridable_library_set(self):
        '''
        '''
        pass

    def property_unset(self):
        '''
        '''
        pass

    def remove(self, draw_func):
        '''
        '''
        pass

    def type_recast(self):
        '''
        '''
        pass

    def values(self):
        '''
        '''
        pass
class PHYSICS_PT_softbody_edge(PhysicButtonsPanel, bpy_types.Panel,
                               bpy_types._GenericUI):
    """Auto-generated stub of the ``PHYSICS_PT_softbody_edge`` sub-panel.

    Every attribute is a ``None`` placeholder and every method body is
    ``pass``: this stub only documents the API surface inherited from
    ``bpy_types.Panel`` / ``bpy_types._GenericUI`` (including the extra
    ``draw_header`` hook); the real implementation lives in Blender's runtime.
    """
    # --- class-level placeholders (populated by Blender's RNA at runtime — TODO confirm) ---
    COMPAT_ENGINES = None
    ''' '''
    bl_context = None
    ''' '''
    bl_label = None
    ''' '''
    bl_options = None
    ''' '''
    bl_parent_id = None
    ''' '''
    bl_region_type = None
    ''' '''
    bl_rna = None
    ''' '''
    bl_space_type = None
    ''' '''
    id_data = None
    ''' '''

    # --- stub methods mirroring the bpy_types.Panel / _GenericUI API ---
    def append(self, draw_func):
        '''
        '''
        pass

    def as_pointer(self):
        '''
        '''
        pass

    def bl_rna_get_subclass(self):
        '''
        '''
        pass

    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass

    def draw(self, context):
        '''
        '''
        pass

    def draw_header(self, context):
        '''
        '''
        pass

    def driver_add(self):
        '''
        '''
        pass

    def driver_remove(self):
        '''
        '''
        pass

    def get(self):
        '''
        '''
        pass

    def is_extended(self):
        '''
        '''
        pass

    def is_property_hidden(self):
        '''
        '''
        pass

    def is_property_overridable_library(self):
        '''
        '''
        pass

    def is_property_readonly(self):
        '''
        '''
        pass

    def is_property_set(self):
        '''
        '''
        pass

    def items(self):
        '''
        '''
        pass

    def keyframe_delete(self):
        '''
        '''
        pass

    def keyframe_insert(self):
        '''
        '''
        pass

    def keys(self):
        '''
        '''
        pass

    def path_from_id(self):
        '''
        '''
        pass

    def path_resolve(self):
        '''
        '''
        pass

    def poll(self, context):
        '''
        '''
        pass

    def pop(self):
        '''
        '''
        pass

    def prepend(self, draw_func):
        '''
        '''
        pass

    def property_overridable_library_set(self):
        '''
        '''
        pass

    def property_unset(self):
        '''
        '''
        pass

    def remove(self, draw_func):
        '''
        '''
        pass

    def type_recast(self):
        '''
        '''
        pass

    def values(self):
        '''
        '''
        pass
class PHYSICS_PT_softbody_edge_aerodynamics(
        PhysicButtonsPanel, bpy_types.Panel, bpy_types._GenericUI):
    """Auto-generated stub of the ``PHYSICS_PT_softbody_edge_aerodynamics`` sub-panel.

    Every attribute is a ``None`` placeholder and every method body is
    ``pass``: this stub only documents the API surface inherited from
    ``bpy_types.Panel`` / ``bpy_types._GenericUI``; the real implementation
    lives in Blender's runtime.
    """
    # --- class-level placeholders (populated by Blender's RNA at runtime — TODO confirm) ---
    COMPAT_ENGINES = None
    ''' '''
    bl_context = None
    ''' '''
    bl_label = None
    ''' '''
    bl_options = None
    ''' '''
    bl_parent_id = None
    ''' '''
    bl_region_type = None
    ''' '''
    bl_rna = None
    ''' '''
    bl_space_type = None
    ''' '''
    id_data = None
    ''' '''

    # --- stub methods mirroring the bpy_types.Panel / _GenericUI API ---
    def append(self, draw_func):
        '''
        '''
        pass

    def as_pointer(self):
        '''
        '''
        pass

    def bl_rna_get_subclass(self):
        '''
        '''
        pass

    def bl_rna_get_subclass_py(self):
        '''
        '''
        pass

    def draw(self, context):
        '''
        '''
        pass

    def driver_add(self):
        '''
        '''
        pass

    def driver_remove(self):
        '''
        '''
        pass

    def get(self):
        '''
        '''
        pass

    def is_extended(self):
        '''
        '''
        pass

    def is_property_hidden(self):
        '''
        '''
        pass

    def is_property_overridable_library(self):
        '''
        '''
        pass

    def is_property_readonly(self):
        '''
        '''
        pass

    def is_property_set(self):
        '''
        '''
        pass

    def items(self):
        '''
        '''
        pass

    def keyframe_delete(self):
        '''
        '''
        pass

    def keyframe_insert(self):
        '''
        '''
        pass

    def keys(self):
        '''
        '''
        pass

    def path_from_id(self):
        '''
        '''
        pass

    def path_resolve(self):
        '''
        '''
        pass

    def poll(self, context):
        '''
        '''
        pass

    def pop(self):
        '''
        '''
        pass

    def prepend(self, draw_func):
        '''
        '''
        pass

    def property_overridable_library_set(self):
        '''
        '''
        pass

    def property_unset(self):
        '''
        '''
        pass

    def remove(self, draw_func):
        '''
        '''
        pass

    def type_recast(self):
        '''
        '''
        pass

    def values(self):
        '''
        '''
        pass
class PHYSICS_PT_softbody_edge_stiffness(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, | |
definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource id.
:vartype id: str
:ivar name: Gets the resource name.
:vartype name: str
:ivar type: Gets the resource type.
:vartype type: str
:param location: The resource location.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param properties: The api resource properties.
:type properties: ~azure.mgmt.logic.models.ApiResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'ApiResourceProperties'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
properties: Optional["ApiResourceProperties"] = None,
**kwargs
):
super(ManagedApi, self).__init__(location=location, tags=tags, **kwargs)
self.properties = properties
class ManagedApiListResult(msrest.serialization.Model):
    """A page of managed APIs.

    :param value: The managed APIs.
    :type value: list[~azure.mgmt.logic.models.ManagedApi]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    # msrest wire mapping: python attribute -> JSON key and serialized type.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[ManagedApi]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["ManagedApi"]] = None,
                 next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class NetworkConfiguration(msrest.serialization.Model):
    """Virtual-network configuration settings.

    :param virtual_network_address_space: Gets the virtual network address space.
    :type virtual_network_address_space: str
    :param access_endpoint: The access endpoint.
    :type access_endpoint: ~azure.mgmt.logic.models.IntegrationServiceEnvironmentAccessEndpoint
    :param subnets: The subnets.
    :type subnets: list[~azure.mgmt.logic.models.ResourceReference]
    """

    # msrest wire mapping: python attribute -> JSON key and serialized type.
    _attribute_map = {
        'virtual_network_address_space': {'key': 'virtualNetworkAddressSpace', 'type': 'str'},
        'access_endpoint': {'key': 'accessEndpoint', 'type': 'IntegrationServiceEnvironmentAccessEndpoint'},
        'subnets': {'key': 'subnets', 'type': '[ResourceReference]'},
    }

    def __init__(self, *, virtual_network_address_space: Optional[str] = None,
                 access_endpoint: Optional["IntegrationServiceEnvironmentAccessEndpoint"] = None,
                 subnets: Optional[List["ResourceReference"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.virtual_network_address_space = virtual_network_address_space
        self.access_endpoint = access_endpoint
        self.subnets = subnets
class OpenAuthenticationAccessPolicies(msrest.serialization.Model):
    """Authentication policy container of type Open.

    :param policies: Open authentication policies.
    :type policies: dict[str, ~azure.mgmt.logic.models.OpenAuthenticationAccessPolicy]
    """

    # msrest wire mapping: python attribute -> JSON key and serialized type.
    _attribute_map = {
        'policies': {'key': 'policies', 'type': '{OpenAuthenticationAccessPolicy}'},
    }

    def __init__(self, *,
                 policies: Optional[Dict[str, "OpenAuthenticationAccessPolicy"]] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.policies = policies
class OpenAuthenticationAccessPolicy(msrest.serialization.Model):
    """A user-defined open-authentication access policy.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar type: Type of provider for OAuth. Default value: "AAD".
    :vartype type: str
    :param claims: The access policy claims.
    :type claims: list[~azure.mgmt.logic.models.OpenAuthenticationPolicyClaim]
    """

    # 'type' is a read-only constant on the wire; clients never send it.
    _validation = {
        'type': {'readonly': True, 'constant': True},
    }

    # msrest wire mapping: python attribute -> JSON key and serialized type.
    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'claims': {'key': 'claims', 'type': '[OpenAuthenticationPolicyClaim]'},
    }

    type = "AAD"

    def __init__(self, *,
                 claims: Optional[List["OpenAuthenticationPolicyClaim"]] = None,
                 **kwargs):
        super().__init__(**kwargs)
        # the instance value stays None; the class-level constant carries "AAD"
        self.type = None
        self.claims = claims
class OpenAuthenticationPolicyClaim(msrest.serialization.Model):
    """A single open-authentication policy claim (name/value pair).

    :param name: The name of the claim.
    :type name: str
    :param value: The value of the claim.
    :type value: str
    """

    # msrest wire mapping: python attribute -> JSON key and serialized type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, *, name: Optional[str] = None,
                 value: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.value = value
class Operation(msrest.serialization.Model):
    """Descriptor for one Logic REST API operation.

    :param origin: Operation: origin.
    :type origin: str
    :param name: Operation name: {provider}/{resource}/{operation}.
    :type name: str
    :param display: The object that represents the operation.
    :type display: ~azure.mgmt.logic.models.OperationDisplay
    :param properties: The properties.
    :type properties: object
    """

    # msrest wire mapping: python attribute -> JSON key and serialized type.
    _attribute_map = {
        'origin': {'key': 'origin', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
        'properties': {'key': 'properties', 'type': 'object'},
    }

    def __init__(self, *, origin: Optional[str] = None,
                 name: Optional[str] = None,
                 display: Optional["OperationDisplay"] = None,
                 properties: Optional[object] = None, **kwargs):
        super().__init__(**kwargs)
        self.origin = origin
        self.name = name
        self.display = display
        self.properties = properties
class OperationDisplay(msrest.serialization.Model):
    """Human-readable metadata for an operation.

    :param provider: Service provider: Microsoft.Logic.
    :type provider: str
    :param resource: Resource on which the operation is performed: Profile, endpoint, etc.
    :type resource: str
    :param operation: Operation type: Read, write, delete, etc.
    :type operation: str
    :param description: Operation: description.
    :type description: str
    """

    # msrest wire mapping: python attribute -> JSON key and serialized type.
    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, *, provider: Optional[str] = None,
                 resource: Optional[str] = None,
                 operation: Optional[str] = None,
                 description: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.provider = provider
        self.resource = resource
        self.operation = operation
        self.description = description
class OperationListResult(msrest.serialization.Model):
    """One page of the Logic operations list, with a link to the next page.

    :param value: List of Logic operations supported by the Logic resource provider.
    :type value: list[~azure.mgmt.logic.models.Operation]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    # msrest wire mapping: python attribute -> JSON key and serialized type.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, *, value: Optional[List["Operation"]] = None,
                 next_link: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = next_link
class OperationResultProperties(msrest.serialization.Model):
    """Common properties of a run operation result.

    :param start_time: The start time of the workflow scope repetition.
    :type start_time: ~datetime.datetime
    :param end_time: The end time of the workflow scope repetition.
    :type end_time: ~datetime.datetime
    :param correlation: The correlation properties.
    :type correlation: ~azure.mgmt.logic.models.RunActionCorrelation
    :param status: The status of the workflow scope repetition. Possible values include:
     "NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended",
     "Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", "Ignored".
    :type status: str or ~azure.mgmt.logic.models.WorkflowStatus
    :param code: The workflow scope repetition code.
    :type code: str
    :param error: Any object.
    :type error: object
    """

    # msrest wire mapping: python attribute -> JSON key and serialized type.
    _attribute_map = {
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'correlation': {'key': 'correlation', 'type': 'RunActionCorrelation'},
        'status': {'key': 'status', 'type': 'str'},
        'code': {'key': 'code', 'type': 'str'},
        'error': {'key': 'error', 'type': 'object'},
    }

    def __init__(self, *, start_time: Optional[datetime.datetime] = None,
                 end_time: Optional[datetime.datetime] = None,
                 correlation: Optional["RunActionCorrelation"] = None,
                 status: Optional[Union[str, "WorkflowStatus"]] = None,
                 code: Optional[str] = None,
                 error: Optional[object] = None, **kwargs):
        super().__init__(**kwargs)
        self.start_time = start_time
        self.end_time = end_time
        self.correlation = correlation
        self.status = status
        self.code = code
        self.error = error
class OperationResult(OperationResultProperties):
"""The operation result definition.
Variables are only populated by the server, and will be ignored when sending a request.
:param start_time: The start time of the workflow scope repetition.
:type start_time: ~datetime.datetime
:param end_time: The end time of the workflow scope repetition.
:type end_time: ~datetime.datetime
:param correlation: The correlation properties.
:type correlation: ~azure.mgmt.logic.models.RunActionCorrelation
:param status: The status of the workflow scope repetition. Possible values include:
"NotSpecified", "Paused", "Running", "Waiting", "Succeeded", "Skipped", "Suspended",
"Cancelled", "Failed", "Faulted", "TimedOut", "Aborted", "Ignored".
:type status: str or ~azure.mgmt.logic.models.WorkflowStatus
:param code: The workflow scope repetition code.
:type code: str
:param error: Any object.
:type error: object
:ivar tracking_id: Gets the tracking id.
:vartype tracking_id: str
:ivar inputs: Gets the inputs.
:vartype inputs: object
:ivar inputs_link: Gets the link to inputs.
:vartype inputs_link: ~azure.mgmt.logic.models.ContentLink
:ivar outputs: Gets the outputs.
:vartype outputs: object
:ivar outputs_link: Gets the link to outputs.
:vartype outputs_link: ~azure.mgmt.logic.models.ContentLink
:ivar tracked_properties: Gets the tracked properties.
:vartype tracked_properties: object
:param retry_history: Gets the retry histories.
:type retry_history: list[~azure.mgmt.logic.models.RetryHistory]
:param iteration_count:
:type iteration_count: int
"""
_validation = {
'tracking_id': {'readonly': True},
'inputs': {'readonly': True},
'inputs_link': {'readonly': True},
'outputs': {'readonly': True},
'outputs_link': {'readonly': True},
'tracked_properties': {'readonly': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'correlation': {'key': 'correlation', 'type': 'RunActionCorrelation'},
'status': {'key': 'status', 'type': 'str'},
'code': {'key': 'code', 'type': 'str'},
'error': {'key': 'error', 'type': 'object'},
'tracking_id': {'key': 'trackingId', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': 'object'},
'inputs_link': {'key': 'inputsLink', 'type': 'ContentLink'},
'outputs': {'key': 'outputs', 'type': 'object'},
'outputs_link': {'key': 'outputsLink', 'type': 'ContentLink'},
'tracked_properties': {'key': 'trackedProperties', 'type': 'object'},
'retry_history': {'key': 'retryHistory', 'type': '[RetryHistory]'},
'iteration_count': {'key': 'iterationCount', 'type': 'int'},
}
def __init__(
self,
*,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
correlation: Optional["RunActionCorrelation"] = None,
status: Optional[Union[str, "WorkflowStatus"]] = None,
code: Optional[str] = None,
error: Optional[object] = None,
retry_history: Optional[List["RetryHistory"]] = None,
iteration_count: Optional[int] = None,
**kwargs
):
super(OperationResult, self).__init__(start_time=start_time, end_time=end_time, correlation=correlation, status=status, code=code, error=error, **kwargs)
self.tracking_id = None
self.inputs = None
self.inputs_link | |
<reponame>sail-sg/mugs
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
models and functions for building student and teacher networks for multi-granular losses.
"""
import torch
import torch.nn as nn
import src.vision_transformer as vits
from src.vision_transformer import trunc_normal_
class Instance_Superivsion_Head(nn.Module):
"""
a class to implement Instance Superivsion Head
--in_dim: input dimension of projection head
--hidden_dim: hidden dimension of projection head
--out_dim: ouput dimension of projection and prediction heads
--pred_hidden_dim: hidden dimension of prediction head
--nlayers: layer number of projection head. prediction head has nlayers-1 layer
--proj_bn: whether we use batch normalization in projection head
--pred_bn: whether we use batch normalization in prediction head
--norm_before_pred: whether we use normalization before prediction head
"""
def __init__(
self,
in_dim,
hidden_dim=2048,
out_dim=256,
pred_hidden_dim=4096,
nlayers=3,
proj_bn=False,
pred_bn=False,
norm_before_pred=True,
):
super().__init__()
nlayers = max(nlayers, 1)
self.norm_before_pred = norm_before_pred
self.projector = self._build_mlp(
nlayers, in_dim, hidden_dim, out_dim, use_bn=proj_bn
)
self.apply(self._init_weights)
self.predictor = None
if pred_hidden_dim > 0: # teacher no, student yes
self.predictor = self._build_mlp(
nlayers - 1, out_dim, pred_hidden_dim, out_dim, use_bn=pred_bn
)
def _init_weights(self, m):
"""
initilize the parameters in network
"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def _build_mlp(self, num_layers, input_dim, hidden_dim, output_dim, use_bn=False):
"""
build a mlp
"""
mlp = []
for layer in range(num_layers):
dim1 = input_dim if layer == 0 else hidden_dim
dim2 = output_dim if layer == num_layers - 1 else hidden_dim
mlp.append(nn.Linear(dim1, dim2, bias=False))
if layer < num_layers - 1:
if use_bn:
mlp.append(nn.BatchNorm1d(dim2))
mlp.append(nn.GELU())
return nn.Sequential(*mlp)
def forward(self, x, return_target=False):
"""
forward the input through projection head for teacher and
projection/prediction heads for student
"""
feat = self.projector(x)
if return_target:
feat = nn.functional.normalize(feat, dim=-1, p=2)
return feat
## return prediction
if self.norm_before_pred:
feat = nn.functional.normalize(feat, dim=-1, p=2)
pred = self.predictor(feat)
pred = nn.functional.normalize(pred, dim=-1, p=2)
return pred
class Local_Group_Superivsion_Head(nn.Module):
    """Local-group supervision head (structurally identical to the instance
    supervision head): an MLP projector plus, for students, an MLP predictor
    whose output is L2-normalized.

    Args:
        in_dim: input dimension of the projection head.
        hidden_dim: hidden dimension of the projection head.
        out_dim: output dimension of the projection and prediction heads.
        pred_hidden_dim: hidden dimension of the prediction head; a value
            <= 0 disables the predictor (teacher mode).
        nlayers: number of projection layers; the predictor has nlayers - 1.
        proj_bn: use batch normalization in the projection head.
        pred_bn: use batch normalization in the prediction head.
        norm_before_pred: L2-normalize features before the prediction head.
    """
    def __init__(
        self,
        in_dim,
        hidden_dim=2048,
        out_dim=256,
        pred_hidden_dim=4096,
        nlayers=3,
        proj_bn=False,
        pred_bn=False,
        norm_before_pred=True,
    ):
        super().__init__()
        nlayers = max(nlayers, 1)
        self.norm_before_pred = norm_before_pred
        self.projector = self._build_mlp(nlayers, in_dim, hidden_dim, out_dim,
                                         use_bn=proj_bn)
        self.apply(self._init_weights)
        self.predictor = None
        if pred_hidden_dim > 0:  # teacher: no predictor; student: predictor
            self.predictor = self._build_mlp(nlayers - 1, out_dim,
                                             pred_hidden_dim, out_dim,
                                             use_bn=pred_bn)
    def _init_weights(self, m):
        """Initialize Linear layers: truncated-normal weights, zero biases."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
    def _build_mlp(self, num_layers, input_dim, hidden_dim, output_dim, use_bn=False):
        """Stack `num_layers` bias-free Linear layers; every non-final layer
        is followed by optional BatchNorm1d and a GELU activation."""
        last = num_layers - 1
        layers = []
        for idx in range(num_layers):
            src = input_dim if idx == 0 else hidden_dim
            dst = output_dim if idx == last else hidden_dim
            layers.append(nn.Linear(src, dst, bias=False))
            if idx != last:
                if use_bn:
                    layers.append(nn.BatchNorm1d(dst))
                layers.append(nn.GELU())
        return nn.Sequential(*layers)
    def forward(self, x, return_target=False):
        """Teachers (return_target=True) receive the L2-normalized projection;
        students receive the L2-normalized prediction of the projection."""
        proj = self.projector(x)
        if return_target:
            return nn.functional.normalize(proj, dim=-1, p=2)
        if self.norm_before_pred:
            proj = nn.functional.normalize(proj, dim=-1, p=2)
        return nn.functional.normalize(self.predictor(proj), dim=-1, p=2)
class Group_Superivsion_Head(nn.Module):
    """Group supervision head: an MLP projector whose L2-normalized output is
    fed to a weight-normalized, bias-free linear prototype layer.

    Args:
        in_dim: input dimension of the projection head.
        out_dim: output dimension of the final (prototype) layer.
        hidden_dim: hidden dimension of the projection head.
        bottleneck_dim: output dimension of the projection head.
        nlayers: number of layers in the projection head.
        use_bn: use batch normalization in the projection head.
        norm_last_layer: freeze the weight-norm magnitude of the last
            layer at 1 (makes the prototypes unit-scale).
    """
    def __init__(
        self,
        in_dim,
        out_dim,
        hidden_dim=2048,
        bottleneck_dim=256,
        nlayers=3,
        use_bn=False,
        norm_last_layer=True,
    ):
        super().__init__()
        nlayers = max(nlayers, 1)
        self.projector = self._build_mlp(nlayers, in_dim, hidden_dim,
                                         bottleneck_dim, use_bn=use_bn)
        self.apply(self._init_weights)
        self.last_layer = nn.utils.weight_norm(
            nn.Linear(bottleneck_dim, out_dim, bias=False)
        )
        self.last_layer.weight_g.data.fill_(1)
        if norm_last_layer:
            # keep the magnitude fixed; only the direction is learned
            self.last_layer.weight_g.requires_grad = False
    def _build_mlp(self, num_layers, in_dim, hidden_dim, output_dim, use_bn=False):
        """Build the projection MLP (Linear [+ BN] + GELU blocks).

        A single-layer head is returned as a bare Linear (not Sequential),
        matching how it is consumed elsewhere.
        """
        if num_layers == 1:
            return nn.Linear(in_dim, output_dim)
        layers = [nn.Linear(in_dim, hidden_dim)]
        if use_bn:
            layers.append(nn.BatchNorm1d(hidden_dim))
        layers.append(nn.GELU())
        for _ in range(num_layers - 2):
            layers.append(nn.Linear(hidden_dim, hidden_dim))
            if use_bn:
                layers.append(nn.BatchNorm1d(hidden_dim))
            layers.append(nn.GELU())
        layers.append(nn.Linear(hidden_dim, output_dim))
        return nn.Sequential(*layers)
    def _init_weights(self, m):
        """Initialize Linear layers: truncated-normal weights, zero biases."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        """Project, L2-normalize, then apply the prototype layer."""
        z = self.projector(x)
        z = nn.functional.normalize(z, dim=-1, p=2)
        return self.last_layer(z)
class Block_mem(nn.Module):
    """Memory block for local-group supervision: a circular FIFO queue of
    feature vectors from which the top-n nearest neighbors (as key/value
    pairs) of a query are retrieved.

    Args:
        dim: dimension of the feature vectors held in the memory.
        K: memory (queue) size.
        top_n: number of neighbors returned per query.
    """
    def __init__(self, dim, K=2048, top_n=10):
        super().__init__()
        self.dim = dim
        self.K = K
        self.top_n = top_n
        # create the queue; q/k/v buffers are kept in sync by the enqueue op
        self.register_buffer("queue_q", torch.randn(K, dim))
        self.register_buffer("queue_k", torch.randn(K, dim))
        self.register_buffer("queue_v", torch.randn(K, dim))
        # current write position in the circular queue
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
    @torch.no_grad()
    def _dequeue_and_enqueue(self, query, weak_aug_flags):
        """Push `query` rows (gathered across processes) into the queue.

        If `weak_aug_flags` is given, only rows flagged as weakly augmented
        are enqueued. Returns the number of weakly-augmented rows found.
        """
        len_weak = 0
        query = concat_all_gather(query)
        if weak_aug_flags is not None:
            weak_aug_flags = weak_aug_flags.cuda()
            weak_aug_flags = concat_all_gather(weak_aug_flags)
            idx_weak = torch.nonzero(weak_aug_flags)
            len_weak = len(idx_weak)
            if len_weak > 0:
                idx_weak = idx_weak.squeeze(-1)
                query = query[idx_weak]
            else:
                return len_weak
        all_size = query.shape[0]
        ptr = int(self.queue_ptr)
        overflow = ptr + all_size - self.K
        if overflow <= 0:
            # the batch fits without wrapping around the end of the queue
            self.queue_q[ptr : ptr + all_size, :] = query
            self.queue_k[ptr : ptr + all_size, :] = query
            self.queue_v[ptr : ptr + all_size, :] = query
            # BUGFIX: advance the pointer by all_size exactly once; the old
            # code added all_size twice, skipping slots and leaving stale
            # (random) entries interleaved in the queue.
            self.queue_ptr[0] = (ptr + all_size) % self.K
        else:
            # wrap: fill the tail of the queue, then restart at the front
            head = self.K - ptr
            self.queue_q[ptr : self.K, :] = query[0:head, :]
            self.queue_k[ptr : self.K, :] = query[0:head, :]
            self.queue_v[ptr : self.K, :] = query[0:head, :]
            self.queue_q[0:overflow, :] = query[head:, :]
            self.queue_k[0:overflow, :] = query[head:, :]
            self.queue_v[0:overflow, :] = query[head:, :]
            self.queue_ptr[0] = overflow
        return len_weak
    @torch.no_grad()
    def _get_similarity_index(self, x):
        """Return indices of the top-n most cosine-similar memory entries."""
        x = nn.functional.normalize(x, dim=-1)
        queue_q = nn.functional.normalize(self.queue_q, dim=-1)
        cosine = x @ queue_q.T
        _, index = torch.topk(cosine, self.top_n, dim=-1)
        return index
    @torch.no_grad()
    def _get_similarity_samples(self, query, index=None):
        """Gather the top-n neighbor (key, value) pairs for `query`,
        returned with shape (B, top_n, dim) each."""
        if index is None:
            index = self._get_similarity_index(query)
        get_k = self.queue_k[index.view(-1)]
        get_v = self.queue_v[index.view(-1)]
        B, tn = index.shape
        get_k = get_k.view(B, tn, self.dim)
        get_v = get_v.view(B, tn, self.dim)
        return get_k, get_v
    def forward(self, query):
        """Return the top-n neighbor (key, value) pairs of `query`."""
        get_k, get_v = self._get_similarity_samples(query)
        return get_k, get_v
class vit_mem(nn.Module):
    """Memory wrapper for local-group supervision.

    Thin facade over :class:`Block_mem` that casts incoming queries to
    float32 before delegating.

    Args:
        dim: dimension of the feature vectors held in the memory.
        K: memory size.
        top_n: number of neighbors returned per query.
    """
    def __init__(self, dim, K=2048, top_n=10):
        super().__init__()
        self.block = Block_mem(dim, K, top_n)
    def _dequeue_and_enqueue(self, query, weak_aug_flags):
        """Update the memory queue; returns the number of weak-aug samples."""
        return self.block._dequeue_and_enqueue(query.float(), weak_aug_flags)
    def forward(self, query):
        """Return the top-n (key, value) neighbors of `query` from memory."""
        return self.block(query.float())
class Mugs_Wrapper(nn.Module):
"""
a class to implement a student or teacher wrapper | |
('+str(self.byhandz.get())+\
') could not be converted to float. Using -99.'
zbyhand = zbyhand+str(int(self.varsliderzqual.get()))
# v v v v v v v v v v v v v v v v v v v v 1st PA v v v v v v v v v v v v v v v v v v v v
resultstr = ' '+str("%.5d" % self.currentobj)+' '+str("%.3d" % self.PAs[0])
defaultstr = resultstr
for key in self.keys:
keyval = str(self.keys[key].get())
if keyval == '-1':
defaultstr = defaultstr+' '+str(keyval)
elif len(keyval) > 10: # for text keys
defaultstr = defaultstr+' '+keyval
else:
defaultstr = defaultstr+' '+str(0)
resultstr = resultstr+' '+str(keyval)
# by-hand redshift info
defaultstr = defaultstr+' -99 0'
resultstr = resultstr+' '+zbyhand
# Multiple redshift solutions?
defaultstr = defaultstr+' 0'
resultstr = resultstr+' '+self.mzsboxvar.get()
# adding info from comment and wave fields
defaultstr = defaultstr +' #C# \n'
resultstr = resultstr +' #C# '+self.comments.get()+' \n'
skipin = skip # storing original skip value
if (resultstr == defaultstr) & (self.skipempty == True): skip = True
if not skip:
if self.duplicates:
Ndup = self.removeoutputduplicate(self.currentobj,self.PAs[0])
self.fout.write(str(resultstr))
if resultstr == defaultstr: skip = skipin # restoring original skip value
# v v v v v v v v v v v v v v v v v v v v 2nd PA v v v v v v v v v v v v v v v v v v v v
if self.Npa == 2: # if the current object has files for two PAs add a second line
resultstr = ' '+str("%.5d" % self.currentobj)+' '+str("%.3d" % self.PAs[1])
defaultstr = resultstr
for key in self.keys2:
keyval = str(self.keys2[key].get())
if keyval == '-1':
defaultstr = defaultstr+' '+str(keyval)
elif len(keyval) > 10: # for text keys
defaultstr = defaultstr+' '+keyval
else:
defaultstr = defaultstr+' '+str(0)
resultstr = resultstr+' '+str(keyval)
# by-hand redshift info
defaultstr = defaultstr+' -99 0'
resultstr = resultstr +' '+zbyhand
# Multiple redshift solutions?
defaultstr = defaultstr+' 0'
resultstr = resultstr+' '+self.mzsboxvar.get()
# adding info from comment and wave fields
defaultstr = defaultstr+' #C# \n'
resultstr = resultstr +' #C# '+self.comments2.get()+' \n'
if (resultstr == defaultstr) & (self.skipempty == True): skip = True
if not skip:
if self.duplicates:
Ndup = self.removeoutputduplicate(self.currentobj,self.PAs[1])
self.fout.write(str(resultstr))
if resultstr == defaultstr: skip = skipin # restoring original skip value
# --- close and re-open output file so inspection is saved ---
self.fout.close()
self.fout = open(self.outfile,'a')
# --- resetting widgets and closing windows ---
self.comments.delete(0,END) # reset comment field
self.comments2.delete(0,END) # reset comment field
self.byhandz.delete(0,END)
cluster, cluster_z = vi.getclusterz(self.file)
self.varsliderz.set(cluster_z) # set intial value of slider
self.varslidersmooth.set(0) # set intial value of slider
self.varsliderzqual.set(0) # set intial value of slider
self.checkboxes(self.cbpos) # reset check boxes
self.checkboxes2(self.cbpos2) # reset check boxes
self.modelbox.deselect()
self.GiGlinesbox.deselect()
self.mzsbox.deselect()
self.closewindows()
self.ds9open = False # resetting ds9 indicator
self.focus_set() # set focus to main window
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def removeoutputduplicate(self,id,pa):
"""
Subtract continuum from science fram
"""
self.fout.close()
idstr = str("%.5d" % id)
pastr = str("%.3d" % pa)
stringstart = ' '+idstr+' '+pastr
file = open(self.outfile,'r')
lines = file.readlines()
file.close()
file = open(self.outfile,"w")
Ndup = 0
for line in lines:
if line[0:10] != stringstart:
file.write(line)
else:
if self.vb: print ' - Found dublicate entry for ID '+idstr+' PA '+pastr+' deleting it!'
Ndup = Ndup+1
file.close()
self.fout = open(self.outfile,'a')
return Ndup
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def closewindows(self):
        """
        Close PNG and DS9 windows

        Sends signal 1 to the stored PNG-viewer process ID, kills the DS9
        window when it is safe to do so, and removes temporary ds9 region
        files. NOTE(review): signal 1 is HUP on POSIX systems — confirm the
        viewers exit cleanly on it.
        """
        killsignal = 1 # see bash> man kill
        PIDkill = True
        if PIDkill:
            try:
                os.kill(self.pngPID,killsignal) # close PNG window for currentobj
            except:
                print ' WARNING error occurred while trying to close PNG window(s)'
            # Kill DS9 only when it was opened without XPA, or when quitting
            # an XPA-managed session whose window is still open.
            if np.logical_or(((self.ds9open == True) & (self.xpa == False)),
                            ((self.xpa == True) & (self.quitting == True) & (self.ds9windowopen == True))):
                try:
                    os.kill(self.ds9PID,killsignal) # close DS9 window for currentobj
                except:
                    if self.vb: print ' - WARNING: Could not kill DS9 process id ',self.ds9PID
                rmout = commands.getoutput('rm '+self.regiontemp.replace('.reg','*.reg')) # removing ds9 region file
            else:
                print '=== WHAT ARE YOU DOING HERE?? ==='
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def skip_but(self,position):
self.skip = Button(self)
self.skip["text"] = "Skip object"
self.skip["command"] = self.skip_but_cmd
self.skip.grid(row=position[0],column=position[1],columnspan=position[2],sticky=W)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def skip_but_cmd(self):
        """
        Command for skip button

        Stores a default (empty) result for the current object, then either
        quits (if this was the last object) or advances the GUI to the next
        object in the list.
        """
        self.reset(skip=True)
        if self.currentobj == self.objlist[-1]:
            # last object: flag shutdown and hand off to the quit handler
            if self.vb: print ' - Object',self.currentobj,' was the last in the list.\n   Quitting GUI.'
            self.quitting = True
            self.quit_but_cmd()
        else:
            # advance to the entry following the current object
            newent = np.where(self.objlist == self.currentobj)[0]+1
            self.currentobj = self.objlist[newent][0]
            self.openpngs()
            self.labelvar.set(self.infostring())
            self.updateimage()
            if self.Npa != 2: self.checkboxes2(self.cbpos2,disable=True) # disable checkboxes2 if Npa not 2
            # load new data for plot and replot
            self.dataPlot_loaddata()
            self.dataPlot_plot(refresh=True,newobj=True)
            self.DPxlow_full, self.DPxhigh_full, self.DPylow_full, self.DPyhigh_full = \
                self.dataPlot_getwindowinfo() # store full window
            if self.fitsauto: # loading fits files automatically
                if self.xpa:
                    self.openfits_but_cmd_xpa()
                else:
                    self.openfits_but_cmd()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def next_but(self,position):
self.next = Button(self)
self.next["text"] = "(8) Next object (save)"
self.next["command"] = self.next_but_cmd
self.next.grid(row=position[0],column=position[1],columnspan=position[2],sticky=W)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def next_but_cmd(self):
        """
        Command for next button

        Saves the current inspection (via reset), then either quits (if this
        was the last object) or advances the GUI to the next object.
        """
        self.reset()
        if self.currentobj == self.objlist[-1]:
            # last object: flag shutdown and hand off to the quit handler
            if self.vb: print ' - Object',self.currentobj,' was the last in the list.\n   Quitting GUI.'
            self.quitting = True
            self.quit_but_cmd()
        else:
            # advance to the entry following the current object
            newent = np.where(self.objlist == self.currentobj)[0]+1
            self.currentobj = self.objlist[newent][0]
            self.openpngs()
            self.labelvar.set(self.infostring())
            self.updateimage()
            if self.Npa != 2: self.checkboxes2(self.cbpos2,disable=True) # disable checkboxes2 if Npa not 2
            # load new data for plot and replot
            self.dataPlot_loaddata()
            self.dataPlot_plot(refresh=True,newobj=True)
            self.DPxlow_full, self.DPxhigh_full, self.DPylow_full, self.DPyhigh_full = \
                self.dataPlot_getwindowinfo() # store full window
            if self.fitsauto: # loading fits files automatically
                if self.xpa:
                    self.openfits_but_cmd_xpa()
                else:
                    self.openfits_but_cmd()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def prev_but(self,position):
self.prev= Button(self)
self.prev["text"] = "(7) Previous object"
self.prev["command"] = self.prev_but_cmd
self.prev.grid(row=position[0],column=position[1],columnspan=position[2],sticky=W)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def prev_but_cmd(self):
"""
Command for previous button
"""
self.reset()
if self.currentobj == self.objlist[0]:
if self.vb: print ' | |
# Source repository: leeomara/webrecorder
import os
import re
import base64
import hashlib
import json
import redis
import requests
from datetime import datetime
from collections import OrderedDict
from os.path import expandvars
from getpass import getpass
from string import ascii_lowercase as alpha
from bottle import template, request
#from cork import AAAException
from webrecorder.webreccork import ValidationException, AuthException
from webrecorder.models.base import BaseAccess, DupeNameException
from webrecorder.models.user import User, UserTable
from webrecorder.utils import load_wr_config, sanitize_title, get_bool
from webrecorder.webreccork import WebRecCork
from webrecorder.redisutils import RedisTable
# ============================================================================
class UserManager(object):
USER_RX = re.compile(r'^[A-Za-z0-9][\w-]{2,30}$')
RESTRICTED_NAMES = ['login', 'logout', 'user', 'admin', 'manager', 'coll', 'collection',
'guest', 'settings', 'profile', 'api', 'anon', 'webrecorder',
'anonymous', 'register', 'join', 'download', 'live', 'embed']
PASS_RX = re.compile(r'^(?=.*[\d\W])(?=.*[a-z])(?=.*[A-Z]).{8,}$')
LC_USERNAMES_KEY = 'h:lc_users'
    def __init__(self, redis, cork, config):
        """Wire up user management: redis store, cork auth backend, config.

        Mailing-list behavior is driven entirely by environment variables;
        size defaults are seeded into redis once (hsetnx is a no-op if set).
        """
        self.redis = redis
        self.cork = cork
        self.config = config
        self.default_coll = config['default_coll']
        self.temp_prefix = config['temp_prefix']
        # 3rd-party mailing-list integration, all configured via env vars
        mailing_list = os.environ.get('MAILING_LIST', '').lower()
        self.mailing_list = mailing_list in ('true', '1', 'yes')
        self.default_list_endpoint = os.environ.get('MAILING_LIST_ENDPOINT', '')
        self.list_key = os.environ.get('MAILING_LIST_KEY', '')
        self.list_removal_endpoint = os.path.expandvars(
            os.environ.get('MAILING_LIST_REMOVAL', ''))
        self.payload = os.environ.get('MAILING_LIST_PAYLOAD', '')
        self.remove_on_delete = (os.environ.get('REMOVE_ON_DELETE', '')
                                 in ('true', '1', 'yes'))
        # endpoint for the opt-in announce list; False disables it
        self.announce_list = os.environ.get('ANNOUNCE_MAILING_LIST_ENDPOINT', False)
        invites = expandvars(config.get('invites_enabled', 'true')).lower()
        self.invites_enabled = invites in ('true', '1', 'yes')
        # seed default size limits once; best-effort (e.g. redis unavailable)
        try:
            self.redis.hsetnx('h:defaults', 'max_size', int(config['default_max_size']))
            self.redis.hsetnx('h:defaults', 'max_anon_size', int(config['default_max_anon_size']))
        except Exception as e:
            print('WARNING: Unable to init defaults: ' + str(e))
        self.all_users = UserTable(self.redis, self._get_access)
        self.invites = RedisTable(self.redis, 'h:invites')
    def register_user(self, input_data, host):
        """Validate a registration request and create the (unconfirmed) account.

        Collects ALL validation problems into an ordered `msg` dict rather
        than failing fast; registration only proceeds when `msg` is empty.
        Returns (msg, redir_extra) where redir_extra re-attaches the invite
        code to the redirect URL on invite errors.
        """
        msg = OrderedDict()
        redir_extra = ''
        username = input_data.get('username', '')
        full_name = input_data.get('full_name', '')
        email = input_data.get('email', '')
        if 'username' not in input_data:
            msg['username'] = 'Missing Username'
        elif username.startswith(self.temp_prefix):
            # temp-user prefix is reserved for anonymous sessions
            msg['username'] = 'Sorry, this is not a valid username'
        if 'email' not in input_data:
            msg['email'] = 'Missing Email'
        if self.invites_enabled:
            try:
                val_email = self.is_valid_invite(input_data['invite'])
                if val_email != email:
                    raise ValidationException('Sorry, this invite can only be used with email: {0}'.format(val_email))
            except ValidationException as ve:
                msg['invite'] = str(ve)
            else:
                redir_extra = '?invite=' + input_data.get('invite', '')
        try:
            self.validate_user(username, email)
            self.validate_password(input_data['password'], input_data['confirmpassword'])
        except ValidationException as ve:
            msg['validation'] = str(ve)
        # optional migration of an anonymous temp collection into the new account
        try:
            move_info = self.get_move_temp_info(input_data)
        except ValidationException as ve:
            msg['move_info'] = str(ve)
        if msg:
            return msg, redir_extra
        try:
            desc = {'name': full_name}
            if move_info:
                desc['move_info'] = move_info
            desc = json.dumps(desc)
            self.cork.register(username, input_data['password'], email, role='archivist',
                               max_level=50,
                               subject='webrecorder.io Account Creation',
                               email_template='webrecorder/templates/emailconfirm.html',
                               description=desc,
                               host=host)
            # add to announce list if user opted in
            if input_data.get('announce_mailer') and self.announce_list:
                self.add_to_mailing_list(username, email, full_name,
                                         list_endpoint=self.announce_list)
            if self.invites_enabled:
                self.delete_invite(email)
            # extend session for upto 90 mins to store data to be migrated
            # to allow time for user to validate registration
            if move_info:
                self.get_session().save()
        except ValidationException as ve:
            msg['validation'] = str(ve)
        except Exception as ex:
            import traceback
            traceback.print_exc()
            msg['other_error'] = 'Registration failed: ' + str(ex)
        if not msg:
            msg['success'] = ('A confirmation e-mail has been sent to <b>{0}</b>. ' +
                              'Please check your e-mail to complete the registration!').format(username)
        return msg, redir_extra
def get_move_temp_info(self, input_data):
move_temp = input_data.get('moveTemp')
if not move_temp:
return None
to_coll_title = input_data.get('toColl', '')
to_coll = sanitize_title(to_coll_title)
if not to_coll:
raise ValidationException('invalid_coll_name')
if not self.access.session_user.is_anon():
raise ValidationException('invalid_user_import')
return {'from_user': self.access.session_user.name,
'to_coll': to_coll,
'to_title': to_coll_title,
}
def validate_registration(self, reg_code, cookie, username):
cookie_validate = 'valreg=' + reg_code
if cookie_validate not in cookie:
return {'error': 'invalid_code'}
try:
user, first_coll = self.create_user_from_reg(reg_code, username)
return {'registered': user.name,
'first_coll_name': first_coll.name}
except ValidationException as ve:
return {'error': ve.msg}
except Exception as e:
import traceback
traceback.print_exc()
return {'error': 'invalid_code'}
def find_case_insensitive_username(self, username):
lower_username = username.lower()
new_username = self.redis.hget(self.LC_USERNAMES_KEY, lower_username)
if new_username == '-' or new_username == username or new_username is None:
return None
if new_username == '':
return lower_username
return new_username
    def login_user(self, input_data):
        """Authenticate users.

        Verifies credentials (retrying with the case-insensitive canonical
        username), optionally migrates an anonymous temp collection into the
        account, then logs the session in. Returns a dict with either an
        'error' key or 'success' plus the user and any new collection name.
        """
        username = input_data.get('username', '')
        password = input_data.get('password', '')
        # validate any requested temp-collection migration up front
        try:
            move_info = self.get_move_temp_info(input_data)
        except ValidationException as ve:
            return {'error': str(ve)}
        # first, authenticate the user
        # if failing, see if case-insensitive username and try that
        if not self.cork.is_authenticate(username, password):
            username = self.find_case_insensitive_username(username)
            if not username or not self.cork.is_authenticate(username, password):
                return {'error': 'invalid_login'}
        # if not enough space, don't continue with login
        if move_info:
            if not self.has_space_for_new_collection(username,
                                                     move_info['from_user'],
                                                     'temp'):
                #return {'error': 'Sorry, not enough space to import this Temporary Collection into your account.'}
                return {'error': 'out_of_space'}
        user = self.all_users[username]
        new_collection = None
        try:
            if move_info:
                new_collection = self.move_temp_coll(user, move_info)
        except DupeNameException as de:
            return {'error': 'duplicate_name'}
            #return {'error': 'Collection "{0}" already exists'.format(move_info['to_title'])}
        remember_me = get_bool(input_data.get('remember_me'))
        # login session and access system
        self.access.log_in(username, remember_me)
        user.update_last_login()
        return {'success': '1',
                'new_coll_name': new_collection.name if new_collection else None,
                'user': user}
def logout(self):
sesh = self.get_session()
sesh.delete()
return
def has_user_email(self, email):
#TODO: implement a email table, if needed?
for n, user_data in self.all_users.items():
if user_data['email_addr'] == email:
return True
return False
def get_user_email(self, user):
if not user:
return ''
try:
user_data = self.all_users[user]
except:
user_data = None
if user_data:
return user_data.get('email_addr', '')
else:
return ''
def is_username_available(self, username):
username_lc = username.lower()
# username matches of the restricted names
if username_lc in self.RESTRICTED_NAMES:
return False
# username doesn't match the allowed regex
if not self.USER_RX.match(username):
return False
# lowercase username already exists
if self.redis.hexists(self.LC_USERNAMES_KEY, username_lc):
return False
# username already exists! (shouldn't match if lowercase exists, but just in case)
if username in self.all_users:
return False
return True
def validate_user(self, user, email):
if not self.is_username_available(user):
raise ValidationException('username_not_available')
if self.has_user_email(email):
raise ValidationException('email_not_available')
return True
def validate_password(self, password, confirm):
if password != confirm:
raise ValidationException('password_mismatch')
if not self.PASS_RX.match(password):
raise ValidationException('password_invalid')
return True
    def _get_access(self):
        # Per-request access-control object placed on the bottle request
        # (key 'webrec.access') by upstream middleware
        return request['webrec.access']
    @property
    def access(self):
        """Per-request access-control object (shortcut for _get_access)."""
        return self._get_access()
def get_roles(self):
return [x for x in self.cork._store.roles]
def get_user_coll(self, username, coll_name):
try:
user = self.all_users[username]
except:
return None, None
collection = user.get_collection_by_name(coll_name)
return user, collection
def get_user_coll_rec(self, username, coll_name, rec):
user, collection = self.get_user_coll(username, coll_name)
if collection:
recording = collection.get_recording(rec)
else:
recording = None
return user, collection, recording
    def update_password(self, curr_password, password, confirm):
        """Change the session user's password.

        Verifies the current password first, then validates and applies the
        new one via cork. Raises ValidationException on failure.
        """
        username = self.access.session_user.name
        if not self.cork.verify_password(username, curr_password):
            raise ValidationException('invalid_password')
        self.validate_password(password, confirm)
        self.cork.update_password(username, password)
    def reset_password(self, password, confirm, resetcode):
        """Apply a password reset authorized by an e-mailed reset code.

        Raises ValidationException for a bad password or invalid code.
        """
        self.validate_password(password, confirm)
        try:
            self.cork.reset_password(resetcode, password)
        except AuthException:
            # translate the auth-layer failure into our validation error
            raise ValidationException('invalid_reset_code')
def is_valid_invite(self, invitekey):
try:
if not invitekey:
return False
key = base64.b64decode(invitekey.encode('utf-8')).decode('utf-8')
key.split(':', 1)
email, hash_ = key.split(':', 1)
entry = self.invites[email]
if entry and entry.get('hash_') == hash_:
return email
except Exception as e:
print(e)
pass
msg = 'Sorry, that is not a valid invite code. Please try again or request another invite'
raise ValidationException(msg)
def delete_invite(self, email):
try:
archive_invites = RedisTable(self.redis, 'h:arc_invites')
archive_invites[email] = self.invites[email]
except:
pass
del self.invites[email]
def save_invite(self, email, name, desc=''):
if not email or not name:
return False
self.invites[email] = {'name': name, 'email': email, 'reg_data': desc}
return True
    def send_invite(self, email, email_template, host):
        """Generate a one-time invite code for `email` and mail it out.

        Stores the random hash on the invite entry (checked later by
        is_valid_invite) and records the send timestamp. Returns False if
        no invite entry exists, True after the mail is sent.
        """
        entry = self.invites[email]
        if not entry:
            print('No Such Email In Invite List')
            return False
        # random per-invite secret; the invite key is base64('email:hash')
        hash_ = base64.b64encode(os.urandom(21)).decode('utf-8')
        entry['hash_'] = hash_
        full_hash = email + ':' + hash_
        invitekey = base64.b64encode(full_hash.encode('utf-8')).decode('utf-8')
        email_text = template(
            email_template,
            host=host,
            email_addr=email,
            name=entry.get('name', email),
            invite=invitekey,
        )
        self.cork.mailer.send_email(email, 'You are invited to join webrecorder.io beta!', email_text)
        entry['sent'] = str(datetime.utcnow())
        return True
def add_to_mailing_list(self, username, email, name, list_endpoint=None):
"""3rd party mailing list subscription"""
if not (list_endpoint or self.default_list_endpoint) or not self.list_key:
print('MAILING_LIST is turned on, but required fields are '
'missing.')
return
# if no endpoint provided, use default
if list_endpoint is None:
list_endpoint = self.default_list_endpoint
try:
res = requests.post(list_endpoint,
auth=('nop', self.list_key),
data=self.payload.format(
email=email,
name=name,
username=username),
timeout=1.5)
if res.status_code != 200:
print('Unexpected mailing list API response.. '
'status code: {0.status_code}\n'
'content: {0.content}'.format(res))
except Exception as e:
if e is requests.exceptions.Timeout:
print('Mailing list API timed out..')
else:
print('Adding to mailing list failed:', e)
def remove_from_mailing_list(self, email):
"""3rd party mailing list removal"""
if not self.list_removal_endpoint or not self.list_key:
# fail silently, log info
print('REMOVE_ON_DELETE is turned on, but required '
'fields are missing.')
return
try:
email = email.encode('utf-8').lower()
email_hash = hashlib.md5(email).hexdigest()
res = requests.delete(self.list_removal_endpoint.format(email_hash),
auth=('nop', self.list_key),
timeout=1.5)
if res.status_code != 204:
print('Unexpected mailing list API response.. '
'status code: {0.status_code}\n'
'content: {0.content}'.format(res))
except Exception as e:
if e is requests.exceptions.Timeout:
print('Mailing list API timed out..')
else:
print('Removing from mailing list failed:', e)
    def get_session(self):
        # Session object placed on the WSGI environ by the session middleware
        return request.environ['webrec.session']
def create_new_user(self, username, init_info=None):
init_info = init_info or {}
user = self.all_users.make_user(username)
user.create_new()
# track lowercase username
lower_username = username.lower()
self.redis.hset(self.LC_USERNAMES_KEY, lower_username,
username if lower_username != username else '')
first_coll = None
move_info = init_info.get('move_info')
if move_info:
first_coll = self.move_temp_coll(user, move_info)
elif self.default_coll:
first_coll = user.create_collection(self.default_coll['id'],
title=self.default_coll['title'],
desc=self.default_coll['desc'].format(username),
public=False)
# email | |
"""GUI for object detection with tensorflow and distance calculation."""
__version__ = "1.0.0"
__author__ = "<NAME>"
__email__ = "tim.ros<EMAIL>z:stud.uni-frankfurt.de"
__credits__ = "Special thanks to The Anh Vuong who came up with the original idea."
# Description:
# This program uses a TensorFlow Lite model to perform object detection on a
# video. It draws boxes and scores around the objects of interest in each frame
# from the video and calculates the distance between each of these objects.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
# The code is also based off a raspberry pi tutorial for object detection:
# https://tutorials-raspberrypi.de/raspberry-pi-objekterkennung-mittels-tensorflow-und-kamera/
#
import os
import cv2
import tkinter as tk
import tkinter.messagebox as mb
from datetime import date
from aidem.tim_camera.DetectionModels.LiveDetectionModel import LiveDetection
from aidem.tim_camera.DetectionModels.VideoDetectionModel import VideoDetection
from aidem.tim_camera.DetectionModels.LiveFaceSwapModel import LiveFaceSwap
from aidem.tim_camera.DetectionModels.BasisModel import Functionality
class DetectionGUI(tk.Frame):
"""GUI class"""
    def __init__(self, master=None):
        """GUI for object detection with tensorflow and distance calculation.

        Builds the menubar and main widgets and initialises all calibration,
        option, and Tk textvariable state used by the other handlers.

        :param master: Tkinter root; a new Tk root is created when None
        """
        super().__init__(master)
        if master is None:
            self.master = tk.Tk()
        else:
            self.master = master
        self.master.title("Object detection")
        self.master.geometry("500x200")
        # Create menubar
        self.menubar = tk.Menu(self.master)
        main_menu = tk.Menu(self.menubar, tearoff=0)
        main_menu.add_command(label="Calibrate", command=lambda: self.start_calibrate(0))
        main_menu.add_command(label="Chess calibration", command=lambda: self.start_calibrate(1))
        main_menu.add_separator()
        main_menu.add_command(label="Options", command=self.options)
        main_menu.add_command(label="Help", command=self.help)
        main_menu.add_separator()
        # NOTE(review): binding Quit directly to __del__ is unusual — confirm
        # the destructor tears down the Tk loop cleanly
        main_menu.add_command(label="Quit", command=self.__del__)
        self.menubar.add_cascade(label="Menu", menu=main_menu)
        self.master.config(menu=self.menubar)
        self.create_widgets()
        # currently running detection model (LiveDetection/VideoDetection/...)
        self.detection_obj = None
        # Initialise calibration variables
        self.focal = 0
        self.cal_vals = [None, None, None]
        self.calib_chess = [0, 0]
        # Initialise option variables
        self.autosave = False
        self.debug_mode = False
        self.detect_select = []
        self.no_detect_select = []
        self.video_folder = ""
        # Declare various widgets in advance
        self.options_master = None
        self.calib_master = None
        self.calib_entry_name = None
        self.calib_entry_distance = None
        self.calib_entry_width = None
        # Initialise various textvariables for entry widgets
        self.calib_width_textvar = tk.StringVar(value="")
        self.calib_dist_textvar = tk.StringVar(value="")
        self.calib_name_textvar = tk.StringVar(value="")
        self.calib_cols_textvar = tk.StringVar(value="")
        self.calib_rows_textvar = tk.StringVar(value="")
        self.options_select_textvar = tk.StringVar(value="")
        self.options_no_select_textvar = tk.StringVar(value="")
        self.options_video_folder_textvar = tk.StringVar(value="")
        self.options_autosave_boolvar = tk.BooleanVar()
        self.options_debug_boolvar = tk.BooleanVar()
def functionality_selection(self):
# Selection of functionality (simple detection, objects counting, distance measuring) for live and video detection
funct = self.functionality_intvar.get()
if funct == 1:
self.functionality = Functionality.Detection
elif funct == 2:
self.functionality = Functionality.Counting
elif funct == 3:
self.functionality = Functionality.Distance
else:
raise ValueError("Invalid selection for functionality.")
if funct == 3:
self.live_info_label.grid(column=0, row=4)
self.distance_threshold_label.grid(column=1, row=2)
self.distance_threshold_entry.grid(column=2, row=2)
self.object_width_label.grid(column=1, row=3)
self.object_width_entry.grid(column=2, row=3)
self.objects_type_label.grid(column=1, row=4)
self.objects_type_entry.grid(column=2, row=4)
else:
self.live_info_label.grid_forget()
self.distance_threshold_label.grid_forget()
self.distance_threshold_entry.grid_forget()
self.object_width_label.grid_forget()
self.object_width_entry.grid_forget()
self.objects_type_label.grid_forget()
self.objects_type_entry.grid_forget()
def create_widgets(self):
    """
    Create the needed widgets (Buttons, Labels) at startup.

    Widgets are only constructed here; the mode-switching methods
    (live_detection, video_detection, face_swapping, ...) place and remove
    them with grid()/grid_forget().
    """
    # Main menu widgets
    self.live_detection_button = tk.Button(self.master, text="Start live detection", command=self.live_detection)
    self.live_detection_button.grid(column=0, row=0)
    self.video_detection_button = tk.Button(self.master, text="Start video detection", command=self.video_detection)
    self.video_detection_button.grid(column=1, row=0)
    self.face_swap_button = tk.Button(self.master, text="Start face detections", command=self.face_swapping)
    self.face_swap_button.grid(column=2, row=0)
    # Detection controlling buttons (gridded later by the mode handlers)
    self.stop_button = tk.Button(self.master, text="End detection", command=self.stop_detection)
    self.start_button = tk.Button(self.master, text="Start detection", command=self.start_detection)
    self.swap_button = tk.Button(self.master, text="Start face swapping", command=lambda: self.start_detection(1))
    self.home_button = tk.Button(self.master, text="Back to main menu", command=self.go_home)
    # Detection information labels
    self.detection_info_label = tk.Label(self.master, text="To stop the video detection press 'q', "
                                                           "to pause press 'w', to resume 's'.")
    self.live_info_label = tk.Label(self.master,
                                    text="Info: don't forget to calibrate \n"
                                         "the camera via the options menu.")
    # Widgets for video detection
    self.vid_info_label = tk.Label(self.master, text="Focal width of the used camera:")
    self.vid_focal_entry = tk.Entry(self.master)
    self.vid_input_label = tk.Label(self.master, text="Video path:")
    self.vid_input_entry = tk.Entry(self.master)
    # Radio buttons for detection
    self.functionality_intvar = tk.IntVar(value=1)
    self.functionality = Functionality.Detection
    self.radio1 = tk.Radiobutton(self.master, text="Object detection", variable=self.functionality_intvar, value=1,
                                 fg="#000000", command=self.functionality_selection)
    self.radio2 = tk.Radiobutton(self.master, text="Object counting", variable=self.functionality_intvar, value=2,
                                 fg="#000000", command=self.functionality_selection)
    self.radio3 = tk.Radiobutton(self.master, text="Distance calculation", variable=self.functionality_intvar,
                                 fg="#000000", value=3, command=self.functionality_selection)
    # Inputs for the distance-measuring functionality
    self.distance_threshold_intvar = tk.IntVar()
    self.object_width_intvar = tk.IntVar()
    self.objects_type_textvar = tk.StringVar()
    self.distance_threshold_label = tk.Label(self.master, text="Distance threshold (cm):")
    # FIX: use the documented option name "textvariable" (previously "textvar",
    # which only works because Tcl accepts unambiguous option-name abbreviations)
    # for consistency with the calibration entries elsewhere in this class.
    self.distance_threshold_entry = tk.Entry(self.master, textvariable=self.distance_threshold_intvar)
    self.object_width_label = tk.Label(self.master, text="Width of objects (cm):")
    self.object_width_entry = tk.Entry(self.master, textvariable=self.object_width_intvar)
    self.objects_type_label = tk.Label(self.master, text="Objects name:")
    self.objects_type_entry = tk.Entry(self.master, textvariable=self.objects_type_textvar)
def start_calibrate(self, case: int = 0):
    """
    Create widgets for calibration and start preparations.

    case 0: calibration with a reference object (width / distance / name).
    case 1: calibration with a chessboard pattern (columns / rows).
    Any other case shows an error dialog.
    """
    # Show error when trying to calibrate without active live detection.
    if (type(self.detection_obj) is not LiveDetection):
        mb.showerror("Error", "Calibration only applicable for live detection!")
    else:
        self.calib_master = tk.Toplevel(self.master)
        self.calib_master.title("Calibration")
        self.calib_master.geometry("300x200")
        if (case == 0):
            self.calib_entry_width_label = tk.Label(self.calib_master, text="Object width (cm):")
            self.calib_entry_distance_label = tk.Label(self.calib_master, text="Object distance (cm):")
            self.calib_entry_name_label = tk.Label(self.calib_master, text="Object name:")
            self.calib_entry_width = tk.Entry(self.calib_master, textvariable=self.calib_width_textvar)
            self.calib_entry_distance = tk.Entry(self.calib_master, textvariable=self.calib_dist_textvar)
            self.calib_entry_name = tk.Entry(self.calib_master, textvariable=self.calib_name_textvar)
            self.calib_entry_width_label.grid(column=0, row=1)
            self.calib_entry_distance_label.grid(column=0, row=2)
            self.calib_entry_name_label.grid(column=0, row=3)
            self.calib_entry_width.grid(column=1, row=1)
            self.calib_entry_distance.grid(column=1, row=2)
            self.calib_entry_name.grid(column=1, row=3)
            self.do_calibration = tk.Button(self.calib_master, text="Confirm calibration input",
                                            command=lambda: self.confirm_calibr(case))
            self.do_calibration.grid(column=0, row=0, columnspan=2)
        elif (case == 1):
            # BUG FIX: the label texts were swapped — the entry bound to
            # calib_cols_textvar was labelled "Rows:" and the one bound to
            # calib_rows_textvar was labelled "Columns:", so the user's input
            # went to the wrong chessboard dimension in confirm_calibr().
            self.calib_entry_cols_label = tk.Label(self.calib_master, text="Columns:")
            self.calib_entry_rows_label = tk.Label(self.calib_master, text="Rows:")
            self.calib_entry_cols = tk.Entry(self.calib_master, textvariable=self.calib_cols_textvar)
            self.calib_entry_rows = tk.Entry(self.calib_master, textvariable=self.calib_rows_textvar)
            self.calib_entry_cols_label.grid(column=0, row=1)
            self.calib_entry_rows_label.grid(column=0, row=2)
            self.calib_entry_cols.grid(column=1, row=1)
            self.calib_entry_rows.grid(column=1, row=2)
            self.do_calibration = tk.Button(self.calib_master, text="Confirm calibration input",
                                            command=lambda: self.confirm_calibr(case))
            self.do_calibration.grid(column=0, row=0, columnspan=2)
        else:
            # FIX: added the missing space before "(invalid)".
            mb.showerror("Error", "Incorrect calibration invocation. Case: " + str(case) + " (invalid)")
def confirm_calibr(self, case: int = 0):
    """
    Delete widgets for calibration and start the calibration with the given values.

    case 0 calibrates from a reference object, case 1 from a chessboard pattern.
    Non-numeric input produces an error dialog instead of a crash.
    """
    self.calib_master.destroy()
    if (case == 0):
        obj_width = self.calib_width_textvar.get()
        obj_dist = self.calib_dist_textvar.get()
        obj_name = self.calib_name_textvar.get()
        self.cal_vals = [obj_width, obj_dist, obj_name]
        # Width and distance must be integers (entries hold raw strings).
        if (self.cal_vals[0].isdigit() and self.cal_vals[1].isdigit()):
            success = self.detection_obj.calibrate(int(self.cal_vals[0]), int(self.cal_vals[1]), self.cal_vals[2],
                                                   debug=self.debug_mode)
            # FIX: mirror case 1 and tell the user when calibration failed —
            # previously the returned result was silently discarded.
            # Guarded with "is False" in case calibrate() returns None.
            if (success is False):
                mb.showwarning("Calibration not successful",
                               "The calibration was not successful. You may try again.")
        else:
            mb.showerror("Type error", "Incorrect type(s) entered. Width and distance have to be integers.")
    elif (case == 1):
        cols = self.calib_cols_textvar.get()
        rows = self.calib_rows_textvar.get()
        self.calib_chess = [cols, rows]
        if (self.calib_chess[0].isdigit() and self.calib_chess[1].isdigit()):
            success = self.detection_obj.calibrate_board(cols=int(self.calib_chess[0]),
                                                         rows=int(self.calib_chess[1]),
                                                         debug=self.debug_mode)
            if (not success):
                mb.showwarning("Calibration not successful",
                               "The calibration was not successful. You may try again.")
        else:
            mb.showerror("Type error", "Incorrect type(s) entered. Only integers are allowed")
    else:
        mb.showerror("Error", "Incorrect calibration confirmation. Case: " + str(case) + "(invalid)")
    # Close any OpenCV preview windows the calibration may have opened.
    cv2.destroyAllWindows()
def face_swapping(self):
    """
    Prepare and clean up GUI for face swapping
    """
    # Drop any existing (video or live) detection object before switching modes.
    if self.detection_obj is not None:
        del self.detection_obj
    # Hide the main-menu widgets ...
    for widget in (self.vid_info_label, self.video_detection_button,
                   self.live_detection_button, self.face_swap_button):
        widget.grid_forget()
    # ... and show the face-swap controls.
    self.start_button.grid(column=0, row=0)
    self.swap_button.grid(column=0, row=1)
    self.home_button.grid(column=1, row=0)
    self.detection_obj = LiveFaceSwap()
    self.focal = 0
def live_detection(self):
    """
    Prepare and clean up GUI for live detection.
    """
    # Drop any existing (video or live) detection object before switching modes.
    if self.detection_obj is not None:
        del self.detection_obj
    # Hide the main-menu widgets ...
    for widget in (self.vid_info_label, self.video_detection_button,
                   self.live_detection_button, self.face_swap_button):
        widget.grid_forget()
    # ... and show the live-detection controls plus all three mode radios.
    self.start_button.grid(column=0, row=0)
    self.home_button.grid(column=1, row=0)
    self.radio1.grid(column=0, row=1)
    self.radio2.grid(column=0, row=2)
    self.radio3.grid(column=0, row=3)
    self.detection_obj = LiveDetection()
    self.focal = 0
def video_detection(self):
    """
    Prepare and clean up GUI for video detection.
    """
    # Drop any existing (video or live) detection object before switching modes.
    if self.detection_obj is not None:
        del self.detection_obj
    # Hide the main-menu widgets ...
    for widget in (self.live_info_label, self.video_detection_button,
                   self.live_detection_button, self.face_swap_button):
        widget.grid_forget()
    # ... and show the video-detection controls (distance mode is not offered here,
    # so only the first two radios are gridded).
    self.start_button.grid(column=0, row=0)
    self.home_button.grid(column=1, row=0)
    self.radio1.grid(column=0, row=1)
    self.radio2.grid(column=0, row=2)
    self.detection_obj = VideoDetection()
    self.vid_info_label.grid(column=1, row=1)
    self.vid_focal_entry.grid(column=2, row=1)
    self.vid_input_label.grid(column=1, row=2)
    self.vid_input_entry.grid(column=2, row=2)
def start_detection(self, arg: int = 0):
    """
    Start the detection.

    Dispatches on the type of self.detection_obj (VideoDetection, LiveFaceSwap,
    or live detection as the fallback).
    arg is only used for LiveFaceSwap: 0 starts detection, 1 starts face swapping.
    """
    # Swap the mode-selection widgets for the running-detection widgets.
    self.start_button.grid_forget()
    self.swap_button.grid_forget()
    self.radio1.grid_forget()
    self.radio2.grid_forget()
    self.live_info_label.grid_forget()
    self.stop_button.grid(column=0, row=0)
    self.detection_info_label.grid(column=0, row=1, columnspan=2)
    if (type(self.detection_obj) is VideoDetection):
        print("Video Detection start")
        # Collect input information
        video_path = self.video_folder + self.vid_input_entry.get()
        focal_str = self.vid_focal_entry.get()
        # Non-numeric focal input silently falls back to 0.
        if focal_str.isdigit():
            focal = int(focal_str)
        else:
            focal = 0
        self.vid_info_label.grid_forget()
        self.vid_focal_entry.grid_forget()
        self.vid_input_label.grid_forget()
        self.vid_input_entry.grid_forget()
        if (not (os.path.isfile(video_path))):
            # Bad path: report, reset the GUI back into video-detection mode.
            mb.showerror("Error", "Video '" + video_path + "' does not exist!")
            self.stop_detection()
            self.video_detection()
        else:
            self.detection_obj.detect(video_name=video_path, focal_width=focal, functionality=self.functionality,
                                      debug=self.debug_mode)
    elif (type(self.detection_obj) is LiveFaceSwap):
        print("Live face swap start")
        # NOTE(review): the triple-quoted block below is disabled autosave logic
        # kept as an inert string expression — left untouched.
        """
        if self.autosave:
            if not (os.path.isdir(self.video_folder)):
                mb.showwarning("Invalid directory", "The specified directory for videos is invalid. \n"
                                                    "The video will be saved in the same directory as "
                                                    "the python file.")
                video_name = "detection_" + date.now().strftime("%Y_%m_%d_%H_%M_%S")
            else:
                video_name = self.video_folder + "detection_" + date.now().strftime("%Y_%m_%d_%H_%M_%S")
            self.detection_obj.detect(self.detect_select, self.no_detect_select, self.autosave, video_name)
        else:
        """
        #video_name = "detection_" + date.now().strftime("%Y_%m_%d_%H_%M_%S")
        # NOTE(review): hard-coded placeholder title, presumably left over from
        # debugging — confirm whether the timestamped name above should be used.
        video_name = "test123"
        if arg == 0:
            self.detection_obj.detect(autosave=self.autosave, video_title=video_name, debug=self.debug_mode)
        elif arg == 1:
            self.detection_obj.face_swap(debug=self.debug_mode)
        else:
            return None
    else:
        # Fallback: live detection.
        print("Live detection start")
        self.radio3.grid_forget()
        if self.functionality == Functionality.Distance:
            # Hide the distance inputs and hand their values to the detector.
            self.distance_threshold_label.grid_forget()
            self.distance_threshold_entry.grid_forget()
            self.object_width_label.grid_forget()
            self.object_width_entry.grid_forget()
            self.objects_type_label.grid_forget()
            self.objects_type_entry.grid_forget()
            # Set variables for the distance calculations
            self.detection_obj.objects_width_cm = self.object_width_intvar.get()
            self.detection_obj.distance_threshold = self.distance_threshold_intvar.get()
            object_distance_detect_name = self.objects_type_textvar.get()
            if object_distance_detect_name is not None and object_distance_detect_name != "":
                self.detect_select = [object_distance_detect_name]
        if self.autosave:
            # Build a timestamped file name; fall back to the working directory
            # when the configured video folder is invalid.
            if not (os.path.isdir(self.video_folder)):
                mb.showwarning("Invalid directory", "The specified directory for videos is invalid. \n"
                                                    "The video will be saved in the same directory as "
                                                    "the python file.")
                video_name = "detection_" + date.now().strftime("%Y_%m_%d_%H_%M_%S")
            else:
                video_name = self.video_folder + "detection_" + date.now().strftime("%Y_%m_%d_%H_%M_%S")
            self.detection_obj.detect(self.detect_select, self.no_detect_select, self.functionality,
                                      self.autosave, video_name, debug=self.debug_mode)
        else:
            self.detection_obj.detect(self.detect_select, self.no_detect_select, self.functionality,
                                      debug=self.debug_mode)
def stop_detection(self):
    """
    Stop the current detection
    """
    # Release the detection object (its destructor frees camera/file handles).
    if self.detection_obj is not None:
        del self.detection_obj
        self.detection_obj = None
    # Hide the in-detection widgets and restore the main menu.
    for widget in (self.stop_button, self.detection_info_label, self.home_button):
        widget.grid_forget()
    self.live_detection_button.grid(column=0, row=0)
    self.video_detection_button.grid(column=1, row=0)
    self.face_swap_button.grid(column=2, row=0)
def go_home(self):
    """
    Go back to the main window
    """
    # Clear every mode-specific widget, then let stop_detection() restore the menu.
    for widget in (self.radio1, self.radio2, self.radio3,
                   self.start_button, self.live_info_label,
                   self.vid_info_label, self.vid_focal_entry,
                   self.vid_input_label, self.vid_input_entry):
        widget.grid_forget()
    self.stop_detection()
def | |
<filename>tests/apollo/test_skvbc_chaotic_startup.py<gh_stars>0
# Concord
#
# Copyright (c) 2019 VMware, Inc. All Rights Reserved.
#
# This product is licensed to you under the Apache 2.0 license (the "License").
# You may not use this product except in compliance with the Apache 2.0 License.
#
# This product may include a number of subcomponents with separate copyright
# notices and license terms. Your use of these subcomponents is subject to the
# terms and conditions of the subcomponent's license, as noted in the LICENSE
# file.
import os.path
import random
import unittest
from os import environ
import trio
from util.bft import with_trio, with_bft_network, with_constant_load, KEY_FILE_PREFIX
from util import bft_network_partitioning as net
from util import eliot_logging as log
from util import skvbc as kvbc
def start_replica_cmd(builddir, replica_id, view_change_timeout_milli="10000"):
    """
    Return a command that starts an skvbc replica when passed to
    subprocess.Popen.
    Note each arguments is an element in a list.
    """
    status_timer_milli = "500"
    replica_binary = os.path.join(builddir, "tests", "simpleKVBC", "TesterReplica", "skvbc_replica")
    cmd = [replica_binary]
    cmd += ["-k", KEY_FILE_PREFIX]
    cmd += ["-i", str(replica_id)]
    cmd += ["-s", status_timer_milli]
    cmd += ["-v", view_change_timeout_milli]
    return cmd
def start_replica_cmd_with_vc_timeout(vc_timeout):
    """Return a start_replica_cmd variant with the view-change timeout fixed to vc_timeout."""
    def _with_fixed_timeout(*args, **kwargs):
        return start_replica_cmd(*args, **kwargs, view_change_timeout_milli=vc_timeout)
    return _with_fixed_timeout
class SkvbcChaoticStartupTest(unittest.TestCase):
__test__ = False # so that PyTest ignores this test scenario
@unittest.skipIf(environ.get('BUILD_COMM_TCP_TLS', "").lower() == "true", "Unstable on CI (TCP/TLS only)")
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
async def test_inactive_window_catchup_up_to_gap(self, bft_network):
    """
    In this test we check the catchup from Inactive Window when we have a gap related to the Peers.
    The situation can happen if the catching up Replica's last Stable SeqNo is 3 Checkpoints behind its Peers, but
    its Last Executed is only 2 Checkpoints behind.
    Steps to recreate:
    1) Start all replicas.
    2) Isolate 1 Replica from all but the Primary. We will call it Late Replica.
    3) Advance all replicas beyond the first Stable Checkpoint. The Late Replica won't be able to collect a
    Stable Checkpoint.
    4) Stop the Late Replica and advance all others 2 more Checkpoints.
    5) Start the late Replica and verify it catches up to the end of its Working Window from the Inactive Windows of
    its Peers.
    """
    late_replica = 1
    primary = 0
    bft_network.start_all_replicas()
    skvbc = kvbc.SimpleKVBCProtocol(bft_network)
    first_stable_checkpoint_to_reach = 1
    checkpoints_to_advance_after_first = 2
    # NOTE(review): 150 is assumed to be the per-checkpoint sequence-number
    # window of the tested configuration — confirm against the replica config.
    seq_nums_per_checkpoint = 150
    num_reqs_after_first_checkpoint = 4
    # Helper: issue num_req known key-value writes through the protocol client.
    async def write_req(num_req=1):
        for _ in range(num_req):
            await skvbc.write_known_kv()
    # One-way isolation: late_replica receives only from the primary.
    with net.ReplicaOneWayTwoSubsetsIsolatingAdversary(
            bft_network, {late_replica},
            bft_network.all_replicas(without={primary, late_replica})) as adversary:
        adversary.interfere()
        # create checkpoint and wait for checkpoint propagation
        await skvbc.fill_and_wait_for_checkpoint(
            initial_nodes=bft_network.all_replicas(without={late_replica}),
            num_of_checkpoints_to_add=first_stable_checkpoint_to_reach,
            verify_checkpoint_persistency=False
        )
        await bft_network.wait_for_replicas_to_collect_stable_checkpoint(
            bft_network.all_replicas(without={late_replica}),
            first_stable_checkpoint_to_reach)
        await write_req(num_reqs_after_first_checkpoint)
        # Wait for late_replica to reach num_reqs_after_first_checkpoint past the 1-st Checkpoint
        with trio.fail_after(seconds=30):
            while True:
                last_exec = await bft_network.get_metric(late_replica, bft_network, 'Gauges', "lastExecutedSeqNum")
                log.log_message(message_type=f"replica = {late_replica}; lase_exec = {last_exec}")
                if last_exec == seq_nums_per_checkpoint + num_reqs_after_first_checkpoint:
                    break
                await trio.sleep(seconds=0.3)
        bft_network.stop_replica(late_replica)
        # create 2 checkpoints and wait for checkpoint propagation
        await skvbc.fill_and_wait_for_checkpoint(
            initial_nodes=bft_network.all_replicas(without={late_replica}),
            num_of_checkpoints_to_add=checkpoints_to_advance_after_first,
            verify_checkpoint_persistency=False
        )
        await bft_network.wait_for_replicas_to_collect_stable_checkpoint(
            bft_network.all_replicas(without={late_replica}),
            first_stable_checkpoint_to_reach + checkpoints_to_advance_after_first)
        bft_network.start_replica(late_replica)
        # Poll metrics until the late replica has executed to the end of its
        # Working Window (2 checkpoints' worth of sequence numbers).
        with trio.fail_after(seconds=30):
            late_replica_catch_up = False
            while not late_replica_catch_up:
                for replica_id in bft_network.get_live_replicas():
                    last_stable = await bft_network.get_metric(replica_id, bft_network, 'Gauges', "lastStableSeqNum")
                    last_exec = await bft_network.get_metric(replica_id, bft_network, 'Gauges', "lastExecutedSeqNum")
                    log.log_message(message_type=f"replica = {replica_id}; last_stable = {last_stable}; lase_exec = {last_exec}")
                    if replica_id == late_replica and last_exec == 2*seq_nums_per_checkpoint:
                        late_replica_catch_up = True
                await write_req()
                await trio.sleep(seconds=3)
@unittest.skipIf(environ.get('BUILD_COMM_TCP_TLS', "").lower() == "true", "Unstable on CI (TCP/TLS only)")
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
async def test_inactive_window(self, bft_network):
    """
    The goal of this test is to verify full catch up of a Replica only from the Inactive Window.
    1) Start all Replicas without Replica 1, which will later catch up from the Primary's Inactive Window.
    2) Advance all Replicas to 1 sequence number beyond the first stable and verify they have all collected
    Stable Checkpoints.
    3) Start and isolate the late Replica 1 form all others except the Primary. This way it will not be able
    to start State Transfer and will only be able to catch up from the Primary's Inactive Window.
    4) Verify that Replica 1 has managed to catch up.
    """
    late_replica = 1
    bft_network.start_replicas(bft_network.all_replicas(without={late_replica}))
    skvbc = kvbc.SimpleKVBCProtocol(bft_network)
    stable_checkpoint_to_reach = 1
    # 150 seq nums for the first checkpoint plus 1 past stable.
    num_reqs_to_catch_up = 151
    # Helper: issue num_req known key-value writes through the protocol client.
    async def write_req(num_req=1):
        for _ in range(num_req):
            await skvbc.write_known_kv()
    # create checkpoint and wait for checkpoint propagation
    await skvbc.fill_and_wait_for_checkpoint(
        initial_nodes=bft_network.get_live_replicas(),
        num_of_checkpoints_to_add=stable_checkpoint_to_reach,
        verify_checkpoint_persistency=False
    )
    await bft_network.wait_for_replicas_to_collect_stable_checkpoint(bft_network.get_live_replicas(),
                                                                     stable_checkpoint_to_reach)
    with trio.fail_after(seconds=30):
        # One-way isolation: replica 1 can only hear from the primary (0),
        # so State Transfer is impossible and only Inactive Window catch-up remains.
        with net.ReplicaOneWayTwoSubsetsIsolatingAdversary(bft_network, {1}, {6, 5, 4, 3, 2}) as adversary:
            adversary.interfere()
            bft_network.start_replica(late_replica)
            late_replica_catch_up = False
            while not late_replica_catch_up:
                for replica_id in bft_network.all_replicas():
                    last_stable = await bft_network.get_metric(replica_id, bft_network, 'Gauges', "lastStableSeqNum")
                    last_exec = await bft_network.get_metric(replica_id, bft_network, 'Gauges', "lastExecutedSeqNum")
                    log.log_message(message_type=f"replica = {replica_id}; last_stable = {last_stable}; lase_exec = {last_exec}")
                    if replica_id == late_replica and last_exec >= num_reqs_to_catch_up:
                        late_replica_catch_up = True
                await write_req()
                await trio.sleep(seconds=3)
@unittest.skip("Edge case scenario - not part of CI")
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: f == 2)
async def test_view_change_with_f_replicas_collected_stable_checkpoint(self, bft_network):
"""
The goal of this test is to leave the system with F Replicas that have collected a Stable Checkpoint and to
cause a View Change. In this way we get a misalignment in the Restrictions of the previous View and we get in an
indefinite View Change scenario.
1) Start all Replicas.
2) Move all Replicas to 1 SeqNo prior to the stable Checkpoint.
3) Stop Replicas 1 and 2.
4) Isolate Replica 3 from 6, 5 and 4 only in one direction - 3 will be able to send messages to all, but won't
receive from 6, 5 and 4. this way 3 won't be able to collect a Stable Checkpoint.
5) With the isolation on Replica 3, send Client Requests until 2*F replicas collect a Stable Checkpoint.
Only Replicas 0, 6, 5 and 4 will collect, 3 will not because it does not receive messages from 6, 5 and 4.
6) We stop Replicas 0 and 6 and start 1 and 2. This way we will cause View Change and we will have only 2
Replicas with a Stable Checkpoint (5 and 4).
7) Within this state the system must be able to finalize a View Change, because we have (2*F + 1) live Replicas,
but we have only F that have collected a Stable Checkpoint that are live.
"""
# step 1
bft_network.start_all_replicas()
skvbc = kvbc.SimpleKVBCProtocol(bft_network)
num_reqs_before_first_stable = 149
async def write_req(num_req=1):
for _ in range(num_req):
await skvbc.write_known_kv()
await write_req(num_reqs_before_first_stable)
# step 2
while True:
last_exec_seqs = []
for replica_id in bft_network.all_replicas():
last_stable = await bft_network.get_metric(replica_id, bft_network, 'Gauges', "lastStableSeqNum")
last_exec = await bft_network.get_metric(replica_id, bft_network, 'Gauges', "lastExecutedSeqNum")
log.log_message(message_type=f"replica = {replica_id}; last_stable = {last_stable};\
last_exec = {last_exec}")
last_exec_seqs.append(last_exec)
if sum(x == num_reqs_before_first_stable for x in last_exec_seqs) == bft_network.config.n:
break
else:
last_exec_seqs.clear()
# step 3
bft_network.stop_replica(1)
bft_network.stop_replica(2)
await write_req()
last_stable_seqs = []
# step 4
with net.ReplicaOneWayTwoSubsetsIsolatingAdversary(bft_network, {3}, {6, 5, 4}) as adversary:
adversary.interfere()
while True:
for replica_id in bft_network.get_live_replicas():
last_stable = await bft_network.get_metric(replica_id, bft_network, 'Gauges', "lastStableSeqNum")
last_exec = await bft_network.get_metric(replica_id, bft_network, 'Gauges', "lastExecutedSeqNum")
log.log_message(message_type=f"replica = {replica_id}; last_stable = {last_stable};\
lase_exec = {last_exec}")
last_stable_seqs.append(last_stable)
if sum(x == num_reqs_before_first_stable + 1 for x in last_stable_seqs) == 2 * bft_network.config.f:
# step 5 completed
break
else:
last_stable_seqs.clear()
await write_req()
await trio.sleep(seconds=3)
# step 6
bft_network.stop_replica(0)
bft_network.stop_replica(6)
bft_network.start_replica(1)
bft_network.start_replica(2)
# Send a Client Request to trigger View Change
with trio.move_on_after(seconds=3):
await write_req()
# step 7
await bft_network.wait_for_view(
replica_id=1,
expected=lambda v: v == 1,
err_msg="Make sure a view change happens from 0 to 1"
)
await skvbc.wait_for_liveness()
# @unittest.skipIf(environ.get('BUILD_COMM_TCP_TLS', "").lower() == "true", "Unstable on CI (TCP/TLS only)")
@unittest.skip("Disabled due to BC-6816")
@with_trio
@with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n == 7)
@with_constant_load
async def test_missed_two_view_changes(self, bft_network, skvbc, constant_load):
"""
The purpose of this test is to verify that if a Replica's View is behind the peers by more than 1
it manages to catch up properly and to join and participate in the View its peers are working in.
1) Start all replicas and store the current View they are in.
2) Stop Replica 2 which we will later bring back
3) Isolate Replica 0 and verify View Change happens
4) Isolate Replica 1 and verify View Change happens. This time we are going to go to View = 3,
because we previously stopped | |
<filename>cogs/kickban.py
import discord
import time
import datetime
from discord.ext import commands
from typing import Optional, Union
from utils import utils, crud
from utils.checks import is_staff, check_bot_or_staff
class KickBan(commands.Cog):
"""
Kicking and banning users.
"""
def __init__(self, bot):
    # Keep a reference to the bot: used for channel lookups (self.bot.channels),
    # the shared actions list, and escape_text().
    self.bot = bot
async def cog_check(self, ctx):
    """Commands in this cog are usable only inside a guild, never in DMs."""
    if ctx.guild is not None:
        return True
    raise commands.NoPrivateMessage()
async def meme(self, beaner: discord.Member, beaned: discord.Member, action: str, channel: discord.TextChannel, reason: str):
    """Call out a staff member who tried to moderate another staff/helper, and report it to #meta."""
    rebuke = f"Seriously? What makes you think it's okay to try and {action} another staff or helper like that?"
    await channel.send(rebuke)
    report = f"{beaner.mention} attempted to {action} {beaned.mention}|{beaned} in {channel.mention} "
    if reason == "":
        report += " without a reason"
    else:
        report += "for the reason " + reason
    await self.bot.channels['meta'].send(report)
@is_staff("HalfOP")
@commands.bot_has_permissions(kick_members=True)
@commands.command(name="kick")
async def kick_member(self, ctx, member: discord.Member, *, reason=""):
"""Kicks a user from the server. Staff only."""
if await check_bot_or_staff(ctx, member, "kick"):
return
msg = f"You were kicked from {ctx.guild.name}."
if reason != "":
msg += " The given reason is: " + reason
msg += "\n\nYou are able to rejoin the server, but please read the rules in #welcome-and-rules before participating again."
await utils.send_dm_message(member, msg, ctx)
try:
self.bot.actions.append("uk:" + str(member.id))
await member.kick(reason=reason)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
return
await ctx.send(f"{member} is now gone. 👌")
msg = f"👢 **Kick**: {ctx.author.mention} kicked {member.mention} | {self.bot.escape_text(member)}\n🏷 __User ID__: {member.id}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
await self.bot.channels['server-logs'].send(msg)
signature = utils.command_signature(ctx.command)
await self.bot.channels['mod-logs'].send(msg + (f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user." if reason == "" else ""))
@is_staff("OP")
@commands.bot_has_permissions(ban_members=True)
@commands.command(name="ban", aliases=["yeet"])
async def ban_member(self, ctx, member: Union[discord.Member, discord.User], days: Optional[int] = 0, *, reason=""):
"""Bans a user from the server. OP+ only. Optional: [days] Specify up to 7 days of messages to delete."""
if await check_bot_or_staff(ctx, member, "ban"):
return
if days > 7:
days = 7
elif days < 0:
days = 0
if isinstance(member, discord.Member):
msg = f"You were banned from {ctx.guild.name}."
if reason != "":
msg += " The given reason is: " + reason
msg += "\n\nThis ban does not expire."
await utils.send_dm_message(member, msg, ctx)
try:
await crud.remove_timed_restriction(member.id, 'timeban')
self.bot.actions.append("ub:" + str(member.id))
await ctx.guild.ban(member, reason=reason, delete_message_days=days)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
return
await ctx.send(f"{member} is now b&. 👍")
msg = f"⛔ **Ban**: {ctx.author.mention} banned {member.mention} | {self.bot.escape_text(member)}\n🏷 __User ID__: {member.id}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
await self.bot.channels['server-logs'].send(msg)
signature = utils.command_signature(ctx.command)
await self.bot.channels['mod-logs'].send(msg + (f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user." if reason == "" else ""))
@is_staff("OP")
@commands.bot_has_permissions(ban_members=True)
@commands.command(name="superban", aliases=["superyeet"])
async def superban(self, ctx, member: Union[discord.Member, discord.User], days: Optional[int] = 0, *, reason=""):
"""Bans a user from the server. OP+ only. Optional: [days] Specify up to 7 days of messages to delete."""
if await check_bot_or_staff(ctx, member, "ban"):
return
if days > 7:
days = 7
elif days < 0:
days = 0
if isinstance(member, discord.Member):
msg = f"You were superbanned from {ctx.guild.name}."
if reason != "":
msg += " The given reason is: " + reason
msg += "\n\nThis ban does not expire.\n\nhttps://nintendohomebrew.com/assets/img/banned.gif"
await utils.send_dm_message(member, msg, ctx)
try:
await crud.remove_timed_restriction(member.id, 'timeban')
self.bot.actions.append("ub:" + str(member.id))
await ctx.guild.ban(member, reason=reason, delete_message_days=days)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
return
await ctx.send(f"{member} is now SUPER BANNED. 👍 https://nintendohomebrew.com/assets/img/banned.gif")
msg = f"⛔ **Ban**: {ctx.author.mention} banned {member.mention} | {self.bot.escape_text(member)}\n🏷 __User ID__: {member.id}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
await self.bot.channels['server-logs'].send(msg)
signature = utils.command_signature(ctx.command)
await self.bot.channels['mod-logs'].send(msg + (f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user." if reason == "" else ""))
@is_staff("OP")
@commands.bot_has_permissions(ban_members=True)
@commands.command(name="unban", aliases=["unyeet"])
async def unban_member(self, ctx, user: Union[discord.Member, discord.User], *, reason=""):
"""Unbans a user from the server. OP+ only."""
if reason == "":
reason = "No reason provided."
try:
await ctx.guild.fetch_ban(user)
except discord.errors.NotFound:
return await ctx.send(f"{user} is not banned!")
await crud.remove_timed_restriction(user.id, 'timeban')
self.bot.actions.append("tbr:" + str(user.id))
await ctx.guild.unban(user, reason=reason)
await ctx.send(f"{user} is now unbanned.")
msg = f"⚠ **Unban**: {ctx.author.mention} unbanned {user.mention} | {self.bot.escape_text(user)}\n🏷 __User ID__: {user.id}\n✏️ __Reason__: {reason}"
await self.bot.channels['mod-logs'].send(msg)
await self.bot.channels['server-logs'].send(msg)
@is_staff("OP")
@commands.bot_has_permissions(ban_members=True)
@commands.command(name="silentban", aliases=["quietyeet"])
async def silentban_member(self, ctx, member: discord.Member, days: Optional[int] = 0, *, reason=""):
"""Bans a user from the server, without a notification. OP+ only. Optional: [days] Specify up to 7 days of messages to delete."""
if await check_bot_or_staff(ctx, member, "ban"):
return
if days > 7:
days = 7
elif days < 0:
days = 0
try:
self.bot.actions.append("ub:" + str(member.id))
await ctx.cog.remove_timed_restriction(member.id, 'timeban')
await member.ban(reason=reason, delete_message_days=days)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
return
await ctx.send(f"{member} is now b&. 👍")
msg = f"⛔ **Silent ban**: {ctx.author.mention} banned {member.mention} | {self.bot.escape_text(member)}\n"\
f"🏷 __User ID__: {member.id}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
await self.bot.channels['server-logs'].send(msg)
signature = utils.command_signature(ctx.command)
await self.bot.channels['mod-logs'].send(msg + (f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}`." if reason == "" else ""))
@is_staff("OP")
@commands.bot_has_permissions(ban_members=True)
@commands.command(name="timeban", aliases=["timeyeet"])
async def timeban_member(self, ctx, member: Union[discord.Member, discord.User], length, *, reason=""):
"""Bans a user for a limited period of time. OP+ only.\n\nLength format: #d#h#m#s"""
if await check_bot_or_staff(ctx, member, "timeban"):
return
if (seconds := utils.parse_time(length)) == -1:
return await ctx.send("💢 I don't understand your time format.")
timestamp = datetime.datetime.now()
delta = datetime.timedelta(seconds=seconds)
unban_time = timestamp + delta
unban_time_string = unban_time.strftime("%Y-%m-%d %H:%M:%S")
if isinstance(member, discord.Member):
msg = f"You were banned from {ctx.guild.name}."
if reason != "":
msg += " The given reason is: " + reason
msg += f"\n\nThis ban expires {unban_time_string} {time.tzname[0]}."
await utils.send_dm_message(member, msg, ctx)
try:
self.bot.actions.append("ub:" + str(member.id))
await ctx.guild.ban(member, reason=reason, delete_message_days=0)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
return
await crud.add_timed_restriction(member.id, unban_time, 'timeban')
await ctx.send(f"{member} is now b& until {unban_time_string} {time.tzname[0]}. 👍")
msg = f"⛔ **Time ban**: {ctx.author.mention} banned {member.mention} until {unban_time_string} | {member}\n🏷 __User ID__: {member.id}"
if reason != "":
msg += "\n✏️ __Reason__: " + reason
await self.bot.channels['server-logs'].send(msg)
signature = utils.command_signature(ctx.command)
await self.bot.channels['mod-logs'].send(msg + (f"\nPlease add an explanation below. In the future, it is recommended to use `{signature}` as the reason is automatically sent to the user." if reason == "" else ""))
@is_staff("OP")
@commands.bot_has_permissions(kick_members=True)
@commands.command(name="softban", aliases=["gentleyeet"])
async def softban_member(self, ctx, member: Union[discord.Member, discord.User], *, reason):
"""Soft-ban a user. OP+ only.
This "bans" the user without actually doing a ban on Discord. The bot will instead kick the user every time they join. Discord bans are account- and IP-based."""
if await check_bot_or_staff(ctx, member, "softban"):
return
await crud.add_softban(member.id, ctx.author.id, reason)
if isinstance(member, discord.Member):
msg = f"This account is no longer permitted to participate in {ctx.guild.name}. The reason is: {reason}"
await utils.send_dm_message(member, msg, ctx)
try:
await member.kick(reason=reason)
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
return
await ctx.send(f"{member} is now b&. 👍")
msg = f"⛔ **Soft-ban**: {ctx.author.mention} soft-banned {member.mention} | {self.bot.escape_text(member)}\n🏷 __User ID__: {member.id}\n✏️ __Reason__: {reason}"
await self.bot.channels['mod-logs'].send(msg)
await self.bot.channels['server-logs'].send(msg)
@is_staff("OP")
@commands.command(name="unsoftban")
async def unsoftban_member(self, ctx, user: Union[discord.Member, discord.User]):
"""Un-soft-ban a user based on ID. OP+ only."""
await crud.remove_softban(user.id)
await ctx.send(f"{user} has been unbanned!")
msg = f"⚠️ **Un-soft-ban**: {ctx.author.mention} un-soft-banned {self.bot.escape_text(user)}"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("SuperOP")
@commands.command(name="scamban")
async def scamban_member(self, ctx, member: discord.Member, site: str):
"""Bans member deleting message from last day and add a scamming site to the filter"""
if site in self.bot.wordfilter.filter['scamming site']:
await ctx.send("Site is already in the filter!")
elif ' ' in site or '-' in site:
await ctx.send("Filtered words can't contain dashes or spaces, please add the site properly with wordfilter command.")
else:
await self.bot.wordfilter.add(word=site, kind="scamming site")
await self.bot.channels['mod-logs'].send(f"🆕 **Added**: {ctx.author.mention} added `{site}` to the word filter!")
await member.ban(reason="Linking scamming site", delete_message_days=1)
await ctx.send(f"{member} is now b&. 👍")
msg = f"⛔ **Ban**: {ctx.author.mention} banned {member.mention} | {self.bot.escape_text(member)}\n🏷 __User | |
# Update token for user
token = tokens.token_show_by_id(context, data_dict)
data_dict['user_id'] = token['user_id']
# removed because it is saved in next step. User is allowed to click on /validate link several times
# get_action('user_extra_update')(context, data_dict)
except NotFound:
return OnbUserNotFound
except Exception as e:
error_summary = str(e)
return self.error_message(error_summary)
user = model.User.get(data_dict['user_id'])
template_data = ue_helpers.get_user_extra(user_id=data_dict['user_id'])
template_data['data']['current_step'] = user_model.HDX_ONBOARDING_DETAILS
template_data['data']['email'] = user.email
template_data['data']['name'] = user.name
template_data['capcha_api_key'] = configuration.config.get('ckan.recaptcha.publickey')
return render('home/index.html', extra_vars=template_data)
def register_details(self, data=None, errors=None, error_summary=None):
    '''
    Step 3: user enters details for registration (username, password,
    firstname, lastname and captcha).

    :param data: unused, kept for interface compatibility
    :param errors: unused, kept for interface compatibility
    :param error_summary: unused, kept for interface compatibility
    :return: OnbSuccess on success, otherwise one of the Onb* error results
    '''
    # Swap in the name validator variant with the friendlier message.
    temp_schema = user_reg_schema.register_details_user_schema()
    if 'name' in temp_schema:
        temp_schema['name'] = [name_validator_with_changed_msg if var == name_validator else var
                               for var in temp_schema['name']]
    data_dict = logic.clean_dict(unflatten(logic.tuplize_dict(logic.parse_params(request.params))))
    user_obj = model.User.get(data_dict['id'])
    context = {'model': model, 'session': model.Session, 'user': user_obj.name,
               'schema': temp_schema}
    first_name = data_dict['first-name']
    last_name = data_dict['last-name']
    data_dict['fullname'] = first_name + ' ' + last_name
    try:
        # Captcha validation used to happen here; it is currently disabled.
        check_access('user_update', context, data_dict)
    except NotAuthorized:
        return OnbNotAuth
    except Exception as e:
        # Plain exceptions don't necessarily carry `error_summary`; the old
        # code read it unconditionally and could raise AttributeError here.
        return self.error_message(getattr(e, 'error_summary', str(e)))
    # hack to disable the "already logged in" check while updating the user;
    # the finally-block restores c.user on *every* exit path (previously the
    # error returns leaked c.user = None for the rest of the request).
    save_user = c.user
    c.user = None
    try:
        token_dict = tokens.token_show(context, data_dict)
        data_dict['token'] = token_dict['token']
        get_action('user_update')(context, data_dict)
        tokens.token_update(context, data_dict)
        # Mark validation + details steps done and store the name parts.
        ue_data_dict = {'user_id': data_dict.get('id'), 'extras': [
            {'key': user_model.HDX_ONBOARDING_USER_VALIDATED, 'new_value': 'True'},
            {'key': user_model.HDX_ONBOARDING_DETAILS, 'new_value': 'True'},
            {'key': user_model.HDX_FIRST_NAME, 'new_value': first_name},
            {'key': user_model.HDX_LAST_NAME, 'new_value': last_name},
        ]}
        get_action('user_extra_update')(context, ue_data_dict)
        if configuration.config.get('hdx.onboarding.send_confirmation_email') == 'true':
            full_name = data_dict.get('fullname')
            subject = u'Thank you for joining the HDX community!'
            email_data = {
                'user_first_name': first_name,
                'username': data_dict.get('name'),
            }
            hdx_mailer.mail_recipient([{'display_name': full_name, 'email': data_dict.get('email')}], subject,
                                      email_data, footer=data_dict.get('email'),
                                      snippet='email/content/onboarding_confirmation_of_registration.html')
    except NotAuthorized:
        return OnbNotAuth
    except NotFound:
        return OnbUserNotFound
    except DataError:
        return OnbIntegrityErr
    except ValidationError as e:
        # Surface only the user-facing Name/Password problems when present.
        error_summary = ''
        if 'Name' in e.error_summary:
            error_summary += str(e.error_summary.get('Name'))
        if 'Password' in e.error_summary:
            error_summary += str(e.error_summary.get('Password'))
        return self.error_message(error_summary or e.error_summary)
    except IntegrityError:
        return OnbExistingUsername
    except Exception as e:
        return self.error_message(str(e))
    finally:
        c.user = save_user
    return OnbSuccess
def follow_details(self, data=None, errors=None, error_summary=None):
    '''
    Step 4: user follows key entities
    :param data:
    :param errors:
    :param error_summary:
    :return:
    '''
    params = logic.clean_dict(unflatten(logic.tuplize_dict(logic.parse_params(request.params))))
    # Prefer the logged-in user; fall back to the id supplied in the form.
    user_obj = model.User.get(c.user or params['id'])
    context = {'model': model, 'session': model.Session, 'user': user_obj.name, 'auth_user_obj': c.userobj}
    try:
        ue_dict = self._get_ue_dict(user_obj.id, user_model.HDX_ONBOARDING_FOLLOWS)
        get_action('user_extra_update')(context, ue_dict)
    except NotAuthorized:
        return OnbNotAuth
    except NotFound:
        return OnbUserNotFound
    except DataError:
        return OnbIntegrityErr
    except ValidationError as e:
        return self.error_message(e.error_summary)
    except Exception as e:
        return self.error_message(str(e))
    return OnbSuccess
def request_new_organization(self):
    '''
    Step 5a: user can request to create a new organization
    :return:
    '''
    context = {'model': model, 'session': model.Session, 'auth_user_obj': c.userobj,
               'user': c.user}
    try:
        check_access('hdx_send_new_org_request', context)
    except NotAuthorized:
        return OnbNotAuth
    try:
        requester = model.User.get(context['user'])
        payload = self._process_new_org_request(requester)
        self._validate_new_org_request_field(payload, context)
        get_action('hdx_send_new_org_request')(context, payload)
        if payload.get('user_extra'):
            ue_dict = self._get_ue_dict(requester.id, user_model.HDX_ONBOARDING_ORG)
            get_action('user_extra_update')(context, ue_dict)
    except hdx_mail.NoRecipientException as e:
        return self.error_message(e.error_summary)
    except logic.ValidationError as e:
        # Prefer the dedicated 'Message' entry when the summary carries one.
        if 'Message' in e.error_summary:
            return self.error_message(e.error_summary.get('Message'))
        return self.error_message(e.error_summary)
    except Exception as e:
        return self.error_message(str(e))
    return OnbSuccess
def request_membership(self):
    '''
    Step 5b: user can request membership to an existing organization
    :return: OnbSuccess on success, otherwise an error result
    '''
    context = {'model': model, 'session': model.Session,
               'user': c.user, 'auth_user_obj': c.userobj}
    try:
        check_access('hdx_send_new_org_request', context)
    except NotAuthorized:
        return OnbNotAuth
    try:
        org_id = request.params.get('org_id', '')
        msg = request.params.get('message', 'please add me to this organization')
        data_dict = {
            'organization': org_id,
            'message': msg,
            'save': u'save',
            'role': u'member',
            'group': org_id
        }
        # Only the side effect matters here (pending membership request plus
        # notification); the returned member object was unused.
        get_action('member_request_create')(context, data_dict)
        ue_dict = self._get_ue_dict(c.userobj.id, user_model.HDX_ONBOARDING_ORG)
        get_action('user_extra_update')(context, ue_dict)
    except hdx_mail.NoRecipientException as e:
        return self.error_message(_(str(e)))
    except ValidationError as e:
        log.error(str(e))
        if isinstance(e.error_summary, dict):
            error_summary = ' '.join(e.error_summary.values())
        else:
            error_summary = json.dumps(e.error_summary)
        return self.error_message(error_summary)
    except Exception as e:
        log.error(str(e))
        return self.error_message(_('Request can not be sent. Contact an administrator.'))
    return OnbSuccess
def invite_friends(self):
    '''
    Step 6: user can invite friends by email to access HDX
    :return:
    '''
    context = {'model': model, 'session': model.Session, 'auth_user_obj': c.userobj,
               'user': c.user}
    try:
        check_access('hdx_basic_user_info', context)
    except NotAuthorized:
        return OnbNotAuth
    try:
        if not c.user:
            return OnbNotAuth
        inviter_id = c.userobj.id or c.user
        ue_dict = self._get_ue_dict(inviter_id, user_model.HDX_ONBOARDING_FRIENDS)
        get_action('user_extra_update')(context, ue_dict)
        subject = u'Invitation to join the Humanitarian Data Exchange (HDX)'
        email_data = {
            'user_fullname': c.userobj.fullname,
            'user_email': c.userobj.email,
        }
        cc_list = [{'display_name': c.userobj.fullname, 'email': c.userobj.email}]
        sending_enabled = configuration.config.get('hdx.onboarding.send_confirmation_email', 'false') == 'true'
        invitees = (request.params.get('email1'), request.params.get('email2'), request.params.get('email3'))
        for invitee in invitees:
            if invitee and sending_enabled:
                hdx_mailer.mail_recipient([{'display_name': invitee, 'email': invitee}], subject, email_data,
                                          cc_recipients_list=cc_list,
                                          snippet='email/content/onboarding_invite_others.html')
    except Exception as e:
        return self.error_message(str(e))
    return OnbSuccess
def _get_ue_dict(self, user_id, key, value='True'):
    # Single-extra update payload addressed to one user.
    return dict(self._build_extras_dict(key, value), user_id=user_id)
def _build_extras_dict(self, key, value='True'):
return {'extras': [{'key': key, 'new_value': value}]}
# def _get_ue_dict_for_key_list(self, user_id, data_list):
# ue_dict = {'user_id': user_id}
# extras = []
# for item in data_list:
# extras.append({'key': item.get('key')), 'new_value': value})
# ue_dict['extras'] = extras
# return ue_dict
@staticmethod
@maintain.deprecated('The functionality of sending emails with new user requests has been deprecated')
def _validate_form(data, errors):
    ''' The functionality of sending emails with new user requests has been deprecated.
    Deprecated since 13.08.2014 - hdx 0.3.6
    '''
    # Each failed check adds a translated message list under its field key.
    if not data['fullname']:
        errors['fullname'] = [_(u'Fullname is required!')]
    email = data['email']
    if not email:
        errors['email'] = [_(u'Email is required!')]
    elif not EMAIL_REGEX.match(email):
        errors['email'] = [_(u'Email entered is not valid!')]
    if not data['org']:
        errors['org'] = [_(u'Organisation is required!')]
@maintain.deprecated('The functionality of sending emails with new user requests has been deprecated')
def request(self, data=None, errors=None, error_summary=None):
    ''' The functionality of sending emails with new user requests has been deprecated.
    Deprecated since hdx 0.3.5
    '''
    # context['request'] flags whether the form was actually submitted.
    context = {'model': model, 'session': model.Session,
               'user': c.user,
               'request': 'request' in request.params}
    # try:
    #     check_access('request_register', context)
    # except NotAuthorized:
    #     abort(401, _('Unauthorized to request new registration.'))
    if context['request'] and not data:
        # Submitted form: parse params, validate, then check the captcha.
        data = logic.clean_dict(unflatten(
            logic.tuplize_dict(logic.parse_params(request.params))))
        errors = dict()
        error_summary = dict()
        self._validate_form(data, errors)
        try:
            captcha.check_recaptcha(request)
        except captcha.CaptchaError:
            error_msg = _(u'Bad Captcha. Please try again.')
            error_summary['captcha'] = error_msg
            errors['captcha'] = [error_msg]
        if errors == {}:
            name = data['fullname']
            email = data['email']
            org = data['org']
            reason = data['reason']
            h.log.info(
                'Request access for {name} ({email}) of {org} with reason: {reason}'.format(name=name, email=email,
                                                                                            org=org, reason=reason))
            try:
                # The actual mail-out is disabled; only flash + redirect remain.
                # send_mail(name, email, org, reason)
                h.flash_success(_('We will check your request and we will send you an email!'))
                h.redirect_to('/')
            except mailer.MailerException, e:
                error_summary['sendError'] = _('Could not send request for access: %s') % unicode(e)
    # Fall through to (re-)render the form with any collected errors.
    data = data or {}
    errors = errors or {}
    error_summary = error_summary or {}
    vars = {'data': data,
            'errors': errors,
            'error_summary': error_summary,
            'capcha_api_key': configuration.config.get('ckan.recaptcha.publickey')}
    c.form = render(self.request_register_form, extra_vars=vars)
    return base.render(self.request_register_form, cache_force=True, extra_vars=vars)
def register(self, data=None, errors=None, error_summary=None):
    """
    Creates a new user, but allows logged in users to create
    additional accounts as per HDX requirements at the time.
    """
    context = {'model': model, 'session': model.Session, 'user': c.user}
    try:
        check_access('user_create', context)
    except NotAuthorized:
        abort(403, _('Unauthorized to register as a user.'))
    if c.user:
        # #1799 Don't offer the registration form if already logged in
        return render('user/logout_first.html')
    # Anonymous visitor: show the registration variant of the home page.
    template_data = ue_helpers.get_register(True, "")
    return render('home/index.html', extra_vars=template_data)
def post_register(self):
    """
    If the user has registered but not validated their email
    redirect to a special page reminding them why they can't
    login.
    """
    if c.user:
        # Already authenticated accounts have nothing to validate here.
        return render('user/logout_first.html')
    extra = {'user': request.params.get('user')}
    return render('user/post_register.html', extra_vars=extra)
def validation_resend(self, id):
# Get user by id
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'for_view': True}
data_dict = {'id': id,
'user_obj': c.userobj}
try:
user = get_action('user_show')(context, data_dict)
except NotFound, e:
abort(404, _('User not found'))
except:
abort(500, _('Error'))
# Get token for user
try:
token = tokens.token_show(context, data_dict)
except NotFound, e:
abort(404, _('User not found'))
except:
| |
import sys, os
import time
import cv2
import numpy as np
import math
from scipy.signal import convolve2d
from scipy.ndimage import label,sum
from scipy.misc import imrotate
from matplotlib import pyplot as plt
from skimage import morphology
from skimage.segmentation import slic
from bwmorph import bwmorph_thin
from collections import deque
class SOFT:
# def __init__(self):
# pass
def soft(self,image,sigma=1.5, clahe=True, canny_thresh=0.05, stroke=20):
    """
    Find Lines using Orientations and Projections
    Note: +CT means, increases computation time.
    'K' Number of neighbors to consider in the Orientation Field
    Transform.Each neighbor is evaluated against a candidate angle
    and then add up. The biggest the lines, the better the
    result for a bigger K. (K big,+CT)
    Default: 12.
    'Delta' Angle increment from 0.1 to 90.
    The resulting projection will be more accurate if
    the increment is small. (Delta small, +CT)
    Default: 1.
    'dispim' If true, images are shown. If false,no images are shown.
    Default: True
    %
    'wiener' Two-element vector of positive integers: [M N].
    [M N] specifies the number of tile rows and
    columns. Both M and N must be at least 2.
    The total number of image tiles is equal to M*N.
    If the lines are too thin, it is used to dilate them.
    Default: [2 2].
    Use 0 to not execute the wiener filter.
    'strokeWidth' When the Stroke Width Transform is executed, for each
    pixel, rays are created from the pixel to the next
    change of gradient. If your stroke is big, use a bigger
    width.(strokeWidth big, ++CT)
    Default: 20.
    'canthresh' Automatic canny thresholding is performed using an
    iterative loop. If the percentage of white pixels is bigger than a
    threshold,then we are assuming the image is getting more
    and more clutter.
    Default: 0.075, means a 7.5% of the pixels is white.
    'Sigma' Preprocessing gaussian filter. Helps with noisy images
    of after CLAHE. Values between 0 and 2 are recommended.
    Default: 0 (not applied).
    'clahe' If true, CLAHE (automatic brightness and contrast
    balance) is applied.
    Default: False.
    ##########################################
    saved values:
    R - map of projections
    peaks - peaks detected
    prepro - image after preprocessing (clahe and gaussian filter)
    bwedge - image after automatic canny filtering
    orientim - image with ridge orientations
    reliability - probabilistic plot of orientations
    :return:
    """
    # ---- parameter sanitisation: clamp out-of-range args to defaults ----
    if (sigma < 0):
        print 'Invalid value. Sigma cannot be smaller than 0.'
        sigma = 0
    self.sigma = sigma
    self.clahe = clahe
    if (canny_thresh > 1 or canny_thresh <= 0):
        print 'Invalid threshold. Cannot be bigger than 1 or smaller than 0. Setting default value.'
        canny_thresh = 0.05
    self.canthresh = canny_thresh
    if (stroke < 2 or stroke > 1024):
        print 'Invalid stroke size. Accepted values between and half the size of your image.Setting default value.'
        stroke = 20
    self.stroke = stroke
    # ---- preprocessing: optional CLAHE announcement + gaussian blur ----
    print("Preprocessing")
    start = time.time()
    prepro = image
    if(self.clahe):
        # NOTE(review): this branch only *prints*; no equalisation is applied
        # to `prepro` here — confirm whether CLAHE is done by the caller.
        print('CLAHE true, performed at clipLimit 0.01 and tileGridSize of 32,32')
    if(self.sigma>0):
        # Kernel size ~6*sigma, forced odd so the filter is centred.
        sze = int(math.ceil(6*self.sigma))
        if(sze%2 == 0):
            sze = sze+1
        h = self.fspecial_gauss2D((sze,sze),self.sigma)
        I = convolve2d(prepro,h,'same')
        print('Gaussian blur performed with Size ' +str(sze)+ ' and sigma '+ str(self.sigma))
    # NOTE(review): when sigma == 0 the name `I` is never bound and the
    # gradient step below raises NameError — confirm sigma > 0 is required.
    PREPRO = I
    end = time.time()
    print "Preprocessing done: "+str(end - start)+" s."
    ##### Gradient
    start = time.time()
    gradient,orientation = self.canny(I,2)
    end = time.time()
    print "Gradient done: "+str(end - start)+" s."
    # plt.subplot(121),plt.imshow(orientation,cmap='gray')
    # plt.subplot(122),plt.imshow(gradient*10,cmap='gray')
    # plt.show()
    # ---- non-maximum suppression along the gradient orientation ----
    start = time.time()
    # nm = self.nonmaxsup(gradient,orientation,1.5)
    nm = self.nonmaxsup_python(gradient,orientation,1.5)
    end = time.time()
    print "NMS done: "+str(end - start)+" s."
    # ---- automatic (block-wise) canny thresholding ----
    start = time.time()
    # nm = nonmaxsup(gradient,orientation,1.5)
    BWEDGE = self.autocanny2(prepro,nm,16)
    end = time.time()
    print "Autocanny done: "+str(end - start)+" s."
    m_size = np.array([2,2])
    J = self.borderEnhancer(BWEDGE,m_size)
    print 'Border Enhancement done'
    # ---- ridge orientations, smoothed per SLIC superpixel ----
    start = time.time()
    ORIENTIM, _reliability = self.ridgeorient(gradient, 1, 5, 5)
    segments = slic(prepro, n_segments=2500, sigma=1.5, compactness=0.08)
    num_labels = np.max(segments) + 1
    orientim_slic = np.copy(ORIENTIM)
    # Replace each superpixel's orientations by their median for stability.
    for i in range(num_labels):
        orientim_slic[np.where(segments == i)] = np.median(ORIENTIM[np.where(segments == i)])
    ORIENTIM = orientim_slic
    _, RELIABILITY = self.ridgeorient(gradient, 1, 3, 3)
    RELIABILITY[RELIABILITY<0.5] = 0
    end = time.time()
    print "Ridges done: "+str(end - start)+" s."
    # plt.imshow(orientim2 ,cmap='jet')
    tl = np.multiply(J,RELIABILITY) # Enhance the bw image removing disordered regions
    # ---- stroke width transform + cleanup ----
    if self.stroke>0:
        print "Starting SWT with strokeWidth of "+str(self.stroke)
        start = time.time()
        iSWT= self.SWT_Total(I,tl,ORIENTIM,self.stroke)
        end = time.time()
        print "SWT done: "+str(end - start)+" s."
    # NOTE(review): if stroke <= 0 (only possible if changed upstream, as the
    # clamp above forces stroke >= 2), `iSWT` would be unbound here.
    start = time.time()
    print('Removing ill components')
    FSWT = self.cleanswt2(iSWT,J)
    end = time.time()
    print "Removing done: " + str(end - start) + " s.\n"
    plt.show()
    return PREPRO,BWEDGE,ORIENTIM,RELIABILITY,iSWT,FSWT
def autocanny(self,nm):
    # Automatically tune the hysteresis thresholds so that roughly a
    # fraction `self.canthresh` of the image pixels end up marked as edges.
    med = np.median(nm[nm>0])
    max_factor = 0.8*np.max(nm)
    factor_a = max_factor
    factor_b = 0.4
    lenm = nm.shape
    bwedge = np.zeros(lenm)
    value = 0
    msize = (lenm[0]*lenm[1])
    # Coarse pass: keep lowering the upper threshold (x0.9 per step) until
    # the white-pixel ratio reaches the target.
    while(value<self.canthresh):
        bwedge = self.hysthresh(nm, factor_a*med,factor_b*med);
        value = np.sum(bwedge)/msize
        factor_a = factor_a*0.9
    # Coarse part or histeresis accomplished
    # Fine pass: nudge the threshold back up (+0.01 per step) until the
    # ratio drops just below the target again.
    # NOTE(review): unlike autocanny2 there is no lower bound on factor_a in
    # the first loop, so a nearly blank `nm` could iterate for a long time.
    while(value>self.canthresh):
        factor_a = factor_a + 0.01
        bwedge = self.hysthresh(nm, factor_a*med,factor_b*med);
        value = np.sum(bwedge)/msize
    print 'Automatic Canny Done'
    print 'Lower threshold reached at '+str(factor_b)
    print 'Upper threshold reached at '+str(factor_a)
    return bwedge
def autocanny2(self, prepro, nm, blocksize):
    # Block-wise automatic Canny thresholding: a global hysteresis pass
    # first, then a second pass restricted to SLIC superpixels that are
    # still edge-poor, whose result is merged back into the edge map.
    m,n = prepro.shape
    im_size = np.array([m,n])
    size_pixels = im_size / blocksize
    size_pixels = int(size_pixels[0] * size_pixels[1])
    # Clustering of image
    segments = slic(prepro, n_segments=size_pixels, sigma=1.5, compactness=0.08)
    num_labels = np.max(segments) + 1
    med = float(np.median(nm[nm > 0]))
    max_factor = 0.95 * np.max(nm)
    factor_a = max_factor
    factor_b = 0.4
    bwedge = []
    value = 0
    msize = m*n
    # Coarse pass: lower the upper threshold until the white-pixel ratio
    # reaches self.canthresh; the factor_a floor guards near-blank input.
    while (value < self.canthresh):
        bwedge = self.hysthresh(nm, factor_a * med, factor_b * med)
        value = np.sum(bwedge)/msize
        factor_a = factor_a * 0.9
        if (factor_a < 1e-15):
            break
    # Fine pass: raise the threshold back until the ratio drops below target.
    while (value > self.canthresh):
        factor_a = factor_a + 0.01
        bwedge = self.hysthresh(nm, factor_a * med, factor_b * med);
        value = np.sum(bwedge)/msize
    expected_density = (msize * self.canthresh) / size_pixels  # Expected
    label_counter = 0
    # Edge-poor superpixels keep their nm response for the local round;
    # edge-rich ones are cleared from bwedge and re-thresholded locally.
    for i in range(num_labels):
        label_density = np.sum(bwedge[np.where(segments == i)])
        if (label_density < 2 * expected_density):
            nm[segments == i]= 0
        else:
            bwedge[np.where(segments == i)] = 0;
            label_counter = label_counter + 1
    subsize = label_counter * blocksize * blocksize
    # NOTE(review): if no superpixel qualifies, subsize == 0 and the division
    # below would fail — confirm inputs always yield at least one region.
    canthresh = (subsize/(msize*1.0))*self.canthresh
    factor_a = max_factor
    factor_b = 0.4
    value = 0
    bwedge2 = np.zeros((m,n))
    # Local round: same coarse/fine search over the retained regions only.
    while (value < canthresh):
        bwedge2 = self.hysthresh(nm, factor_a * med, factor_b * med);
        value = np.sum(bwedge2) / subsize;
        factor_a = factor_a * 0.9;
        if (factor_a < 1e-15):
            break
    while (value > canthresh):
        factor_a = factor_a + 0.01;
        bwedge2 = self.hysthresh(nm, factor_a * med, factor_b * med);
        # NOTE(review): density is divided by msize here but by subsize in
        # the loop above — looks inconsistent; confirm which is intended.
        value = sum(sum(bwedge2)) / msize
    # Merge locally recovered edges into the global edge map.
    bwedge[bwedge2>0] = 1
    print 'Automatic Canny Done'
    print 'Lower threshold reached at ' + str(factor_b)
    print 'Upper threshold reached at ' + str(factor_a)
    return bwedge
def gaussfilt(self,img,sigma):
    # Kernel spans ~6*sigma and is forced odd so the filter is centred.
    size = int(math.ceil(6 * sigma))
    if size % 2 == 0:
        size += 1
    kernel = self.fspecial_gauss2D((size, size), sigma)
    # conv2(image, mask) is the same as filter2(rot90(mask,2), image)
    return convolve2d(img, kernel, 'same')
def derivative5(self,i_image):
    # 5 tap 1st derivative cofficients. These are optimal if you are just
    # seeking the 1st derivatives
    # Copyright (c) 2010 <NAME>
    smooth = np.array([0.037659,0.249153,0.426375,0.249153,0.037659], dtype=np.float32)
    deriv = np.array([0.109604,0.276691,0.000000,-0.276691,-0.109604], dtype=np.float32)
    # Separable kernels: smooth along one axis, differentiate along the other.
    kernel_x = smooth[:, np.newaxis] * deriv
    kernel_y = deriv[:, np.newaxis] * smooth
    Ix = convolve2d(i_image, kernel_x, 'same')
    Iy = convolve2d(i_image, kernel_y, 'same')
    return Ix, Iy
def fspecial_gauss2D(self,shape=(3,3),sigma=0.5):
    """
    2D gaussian mask - should give the same result as MATLAB's
    fspecial('gaussian',[shape],[sigma])
    """
    half_r, half_c = [(dim - 1.) / 2. for dim in shape]
    grid_y, grid_x = np.ogrid[-half_r:half_r + 1, -half_c:half_c + 1]
    kernel = np.exp(-(grid_x * grid_x + grid_y * grid_y) / (2. * sigma * sigma))
    # Zero out negligible tails, then normalise to unit sum.
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    total = kernel.sum()
    if total != 0:
        kernel /= total
    return kernel
def nonmaxsup_python(self,gradient,orientation,radius = 1.2):
"""
# Input:
# inimage - Image to be non-maxima suppressed.
# orient - Image containing feature normal orientation angles in degrees
# (0-180), angles positive anti-clockwise.
# radius - Distance in pixel units to be looked at on each side of each
# pixel when determining whether it is a local maxima or not.
# This value cannot be less than 1.
# (Suggested value about 1.2 - 1.5)
# Returns:
# im - Non maximally suppressed image.
#
# Notes:
# The suggested radius value is 1.2 - 1.5 for the following reason. If the
# radius parameter is set to 1 there is a chance that a maxima will not be
# identified on a broad peak where adjacent pixels have the same value. To
# overcome this one typically uses a radius value | |
in range(len(legend_list)):
# identify the current battery
cur_bat = legend_list[i]
line, = ax[v].plot(pack_data[self.dict_bat_to_code[cur_bat]]['time'],
pack_data[self.dict_bat_to_code[cur_bat]][var_list[v]][self.dict_bat_to_packindex[cur_bat]],
color_list[i])
handle_list.append(line)
# plotting behavior for aggregate current behavior
else:
for i in range(len(bat_list)):
ax[v].plot(pack_data[bat_list[i]]['time'],
pack_data[bat_list[i]]['?ai'],
color_list_ai[i])
# initialize legend for multiple subplot case
if legend_on == False:
leg = fig.legend(handles=handle_list, labels=legend_list, loc=2, prop={'size':10})
legend_on = True
for legobj in leg.legendHandles:
legobj.set_linewidth(2.0)
try:
# pause plot in order for new data to update
plt.pause(self.bus.wait_time)
except tk.TclError:
print('>> Graph closed')
break
# Serial Port is no longer connected
except serial.serialutil.SerialException:
print('>> Lost connection to Serial Bus')
self.bus_connected = False
def callback_trm_ex(self):
    ''' execute terminal plotting creation dependent on user inputs
    :raises error message:
    if there is not a connection with the serial bus
    if the user has not specified battery scan time
    if the user has not selected batteries to graph
    if the user has not selected variables to graph
    '''
    # check serial bus connection
    if not self.bus_connected:
        messagebox.showerror('ERROR', 'You are not connected to the Serial Bus')
    # check that scan time has been specified
    elif self.scan_time == None:
        messagebox.showerror('ERROR', 'Please enter a valid Scan Time in the Battery Pack window')
    # check that batteries to plot have been specified
    elif ((self.var_trm_b1.get() +
           self.var_trm_b2.get() +
           self.var_trm_b3.get() +
           self.var_trm_b4.get()) == 0):
        messagebox.showerror('ERROR', 'Please select one or more batteries to graph')
    # check that variables to plot have been specified
    elif ((self.var_trm_v.get() +
           self.var_trm_i.get() +
           self.var_trm_ai.get() +
           self.var_trm_p.get() +
           self.var_trm_k.get() +
           self.var_trm_q.get() +
           self.var_trm_c.get()) == 0):
        messagebox.showerror('ERROR', 'Please select one or more variables to graph')
    else:
        # begin the graphing process
        print('>> New terminal plot created')
        # initialize graph specific parameters
        bat_list = []
        var_list = []
        pack_data = {}
        # determine battery and question commands to send to the batteries via the serial bus
        if self.var_trm_b1.get() == 1:
            bat_list.append('#bat1')
        if self.var_trm_b2.get() == 1:
            bat_list.append('#bat2')
        if self.var_trm_b3.get() == 1:
            bat_list.append('#bat3')
        if self.var_trm_b4.get() == 1:
            bat_list.append('#bat4')
        if self.var_trm_v.get() == 1:
            var_list.append('?v')
        # aggregate current ('?ai') is derived from per-battery '?i', so
        # requesting it also forces '?i' collection.
        if self.var_trm_ai.get() == 1:
            var_list.append('?i')
            var_list.append('?ai')
            self.var_gra_i.set(1)
        if self.var_trm_i.get() == 1:
            if '?i' not in var_list:
                var_list.append('?i')
        if self.var_trm_p.get() == 1:
            var_list.append('?p')
        if self.var_trm_k.get() == 1:
            var_list.append('?k')
        if self.var_trm_q.get() == 1:
            var_list.append('?q')
        if self.var_trm_c.get() == 1:
            var_list.append('?c')
        # determine the appropriate legend(s) for the user specified graph
        legend_list_ai = [self.dict_code_to_pack[code] for code in bat_list]
        legend_list = []
        for bat in bat_list:
            legend_list.extend(self.dict_pack_to_bat[self.dict_code_to_pack[bat]])
        # configure battery scan time and initialize pack_data variable
        for bat in bat_list:
            # determine scan command (scan time encoded as lowercase hex)
            scan_cmd = bat + '!s' + '{0:x}'.format(self.scan_time) + '_'
            # send scan command to each battery via the serial bus
            self.bus.send_cmd(scan_cmd)
            # initialize pack_data variable:
            # '?ai' holds one flat series; every other variable holds one
            # series per battery in the pack.
            pack_data[bat] = {}
            pack_data[bat]['time'] = []
            for var in var_list:
                if var == '?ai':
                    pack_data[bat][var] = []
                else:
                    pack_data[bat][var] = [[] for i in range(self.dict_pack_to_nums[self.dict_code_to_pack[bat]])]
        # account for when the Serial Port is no longer connected
        try:
            # enter infinite loop
            # NOTE(review): no break in this loop — it only exits via the
            # SerialException handler below (or an interrupt); confirm intended.
            while(True):
                # pause to allow batteries to acquire new scan data
                time.sleep(self.scan_time)
                print('\n')
                # display the current UTC time
                print('- Time: '+ str(datetime.datetime.utcnow()))
                # interate through the batteries to be questioned
                for bat in bat_list:
                    # extract the current UTC time
                    pack_data[bat]['time'].append(datetime.datetime.utcnow())
                    # iterate through the variables to be questioned
                    for var in var_list:
                        # extract latency and bat_readings data (when aggregate current not requested)
                        if var != '?ai':
                            # latency is unpacked here but not used in this path
                            latency, bat_readings = self.bus.send_cmd(bat+var)
                            # decode the bat_readings data into integer format
                            bat_readings_int = [int(element,self.hex_base) for element in bat_readings if element!='']
                            # convert temperature data from K*10 -> C as necessary
                            if var == '?k':
                                bat_readings_int = [int(reading/10 - 273.15) for reading in bat_readings_int]
                            # convert current data to negative values as necessary (negative indicates battery discharge)
                            elif var == '?i':
                                bat_readings_int = [reading - self.current_adjust if reading > self.current_threshold else reading for reading in bat_readings_int]
                            # add converted bat_reading_int data to pack_data variable
                            for i in range(self.dict_pack_to_nums[self.dict_code_to_pack[bat]]):
                                pack_data[bat][var][i].append(bat_readings_int[i])
                            # print relevant data
                            print('-', self.dict_code_to_pack[bat], self.dict_axis_info[var], [bat_readings_int[i] for i in range(self.dict_pack_to_nums[self.dict_code_to_pack[bat]])])
                        # if aggregate current is requested, sum over the current readings of the individual batteries, then add to pack_data
                        else:
                            pack_data[bat][var].append(sum([pack_data[bat]['?i'][bat_num][-1] for bat_num in range(len(pack_data[bat]['?i']))]))
                            print('-', self.dict_code_to_pack[bat], self.dict_axis_info[var], pack_data[bat][var][-1])
        # Serial Port is no longer connected
        except serial.serialutil.SerialException:
            print('>> Lost connection to Serial Bus')
            self.bus_connected = False
def callback_select_all_pitch(self):
    ''' allows user to select all battery relays for the pitch pack at once
    '''
    # Relay checkboxes belonging to the pitch pack share the '#bat4' prefix.
    pack_relays = [r for r in self.dict_bat_relay_var.keys() if r[:5] == '#bat4']
    # If any box is unchecked, check them all; otherwise clear them all.
    fully_selected = sum(self.dict_bat_relay_var[r].get() for r in pack_relays) >= len(pack_relays)
    new_state = 0 if fully_selected else 1
    for r in pack_relays:
        self.dict_bat_relay_var[r].set(new_state)
def callback_select_all_payload(self):
    ''' allows user to select all battery relays for the payload pack at once
    '''
    # Relay checkboxes belonging to the payload pack share the '#bat1' prefix.
    pack_relays = [r for r in self.dict_bat_relay_var.keys() if r[:5] == '#bat1']
    # If any box is unchecked, check them all; otherwise clear them all.
    fully_selected = sum(self.dict_bat_relay_var[r].get() for r in pack_relays) >= len(pack_relays)
    new_state = 0 if fully_selected else 1
    for r in pack_relays:
        self.dict_bat_relay_var[r].set(new_state)
def callback_select_all_aftshort(self):
''' allows user to select all battery relays for the aftshort pack at once
'''
# find the relevant relay variables for the aftshort pack
relay_vars = [relay for relay in self.dict_bat_relay_var.keys() if relay[:5] == '#bat3']
# determine if all of the batteries are currently selected
if sum([self.dict_bat_relay_var[relay].get() for relay in relay_vars]) < len(relay_vars):
# select all the checkboxes
for relay in relay_vars:
self.dict_bat_relay_var[relay].set(1)
else:
# deselect all the checkboxes
for relay in relay_vars:
self.dict_bat_relay_var[relay].set(0)
def callback_select_all_aftlong(self):
''' allows user to select all battery relays for the aftlong pack at once
'''
# find the relevant relay variables for the aftlong pack
relay_vars = [relay for relay in self.dict_bat_relay_var.keys() if relay[:5] == '#bat2']
# determine if all of the batteries are currently selected
if sum([self.dict_bat_relay_var[relay].get() for relay in relay_vars]) < len(relay_vars):
# select all the checkboxes
for relay in relay_vars:
self.dict_bat_relay_var[relay].set(1)
else:
# deselect all the checkboxes
for relay in relay_vars:
self.dict_bat_relay_var[relay].set(0)
    def callback_recharge_off(self):
        ''' commands power supply to zero voltage, zero current, and both loads off
        :raises error message:
            if there is not a connection with the serial bus
        '''
        # check serial bus connection
        if not self.bus_connected:
            messagebox.showerror('ERROR', 'You are not connected to the Serial Bus')
        # send relevant commands to the powersupply
        else:
            # zero out both channels of the supply (voltage first, then current, per channel)
            desired_voltage_s1 = 0
            desired_current_s1 = 0
            desired_voltage_s2 = 0
            desired_current_s2 = 0
            self.pwr_supply.set_voltage(desired_voltage_s1, self.supply1_v_channel, self.bus)
            self.pwr_supply.set_current(desired_current_s1, self.supply1_c_channel, self.bus)
            self.pwr_supply.set_voltage(desired_voltage_s2, self.supply2_v_channel, self.bus)
            self.pwr_supply.set_current(desired_current_s2, self.supply2_c_channel, self.bus)
            # mirror the commanded values into the GUI entry fields so the display
            # matches what was actually sent to the hardware
            self.entry_pwr_s1_v.delete(0, tk.END)
            self.entry_pwr_s1_i.delete(0, tk.END)
            self.entry_pwr_s1_v.insert(0, desired_voltage_s1)
            self.entry_pwr_s1_i.insert(0, desired_current_s1)
            self.entry_pwr_s2_v.delete(0, tk.END)
            self.entry_pwr_s2_i.delete(0, tk.END)
            self.entry_pwr_s2_v.insert(0, desired_voltage_s2)
            self.entry_pwr_s2_i.insert(0, desired_current_s2)
            # NOTE(review): the loads are only commanded off while a discharge is
            # active, yet the docstring promises "both loads off" unconditionally —
            # confirm this guard is intentional
            if self.discharge_on:
                load1 = 0
                load2 = 0
                self.pwr_supply.set_load(load1, load2, self.bus)
            print('>> RECHARGE OFF command sent')
def callback_connect(self):
''' connects to serial port and initializes serial bus object
:raises error message:
if no serial ports are avaible (either macbook of raspi port)
'''
# look for available serial ports
available_ports = self.get_serial_ports()
# custom serial vus parameters
mac_port = '/dev/cu.usbserial'
pi_port = '/dev/ttyUSB0'
baud = 9600
time_out = 1
wait_time = 0.1
# custom power supply parameters
pwr_name = '#ada'
v_gain = 0.00078141
v_offset = -0.053842
i_gain = 0.00020677
i_offset = 0.014475
# macbook serial port available
if mac_port in available_ports:
print('>> Connecting to serial port:', mac_port)
self.bus = Bus(mac_port, baud, time_out, wait_time)
self.pwr_supply = PowerSupply(pwr_name, v_gain, v_offset, i_gain, i_offset)
self.bus_connected = True
# raspberry pi serial port available
elif pi_port in available_ports:
print('>> Connecting to serial port:', pi_port)
self.bus = Bus(pi_port, baud, time_out, wait_time)
self.pwr_supply = PowerSupply(pwr_name, v_gain, v_offset, i_gain, i_offset)
self.bus_connected = True
# no serial port is available
else:
self.bus_connected = False
messagebox.showerror('ERROR', 'Serial Bus not available')
def init_frame_containers(self):
#############################################################
# MAIN WINDOW CONTAINER #####################################
#############################################################
| |
<gh_stars>0
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from collections import OrderedDict
from functools import partial
from typing import Any, IO, Iterable, List, Optional, Sequence, Type, Union
import torch
from deprecate.utils import void
from torch.utils.data.dataloader import DataLoader
import pytorch_lightning as pl
from pytorch_lightning.accelerators import GPUAccelerator
from pytorch_lightning.loops.dataloader import DataLoaderLoop
from pytorch_lightning.loops.epoch import EvaluationEpochLoop
from pytorch_lightning.trainer.connectors.logger_connector.result import _OUT_DICT, _ResultCollection
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.fetching import (
AbstractDataFetcher,
DataFetcher,
DataLoaderIterDataFetcher,
InterBatchParallelDataFetcher,
)
from pytorch_lightning.utilities.imports import _RICH_AVAILABLE
from pytorch_lightning.utilities.rank_zero import rank_zero_warn
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.types import EPOCH_OUTPUT
if _RICH_AVAILABLE:
from rich.console import Console
from rich.table import Column, Table
class EvaluationLoop(DataLoaderLoop):
    """Loops over all dataloaders for evaluation."""
    def __init__(self, verbose: bool = True) -> None:
        super().__init__()
        self.epoch_loop = EvaluationEpochLoop()  # inner loop that runs one dataloader for one epoch
        self.verbose = verbose  # when True, results are printed on the rank-zero process at run end
        self._results = _ResultCollection(training=False)  # collects metrics logged during evaluation
        self._outputs: List[EPOCH_OUTPUT] = []  # per-dataloader batch outputs, consumed in `on_run_end`
        self._logged_outputs: List[_OUT_DICT] = []  # per-dataloader logged metrics, returned from `on_run_end`
        self._max_batches: List[int] = []  # per-dataloader cap on the number of batches to run
        self._has_run: bool = False  # True once at least one non-sanity-checking dataloader pass completed
        self._data_fetcher: Optional[AbstractDataFetcher] = None  # created in `on_run_start`, freed in `teardown`
    @property
    def num_dataloaders(self) -> int:
        """Returns the total number of dataloaders."""
        # case where user does:
        # return dl1, dl2
        dataloaders = self.dataloaders
        # NOTE(review): the `dataloaders` property raises rather than returning
        # None, so this guard appears unreachable — confirm before relying on it
        if dataloaders is None:
            return 0
        length = len(dataloaders)
        if length > 0 and isinstance(dataloaders[0], (list, tuple)):
            length = len(dataloaders[0])
        return length
    @property
    def dataloaders(self) -> Sequence[DataLoader]:
        """Returns the validation or test dataloaders."""
        dataloaders = self.trainer.test_dataloaders if self.trainer.testing else self.trainer.val_dataloaders
        if dataloaders is None:
            raise RuntimeError("Dataloaders should be available.")
        return dataloaders
    @property
    def prefetch_batches(self) -> int:
        # prefetch a single batch when the current dataloader has unknown (infinite)
        # length or when inter-batch parallelism is requested via env var; otherwise 0
        batches = self.trainer.num_test_batches if self.trainer.testing else self.trainer.num_val_batches
        is_unsized = batches[self.current_dataloader_idx] == float("inf")
        inter_batch_parallelism = os.getenv("PL_INTER_BATCH_PARALLELISM", "0") == "1"
        return 1 if is_unsized or inter_batch_parallelism else 0
    def connect(self, epoch_loop: EvaluationEpochLoop) -> None:  # type: ignore[override]
        """Connect the evaluation epoch loop with this loop."""
        self.epoch_loop = epoch_loop
    @property
    def done(self) -> bool:
        """Returns whether all dataloaders are processed or evaluation should be skipped altogether."""
        return super().done or self.skip
    @property
    def skip(self) -> bool:
        """Returns whether the evaluation should be skipped."""
        max_batches = self._get_max_batches()
        return sum(max_batches) == 0
    def reset(self) -> None:
        """Resets the internal state of the loop."""
        self._max_batches = self._get_max_batches()
        # bookkeeping
        self._outputs = []
        self._logged_outputs = []
        if isinstance(self._max_batches, int):
            # a single int applies to every dataloader; normalize to a per-dataloader list
            self._max_batches = [self._max_batches] * len(self.dataloaders)
        super().reset()
        # when restarting, if we are running `validate` or `test` twice, since there's no concept of `max_epochs` we
        # need to reset the current state when the loop has finished running
        if self.done and self.trainer.state.fn != TrainerFn.FITTING:
            self.dataloader_progress.reset_on_run()
    def on_skip(self) -> List:
        return []
    def on_run_start(self, *args: Any, **kwargs: Any) -> None:
        """Runs the ``_on_evaluation_model_eval``, ``_on_evaluation_start`` and ``_on_evaluation_epoch_start``
        hooks."""
        void(*args, **kwargs)
        data_fetcher_cls = _select_data_fetcher_type(self.trainer)
        self._data_fetcher = data_fetcher_cls(prefetch_batches=self.prefetch_batches)
        # hook
        self._on_evaluation_model_eval()
        # clear any gradients left over from training before evaluating
        self.trainer.lightning_module.zero_grad()
        self._on_evaluation_start()
        self._on_evaluation_epoch_start()
    def advance(self, *args: Any, **kwargs: Any) -> None:
        """Performs evaluation on one single dataloader."""
        void(*args, **kwargs)
        dataloader_idx = self.current_dataloader_idx
        dataloader = self.trainer.strategy.process_dataloader(self.current_dataloader)
        assert self._data_fetcher is not None
        self._data_fetcher.setup(
            dataloader,
            batch_to_device=partial(self.trainer._call_strategy_hook, "batch_to_device", dataloader_idx=dataloader_idx),
        )
        dl_max_batches = self._max_batches[dataloader_idx]
        kwargs = OrderedDict()
        # the dataloader index is only forwarded to the step hooks when there is more than one dataloader
        if self.num_dataloaders > 1:
            kwargs["dataloader_idx"] = dataloader_idx
        dl_outputs = self.epoch_loop.run(self._data_fetcher, dl_max_batches, kwargs)
        # store batch level output per dataloader
        self._outputs.append(dl_outputs)
        if not self.trainer.sanity_checking:
            # indicate the loop has run
            self._has_run = True
    def on_advance_end(self) -> None:
        self.trainer._logger_connector.epoch_end_reached()
        self._logged_outputs.append(self.trainer._logger_connector.update_eval_epoch_metrics())
        super().on_advance_end()
    def on_run_end(self) -> List[_OUT_DICT]:
        """Runs the ``_on_evaluation_epoch_end`` hook."""
        # if `done` returned True before any iterations were done, this won't have been called in `on_advance_end`
        self.trainer._logger_connector.epoch_end_reached()
        # hook
        self._evaluation_epoch_end(self._outputs)
        self._outputs = []  # free memory
        # hook
        self._on_evaluation_epoch_end()
        logged_outputs, self._logged_outputs = self._logged_outputs, []  # free memory
        # include any logged outputs on epoch_end
        epoch_end_logged_outputs = self.trainer._logger_connector.update_eval_epoch_metrics()
        for dl_outputs in logged_outputs:
            dl_outputs.update(epoch_end_logged_outputs)
        # log metrics
        self.trainer._logger_connector.log_eval_end_metrics()
        # hook
        self._on_evaluation_end()
        # enable train mode again
        self._on_evaluation_model_train()
        if self.verbose and self.trainer.is_global_zero:
            assert self.trainer.state.stage is not None
            self._print_results(logged_outputs, self.trainer.state.stage)
        return logged_outputs
    def teardown(self) -> None:
        if self._data_fetcher is not None:
            self._data_fetcher.teardown()
            self._data_fetcher = None
        # move collected results to CPU so GPU memory is released
        self._results.cpu()
        self.epoch_loop.teardown()
    def _get_max_batches(self) -> List[int]:
        """Returns the max number of batches for each dataloader."""
        if self.trainer.testing:
            max_batches = self.trainer.num_test_batches
        else:
            if self.trainer.sanity_checking:
                max_batches = self.trainer.num_sanity_val_batches
            else:
                max_batches = self.trainer.num_val_batches
        return max_batches
    def _reload_evaluation_dataloaders(self) -> None:
        """Reloads dataloaders if necessary."""
        if self.trainer.testing:
            self.trainer.reset_test_dataloader()
        elif self.trainer.val_dataloaders is None or self.trainer._data_connector._should_reload_val_dl:
            self.trainer.reset_val_dataloader()
    def _on_evaluation_start(self, *args: Any, **kwargs: Any) -> None:
        """Runs ``on_{validation/test}_start`` hooks."""
        assert self._results is not None
        self._results.to(device=self.trainer.lightning_module.device)
        if self.trainer.testing:
            self.trainer._call_callback_hooks("on_test_start", *args, **kwargs)
            self.trainer._call_lightning_module_hook("on_test_start", *args, **kwargs)
            self.trainer._call_strategy_hook("on_test_start", *args, **kwargs)
        else:
            self.trainer._call_callback_hooks("on_validation_start", *args, **kwargs)
            self.trainer._call_lightning_module_hook("on_validation_start", *args, **kwargs)
            self.trainer._call_strategy_hook("on_validation_start", *args, **kwargs)
    def _on_evaluation_model_eval(self) -> None:
        """Sets model to eval mode."""
        if self.trainer.testing:
            self.trainer._call_lightning_module_hook("on_test_model_eval")
        else:
            self.trainer._call_lightning_module_hook("on_validation_model_eval")
    def _on_evaluation_model_train(self) -> None:
        """Sets model to train mode."""
        if self.trainer.testing:
            self.trainer._call_lightning_module_hook("on_test_model_train")
        else:
            self.trainer._call_lightning_module_hook("on_validation_model_train")
    def _on_evaluation_end(self, *args: Any, **kwargs: Any) -> None:
        """Runs ``on_{validation/test}_end`` hook."""
        if self.trainer.testing:
            self.trainer._call_callback_hooks("on_test_end", *args, **kwargs)
            self.trainer._call_lightning_module_hook("on_test_end", *args, **kwargs)
            self.trainer._call_strategy_hook("on_test_end", *args, **kwargs)
        else:
            self.trainer._call_callback_hooks("on_validation_end", *args, **kwargs)
            self.trainer._call_lightning_module_hook("on_validation_end", *args, **kwargs)
            self.trainer._call_strategy_hook("on_validation_end", *args, **kwargs)
        # reset the logger connector state
        self.trainer._logger_connector.reset_results()
    def _on_evaluation_epoch_start(self, *args: Any, **kwargs: Any) -> None:
        """Runs ``on_epoch_start`` and ``on_{validation/test}_epoch_start`` hooks."""
        self.trainer._logger_connector.on_epoch_start()
        self.trainer._call_callback_hooks("on_epoch_start", *args, **kwargs)
        self.trainer._call_lightning_module_hook("on_epoch_start", *args, **kwargs)
        if self.trainer.testing:
            self.trainer._call_callback_hooks("on_test_epoch_start", *args, **kwargs)
            self.trainer._call_lightning_module_hook("on_test_epoch_start", *args, **kwargs)
        else:
            self.trainer._call_callback_hooks("on_validation_epoch_start", *args, **kwargs)
            self.trainer._call_lightning_module_hook("on_validation_epoch_start", *args, **kwargs)
    def _evaluation_epoch_end(self, outputs: List[EPOCH_OUTPUT]) -> None:
        """Runs ``{validation/test}_epoch_end``"""
        self.trainer._logger_connector._evaluation_epoch_end()
        # with a single dataloader don't pass a 2D list
        output_or_outputs: Union[EPOCH_OUTPUT, List[EPOCH_OUTPUT]] = (
            outputs[0] if len(outputs) > 0 and self.num_dataloaders == 1 else outputs
        )
        # call the model epoch end
        if self.trainer.testing:
            self.trainer._call_lightning_module_hook("test_epoch_end", output_or_outputs)
        else:
            self.trainer._call_lightning_module_hook("validation_epoch_end", output_or_outputs)
    def _on_evaluation_epoch_end(self) -> None:
        """Runs ``on_{validation/test}_epoch_end`` hook."""
        hook_name = "on_test_epoch_end" if self.trainer.testing else "on_validation_epoch_end"
        self.trainer._call_callback_hooks(hook_name)
        self.trainer._call_lightning_module_hook(hook_name)
        self.trainer._call_callback_hooks("on_epoch_end")
        self.trainer._call_lightning_module_hook("on_epoch_end")
        self.trainer._logger_connector.on_epoch_end()
    @staticmethod
    def _get_keys(data: dict) -> Iterable[str]:
        # yields leaf keys: one level of nesting is flattened when any value is a dict
        if any(isinstance(v, dict) for v in data.values()):
            for v in data.values():
                yield from apply_to_collection(v, dict, dict.keys)
        else:
            yield from data.keys()
    @staticmethod
    def _find_value(data: dict, target: str) -> Iterable[Any]:
        # depth-first search for all values stored under key `target` in a nested dict
        for k, v in data.items():
            if k == target:
                yield v
            elif isinstance(v, dict):
                yield from EvaluationLoop._find_value(v, target)
    @staticmethod
    def _print_results(results: List[_OUT_DICT], stage: str, file: Optional[IO[str]] = None) -> None:
        # remove the dl idx suffix
        results = [{k.split("/dataloader_idx_")[0]: v for k, v in result.items()} for result in results]
        metrics = sorted({k for keys in apply_to_collection(results, dict, EvaluationLoop._get_keys) for k in keys})
        headers = [f"DataLoader {i}" for i in range(len(results))]
        # fallback is useful for testing of printed output
        term_size = shutil.get_terminal_size(fallback=(120, 30)).columns or 120
        # column width: at least 25 chars, at most half the terminal, sized to the longest label
        max_length = int(min(max(len(max(metrics + headers, key=len)), 25), term_size / 2))
        rows: List[List[Any]] = [[] for _ in metrics]
        for result in results:
            for metric, row in zip(metrics, rows):
                v = list(EvaluationLoop._find_value(result, metric))
                if v:
                    val = v[0]
                    if isinstance(val, torch.Tensor):
                        # scalar tensors print as plain numbers; larger ones as lists
                        val = val.item() if val.numel() == 1 else val.tolist()
                    row.append(f"{val}")
                else:
                    row.append(" ")
        # keep one column with max length for metrics
        num_cols = int((term_size - max_length) / max_length)
        # print the table in chunks of `num_cols` dataloader columns per table
        for i in range(0, len(headers), num_cols):
            table_headers = headers[i : (i + num_cols)]
            table_rows = [row[i : (i + num_cols)] for row in rows]
            table_headers.insert(0, f"{stage} Metric".capitalize())
            if _RICH_AVAILABLE:
                console = Console(file=file)
                columns = [Column(h, justify="center", style="magenta", width=max_length) for h in table_headers]
                columns[0].style = "cyan"
                table = Table(*columns)
                for metric, row in zip(metrics, table_rows):
                    row.insert(0, metric)
                    table.add_row(*row)
                console.print(table)
            else:
                row_format = f"{{:^{max_length}}}" * len(table_headers)
                half_term_size = int(term_size / 2)
                bar = "─" * term_size
                lines = [bar, row_format.format(*table_headers).rstrip(), bar]
                for metric, row in zip(metrics, table_rows):
                    # deal with column overflow
                    if len(metric) > half_term_size:
                        # NOTE(review): each overflow segment repeats *row, and the final
                        # remainder line supplies only (metric, " ") — with more than one
                        # data column this under-fills the format string; confirm upstream
                        while len(metric) > half_term_size:
                            row_metric = metric[:half_term_size]
                            metric = metric[half_term_size:]
                            lines.append(row_format.format(row_metric, *row).rstrip())
                        lines.append(row_format.format(metric, " ").rstrip())
                    else:
                        lines.append(row_format.format(metric, *row).rstrip())
                lines.append(bar)
                print(os.linesep.join(lines), file=file)
def _select_data_fetcher_type(trainer: "pl.Trainer") -> Type[AbstractDataFetcher]:
lightning_module = trainer.lightning_module
step_fx_name = "test_step" if trainer.testing else "validation_step"
step_fx = getattr(lightning_module, step_fx_name)
if is_param_in_hook_signature(step_fx, "dataloader_iter", explicit=True):
rank_zero_warn(
f"Found `dataloader_iter` argument in the `{step_fx_name}`. Note that the support for "
"this signature is experimental and the behavior is subject to change."
)
return DataLoaderIterDataFetcher
elif os.getenv("PL_INTER_BATCH_PARALLELISM", "0") == "1":
if not isinstance(trainer.accelerator, GPUAccelerator):
raise MisconfigurationException("Inter batch parallelism is available | |
rezervaci cekajicich na ockovani (kalendar pro 1. davky)
rezervace_cekajici_1_zmena_den = Column(Integer) # pocet rezervaci cekajicich na ockovani (kalendar pro 1. davky) - zmena za den
rezervace_cekajici_1_zmena_tyden = Column(Integer) # pocet rezervaci cekajicich na ockovani (kalendar pro 1. davky) - zmena za tyden
rezervace_cekajici_2 = Column(Integer) # pocet rezervaci cekajicich na ockovani (kalendar pro 2. davky)
rezervace_cekajici_2_zmena_den = Column(Integer) # pocet rezervaci cekajicich na ockovani (kalendar pro 2. davky) - zmena za den
rezervace_cekajici_2_zmena_tyden = Column(Integer) # pocet rezervaci cekajicich na ockovani (kalendar pro 2. davky) - zmena za tyden
rezervace_kapacita = Column(Integer) # kapacita na aktualni den (vsechny kalendare)
rezervace_kapacita_zmena_den = Column(Integer) # kapacita na aktualni den (vsechny kalendare) - zmena za den
rezervace_kapacita_zmena_tyden = Column(Integer) # kapacita na aktualni den (vsechny kalendare) - zmena za tyden
rezervace_kapacita_1 = Column(Integer) # kapacita na aktualni den (kalendar pro 1. davky)
rezervace_kapacita_1_zmena_den = Column(Integer) # kapacita na aktualni den (kalendar pro 1. davky) - zmena za den
rezervace_kapacita_1_zmena_tyden = Column(Integer) # kapacita na aktualni den (kalendar pro 1. davky) - zmena za tyden
rezervace_kapacita_2 = Column(Integer) # kapacita na aktualni den (kalendar pro 2. davky)
rezervace_kapacita_2_zmena_den = Column(Integer) # kapacita na aktualni den (kalendar pro 2. davky) - zmena za den
rezervace_kapacita_2_zmena_tyden = Column(Integer) # kapacita na aktualni den (kalendar pro 2. davky) - zmena za tyden
rezervace_nejblizsi_volno = Column(Date) # nejblizsi den s volnym mistem
registrace_celkem = Column(Integer) # pocet vsech registraci
registrace_celkem_zmena_den = Column(Integer) # pocet vsech registraci - zmena za den
registrace_celkem_zmena_tyden = Column(Integer) # pocet vsech registraci - zmena za tyden
registrace_fronta = Column(Integer) # pocet registraci bez rezervace
registrace_fronta_zmena_den = Column(Integer) # pocet registraci bez rezervace - zmena za den
registrace_fronta_zmena_tyden = Column(Integer) # pocet registraci bez rezervace - zmena za tyden
registrace_tydenni_uspesnost = Column(Float) # uspesnost rezervaci za poslednich 7 dni
registrace_tydenni_uspesnost_zmena_den = Column(Float) # uspesnost rezervaci za poslednich 7 dni - zmena za den
registrace_tydenni_uspesnost_zmena_tyden = Column(Float) # uspesnost rezervaci za poslednich 7 dni - zmena za tyden
registrace_14denni_uspesnost = Column(Float) # uspesnost rezervaci za poslednich 14 dni
registrace_14denni_uspesnost_zmena_den = Column(Float) # uspesnost rezervaci za poslednich 14 dni - zmena za den
registrace_14denni_uspesnost_zmena_tyden = Column(Float) # uspesnost rezervaci za poslednich 14 dni - zmena za tyden
registrace_30denni_uspesnost = Column(Float) # uspesnost rezervaci za poslednich 30 dni
registrace_30denni_uspesnost_zmena_den = Column(Float) # uspesnost rezervaci za poslednich 30 dni - zmena za den
registrace_30denni_uspesnost_zmena_tyden = Column(Float) # uspesnost rezervaci za poslednich 30 dni - zmena za tyden
registrace_prumer_cekani = Column(Float) # prumerna doba cekani na rezervaci z poslednich 7 dni
registrace_prumer_cekani_zmena_den = Column(Float) # prumerna doba cekani na rezervaci z poslednich 7 dni - zmena za den
registrace_prumer_cekani_zmena_tyden = Column(Float) # prumerna doba cekani na rezervaci z poslednich 7 dni - zmena za tyden
registrace_odhad_cekani = Column(Float) # odhadovana doba cekani na rezervaci
registrace_odhad_cekani_zmena_den = Column(Float) # odhadovana doba cekani na rezervaci - zmena za den
registrace_odhad_cekani_zmena_tyden = Column(Float) # odhadovana doba cekani na rezervaci - zmena za tyden
registrace_fronta_prumer_cekani = Column(Float) # prumerna doba ve fronte
registrace_fronta_prumer_cekani_zmena_den = Column(Float) # prumerna doba ve fronte - zmena za den
registrace_fronta_prumer_cekani_zmena_tyden = Column(Float) # prumerna doba ve fronte - zmena za tyden
registrace_rezervace_prumer = Column(Float) # prumerny pocet novych rezervaci za posledni tyden
registrace_rezervace_prumer_zmena_den = Column(Float) # prumerny pocet novych rezervaci za posledni tyden - zmena za den
registrace_rezervace_prumer_zmena_tyden = Column(Float) # prumerny pocet novych rezervaci za posledni tyden - zmena za tyden
ockovani_pocet_davek = Column(Integer) # pocet ockovanych davek
ockovani_pocet_davek_zmena_den = Column(Integer) # pocet ockovanych davek - zmena za den
ockovani_pocet_davek_zmena_tyden = Column(Integer) # pocet ockovanych davek - zmena za tyden
ockovani_pocet_castecne = Column(Integer) # pocet ockovanych alespon castecne
ockovani_pocet_castecne_zmena_den = Column(Integer) # pocet ockovanych alespon castecne - zmena za den
ockovani_pocet_castecne_zmena_tyden = Column(Integer) # pocet ockovanych alespon castecne - zmena za tyden
ockovani_pocet_plne = Column(Integer) # pocet ockovanych plne (vsechny davky)
ockovani_pocet_plne_zmena_den = Column(Integer) # pocet ockovanych plne (vsechny davky) - zmena za den
ockovani_pocet_plne_zmena_tyden = Column(Integer) # pocet ockovanych plne (vsechny davky) - zmena za tyden
ockovani_odhad_cekani = Column(Float) # odhad casu potrebneho na naockovani lidi ve fronte a rezervaci
ockovani_odhad_cekani_zmena_den = Column(Float) # odhad casu potrebneho na naockovani lidi ve fronte a rezervaci - zmena za den
ockovani_odhad_cekani_zmena_tyden = Column(Float) # odhad casu potrebneho na naockovani lidi ve fronte a rezervaci - zmena za tyden
vakciny_prijate_pocet = Column(Integer) # pocet prijatych vakcin
vakciny_prijate_pocet_zmena_den = Column(Integer) # pocet prijatych vakcin - zmena za den
vakciny_prijate_pocet_zmena_tyden = Column(Integer) # pocet prijatych vakcin - zmena za tyden
vakciny_ockovane_pocet = Column(Integer) # pocet ockovanych vakcin
vakciny_ockovane_pocet_zmena_den = Column(Integer) # pocet ockovanych vakcin - zmena za den
vakciny_ockovane_pocet_zmena_tyden = Column(Integer) # pocet ockovanych vakcin - zmena za tyden
vakciny_znicene_pocet = Column(Integer) # pocet znicenych vakcin
vakciny_znicene_pocet_zmena_den = Column(Integer) # pocet znicenych vakcin - zmena za den
vakciny_znicene_pocet_zmena_tyden = Column(Integer) # pocet znicenych vakcin - zmena za tyden
vakciny_skladem_pocet = Column(Integer) # odhad vakcin skladem
vakciny_skladem_pocet_zmena_den = Column(Integer) # odhad vakcin skladem - zmena za den
vakciny_skladem_pocet_zmena_tyden = Column(Integer) # odhad vakcin skladem - zmena za tyden
misto = relationship("OckovaciMisto", back_populates="metriky")
def __repr__(self):
return f"<OckovaciMistoMetriky(misto_id='{self.misto_id}', datum='{self.datum}')>"
# Back-reference completing the bidirectional OckovaciMisto <-> OckovaciMistoMetriky relationship.
OckovaciMisto.metriky = relationship("OckovaciMistoMetriky", back_populates="misto")
class CrMetriky(db.Model):
__tablename__ = 'cr_metriky'
datum = Column(DateTime, primary_key=True)
pocet_obyvatel_celkem = Column(Integer)
pocet_obyvatel_dospeli = Column(Integer)
rezervace_celkem = Column(Integer) # pocet vsech rezervaci (vsechny kalendare)
rezervace_celkem_zmena_den = Column(Integer) # pocet vsech rezervaci (vsechny kalendare) - zmena za den
rezervace_celkem_zmena_tyden = Column(Integer) # pocet vsech rezervaci (vsechny kalendare) - zmena za tyden
rezervace_cekajici = Column(Integer) # pocet rezervaci cekajicich na ockovani (vsechny kalendare)
rezervace_cekajici_zmena_den = Column(Integer) # pocet rezervaci cekajicich na ockovani (vsechny kalendare) - zmena za den
rezervace_cekajici_zmena_tyden = Column(Integer) # pocet rezervaci cekajicich na ockovani (vsechny kalendare) - zmena za tyden
rezervace_cekajici_1 = Column(Integer) # pocet rezervaci cekajicich na ockovani (kalendar pro 1. davky)
rezervace_cekajici_1_zmena_den = Column(Integer) # pocet rezervaci cekajicich na ockovani (kalendar pro 1. davky) - zmena za den
rezervace_cekajici_1_zmena_tyden = Column(Integer) # pocet rezervaci cekajicich na ockovani (kalendar pro 1. davky) - zmena za tyden
rezervace_cekajici_2 = Column(Integer) # pocet rezervaci cekajicich na ockovani (kalendar pro 2. davky)
rezervace_cekajici_2_zmena_den = Column(Integer) # pocet rezervaci cekajicich na ockovani (kalendar pro 2. davky) - zmena za den
rezervace_cekajici_2_zmena_tyden = Column(Integer) # pocet rezervaci cekajicich na ockovani (kalendar pro 2. davky) - zmena za tyden
rezervace_kapacita = Column(Integer) # kapacita na aktualni den (vsechny kalendare)
rezervace_kapacita_zmena_den = Column(Integer) # kapacita na aktualni den (vsechny kalendare) - zmena za den
rezervace_kapacita_zmena_tyden = Column(Integer) # kapacita na aktualni den (vsechny kalendare) - zmena za tyden
rezervace_kapacita_1 = Column(Integer) # kapacita na aktualni den (kalendar pro 1. davky)
rezervace_kapacita_1_zmena_den = Column(Integer) # kapacita na aktualni den (kalendar pro 1. davky) - zmena za den
rezervace_kapacita_1_zmena_tyden = Column(Integer) # kapacita na aktualni den (kalendar pro 1. davky) - zmena za tyden
rezervace_kapacita_2 = Column(Integer) # kapacita na aktualni den (kalendar pro 2. davky)
rezervace_kapacita_2_zmena_den = Column(Integer) # kapacita na aktualni den (kalendar pro 2. davky) - zmena za den
rezervace_kapacita_2_zmena_tyden = Column(Integer) # kapacita na aktualni den (kalendar pro 2. davky) - zmena za tyden
registrace_celkem = Column(Integer) # pocet vsech registraci
registrace_celkem_zmena_den = Column(Integer) # pocet vsech registraci - zmena za den
registrace_celkem_zmena_tyden = Column(Integer) # pocet vsech registraci - zmena za tyden
registrace_fronta = Column(Integer) # pocet registraci bez rezervace
registrace_fronta_zmena_den = Column(Integer) # pocet registraci bez rezervace - zmena za den
registrace_fronta_zmena_tyden = Column(Integer) # pocet registraci bez rezervace - zmena za tyden
registrace_tydenni_uspesnost = Column(Float) # uspesnost rezervaci za poslednich 7 dni
registrace_tydenni_uspesnost_zmena_den = Column(Float) # uspesnost rezervaci za poslednich 7 dni - zmena za den
registrace_tydenni_uspesnost_zmena_tyden = Column(Float) # uspesnost rezervaci za poslednich 7 dni - zmena za tyden
registrace_14denni_uspesnost = Column(Float) # uspesnost rezervaci za poslednich 14 dni
registrace_14denni_uspesnost_zmena_den = Column(Float) # uspesnost rezervaci za poslednich 14 dni - zmena za den
registrace_14denni_uspesnost_zmena_tyden = Column(Float) # uspesnost rezervaci za poslednich 14 dni - zmena za tyden
registrace_30denni_uspesnost = Column(Float) # uspesnost | |
)
def test_particles_m003_particles_m003_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Elt:Sequence -
    RecurseAsIfGroup) element R derived by restriction from (Sequence) B :
    B's parent is choice, B's minOccurs=2, R's minOccurs=3
    """
    binding_kwargs = dict(
        schema="msData/particles/particlesM003.xsd",
        instance="msData/particles/particlesM003.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(**binding_kwargs)
def test_particles_m002_particles_m002_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Elt:Sequence -
    RecurseAsIfGroup) element R derived by restriction from (Sequence) B :
    B's parent is choice, B's minOccurs=2, R's minOccurs=2
    """
    binding_kwargs = dict(
        schema="msData/particles/particlesM002.xsd",
        instance="msData/particles/particlesM002.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(**binding_kwargs)
def test_particles_l029_particles_l029_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Elt:Choice -
    RecurseAsIfGroup) element R derived by restriction from (Choice) B :
    B's parent is sequence, B has element 'foo' with final=#all, R has
    element 'foo' with final=#all
    """
    binding_kwargs = dict(
        schema="msData/particles/particlesL029.xsd",
        instance="msData/particles/particlesL029.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(**binding_kwargs)
def test_particles_l028_particles_l028_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Elt:Choice -
    RecurseAsIfGroup) element R derived by restriction from (Choice) B :
    B's parent is sequence, B has element 'foo' with block=substitution, R
    has element 'foo' with block=substitution
    """
    binding_kwargs = dict(
        schema="msData/particles/particlesL028.xsd",
        instance="msData/particles/particlesL028.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(**binding_kwargs)
def test_particles_l025_particles_l025_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Elt:Choice -
    RecurseAsIfGroup) element R derived by restriction from (Choice) B :
    B's parent is sequence, B has element 'foo' with block=substitution, R
    has element 'foo' with block=#all
    """
    binding_kwargs = dict(
        schema="msData/particles/particlesL025.xsd",
        instance="msData/particles/particlesL025.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(**binding_kwargs)
def test_particles_l023_particles_l023_v(mode, save_output, output_format):
    """MS particles L023 — Particle Derivation OK (Elt:Choice - RecurseAsIfGroup):
    element R derived by restriction from (Choice) B; B's parent is a sequence,
    B has element 'foo' with block=restriction, R has element 'foo' with
    block=restriction.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesL023.xsd",
        instance="msData/particles/particlesL023.xml",
        **shared,
    )
def test_particles_l021_particles_l021_v(mode, save_output, output_format):
    """MS particles L021 — Particle Derivation OK (Elt:Choice - RecurseAsIfGroup):
    element R derived by restriction from (Choice) B; B's parent is a sequence,
    B has element 'foo' with block=restriction, R has element 'foo' with
    block=#all.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesL021.xsd",
        instance="msData/particles/particlesL021.xml",
        **shared,
    )
def test_particles_l018_particles_l018_v(mode, save_output, output_format):
    """MS particles L018 — Particle Derivation OK (Elt:Choice - RecurseAsIfGroup):
    element R derived by restriction from (Choice) B; B's parent is a sequence,
    B has element 'foo' with block=extension, R has element 'foo' with
    block=extension.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesL018.xsd",
        instance="msData/particles/particlesL018.xml",
        **shared,
    )
def test_particles_l017_particles_l017_v(mode, save_output, output_format):
    """MS particles L017 — Particle Derivation OK (Elt:Choice - RecurseAsIfGroup):
    element R derived by restriction from (Choice) B; B's parent is a sequence,
    B has element 'foo' with block=extension, R has element 'foo' with
    block=#all.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesL017.xsd",
        instance="msData/particles/particlesL017.xml",
        **shared,
    )
def test_particles_l013_particles_l013_v(mode, save_output, output_format):
    """MS particles L013 — Particle Derivation OK (Elt:Choice - RecurseAsIfGroup):
    element R derived by restriction from (Choice) B; B's parent is a sequence,
    B has element 'foo' with block=#all, R has element 'foo' with block=#all.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesL013.xsd",
        instance="msData/particles/particlesL013.xml",
        **shared,
    )
def test_particles_l012_particles_l012_v(mode, save_output, output_format):
    """MS particles L012 — Particle Derivation OK (Elt:Choice - RecurseAsIfGroup):
    element R derived by restriction from (Choice) B; B's parent is a sequence,
    B has element 'foo' with mixed=TRUE, R has element 'foo' with mixed=FALSE.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesL012.xsd",
        instance="msData/particles/particlesL012.xml",
        **shared,
    )
def test_particles_l007_particles_l007_v(mode, save_output, output_format):
    """MS particles L007 — Particle Derivation OK (Elt:Choice - RecurseAsIfGroup):
    element R derived by restriction from (Choice) B; B's parent is a sequence,
    B's minOccurs and maxOccurs are absent, R's maxOccurs=1.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesL007.xsd",
        instance="msData/particles/particlesL007.xml",
        **shared,
    )
def test_particles_l006_particles_l006_v(mode, save_output, output_format):
    """MS particles L006 — Particle Derivation OK (Elt:Choice - RecurseAsIfGroup):
    element R derived by restriction from (Choice) B; B's parent is a sequence,
    B's minOccurs and maxOccurs are absent, R's minOccurs=1.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesL006.xsd",
        instance="msData/particles/particlesL006.xml",
        **shared,
    )
def test_particles_l003_particles_l003_v(mode, save_output, output_format):
    """MS particles L003 — Particle Derivation OK (Elt:Choice - RecurseAsIfGroup):
    element R derived by restriction from (Choice) B; B's parent is a sequence,
    B's minOccurs=2, R's maxOccurs=2.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesL003.xsd",
        instance="msData/particles/particlesL003.xml",
        **shared,
    )
def test_particles_k008_particles_k008_v(mode, save_output, output_format):
    """MS particles K008 — Particle Derivation OK (Elt:All - RecurseAsIfGroup):
    element R derived by restriction from (all) B; R has an element
    (minOccurs=maxOccurs=1) from a namespace different from the targetNS.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesK008.xsd",
        instance="msData/particles/particlesK008.xml",
        **shared,
    )
def test_particles_k005_particles_k005_v(mode, save_output, output_format):
    """MS particles K005 — Particle Derivation OK (Elt:All - RecurseAsIfGroup):
    element R derived by restriction from (all) B; B's minOccurs=0,
    B's maxOccurs absent, R's minOccurs=1, R's maxOccurs=1.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesK005.xsd",
        instance="msData/particles/particlesK005.xml",
        **shared,
    )
def test_particles_k003_particles_k003_v(mode, save_output, output_format):
    """MS particles K003 — Particle Derivation OK (Elt:All - RecurseAsIfGroup):
    element R derived by restriction from (all) B; B's minOccurs=1,
    B's maxOccurs absent, R's minOccurs absent, R's maxOccurs=1.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesK003.xsd",
        instance="msData/particles/particlesK003.xml",
        **shared,
    )
def test_particles_k002_particles_k002_v(mode, save_output, output_format):
    """MS particles K002 — Particle Derivation OK (Elt:All - RecurseAsIfGroup):
    element R derived by restriction from (all) B; B's minOccurs absent,
    B's maxOccurs=1, R's minOccurs=1, R's maxOccurs absent.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesK002.xsd",
        instance="msData/particles/particlesK002.xml",
        **shared,
    )
def test_particles_k001_particles_k001_v(mode, save_output, output_format):
    """MS particles K001 — Particle Derivation OK (Elt:All - RecurseAsIfGroup):
    element R derived by restriction from (all) B; both B and R have
    minOccurs=1 and maxOccurs=1.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesK001.xsd",
        instance="msData/particles/particlesK001.xml",
        **shared,
    )
def test_particles_ju003_particles_ju003_v(mode, save_output, output_format):
    """MS particles Ju003 — Particle Derivation OK (Elt:Any): element R derived
    by restriction from wildcard (any) B; B's namespace is the list (##local,
    ##targetNamespace, nsFoo, nsBar), R's targetNamespace=targetNamespace,
    B's minOccurs < R's minOccurs, B's maxOccurs > R's maxOccurs.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesJu003.xsd",
        instance="msData/particles/particlesJu003.xml",
        **shared,
    )
def test_particles_ju002_particles_ju002_v(mode, save_output, output_format):
    """MS particles Ju002 — Particle Derivation OK (Elt:Any): element R derived
    by restriction from wildcard (any) B; B's namespace is the list (##local,
    ##targetNamespace, nsFoo, nsBar), R's targetNamespace=nsFoo,
    B's minOccurs < R's minOccurs, B's maxOccurs > R's maxOccurs.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesJu002.xsd",
        instance="msData/particles/particlesJu002.xml",
        **shared,
    )
def test_particles_ju001_particles_ju001_v(mode, save_output, output_format):
    """MS particles Ju001 — Particle Derivation OK (Elt:Any): element R derived
    by restriction from wildcard (any) B; B's namespace is the list (##local,
    ##targetNamespace, nsFoo, nsBar), R's targetNamespace is absent,
    B's minOccurs < R's minOccurs, B's maxOccurs > R's maxOccurs.
    """
    shared = dict(
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
    assert_bindings(
        schema="msData/particles/particlesJu001.xsd",
        instance="msData/particles/particlesJu001.xml",
        **shared,
    )
def test_particles_js001_particles_js001_v(mode, | |
<gh_stars>0
# Generated with love
import typing
from vkbottle.types import responses
from .access import APIAccessibility
from .method import BaseMethod
class FriendsAdd(BaseMethod):
    kwargs: dict = {}
    access_token_type: APIAccessibility = [APIAccessibility.USER]

    async def __call__(
        self, user_id: int = None, text: str = None, follow: bool = None
    ) -> responses.friends.Add:
        """friends.add — approve or create a friend request. User token only.

        :param user_id: ID of the user whose request is approved or who is sent a request.
        :param text: optional request message (up to 500 characters).
        :param follow: '1' to move an incoming request to the followers list.
        """
        merged = {**locals(), **self.kwargs}
        params = {}
        for name, value in merged.items():
            if name == "self" or value is None:
                continue
            # A trailing "_" lets callers use names that shadow Python keywords.
            params[name[:-1] if name.endswith("_") else name] = value
        return await self.request(
            "friends.add", params, response_model=responses.friends.AddModel
        )
class FriendsAddList(BaseMethod):
    kwargs: dict = {}
    access_token_type: APIAccessibility = [APIAccessibility.USER]

    async def __call__(
        self, name: str, user_ids: typing.List = None
    ) -> responses.friends.AddList:
        """friends.addList — create a new friend list for the current user. User token only.

        :param name: name of the friend list.
        :param user_ids: IDs of users to add to the list.
        """
        merged = {**locals(), **self.kwargs}
        params = {}
        for key, value in merged.items():
            if key == "self" or value is None:
                continue
            # A trailing "_" lets callers use names that shadow Python keywords.
            params[key[:-1] if key.endswith("_") else key] = value
        return await self.request(
            "friends.addList", params, response_model=responses.friends.AddListModel
        )
class FriendsAreFriends(BaseMethod):
    kwargs: dict = {}
    access_token_type: APIAccessibility = [APIAccessibility.USER]

    async def __call__(
        self, user_ids: typing.List, need_sign: bool = None
    ) -> responses.friends.AreFriends:
        """friends.areFriends — check the current user's friendship status with
        the given users. User token only.

        :param user_ids: IDs of the users whose friendship status to check.
        :param need_sign: '1' to also return a 'sign' field
            (md5 of "{id}_{user_id}_{friends_status}_{application_secret}")
            so clients can verify the data was not tampered with. Default '0'.
        """
        merged = {**locals(), **self.kwargs}
        params = {}
        for key, value in merged.items():
            if key == "self" or value is None:
                continue
            # A trailing "_" lets callers use names that shadow Python keywords.
            params[key[:-1] if key.endswith("_") else key] = value
        return await self.request(
            "friends.areFriends",
            params,
            response_model=responses.friends.AreFriendsModel,
        )
class FriendsDelete(BaseMethod):
    kwargs: dict = {}
    access_token_type: APIAccessibility = [APIAccessibility.USER]

    async def __call__(self, user_id: int = None) -> responses.friends.Delete:
        """friends.delete — decline a friend request or remove a user from the
        current user's friend list. User token only.

        :param user_id: ID of the user to decline or remove.
        """
        merged = {**locals(), **self.kwargs}
        params = {}
        for key, value in merged.items():
            if key == "self" or value is None:
                continue
            # A trailing "_" lets callers use names that shadow Python keywords.
            params[key[:-1] if key.endswith("_") else key] = value
        return await self.request(
            "friends.delete", params, response_model=responses.friends.DeleteModel
        )
class FriendsDeleteAllRequests(BaseMethod):
    kwargs: dict = {}
    access_token_type: APIAccessibility = [APIAccessibility.USER]

    async def __call__(self,) -> responses.ok_response.OkResponse:
        """friends.deleteAllRequests — mark all incoming friend requests as
        viewed. User token only.
        """
        merged = {**locals(), **self.kwargs}
        params = {}
        for key, value in merged.items():
            if key == "self" or value is None:
                continue
            # A trailing "_" lets callers use names that shadow Python keywords.
            params[key[:-1] if key.endswith("_") else key] = value
        return await self.request(
            "friends.deleteAllRequests",
            params,
            response_model=responses.ok_response.OkResponseModel,
        )
class FriendsDeleteList(BaseMethod):
    kwargs: dict = {}
    access_token_type: APIAccessibility = [APIAccessibility.USER]

    async def __call__(self, list_id: int) -> responses.ok_response.OkResponse:
        """friends.deleteList — delete one of the current user's friend lists.
        User token only.

        :param list_id: ID of the friend list to delete.
        """
        merged = {**locals(), **self.kwargs}
        params = {}
        for key, value in merged.items():
            if key == "self" or value is None:
                continue
            # A trailing "_" lets callers use names that shadow Python keywords.
            params[key[:-1] if key.endswith("_") else key] = value
        return await self.request(
            "friends.deleteList",
            params,
            response_model=responses.ok_response.OkResponseModel,
        )
class FriendsEdit(BaseMethod):
    kwargs: dict = {}
    access_token_type: APIAccessibility = [APIAccessibility.USER]

    async def __call__(
        self, user_id: int, list_ids: typing.List = None
    ) -> responses.ok_response.OkResponse:
        """friends.edit — edit the friend-list membership of the selected user.
        User token only.

        :param user_id: ID of the user whose list membership is edited.
        :param list_ids: IDs of the friend lists to add the user to.
        """
        merged = {**locals(), **self.kwargs}
        params = {}
        for key, value in merged.items():
            if key == "self" or value is None:
                continue
            # A trailing "_" lets callers use names that shadow Python keywords.
            params[key[:-1] if key.endswith("_") else key] = value
        return await self.request(
            "friends.edit", params, response_model=responses.ok_response.OkResponseModel
        )
class FriendsEditList(BaseMethod):
    kwargs: dict = {}
    access_token_type: APIAccessibility = [APIAccessibility.USER]

    async def __call__(
        self,
        list_id: int,
        name: str = None,
        user_ids: typing.List = None,
        add_user_ids: typing.List = None,
        delete_user_ids: typing.List = None,
    ) -> responses.ok_response.OkResponse:
        """friends.editList — edit a friend list of the current user. User token only.

        :param list_id: friend list ID.
        :param name: name of the friend list.
        :param user_ids: full replacement set of user IDs for the list.
        :param add_user_ids: user IDs to add (used when 'user_ids' is not set).
        :param delete_user_ids: user IDs to remove (used when 'user_ids' is not set).
        """
        merged = {**locals(), **self.kwargs}
        params = {}
        for key, value in merged.items():
            if key == "self" or value is None:
                continue
            # A trailing "_" lets callers use names that shadow Python keywords.
            params[key[:-1] if key.endswith("_") else key] = value
        return await self.request(
            "friends.editList",
            params,
            response_model=responses.ok_response.OkResponseModel,
        )
class FriendsGet(BaseMethod):
    kwargs: dict = {}
    access_token_type: APIAccessibility = [
        APIAccessibility.USER,
        APIAccessibility.SERVICE,
    ]

    async def __call__(
        self,
        user_id: int = None,
        order: str = None,
        list_id: int = None,
        count: int = None,
        offset: int = None,
        fields: typing.List = None,
        name_case: str = None,
        ref: str = None,
    ) -> responses.friends.Get:
        """friends.get — list a user's friend IDs or detailed friend profiles.
        User or service token.

        :param user_id: user ID; defaults to the current user.
        :param order: sort order: 'name' (only with 'fields') or 'hints'
            (rating order); desktop applications only.
        :param list_id: source friend-list ID from friends.getLists; only
            honoured for the current user; desktop applications only.
        :param count: number of friends to return.
        :param offset: offset for paging through friends.
        :param fields: profile fields to return (e.g. 'uid', 'first_name',
            'last_name', 'nickname', 'sex', 'bdate', 'city', 'country', ...).
        :param name_case: declension case for names: 'nom', 'gen', 'dat',
            'acc', 'ins' or 'abl'.
        :param ref:
        """
        merged = {**locals(), **self.kwargs}
        params = {}
        for key, value in merged.items():
            if key == "self" or value is None:
                continue
            # A trailing "_" lets callers use names that shadow Python keywords.
            params[key[:-1] if key.endswith("_") else key] = value
        return await self.request(
            "friends.get", params, response_model=responses.friends.GetModel
        )
class FriendsGetAppUsers(BaseMethod):
    kwargs: dict = {}
    access_token_type: APIAccessibility = [APIAccessibility.USER]

    async def __call__(self,) -> responses.friends.GetAppUsers:
        """friends.getAppUsers — list the current user's friends who installed
        this application. User token only.
        """
        merged = {**locals(), **self.kwargs}
        params = {}
        for key, value in merged.items():
            if key == "self" or value is None:
                continue
            # A trailing "_" lets callers use names that shadow Python keywords.
            params[key[:-1] if key.endswith("_") else key] = value
        return await self.request(
            "friends.getAppUsers",
            params,
            response_model=responses.friends.GetAppUsersModel,
        )
class FriendsGetByPhones(BaseMethod):
kwargs: dict = {}
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self, phones: typing.List = None, fields: typing.List = None
) -> responses.friends.GetByPhones:
""" friends.getByPhones
From Vk Docs: Returns a list of the current user's friends whose phone numbers, validated or specified in a profile, are in a given list.
Access from user token(s)
:param phones: List of phone numbers in MSISDN format (maximum 1000). Example: "+79219876543,+79111234567"
:param fields: Profile fields to return. Sample values: 'nickname', 'screen_name', 'sex', 'bdate' (birthdate), 'city', | |
<reponame>alexkreidler/scholarphi<filename>data-processing/entities/definitions/model/trainer.py
import logging
import os
import shutil
from collections import Counter
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import AdamW, get_linear_schedule_with_warmup
from .utils import compute_metrics
class Trainer(object):
def __init__(
self,
args: List[Any],
model: Any,
train_dataset: Optional[TensorDataset] = None,
dev_dataset: Optional[TensorDataset] = None,
test_dataset: Optional[TensorDataset] = None,
) -> None:
self.args, self.model_args, self.data_args = args
self.train_dataset = train_dataset
self.dev_dataset = dev_dataset
self.test_dataset = test_dataset
# Use cross entropy ignore index as padding label id so that only real label IDs contribute to the loss later.
self.pad_token_label_id = self.args.ignore_index
self.slot_label_lst = model.slot_label_lst
self.model = model
# Determine whether to use GPU or CPU.
self.device = (
"cuda" if torch.cuda.is_available() and not self.args.no_cuda else "cpu"
)
self.model.to(self.device)
def train(self) -> Tuple[int, float]:
train_sampler = RandomSampler(self.train_dataset)
train_dataloader = DataLoader(
self.train_dataset,
sampler=train_sampler,
batch_size=self.args.train_batch_size,
)
if self.args.max_steps > 0:
t_total = self.args.max_steps
self.args.num_train_epochs = (
self.args.max_steps
// (len(train_dataloader) // self.args.gradient_accumulation_steps)
+ 1
)
else:
t_total = (
len(train_dataloader)
// self.args.gradient_accumulation_steps
* self.args.num_train_epochs
)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in self.model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": self.args.weight_decay,
},
{
"params": [
p
for n, p in self.model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=self.args.learning_rate,
eps=self.args.adam_epsilon,
)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=t_total,
)
# Train the model
logging.info( # pylint: disable=logging-not-lazy
"Running training, with number examples = %d, "
+ "number of epochs = %d, gradient accumulation steps = %d, "
+ "train batch size = %d, optimization steps = %d, "
+ "logging steps = %d, save steps = %d",
len(self.train_dataset),
self.args.num_train_epochs,
self.args.gradient_accumulation_steps,
t_total,
self.args.logging_steps,
self.args.save_steps,
)
global_step = 0
tr_loss = 0.0
dev_score_history, dev_step_history = List[float], List[float]
self.model.zero_grad()
train_iterator = trange(int(self.args.num_train_epochs), desc="Epoch")
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration")
for step, batch in enumerate(epoch_iterator):
self.model.train()
batch = tuple(t.to(self.device) for t in batch) # GPU or CPU
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"intent_label_ids": batch[3],
"slot_labels_ids": batch[4],
}
if self.model.config.model_type != "distilbert":
inputs["token_type_ids"] = batch[2]
if self.args.use_pos:
inputs["pos_label_ids"] = batch[5]
if self.args.use_np:
inputs["np_label_ids"] = batch[6]
if self.args.use_vp:
inputs["vp_label_ids"] = batch[7]
if self.args.use_entity:
inputs["entity_label_ids"] = batch[8]
if self.args.use_acronym:
inputs["acronym_label_ids"] = batch[9]
outputs = self.model(**inputs)
loss = outputs[0]
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
loss.backward()
epoch_iterator.set_description(
"step {}/{} loss={:.2f}".format(
step, global_step, tr_loss / (global_step + 1)
)
)
tr_loss += loss.item()
if (step + 1) % self.args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.args.max_grad_norm
)
# Update learning rate schedule.
optimizer.step()
scheduler.step()
self.model.zero_grad()
global_step += 1
if (
self.args.logging_steps > 0
and global_step % self.args.logging_steps == 0
):
results = self.evaluate("dev")
result_dict = {
"model": self.model_args.model_name_or_path,
"use_pos": self.args.use_pos,
"use_np": self.args.use_np,
"use_vp": self.args.use_vp,
"use_entity": self.args.use_entity,
"use_acronym": self.args.use_acronym,
"global_step": global_step,
}
for k, v in results.items():
if type(v) == list:
for lidx, vone in enumerate(v):
result_dict["{}_{}".format(k, lidx)] = vone
else:
result_dict[k] = v
# Save model.
dev_score = result_dict["slot_f1_macro"]
if global_step == self.args.logging_steps or float(
dev_score
) > max(dev_score_history):
self.save_model()
logging.info(
"New best model saved at step %d: %f",
global_step,
dev_score,
)
dev_score_history += [dev_score]
dev_step_history += [global_step]
result_dict["best_slot_f1_macro"] = max(dev_score_history)
result_dict["best_global_step"] = dev_step_history[
dev_score_history.index(result_dict["best_slot_f1_macro"])
]
# Save log
filename = os.path.join(
"logs",
"logs_train_{}_{}.txt".format(
self.data_args.kfold, self.model_args.model_name_or_path
),
)
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, "a") as f:
if self.data_args.kfold == 0:
f.write("{}\n".format("\t".join(result_dict.keys())))
f.write(
"{}\n".format(
"\t".join([str(v) for v in result_dict.values()])
)
)
if 0 < self.args.max_steps < global_step:
epoch_iterator.close()
break
if 0 < self.args.max_steps < global_step:
train_iterator.close()
break
return global_step, tr_loss / global_step
def heuristic_filters(
self,
intent_preds: List[int],
intent_labels: List[int],
slot_preds: List[List[str]],
slot_labels: List[List[str]],
) -> Tuple[List[int], List[List[str]]]:
"""
filter out term/definition only cases
filter out multi-term/definition cases
threshold inclusion
"""
new_intent_preds, new_slot_preds = [], []
# Never use {intent,slot}_label in heuristic filtering, but they are
# only used for sanity checking.
for intent_pred, intent_label, slot_pred, slot_label in zip(
intent_preds, intent_labels, slot_preds, slot_labels
):
new_slot_pred = slot_pred
new_intent_pred = intent_pred
# 1. [slot] Filter out term / definition only cases.
pred_counter = dict(Counter(slot_pred))
term_exist, def_exist = False, False
for c in pred_counter:
if c.endswith("TERM"):
term_exist = True
if c.endswith("DEF"):
def_exist = True
if not term_exist and def_exist:
new_slot_pred = ["O" for p in slot_pred]
# 2. [intent] Change intent label if no term + def detected.
if (not term_exist and not def_exist) or (term_exist and not def_exist):
new_intent_pred = 0
# 3. [slot] Replace UNK with O.
new_slot_pred = ["O" if sp == "UNK" else sp for sp in new_slot_pred]
# 4. [slot] Fill out missing term/def within threshold.
temp_new_slot_pred = new_slot_pred.copy()
for sid, sp in enumerate(temp_new_slot_pred):
if sid < len(new_slot_pred) - 2 and sp.endswith("TERM"):
if temp_new_slot_pred[sid + 1] == "O" and temp_new_slot_pred[
sid + 2
].endswith("TERM"):
new_slot_pred[sid + 1] = "I-TERM"
temp_new_slot_pred = new_slot_pred.copy()
for sid, sp in enumerate(temp_new_slot_pred):
if sid < len(new_slot_pred) - 2 and sp.endswith("DEF"):
if temp_new_slot_pred[sid + 1] == "O" and temp_new_slot_pred[
sid + 2
].endswith("DEF"):
new_slot_pred[sid + 1] = "I-DEF"
temp_new_slot_pred = new_slot_pred.copy()
for sid, sp in enumerate(temp_new_slot_pred):
if sid < len(new_slot_pred) - 3 and sp.endswith("DEF"):
if (
temp_new_slot_pred[sid + 1] == "O"
and temp_new_slot_pred[sid + 2] == "O"
and temp_new_slot_pred[sid + 3].endswith("DEF")
):
new_slot_pred[sid + 1] = "I-DEF"
new_slot_pred[sid + 2] = "I-DEF"
# 5. Change I-TERM I-DEF starting cases.
temp_new_slot_pred = new_slot_pred.copy()
term_start, def_start = False, False
for sid, sp in enumerate(temp_new_slot_pred):
if not term_start and sp == "I-TERM":
new_slot_pred[sid] = "B-TERM"
if sp.endswith("TERM"):
term_start = True
else:
term_start = False
if not def_start and sp == "I-DEF":
new_slot_pred[sid] = "B-DEF"
if sp.endswith("DEF"):
def_start = True
else:
def_start = False
logging.debug(
"Prediction: %s -> %s %s %s -> %s -> %s",
intent_pred,
new_intent_pred,
intent_label,
" ".join(slot_pred),
" ".join(new_slot_pred),
" ".join(slot_label),
)
new_intent_preds.append(new_intent_pred)
new_slot_preds.append(new_slot_pred)
return new_intent_preds, new_slot_preds
def evaluate_from_input(self, dataset: TensorDataset) -> Tuple[List[int], List[List[str]], List[List[float]]]:
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(
dataset, sampler=eval_sampler, batch_size=self.args.eval_batch_size
)
# Run evaluation.
eval_loss = 0.0
nb_eval_steps = 0
intent_preds = None
slot_preds = None
slot_conf = None
gold_intent_label_ids = []
gold_slot_labels_ids = []
self.model.eval()
for batch in eval_dataloader:
batch = tuple(t.to(self.device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"intent_label_ids": batch[3],
"slot_labels_ids": batch[4],
}
if self.model.config.model_type != "distilbert":
inputs["token_type_ids"] = batch[2]
if self.args.use_pos:
inputs["pos_label_ids"] = batch[5]
if self.args.use_np:
inputs["np_label_ids"] = batch[6]
if self.args.use_vp:
inputs["vp_label_ids"] = batch[7]
if self.args.use_entity:
inputs["entity_label_ids"] = batch[8]
if self.args.use_acronym:
inputs["acronym_label_ids"] = batch[9]
outputs = self.model(**inputs)
tmp_eval_loss, (intent_logits, slot_logits) = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
# Predict intent.
intent_probs = torch.softmax(intent_logits, dim=1).detach().cpu().numpy()
if intent_preds is None:
intent_preds = intent_logits.detach().cpu().numpy()
gold_intent_label_ids = (
inputs["intent_label_ids"].detach().cpu().numpy()
)
else:
intent_preds = np.append(
intent_preds, intent_logits.detach().cpu().numpy(), axis=0
)
gold_intent_label_ids = np.append(
gold_intent_label_ids,
inputs["intent_label_ids"].detach().cpu().numpy(),
axis=0,
)
# Predict slots.
slot_probs = torch.softmax(slot_logits,dim=2).detach().cpu().numpy()
if slot_preds is None:
if self.args.use_crf:
slot_logits_crf = np.array(self.model.crf.decode(slot_logits))
# Decode() in `torchcrf` returns list with best index directly.
slot_preds = slot_logits_crf
# Get confidence from softmax.
I, J = np.ogrid[:slot_logits_crf.shape[0], :slot_logits_crf.shape[1]]
slot_conf = slot_probs[I, J, slot_logits_crf]
else:
slot_preds = slot_logits.detach().cpu().numpy()
gold_slot_labels_ids = inputs["slot_labels_ids"].detach().cpu().numpy()
else:
if self.args.use_crf:
slot_logits_crf = np.array(self.model.crf.decode(slot_logits))
slot_preds = np.append(slot_preds, slot_logits_crf, axis=0)
# Get confidence from softmax.
I, J = np.ogrid[:slot_logits_crf.shape[0], :slot_logits_crf.shape[1]]
slot_conf = np.append(slot_conf, slot_probs[I, J, slot_logits_crf], axis=0)
else:
slot_preds = np.append(
slot_preds, slot_logits.detach().cpu().numpy(), axis=0
)
gold_slot_labels_ids = np.append(
gold_slot_labels_ids,
inputs["slot_labels_ids"].detach().cpu().numpy(),
axis=0,
)
eval_loss = eval_loss / nb_eval_steps
# Finally compute the intent.
intent_preds = np.argmax(intent_preds, axis=1)
# Finally compute the slots.
if not self.args.use_crf:
# Get confidence from softmax.
I, J = np.ogrid[:slot_preds.shape[0], :slot_preds.shape[1]]
slot_conf = slot_preds[I, J, np.argmax(slot_preds, axis=2)]
slot_preds = np.argmax(slot_preds, axis=2)
slot_label_map = {i: label for i, label in enumerate(self.slot_label_lst)}
gold_slot_num_batch = int(len(gold_slot_labels_ids))
gold_slot_num_length = int(len(gold_slot_labels_ids[0]))
gold_slot_label_list = [[] for _ in range(gold_slot_num_batch)]
slot_preds_list = [[] for _ in range(gold_slot_num_batch)]
slot_conf_list = [[] for _ in range(gold_slot_labels_ids.shape[0])]
for i in range(gold_slot_num_batch):
for j in range(gold_slot_num_length):
if gold_slot_labels_ids[i, j] != | |
"""Class to fit PEtab problem via time discretization in Julia
:Author: <NAME> <<EMAIL>>
:Date: 2020-04-15
:Copyright: 2020, <NAME>
:License: MIT
"""
import importlib
import libsbml
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import petab
import re
import sys
import warnings
import yaml
from julia.api import Julia
importlib.reload(libsbml)
class SBML2JuliaProblem(object):
"""Class to create and solve an optimization and retreive the results
"""
def __init__(self, petab_yaml, t_steps=None, n_starts=1, infer_ic_from_sbml=False,
             optimizer_options=None, custom_code_dict=None):
    """
    Args:
        petab_yaml (:obj:`str`): path to PEtab YAML file
        t_steps (:obj:`int`, optional): number of time-discretization steps;
            derived from the measurement table if `None`
        n_starts (:obj:`int`): number of multistarts
        infer_ic_from_sbml (:obj:`bool`): infer missing initial conditions from SBML
        optimizer_options (:obj:`dict`, optional): optimization solver options
            (defaults to `{}`)
        custom_code_dict (:obj:`dict`, optional): dict with replaced code as keys
            and replacement code as values (defaults to `{}`)
    """
    # Mutable default arguments (`={}`) are shared across all calls; use
    # `None` sentinels and build a fresh dict per instance instead.
    if optimizer_options is None:
        optimizer_options = {}
    if custom_code_dict is None:
        custom_code_dict = {}
    print('Initialising problem...')
    self._calling_function = sys._getframe(1).f_code.co_name
    # While `_initialization` is True the property setters skip regenerating
    # the Julia code; it is regenerated once below when all attributes are set.
    self._initialization = True
    self._optimized = False
    self._files_written = False
    self._plotted = False
    self._jl = Julia(compiled_modules=False)
    self._petab_dirname = os.path.dirname(petab_yaml)
    self._set_petab_problem(petab_yaml)
    self.t_steps = t_steps
    self.n_starts = n_starts
    self.optimizer_options = optimizer_options
    self.infer_ic_from_sbml = infer_ic_from_sbml
    self._set_julia_code()
    self.custom_code_dict = custom_code_dict
    self._initialization = False
    print('Problem initialized.')
@property
def petab_yaml_dict(self):
"""Get petab_yaml_dict
Returns:
:obj:`dict`: petab_yaml_dict
"""
return self._petab_yaml_dict
@property
def petab_problem(self):
"""Get petab_problem
Returns:
:obj:`petab.problem.Problem`: petab problem
"""
return self._petab_problem
@property
def t_steps(self):
"""Get t_steps
Returns:
t_steps (:obj:`int`, optional): number of time-discretiation steps
"""
return self._t_steps
@t_steps.setter
def t_steps(self, value):
"""Set t_steps
Args:
value (:obj:`int`, optional): number of time-discretiation steps
Raises:
ValueError: if t_steps is not a positive integer.
"""
if value is None:
n_exp = len(set(self.petab_problem.measurement_df['time']))
if n_exp == 1:
value = 101
else:
value = int(np.ceil(100/(n_exp-1))*(n_exp-1) + 1)
if not (isinstance(value, int)) or (value <= 0):
raise ValueError('`t_steps` must be a positive integer.')
self._t_steps = value
if not self._initialization:
self._set_julia_code()
@property
def n_starts(self):
"""Get n_starts
Returns:
:obj:`int`: number of multistarts
"""
return self._n_starts
@n_starts.setter
def n_starts(self, value):
"""Set n_starts
Args:
value (:obj:`int`): number of multistarts
Raises:
ValueError: if n_starts is not a positive integer
"""
if not isinstance(value, int) or not (value > 0):
raise ValueError('`n_starts` must be a positive integer')
self._n_starts = value
if not self._initialization:
self._set_julia_code()
@property
def infer_ic_from_sbml(self):
"""Get infer_ic_from_sbml
Returns:
:obj:`bool`: if missing initial conditions shall be infered from SBML model
"""
return self._infer_ic_from_sbml
@infer_ic_from_sbml.setter
def infer_ic_from_sbml(self, value):
"""Set infer_ic_from_sbml
Args:
value (:obj:`bool): if missing initial conditions shall be infered from SBML model
Raises:
ValueError: if infer_ic_from_sbml is not boolean
"""
if not isinstance(value, bool):
raise ValueError('`infer_ic_from_sbml` must be boolean')
self._infer_ic_from_sbml = value
if not self._initialization:
self._set_julia_code()
@property
def optimizer_options(self):
"""Get optimizer_options
Returns:
:obj:`dict`: optimization solver options
"""
return self._optimizer_options
@optimizer_options.setter
def optimizer_options(self, value):
"""Set optimizer_options
Args:
value (:obj:`dict`): optimization solver options
Raises:
ValueError: if optimizer_options is not a dict
"""
if not isinstance(value, dict):
raise ValueError('`optimizer_options` must be a dictionary')
self._optimizer_options = value
if not self._initialization:
self._set_julia_code()
@property
def custom_code_dict(self):
"""Get custom_code_dict
Returns:
:obj:`dict`: custom code dict
"""
return self._custom_code_dict
@custom_code_dict.setter
def custom_code_dict(self, value):
"""Set custom_code_dict
Args:
value (:obj:`dict`): custom code dict
Raises:
ValueError: if custom_code_dict is not a dict
ValueError: if dict values are not strings
ValueError: if dict keys are not string
"""
if not isinstance(value, dict):
raise ValueError('`custom_code_dict` must be a dictionary')
for k in value.keys():
if not isinstance(k, str):
raise ValueError('Keys of `custom_code_dict` must be strings '
f'but `{k}` is `{type(k)}`.')
for v in value.values():
if not isinstance(v, str):
raise ValueError('Values of `custom_code_dict` must be strings '
f'but `{v}` is `{type(v)}`.')
self._custom_code_dict = value
if not self._initialization:
self._set_julia_code()
if value:
self.insert_custom_code(value)
@property
def julia_code(self):
"""Get julia_code
Returns:
:obj:`str`: julia code for optimization
"""
return self._julia_code
@property
def results(self):
"""Get results
Returns:
:obj:`dict`: optimization results
"""
return self._results
def import_julia_code(self, file):
"""Summary
Args:
file (TYPE): Description
"""
with open(file, 'r') as f:
self._julia_code = f.read()
def _set_petab_problem(self, petab_yaml):
    """Create a petab.problem.Problem from a PEtab YAML file and derive
    the variables needed for the PEtab-to-JuMP translation.

    Args:
        petab_yaml (:obj:`str`): path to PEtab YAML file
    """
    problem = petab.problem.Problem()
    problem = problem.from_yaml(petab_yaml)
    # lint_problem returns False when the problem is valid and raises otherwise.
    petab.lint.lint_problem(problem)
    self._check_for_not_implemented_features(problem)
    problem = self._sort_condition_df_problem(problem)
    self._petab_problem = problem
    (self._petab_yaml_dict, self._condition2index, self._j_to_parameters,
     self._n_conditions, self._condition_specific_pars, self._global_pars) = \
        self._get_translation_vars(petab_yaml, problem)
def _check_for_not_implemented_features(self, petab_problem):
"""Checks if the petab_problem contains not implemented features
Args:
petab_problem (:obj:`petab.problem.Problem`): PEtab problem
Raises:
NotImplementedError: if `time` column in measurement table contains `Inf`
NotImplementedError: if simulation conditions are associated with > 1 preequilibration
NotImplementedError: if conditions are used for both, preequilibration and simulation
NotImplementedError: if priors for noise parameters are provided
NotImplementedError: if priors for observable parameters are provided
"""
if np.inf in list(petab_problem.measurement_df['time']):
raise NotImplementedError('Fitting steady state problems is not implemented.') # Todo: consider implementing it.
t_conds = []
for obs, data_1 in petab_problem.measurement_df.groupby('observableId'):
t_conds.append(tuple(data_1['time']))
if 'preequilibrationConditionId' in petab_problem.measurement_df.columns \
and not petab_problem.measurement_df['preequilibrationConditionId'].empty:
p2c = petab_problem.measurement_df.loc[:, ['preequilibrationConditionId',
'simulationConditionId']].drop_duplicates()
for sCId, data in p2c.groupby('simulationConditionId'):
if len(data.index) > 1:
raise NotImplementedError(f'{sCId} must be assiciated with <= 1 '
'preequilibrationConditionIds. Please modify PEtab'
'problem accordingly.')
condition_conflicts = set(p2c['preequilibrationConditionId'])\
.intersection(set(p2c['simulationConditionId']))
if len(condition_conflicts) > 0:
raise NotImplementedError('The following conditions are used for both, simulation '
'and preequilibration. Please modify '
f'PEtab problem. {condition_conflicts}.')
noiseParameter_names = set()
if 'noiseParameters' in petab_problem.measurement_df.columns:
for vals in petab_problem.measurement_df['noiseParameters']:
pars = str(vals).rstrip(';').split(';')
for par in pars:
noiseParameter_names.add(par.strip())
observableParameter_names = set()
if 'observableParameters' in petab_problem.measurement_df.columns:
for vals in petab_problem.measurement_df['observableParameters']:
pars = str(vals).rstrip(';').split(';')
for par in pars:
observableParameter_names.add(par.strip())
if 'objectivePriorType' in petab_problem.parameter_df.columns:
for l, par in enumerate(petab_problem.parameter_df.index):
if par in noiseParameter_names and\
isinstance(petab_problem.parameter_df['objectivePriorType'][l], str):
raise NotImplementedError('Priors for noiseParameter '
'overrides are not implemented.')
if par in observableParameter_names and\
isinstance(petab_problem.parameter_df['objectivePriorType'][l], str):
raise NotImplementedError('Priors for observableParameter overrides are '
'not implemented.')
def _sort_condition_df_problem(self, petab_problem):
"""Sorts the rows of the contition table based on the first
occurence of the respective condition in the measurement table
Args:
petab_problem (:obj:`petab.problem.Problem`): PEtab problem
Returns:
:obj:`petab.problem.Problem`: PEtab problem
"""
idx = 1e6*np.ones(len(petab_problem.condition_df.index))
for i, cond in enumerate(petab_problem.measurement_df['simulationConditionId']
.drop_duplicates()):
for j, c in enumerate(petab_problem.condition_df.index):
if c == cond:
idx[j] = i
petab_problem.condition_df['sorting'] = idx
petab_problem.condition_df = petab_problem.condition_df\
.sort_values(by='sorting').drop(columns=['sorting'])
return petab_problem
def _get_translation_vars(self, petab_yaml, petab_problem):
    """Get variables required for the translation from PEtab to JuMP.

    Args:
        petab_yaml (:obj:`str`): path to PEtab YAML file
        petab_problem (:obj:`petab.problem.Problem`): PEtab problem

    Raises:
        SystemExit: if a `yaml.YAMLError` occurred

    Returns:
        :obj:`tuple`: (yaml_dict, condition2index, j_to_parameters, n_conditions,
        condition_specific_pars, global_pars)
    """
    with open(petab_yaml, 'r') as f:
        try:
            yaml_dict = yaml.safe_load(f)
        except yaml.YAMLError as error:
            raise SystemExit('Error occurred: {}'.format(str(error)))
    condition2index = {petab_problem.condition_df.index[i]: i
                       for i in range(len(petab_problem.condition_df.index))}
    # Default: every condition is simulated (1-based for Julia) with no
    # preequilibration.
    simulationConditionIdx = list(np.arange(len(condition2index))+1)
    preequilibrationConditionIdx = ['']*len(condition2index)
    # Use the `petab_problem` argument consistently; the original mixed it
    # with `self.petab_problem` (the same object at the only call site).
    if 'preequilibrationConditionId' in petab_problem.measurement_df.columns\
            and not petab_problem.measurement_df['preequilibrationConditionId'].empty:
        p2c = petab_problem.measurement_df\
            .loc[:, ['preequilibrationConditionId', 'simulationConditionId']].drop_duplicates()
        simulationConditionIdx = [condition2index[c]+1 for c in p2c['simulationConditionId']]
        # Non-string entries (NaN) mean "no preequilibration" for that condition.
        preequilibrationConditionIdx = [condition2index[c]+1 if isinstance(c, str)
                                        else '' for c in p2c['preequilibrationConditionId']]
    j_to_parameters = (simulationConditionIdx, preequilibrationConditionIdx)
    n_conditions = len(petab_problem.condition_df.index)
    condition_specific_pars = {}
    for parameter in petab_problem.condition_df.columns:
        if parameter != 'conditionName':
            condition_specific_pars[parameter] = [val for val in
                                                  petab_problem.condition_df[parameter]]
    global_pars = {}
    for parameter in petab_problem.parameter_df.index:
        global_pars[parameter] = petab_problem.parameter_df.loc[parameter, 'estimate']
    return (yaml_dict, condition2index, j_to_parameters, n_conditions,
            condition_specific_pars, global_pars)
def insert_custom_code(self, custom_code_dict):
"""Inserts custom code into Julia code
Args:
custom_code_dict (:obj:`dict`): dict with replaced code as keys
and replacement code as values
"""
positions = custom_code_dict.keys()
code = self.julia_code
for pos in positions:
code = re.sub(pos, custom_code_dict[pos], code)
self._julia_code = code
def write_jl_file(self, path=os.path.join('.', 'julia_code.jl')):
"""Write code to julia file
Args:
path (:obj:`str`, optional): path to output Julia file
"""
with open(path, 'w') as f:
f.write(self.julia_code)
self._julia_file = path
self._files_written = True
def optimize(self):
"""Optimize SBML2JuliaProblem
Returns:
:obj:`dict`: Results in a dict with keys 'species',
'observables', 'parameters' and 'par_est'
"""
print('Entering Julia for optimization...')
self._results_all = self._jl.eval(self.julia_code)
print('Results transferred. Exited Julia.')
self._best_iter = min(self._results_all['objective_value'],
key=self._results_all['objective_value'].get)
self._results = {}
self._results['par_est'] = self._get_param_ratios(self._results_all['parameters'])
self._results['species'] = self._results_to_frame(
self._results_all['species'][self._best_iter], variable_type='speciesId')
self._results['observables'] = self._results_to_frame(
self._results_all['observables'][self._best_iter], variable_type='observableId')
self.petab_problem.simulation_df = self._results['observables']
# Todo: remove the removal of the `observableParamters` column once the bug it causes in petab.calculate_llh is fixed.
cols = [not b for b in self.petab_problem.measurement_df.columns
.isin(['observableParameters'])] # , 'noiseParameters'])]
ndf = pd.DataFrame()
if 'noiseParameters' in self.petab_problem.measurement_df.columns:
ndf = self.petab_problem.measurement_df['noiseParameters']
try:
self._results['fval'] = -petab.calculate_llh(
self.petab_problem.measurement_df.loc[:, cols],
pd.concat([self.petab_problem.simulation_df.rename(
columns={'measurement': 'simulation'}), | |
archive_path=dataset_dir / (prefix + "p999758-p1020138.7z"),
page_ids=range(999758, 1020139),
darus_id=94139,
sha1="c471eae156fc86c8e2388165906b4cea62789b4b",
size=163440175,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1020139-p1038943.7z"),
page_ids=range(1020139, 1038944),
darus_id=92952,
sha1="a79452394d056b83804751b006130efa40f51866",
size=159754233,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1038944-p1061605.7z"),
page_ids=range(1038944, 1061606),
darus_id=92956,
sha1="fbdb3c2bd3a97c29729b440a5f0b643831c678be",
size=163428844,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1061606-p1087322.7z"),
page_ids=range(1061606, 1087323),
darus_id=92957,
sha1="d007845a095497f100af90b76841debc5025235d",
size=171711225,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1087323-p1110419.7z"),
page_ids=range(1087323, 1110420),
darus_id=92959,
sha1="f952ee93629bd336731ef666c4fc22197b46eec5",
size=166861072,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1110420-p1114931.7z"),
page_ids=range(1110420, 1114932),
darus_id=92961,
sha1="b1e8a6784782d723c09ac39a381fa77bed784c85",
size=34113337,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1114932-p1140511.7z"),
page_ids=range(1114932, 1140512),
darus_id=92962,
sha1="1c5829f4ae6bb467654f39025ff0869b7423451a",
size=177832765,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1140512-p1169996.7z"),
page_ids=range(1140512, 1169997),
darus_id=92966,
sha1="3dd88de8b61a10b3f03dbc952ec34cdeb9cb113d",
size=180358954,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1169997-p1199827.7z"),
page_ids=range(1169997, 1199828),
darus_id=92971,
sha1="c5b7670232f7e6af7562e882db9ed7e1c3aab6a3",
size=173696196,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1199828-p1213308.7z"),
page_ids=range(1199828, 1213309),
darus_id=92979,
sha1="1779e63159b5e0e045ac8e5b0f68bbb5e6a32500",
size=107602038,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1213309-p1241142.7z"),
page_ids=range(1213309, 1241143),
darus_id=92982,
sha1="53200b6b03b010c65f862b6cd1b2743ebec13613",
size=169901671,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1241143-p1266995.7z"),
page_ids=range(1241143, 1266996),
darus_id=92988,
sha1="1b2007e1900bc82118ffaaec613a875e5e07e04a",
size=164823697,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1266996-p1288714.7z"),
page_ids=range(1266996, 1288715),
darus_id=92993,
sha1="3f2b075b1fe45d7814d352326f8a99d73e7303f7",
size=163139473,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1288715-p1309479.7z"),
page_ids=range(1288715, 1309480),
darus_id=92998,
sha1="016c7a40c2243666d9b09a57166cd4cba57040c2",
size=163041015,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1309480-p1332609.7z"),
page_ids=range(1309480, 1332610),
darus_id=93001,
sha1="51b6ce594bef67e25059af65289c06faded92908",
size=164674250,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1332610-p1360939.7z"),
page_ids=range(1332610, 1360940),
darus_id=93006,
sha1="d6157ba9ee70ab3ad7da070721d6cabf5bfc3297",
size=171608841,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1360940-p1386989.7z"),
page_ids=range(1360940, 1386990),
darus_id=93011,
sha1="441b47302fae9f87a298f58baf21c9dd65938b7a",
size=170375893,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1386990-p1415334.7z"),
page_ids=range(1386990, 1415335),
darus_id=93015,
sha1="0204c77fe15df740ffb5acfee2806599148dd7ac",
size=170036527,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1415335-p1445937.7z"),
page_ids=range(1415335, 1445938),
darus_id=93019,
sha1="ed744d4e22a6ea54d1b62ec6916a7f9dcb22dfe2",
size=175400588,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1445938-p1475885.7z"),
page_ids=range(1445938, 1475886),
darus_id=93022,
sha1="be8bcf3e5286a8bd4f5b6bf0e6489ded793897ce",
size=173437181,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1475886-p1507223.7z"),
page_ids=range(1475886, 1507224),
darus_id=93025,
sha1="cd19db1e43d4013afc942eb23d619583aa32a1a8",
size=176111828,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1507224-p1532361.7z"),
page_ids=range(1507224, 1532362),
darus_id=93030,
sha1="9c7557997e63a6c7980271127b549992fcbfef79",
size=163392367,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1532362-p1556817.7z"),
page_ids=range(1532362, 1556818),
darus_id=93039,
sha1="b797411cd6c15e267cfab70fbdb7c89ad9583836",
size=164789926,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1556818-p1582119.7z"),
page_ids=range(1556818, 1582120),
darus_id=93045,
sha1="03704f97177a75d2a39c2ca6087550f553c5de89",
size=171608880,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1582120-p1612571.7z"),
page_ids=range(1582120, 1612572),
darus_id=93052,
sha1="d1a0aa8c2ce8523d7a4f9a906bdfdaf0e77b62d9",
size=176130362,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1612572-p1636811.7z"),
page_ids=range(1612572, 1636812),
darus_id=93061,
sha1="b0b50eb4b5f6d6d28d7aa7fbe1d869c739685258",
size=165817214,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1636812-p1665485.7z"),
page_ids=range(1636812, 1665486),
darus_id=93068,
sha1="7a50e492f5432c8e8d55d60e72605596ed8afc3a",
size=168975161,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1665486-p1693476.7z"),
page_ids=range(1665486, 1693477),
darus_id=93075,
sha1="e3a2086f854db81a7d1f823f76d785219d0497fe",
size=171656648,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1693477-p1719359.7z"),
page_ids=range(1693477, 1719360),
darus_id=93082,
sha1="ea17c5ba8c85587b39338c2edd94f0ddcc1c2605",
size=169955308,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1719360-p1750315.7z"),
page_ids=range(1719360, 1750316),
darus_id=93092,
sha1="1ab1ffd5042189738482557fa4325380e8f74b73",
size=174643582,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1750316-p1787442.7z"),
page_ids=range(1750316, 1787443),
darus_id=93101,
sha1="9eed1d1a93436d23215c1c864758ddb821a22c65",
size=186133230,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1787443-p1818652.7z"),
page_ids=range(1787443, 1818653),
darus_id=93108,
sha1="98f17e2d98dca413b5970ba7d0de87e254aee2ed",
size=169918080,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1818653-p1845570.7z"),
page_ids=range(1818653, 1845571),
darus_id=93115,
sha1="43fc31c2eb23ae54a4f255cccb893cab1987799e",
size=165323969,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1845571-p1872120.7z"),
page_ids=range(1845571, 1872121),
darus_id=93120,
sha1="cec707437f2b00bdba86ed088315924693604acc",
size=163017890,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1872121-p1901138.7z"),
page_ids=range(1872121, 1901139),
darus_id=93125,
sha1="13026be25aee12dcb856a68d4528079c7d809f43",
size=165751788,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1901139-p1928896.7z"),
page_ids=range(1901139, 1928897),
darus_id=93132,
sha1="f68d24a2eaccc4c1a2f50a7df31d565a5d07573a",
size=165149564,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1928897-p1956860.7z"),
page_ids=range(1928897, 1956861),
darus_id=93139,
sha1="112eb7bfa7a99686cfc966975432c9525743b7b0",
size=166294444,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1956861-p1984904.7z"),
page_ids=range(1956861, 1984905),
darus_id=93144,
sha1="ea06cabab4602e169435d059809c2a255c08dd6e",
size=164064371,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p1984905-p2009681.7z"),
page_ids=range(1984905, 2009682),
darus_id=93152,
sha1="8fbc4eb0108cd34aa73abe1a41905beff55a20a3",
size=161959930,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2009682-p2039654.7z"),
page_ids=range(2009682, 2039655),
darus_id=93159,
sha1="0fb16eccaaca109f579108fd0a601c186f26de76",
size=166059935,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2039655-p2049641.7z"),
page_ids=range(2039655, 2049642),
darus_id=93164,
sha1="252bb6fbd860475d9341b7f58693b30258b6c8bc",
size=52791240,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2049642-p2080529.7z"),
page_ids=range(2049642, 2080530),
darus_id=93166,
sha1="f9baf415770335bac5dc48aa5ce740bd76454973",
size=164984804,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2080530-p2109126.7z"),
page_ids=range(2080530, 2109127),
darus_id=93171,
sha1="25d5004766515d2ed75b71e9b02219625325c47e",
size=162247058,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2109127-p2138729.7z"),
page_ids=range(2109127, 2138730),
darus_id=93175,
sha1="5a3fa9d124a53aa16d789ad2c74a30e1f72c8816",
size=158357939,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2138730-p2167303.7z"),
page_ids=range(2138730, 2167304),
darus_id=93180,
sha1="b5dd74e808e4856c413bc30db6df083c15aa7696",
size=161072856,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2167304-p2197811.7z"),
page_ids=range(2167304, 2197812),
darus_id=93185,
sha1="b846833510c3b8359aec2fa4ae5b46a018f8da0a",
size=165469432,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2197812-p2227060.7z"),
page_ids=range(2197812, 2227061),
darus_id=93191,
sha1="db08189c797b9b09be942c43479fa1222bbe8930",
size=164531040,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2227061-p2257747.7z"),
page_ids=range(2227061, 2257748),
darus_id=93195,
sha1="1329f23eb92586bbea8ffc97f3958c9aa4e7a648",
size=165158518,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2257748-p2286775.7z"),
page_ids=range(2257748, 2286776),
darus_id=93200,
sha1="dc7841bfca747c2f12fff16acf4c30434b73dca5",
size=165409070,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2286776-p2317484.7z"),
page_ids=range(2286776, 2317485),
darus_id=93204,
sha1="1173f6f194079c9d8268188b8126b5691369af79",
size=165164581,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2317485-p2346122.7z"),
page_ids=range(2317485, 2346123),
darus_id=93211,
sha1="81a10ba21e6e6605473e504a5963b7d6d6f55708",
size=164317542,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2346123-p2376379.7z"),
page_ids=range(2346123, 2376380),
darus_id=93219,
sha1="6caa5af6ff8b94fdacb796bcf1ae65fd03a67793",
size=165914034,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2376380-p2404447.7z"),
page_ids=range(2376380, 2404448),
darus_id=93227,
sha1="e3eb81c09c7bdc4a2e9e4597792688657dabe338",
size=163304304,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2404448-p2436077.7z"),
page_ids=range(2404448, 2436078),
darus_id=93234,
sha1="1e3b414a6e1b65393bf5b7f808b72635bc26afbf",
size=166123819,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2436078-p2465829.7z"),
page_ids=range(2436078, 2465830),
darus_id=93239,
sha1="18d8b6587473cbfd43cbd504eec925141b995bf7",
size=165755294,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2465830-p2493357.7z"),
page_ids=range(2465830, 2493358),
darus_id=93245,
sha1="ea730bd207c4f8c4c75d64d317192d851532e223",
size=164931216,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2493358-p2521227.7z"),
page_ids=range(2493358, 2521228),
darus_id=93249,
sha1="2896daf19612eb43ae44877a409ab7b5132f3fad",
size=165105670,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2521228-p2545412.7z"),
page_ids=range(2521228, 2545413),
darus_id=93255,
sha1="a61aa6e310d6dc7a6b84cde6309557f03ddee5f5",
size=162754719,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2545413-p2571974.7z"),
page_ids=range(2545413, 2571975),
darus_id=93259,
sha1="782504378ff328ad9b295a3c53d337e8d8cc5e24",
size=161608632,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2571975-p2599280.7z"),
page_ids=range(2571975, 2599281),
darus_id=93265,
sha1="a16416ad93427fa3deef6b24a83b6af2dfa3f8bf",
size=158583427,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2599281-p2625525.7z"),
page_ids=range(2599281, 2625526),
darus_id=93271,
sha1="38a851f734b220bdf0b744db7a988666bf1945f6",
size=163711600,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2625526-p2658135.7z"),
page_ids=range(2625526, 2658136),
darus_id=93277,
sha1="5612721af5360dfab94adc236c07e65b32930b03",
size=167615200,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2658136-p2696517.7z"),
page_ids=range(2658136, 2696518),
darus_id=93285,
sha1="bb9d4603c2092695e318922d8e4484723890d0fb",
size=168692070,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2696518-p2729700.7z"),
page_ids=range(2696518, 2729701),
darus_id=93292,
sha1="244c5f67b42790259c9b8148de285381e19f4b37",
size=169245166,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2729701-p2765858.7z"),
page_ids=range(2729701, 2765859),
darus_id=93297,
sha1="9b3d203c46c92c1547078fb846dc0480ee9695bf",
size=170553846,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2765859-p2799420.7z"),
page_ids=range(2765859, 2799421),
darus_id=93301,
sha1="d3bdb9b68e5e3f1a579a765c7d993b5af38809d9",
size=172628396,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2799421-p2836050.7z"),
page_ids=range(2799421, 2836051),
darus_id=93304,
sha1="082f23903f1095c6386ce3d9645a658e4cab715c",
size=173842720,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2836051-p2874996.7z"),
page_ids=range(2836051, 2874997),
darus_id=93310,
sha1="b6e5aff4b7c83098f3ea94d28ca1e8cec3901978",
size=174008349,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2874997-p2905306.7z"),
page_ids=range(2874997, 2905307),
darus_id=93318,
sha1="5d6623eefbfffdfd1af56380a83799747d2922d0",
size=165523918,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2905307-p2940042.7z"),
page_ids=range(2905307, 2940043),
darus_id=93325,
sha1="df610383f403068ff2b5dac0c6e9514a9079ce82",
size=170002834,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2940043-p2968919.7z"),
page_ids=range(2940043, 2968920),
darus_id=93344,
sha1="e8b9db5e1a9ff5b4d2d013022ca6f0677f1ec71b",
size=167431876,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p2968920-p3000553.7z"),
page_ids=range(2968920, 3000554),
darus_id=93351,
sha1="f38928749dc6573f6d9b1dc62c829190e1a531f9",
size=168463058,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3000554-p3030061.7z"),
page_ids=range(3000554, 3030062),
darus_id=93360,
sha1="2ea53d11c950e1e8bedb496e9851f4cb13a2d419",
size=166143036,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3030062-p3058293.7z"),
page_ids=range(3030062, 3058294),
darus_id=93370,
sha1="2568181a1b1c68ed96a05bf1f1e7ad129beb39ac",
size=166254157,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3058294-p3097438.7z"),
page_ids=range(3058294, 3097439),
darus_id=93377,
sha1="07e0c1aba7bb7581d923e6ff2e3396ebe19f7be2",
size=177255290,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3097439-p3098446.7z"),
page_ids=range(3097439, 3098447),
darus_id=93382,
sha1="c6b5de1adf1feeb9c5e16be06ab79d48cf3de198",
size=5866527,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3098447-p3133745.7z"),
page_ids=range(3098447, 3133746),
darus_id=93383,
sha1="a7df085884158f35ee126a44b285465e52617260",
size=177610503,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3133746-p3161912.7z"),
page_ids=range(3133746, 3161913),
darus_id=93389,
sha1="2c889db624b1bc80f67933a2c76ae8135b103df3",
size=166096035,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3161913-p3197362.7z"),
page_ids=range(3161913, 3197363),
darus_id=93393,
sha1="3e78aefcfd07addcbcdc340cc13f8851a569531c",
size=171273201,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3197363-p3229529.7z"),
page_ids=range(3197363, 3229530),
darus_id=93402,
sha1="77491bf40cbce9cb21bcc2dde9d8883cf8880180",
size=169857315,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3229530-p3271270.7z"),
page_ids=range(3229530, 3271271),
darus_id=93410,
sha1="0ac9ec05ef466b18037a0605e6311980a55441a1",
size=177860659,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3271271-p3304285.7z"),
page_ids=range(3271271, 3304286),
darus_id=93414,
sha1="35a3ca702d140399639e7ab5ff73eccb688b83c2",
size=170590178,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3304286-p3342534.7z"),
page_ids=range(3304286, 3342535),
darus_id=93418,
sha1="a953f1191bac134a645b9735e33cec591e11510c",
size=175831961,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3342535-p3382245.7z"),
page_ids=range(3342535, 3382246),
darus_id=93422,
sha1="97a17911074b9acfe5eb29eea4d7a517c2000e70",
size=174933094,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3382246-p3420661.7z"),
page_ids=range(3382246, 3420662),
darus_id=93426,
sha1="5ab24647924371132e94bbc05b7fe999c247a1ea",
size=176764091,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3420662-p3455615.7z"),
page_ids=range(3420662, 3455616),
darus_id=93428,
sha1="10ed7880c1643ce4bd5405ff40cf41c8cb56e309",
size=169936583,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3455616-p3500266.7z"),
page_ids=range(3455616, 3500267),
darus_id=93432,
sha1="5d89ceb7d347277a9eacbe56f60eea0d89d3421a",
size=180064394,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3500267-p3539147.7z"),
page_ids=range(3500267, 3539148),
darus_id=93439,
sha1="7a0a22e1d7e330a4eabbfc07be567b95aca03b92",
size=170627431,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3539148-p3574949.7z"),
page_ids=range(3539148, 3574950),
darus_id=93449,
sha1="571821b8bd193ea25e5deb7f22e77a7082818ef3",
size=166299760,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3574950-p3602923.7z"),
page_ids=range(3574950, 3602924),
darus_id=93458,
sha1="bab3b0a6d51a5dd3548f450aa17cf7ba78dacfbd",
size=159381310,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3602924-p3637204.7z"),
page_ids=range(3602924, 3637205),
darus_id=93465,
sha1="8beafe4ca2dea140387ac2ef9fa3ecb22deebd7a",
size=168093805,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3637205-p3673059.7z"),
page_ids=range(3637205, 3673060),
darus_id=93473,
sha1="5ee9464c360ce2b2c16b9ec64c825721458c31fb",
size=170769920,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3673060-p3718186.7z"),
page_ids=range(3673060, 3718187),
darus_id=93481,
sha1="ca72e6e60d68f50e7fcd666e395db7d89d88fc93",
size=182752086,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3718187-p3757449.7z"),
page_ids=range(3718187, 3757450),
darus_id=93488,
sha1="f810c319fdf7daa962838dda9258639d62d61782",
size=171199201,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3757450-p3803372.7z"),
page_ids=range(3757450, 3803373),
darus_id=93498,
sha1="1526f3f580745bd5e45fc79b753a3cc9290e25c0",
size=178518562,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3803373-p3851412.7z"),
page_ids=range(3803373, 3851413),
darus_id=93509,
sha1="8c76a96bd9cabe18b4af91bb374674c0423b7474",
size=180261604,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3851413-p3893678.7z"),
page_ids=range(3851413, 3893679),
darus_id=93519,
sha1="445b543ec6e5f4b563562dc96ae2ca4c66a80f46",
size=189703889,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3893679-p3930350.7z"),
page_ids=range(3893679, 3930351),
darus_id=93530,
sha1="99571759c7beb3566445a9a3e03226ca9ac65f72",
size=183585939,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p3930351-p3971324.7z"),
page_ids=range(3930351, | |
<filename>pyhanko/sign/validation.py
import hashlib
import logging
import os
from collections import namedtuple
from dataclasses import dataclass
from dataclasses import field as data_field
from datetime import datetime
from enum import Enum, unique
from typing import IO, Iterator, Optional, Type, TypeVar, Union
from asn1crypto import cms, core
from asn1crypto import crl as asn1_crl
from asn1crypto import keys
from asn1crypto import ocsp as asn1_ocsp
from asn1crypto import pdf as asn1_pdf
from asn1crypto import tsp, x509
from asn1crypto.x509 import Certificate
from cryptography.hazmat.primitives import hashes
from pyhanko_certvalidator import CertificateValidator, ValidationContext
from pyhanko_certvalidator.path import ValidationPath
from pyhanko.pdf_utils import generic, misc
from pyhanko.pdf_utils.generic import pdf_name
from pyhanko.pdf_utils.incremental_writer import IncrementalPdfFileWriter
from pyhanko.pdf_utils.misc import DEFAULT_CHUNK_SIZE, OrderedEnum, get_and_apply
from pyhanko.pdf_utils.reader import PdfFileReader, XRefCache, process_data_at_eof
from pyhanko.pdf_utils.rw_common import PdfHandler
from .diff_analysis import (
DEFAULT_DIFF_POLICY,
DiffPolicy,
DiffResult,
ModificationLevel,
SuspiciousModification,
)
from .fields import (
FieldMDPSpec,
MDPPerm,
SeedLockDocument,
SigSeedSubFilter,
SigSeedValFlags,
SigSeedValueSpec,
)
from .general import (
DEFAULT_WEAK_HASH_ALGORITHMS,
KeyUsageConstraints,
MultivaluedAttributeError,
NonexistentAttributeError,
SignatureStatus,
SignatureValidationError,
UnacceptableSignerError,
extract_message_digest,
find_unique_cms_attribute,
get_pyca_cryptography_hash,
match_issuer_serial,
validate_sig_integrity,
)
from .timestamps import TimestampSignatureStatus
__all__ = [
'SignatureCoverageLevel', 'PdfSignatureStatus', 'DocumentTimestampStatus',
'StandardCMSSignatureStatus', 'ModificationInfo',
'EmbeddedPdfSignature', 'DocMDPInfo',
'RevocationInfoValidationType', 'VRI', 'DocumentSecurityStore',
'apply_adobe_revocation_info', 'get_timestamp_chain',
'read_certification_data', 'validate_pdf_ltv_signature',
'validate_pdf_signature', 'validate_cms_signature',
'validate_detached_cms', 'validate_pdf_timestamp',
'collect_validation_info',
'add_validation_info',
'ValidationInfoReadingError', 'SigSeedValueValidationError'
]
from ..pdf_utils.writer import BasePdfFileWriter
logger = logging.getLogger(__name__)
class ValidationInfoReadingError(ValueError):
    """Raised when validation info cannot be read from a document."""
class NoDSSFoundError(ValidationInfoReadingError):
    """Raised when the document does not contain a document security store."""

    def __init__(self):
        super().__init__("No DSS found")
class SigSeedValueValidationError(SignatureValidationError):
    """Error validating a signature's seed value constraints."""

    # TODO perhaps we can encode some more metadata here, such as the
    #  seed value that tripped the failure.

    def __init__(self, failure_message):
        # Keep a stringified copy of the failure for programmatic access,
        # but pass the original object through to the base exception.
        message_text = str(failure_message)
        self.failure_message = message_text
        super().__init__(failure_message)
def _get_signer_predicate(sid: cms.SignerIdentifier):
    """Build a predicate that matches certificates against a signer identifier.

    :param sid:
        The CMS ``SignerIdentifier`` taken from a ``SignerInfo``.
    :return:
        A callable taking a certificate and returning ``True`` iff the
        certificate is identified by ``sid``.
    :raises NotImplementedError:
        If the signer identifier uses a choice other than
        ``issuer_and_serial_number`` or ``subject_key_identifier``.
    """
    if sid.name == 'issuer_and_serial_number':
        return lambda c: match_issuer_serial(sid.chosen, c)
    elif sid.name == 'subject_key_identifier':
        # subject key identifier (not all certs have this, but that shouldn't
        # be an issue in this case)
        ski = sid.chosen.native
        logger.warning(
            "The signature in this file seems to be identified by a subject "
            "key identifier --- this is legal in CMS, but many PDF viewers and "
            "SDKs do not support this."
        )
        return lambda c: c.key_identifier == ski
    # Bug fix: previously raised a bare NotImplementedError with no message,
    # which made the failure hard to diagnose.
    raise NotImplementedError(
        f"Unsupported signer identifier type: {sid.name!r}"
    )
def partition_certs(certs, signer_info):
    """Split *certs* into the signer's certificate and all other certificates.

    The 'certificates' entry is defined as a set in PCKS#7, so no ordering
    assumptions can be made; every entry has to be checked against the
    signer identifier.

    :raises SignatureValidationError: if no certificate matches the signer.
    """
    matches_signer = _get_signer_predicate(signer_info['sid'])
    signer_cert = None
    other_certs = []
    for candidate in certs:
        if matches_signer(candidate):
            signer_cert = candidate
        else:
            other_certs.append(candidate)
    if signer_cert is None:
        raise SignatureValidationError(
            'signer certificate not included in signature'
        )
    return signer_cert, other_certs
StatusType = TypeVar('StatusType', bound=SignatureStatus)
def _extract_signer_info(signed_data: cms.SignedData) -> cms.SignerInfo:
    """Return the single ``SignerInfo`` entry in *signed_data*.

    :raises ValueError: if there is not exactly one signer.
    """
    try:
        (only_signer,) = signed_data['signer_infos']
    except ValueError:  # pragma: nocover
        raise ValueError(
            'signer_infos should contain exactly one entry'
        )
    return only_signer
def _extract_self_reported_ts(signer_info: cms.SignerInfo) \
        -> Optional[datetime]:
    """Return the signer-reported ``signing_time`` attribute, if present.

    ``None`` is returned when the attribute is absent or appears more than
    once; the self-reported time is informational only.
    """
    try:
        signed_attrs = signer_info['signed_attrs']
        reported = find_unique_cms_attribute(signed_attrs, 'signing_time')
        return reported.native
    except (NonexistentAttributeError, MultivaluedAttributeError):
        return None
def _extract_tst_data(signer_info, signed=False) -> Optional[cms.SignedData]:
    """Return the timestamp token's ``SignedData`` payload, if present.

    With ``signed=True``, look for a content timestamp among the signed
    attributes; otherwise look for a signature timestamp among the unsigned
    attributes. Returns ``None`` when the attribute is absent or duplicated.
    """
    try:
        if signed:
            attrs = signer_info['signed_attrs']
            token = find_unique_cms_attribute(attrs, 'content_time_stamp')
        else:
            attrs = signer_info['unsigned_attrs']
            token = find_unique_cms_attribute(
                attrs, 'signature_time_stamp_token'
            )
        return token['content']
    except (NonexistentAttributeError, MultivaluedAttributeError):
        return None
def _compute_tst_digest(signer_info: cms.SignerInfo) -> Optional[bytes]:
    """Digest the signature value with the hash algorithm declared in the
    embedded timestamp token's message imprint.

    Returns ``None`` when no signature timestamp token is embedded.
    """
    tst_signed_data = _extract_tst_data(signer_info)
    if tst_signed_data is None:
        return None
    # The token's message imprint tells us which digest algorithm was used.
    encap_info = tst_signed_data['encap_content_info']
    imprint = encap_info['content'].parsed['message_imprint']
    imprint_algo = imprint['hash_algorithm']['algorithm'].native
    digest = hashes.Hash(get_pyca_cryptography_hash(imprint_algo))
    digest.update(signer_info['signature'].native)
    return digest.finalize()
def _extract_signer_info_and_certs(signed_data: cms.SignedData):
    """Return ``(signer_info, signer_cert, other_certs)`` for *signed_data*."""
    signer_info = _extract_signer_info(signed_data)
    parsed_certs = [raw_cert.parse() for raw_cert in signed_data['certificates']]
    signer_cert, other_certs = partition_certs(parsed_certs, signer_info)
    return signer_info, signer_cert, other_certs
def _validate_cms_signature(signed_data: cms.SignedData,
                            status_cls: Type[StatusType] = SignatureStatus,
                            raw_digest: bytes = None,
                            validation_context: ValidationContext = None,
                            status_kwargs: dict = None,
                            key_usage_settings: KeyUsageConstraints = None,
                            encap_data_invalid=False):
    """
    Validate CMS and PKCS#7 signatures.

    Shared workhorse behind :func:`validate_cms_signature`: runs the
    cryptographic integrity check first, then — only if the signature is
    valid so far — the trust/revocation check, and returns the accumulated
    keyword arguments for a ``status_cls`` instance instead of constructing
    one itself.
    """
    signer_info, cert, other_certs = _extract_signer_info_and_certs(signed_data)

    # Weak-digest blacklist: prefer the validation context's setting, fall
    # back to the module default when it is absent or unset.
    weak_hash_algos = None
    if validation_context is not None:
        weak_hash_algos = validation_context.weak_hash_algos
    if weak_hash_algos is None:
        weak_hash_algos = DEFAULT_WEAK_HASH_ALGORITHMS

    signature_algorithm: cms.SignedDigestAlgorithm = \
        signer_info['signature_algorithm']
    mechanism = signature_algorithm['algorithm'].native
    md_algorithm = signer_info['digest_algorithm']['algorithm'].native
    eci = signed_data['encap_content_info']
    expected_content_type = eci['content_type'].native
    if raw_digest is None:
        # this means that there should be encapsulated data
        # (enveloping signature: digest the embedded content ourselves)
        raw = bytes(eci['content'])
        md_spec = get_pyca_cryptography_hash(md_algorithm)
        md = hashes.Hash(md_spec)
        md.update(raw)
        raw_digest = md.finalize()

    # first, do the cryptographic identity checks
    intact, valid = validate_sig_integrity(
        signer_info, cert, expected_content_type=expected_content_type,
        actual_digest=raw_digest, weak_hash_algorithms=weak_hash_algos
    )

    # if the data being encapsulated by the signature is itself invalid,
    # this flag is set
    intact &= not encap_data_invalid
    valid &= intact

    # next, validate trust (only worth doing for signatures that passed the
    # integrity check; revoked/trusted default to False otherwise)
    trusted = revoked = False
    path = None
    if valid:
        validator = CertificateValidator(
            cert, intermediate_certs=other_certs,
            validation_context=validation_context
        )
        trusted, revoked, path = status_cls.validate_cert_usage(
            validator, key_usage_settings=key_usage_settings
        )

    # Merge the results into the caller-supplied kwargs (if any) so the
    # caller can layer extra status fields on top.
    status_kwargs = status_kwargs or {}
    status_kwargs.update(
        intact=intact, valid=valid, signing_cert=cert,
        md_algorithm=md_algorithm, pkcs7_signature_mechanism=mechanism,
        revoked=revoked, trusted=trusted,
        validation_path=path
    )
    return status_kwargs
def validate_cms_signature(signed_data: cms.SignedData,
                           status_cls: Type[StatusType] = SignatureStatus,
                           raw_digest: bytes = None,
                           validation_context: ValidationContext = None,
                           status_kwargs: dict = None,
                           key_usage_settings: KeyUsageConstraints = None,
                           encap_data_invalid=False):
    """
    Validate a CMS signature (i.e. a ``SignedData`` object).

    .. versionchanged:: 0.7.0
        Now handles both detached and enveloping signatures.

    :param signed_data:
        The :class:`.asn1crypto.cms.SignedData` object to validate.
    :param status_cls:
        Status class to use for the validation result.
    :param raw_digest:
        Raw digest, computed from context.
    :param validation_context:
        Validation context to validate the signer's certificate.
    :param status_kwargs:
        Other keyword arguments to pass to the ``status_class`` when reporting
        validation results.
    :param key_usage_settings:
        A :class:`.KeyUsageConstraints` object specifying which key usage
        extensions must or must not be present in the signer's certificate.
    :param encap_data_invalid:
        If ``True``, the encapsulated data inside the CMS is invalid,
        but the remaining validation logic still has to be run (e.g. a
        timestamp token, which requires validation of the embedded message
        imprint).

        This option is considered internal API, the semantics of which may
        change without notice in the future.
    :return:
        A :class:`.SignatureStatus` object (or an instance of a proper subclass)
    """
    # Delegate the actual work, then wrap the collected results in the
    # requested status class.
    collected_kwargs = _validate_cms_signature(
        signed_data, status_cls=status_cls, raw_digest=raw_digest,
        validation_context=validation_context, status_kwargs=status_kwargs,
        key_usage_settings=key_usage_settings,
        encap_data_invalid=encap_data_invalid
    )
    return status_cls(**collected_kwargs)
def collect_timing_info(signer_info: cms.SignerInfo,
                        ts_validation_context: ValidationContext):
    """Gather timing-related status keyword arguments for a signature.

    Collects the signer-reported signing time and validates any embedded
    signature/content timestamp tokens against *ts_validation_context*.
    """
    status_kwargs = {}

    # Self-reported signing time (informational, unauthenticated).
    reported_dt = _extract_self_reported_ts(signer_info)
    if reported_dt is not None:
        status_kwargs['signer_reported_dt'] = reported_dt

    # Signature timestamp token (from the unsigned attributes).
    sig_tst = _extract_tst_data(signer_info, signed=False)
    if sig_tst is not None:
        sig_tst_kwargs = _validate_timestamp(
            sig_tst, ts_validation_context,
            _compute_tst_digest(signer_info),
        )
        status_kwargs['timestamp_validity'] = \
            TimestampSignatureStatus(**sig_tst_kwargs)

    # Content timestamp token (from the signed attributes).
    content_tst = _extract_tst_data(signer_info, signed=True)
    if content_tst is not None:
        content_tst_kwargs = _validate_timestamp(
            content_tst, ts_validation_context,
            expected_tst_imprint=extract_message_digest(signer_info)
        )
        status_kwargs['content_timestamp_validity'] = \
            TimestampSignatureStatus(**content_tst_kwargs)
    return status_kwargs
@dataclass(frozen=True)
class StandardCMSSignatureStatus(SignatureStatus):
"""
Status of a standard "end-entity" CMS signature, potentially with
timing information embedded inside.
"""
signer_reported_dt: Optional[datetime] = None
"""
Signer-reported signing time, if present in the signature.
Generally speaking, this timestamp should not be taken as fact.
"""
timestamp_validity: Optional[TimestampSignatureStatus] = None
"""
Validation status of the signature timestamp token embedded in this
signature, if present.
"""
content_timestamp_validity: Optional[TimestampSignatureStatus] = None
"""
Validation status of the content timestamp token embedded in this
signature, if present.
"""
@property
def bottom_line(self) -> bool:
"""
Formulates a general judgment on the validity of this signature.
This takes into account the cryptographic validity of the signature,
the signature's chain of trust and the validity of the timestamp token
(if present).
:return:
``True`` if all constraints are satisfied, ``False`` otherwise.
"""
ts = self.timestamp_validity
if ts is None:
timestamp_ok = True
else:
timestamp_ok = ts.valid and ts.trusted
content_ts = self.content_timestamp_validity
if content_ts is None:
content_timestamp_ok = True
else:
content_timestamp_ok = content_ts.valid and content_ts.trusted
return (
self.intact and self.valid and self.trusted and timestamp_ok
and content_timestamp_ok
)
def summary_fields(self):
yield from super().summary_fields()
if self.timestamp_validity is not None:
yield 'TIMESTAMP_TOKEN<%s>' % (
'|'.join(self.timestamp_validity.summary_fields())
)
def pretty_print_details(self):
def fmt_section(hdr, body):
return '\n'.join(
(hdr, '-' * len(hdr), body, '\n')
)
sections = self.pretty_print_sections()
bottom_line = (
f"The signature is judged {'' if self.bottom_line else 'IN'}VALID."
)
sections.append(("Bottom line", bottom_line))
return '\n'.join(
fmt_section(hdr, body) for hdr, body in sections
)
def pretty_print_sections(self):
cert: x509.Certificate = self.signing_cert
if self.trusted:
trust_status = "trusted"
elif self.revoked:
trust_status = "revoked"
else:
trust_status = "untrusted"
about_signer = (
f"Certificate subject: \"{cert.subject.human_friendly}\"\n"
f"Certificate SHA1 fingerprint: {cert.sha1.hex()}\n"
f"Certificate SHA256 fingerprint: {cert.sha256.hex()}\n"
f"Trust anchor: \"{self._trust_anchor}\"\n"
f"The signer's certificate is {trust_status}."
)
validity_info = (
"The signature is cryptographically "
f"{'' if self.intact and self.valid else 'un'}sound.\n\n"
f"The digest algorithm used was '{self.md_algorithm}'.\n"
f"The signature mechanism used was "
f"'{self.pkcs7_signature_mechanism}'."
)
if 'ecdsa' in self.pkcs7_signature_mechanism:
ec_params: keys.ECDomainParameters = \
cert.public_key['algorithm']['parameters']
if ec_params.name == 'named':
curve_oid: core.ObjectIdentifier = ec_params.chosen
validity_info += (
f"\nThe elliptic curve used for the signer's ECDSA "
f"public key was '{curve_oid.native}' "
f"(OID: {curve_oid.dotted})."
)
timing_infos = []
reported_ts = self.signer_reported_dt
if reported_ts is | |
from typing import Dict
from typing import List
from treasury.session import FederalTreasurySession
class TreasuryReportsOnReceivables():
"""
## Overview:
----
The Treasury Report on Receivables and Debt Collection Activities
(TROR) is the federal government's primary means for collecting
data on the status of non-tax receivables (delinquent and
non-delinquent debt) owed to the United States.
"""
def __init__(self, session: FederalTreasurySession) -> None:
"""Initializes the `TreasuryReportsOnReceivables` object.
### Parameters
----
session : `TreasurySession`
An initialized session of the `TreasurySession`.
### Usage
----
>>> treasury_client = FederalTreasuryClient()
>>> reports_on_receivables_service = treasury_client.treasury_reports_on_receivables()
"""
# Set the session.
self.treasury_session: FederalTreasurySession = session
def __repr__(self) -> str:
"""String representation of the `FederalTreasuryClient.TreasuryReportsOnReceivables` object."""
# define the string representation
str_representation = '<FederalTreasuryClient.TreasuryReportsOnReceivables (active=True, connected=True)>'
return str_representation
def full_data(
self,
fields: List[str] = None,
sort: List[str] = None,
filters: List[str] = None,
page_number: int = 1,
page_size: int = 100
) -> Dict:
"""Queries the full data report for TROR.
### Overview
----
The Treasury Report on Receivables and Debt Collection Activities (TROR)
is the federal government's primary means for collecting data on the status
of non-tax receivables (delinquent and non-delinquent debt) owed to the
United States. This report provides summary data on the value of receivables
owed to the Federal government, the portion of those receivables that are
delinquent, and efforts to collect or write off delinquent debt. Receivables
are categorized as being either current or delinquent. Delinquent receivables
are also referred to as delinquent debt. Receivables are also categorized by
type of receivable: Administrative Receivables, Direct Loans, and Defaulted
Guaranteed Loans. Administrative Receivables are non-loan receivables, including
fines, payments, and overpayments. Direct Loans and Defaulted Guaranteed Loans are
federal loan receivables. Generally, Federal creditor agencies assess interest on
outstanding loan receivables. Federal creditor agencies are also generally required
to assess interest, penalties, and administrative costs when receivables become
delinquent. The rate of interest is generally governed by 31 U.S.C. Section 3717
and published by the Department of the Treasury. Collections are not always mutually
exclusive. The amount and count of collections are recorded for each tool or
technique that is used to collect funds.
### Parameters
----
fields : List[str] (optional, Default=None)
The fields parameter allows you to select which field(s) should be
included in the response. If desired fields are not specified, all
fields will be returned.
sort : List[str] (optional, Default=None)
The sort parameter allows a user to sort a field in ascending (least
to greatest) or descending (greatest to least) order. When no sort parameter
is specified, the default is to sort by the first column listed. Most API
endpoints are thus sorted by date in ascending order (historical to most
current).
filters : List[str] (optional, Default=None)
Filters are used to view a subset of the data based on specific
criteria. For example, you may want to find data that falls within
a certain date range, or only show records which contain a value
larger than a certain threshold. When no filters are provided,
the default response will return all fields and all data.
page_number : int (optional, Default=1)
The page number will set the index for the pagination, starting
at 1. This allows the user to paginate through the records
returned from an API request
page_size : int (optional, Default=100)
The page size will set the number of rows that are returned
on a request.
### Returns
----
Dict
A collection of `Records` resources.
### Usage
----
>>> treasury_client = FederalTreasuryClient()
>>> reports_on_receivables_service = treasury_client.treasury_reports_on_receivables()
>>> reports_on_receivables_service.full_data()
"""
if fields:
fields = ','.join(fields)
if filters:
filters = ','.join(filters)
if sort:
sort = ','.join(sort)
content = self.treasury_session.make_request(
method='get',
endpoint='/v2/debt/tror',
params={
'format': 'json',
'page[number]': page_number,
'page[size]': page_size,
'fields': fields,
'sort': sort,
'filters': filters
}
)
return content
def collected_and_outstanding_receivables(
self,
fields: List[str] = None,
sort: List[str] = None,
filters: List[str] = None,
page_number: int = 1,
page_size: int = 100
) -> Dict:
"""Queries Collected & Outstanding Receivables from TROR.
### Overview
----
Collected and Outstanding Receivables provides amounts owed to the federal government
by an individual, organization, or entity other than another federal agency during
the reporting period. The total outstanding receivable balance at the end of a fiscal
year is the net of receivables that remained unpaid from prior fiscal years and new
receivables recorded during that fiscal year, less collections, adjustments, and amounts
written off and closed out. Receivables are categorized as being either current or
delinquent. Delinquent receivables are also referred to as delinquent debt. Receivables
are also categorized by type of receivable: Administrative Receivables, Direct Loans,
and Defaulted Guaranteed Loans. Administrative Receivables are non-loan receivables,
including fines, payments, and overpayments. Direct Loans and Defaulted Guaranteed Loans
are federal loan receivables. Generally, federal creditor agencies assess interest on
outstanding loan receivables. Federal creditor agencies are also generally required to
assess interest, penalties, and administrative costs when receivables become delinquent.
The rate of interest is generally governed by 31 U.S.C. Section 3717 and published by the
Department of the Treasury. Collections are not always mutually exclusive. The amount and
count of collections are recorded for each tool or technique that is used to collect funds.
### Parameters
----
fields : List[str] (optional, Default=None)
The fields parameter allows you to select which field(s) should be
included in the response. If desired fields are not specified, all
fields will be returned.
sort : List[str] (optional, Default=None)
The sort parameter allows a user to sort a field in ascending (least
to greatest) or descending (greatest to least) order. When no sort parameter
is specified, the default is to sort by the first column listed. Most API
endpoints are thus sorted by date in ascending order (historical to most
current).
filters : List[str] (optional, Default=None)
Filters are used to view a subset of the data based on specific
criteria. For example, you may want to find data that falls within
a certain date range, or only show records which contain a value
larger than a certain threshold. When no filters are provided,
the default response will return all fields and all data.
page_number : int (optional, Default=1)
The page number will set the index for the pagination, starting
at 1. This allows the user to paginate through the records
returned from an API request
page_size : int (optional, Default=100)
The page size will set the number of rows that are returned
on a request.
### Returns
----
Dict
A collection of `Records` resources.
### Usage
----
>>> treasury_client = FederalTreasuryClient()
>>> reports_on_receivables_service = treasury_client.treasury_reports_on_receivables()
>>> reports_on_receivables_service.collected_and_outstanding_receivables()
"""
if fields:
fields = ','.join(fields)
if filters:
filters = ','.join(filters)
if sort:
sort = ','.join(sort)
content = self.treasury_session.make_request(
method='get',
endpoint='/v2/debt/tror/collected_outstanding_recv',
params={
'format': 'json',
'page[number]': page_number,
'page[size]': page_size,
'fields': fields,
'sort': sort,
'filters': filters
}
)
return content
def collections_delinquent_debt(
self,
fields: List[str] = None,
sort: List[str] = None,
filters: List[str] = None,
page_number: int = 1,
page_size: int = 100
) -> Dict:
"""Queries Collections on Delinquent Debt from TROR.
### Overview
----
Collections on Delinquent Debt provides amounts of delinquent debt collected during the
reporting period. Federal creditor agencies utilize a combination of debt collection tools.
Among these collection tools are administrative wage garnishment (AWG), use of private
collection agencies (PCAs), offset of Federal and State payments through the Treasury Offset
Program (TOP), use of Fiscal Service's Cross-Servicing Program (CS), and litigation. Before
using most collection tools, federal creditor agencies must first provide debtors with due
process. This includes providing notice and an opportunity to enter into a repayment agreement
based on the debtor's financial circumstances, dispute the debt, or object to the intended
collection action. Generally, federal creditor agencies are required to refer delinquent federal
non-tax debt to Fiscal Service for collection through its delinquent debt collection programs,
known as the Cross-Servicing Program and TOP. | |
f"{fillet_area=:.3f}, {fillet_perimeter=:.3f}")
return fillet_area, fillet_perimeter
# Fab_Fillet.get_geometries():
def get_geometries(self) -> Tuple[Fab_Geometry, ...]:
geometries: List[Fab_Geometry] = []
if self.Line:
geometries.append(self.Line)
if self.Arc:
geometries.append(self.Arc)
return tuple(geometries)
# Fab_Fillet._unit_tests():
@staticmethod
def _unit_tests(tracing: str = "") -> None:
"""Run Fab_Fillet unit tests."""
next_tracing: str = tracing + " " if tracing else ""
if tracing:
print(f"{tracing}=>Fab_Fillet._unit_tests()")
# Create 4 corners centered.
dx: float = Vector(20.0, 0.0, 0.0)
dy: float = Vector(0.0, 10.0, 0.0)
radius: float = 4.0
# Create the corner Vector's:
center: Vector = Vector(0.0, 0.0, 0.0)
ne_corner: Vector = Vector(center + dx + dy)
nw_corner: Vector = Vector(center - dx + dy)
sw_corner: Vector = Vector(center - dx - dy)
se_corner: Vector = Vector(center + dx - dy)
# Create the Fab_Fillet's:
ne_fillet: Fab_Fillet = Fab_Fillet(ne_corner, radius)
nw_fillet: Fab_Fillet = Fab_Fillet(nw_corner, radius)
sw_fillet: Fab_Fillet = Fab_Fillet(sw_corner, radius)
se_fillet: Fab_Fillet = Fab_Fillet(se_corner, radius)
# Provide before/after Fab_Fillets:
ne_fillet.Before = se_fillet
nw_fillet.Before = ne_fillet
sw_fillet.Before = nw_fillet
se_fillet.Before = sw_fillet
ne_fillet.After = nw_fillet
nw_fillet.After = sw_fillet
sw_fillet.After = se_fillet
se_fillet.After = ne_fillet
if False and tracing: # pragma: no unit cover
ne_fillet.compute_arc(tracing=f"{next_tracing}NE:")
nw_fillet.compute_arc(tracing=f"{next_tracing}NW:")
sw_fillet.compute_arc(tracing=f"{next_tracing}SW:")
se_fillet.compute_arc(tracing=f"{next_tracing}SE:")
else: # pragma: no unit cover
ne_fillet.compute_arc()
nw_fillet.compute_arc()
sw_fillet.compute_arc()
se_fillet.compute_arc()
if tracing:
print(f"{tracing}<=Fab_Fillet._unit_tests()")
# FabGeometry:
@dataclass(frozen=True)
class FabGeometry(object):
    """FabGeometry: The base class for FabPolygon and FabCircle.

    Abstract interface: subclasses must provide a bounding *Box* property,
    a structural *get_hash*, FreeCAD object *produce*-tion, plane projection,
    and *get_geometry_info*; every base implementation here raises
    NotImplementedError.
    """

    # FabGeometry.__post_init__():
    def __post_init__(self) -> None:
        """Finish initializing a FabGeometry."""
        pass

    # FabGeometry.Box():
    @property
    def Box(self) -> FabBox:
        """Return a FabBox that encloses the FabGeometry."""
        raise NotImplementedError(f"{type(self)}.Box() is not implemented")

    # FabGeometry.get_hash():
    def get_hash(self) -> Tuple[Any, ...]:
        """Return FabGeometry hash."""
        raise NotImplementedError(f"{type(self)}.get_hash() is not implemented")

    # FabGeometry.produce():
    def produce(self, geometry_context: Fab_GeometryContext, prefix: str,
                index: int, tracing: str = "") -> Tuple[Any, ...]:
        """Produce the necessary FreeCAD objects for the FabGeometry."""
        raise NotImplementedError(f"{type(self)}.produce() is not implemented")

    # FabGeometry.project_to_plane():
    def project_to_plane(self, plane: Fab_Plane) -> "FabGeometry":
        """Return a new FabGeometry projected onto a plane."""
        raise NotImplementedError(f"{type(self)}.project_to_plane is not implemented")

    # FabGeometry.get_geometry_info():
    def get_geometry_info(
            self, plane: Fab_Plane, tracing: str = "") -> Tuple[float, float, float, float]:
        """Return information about FabGeometry.

        Arguments:
        * *plane* (Fab_Plane): The plane to project the FabGeometry onto.

        Returns:
        * (float): The geometry area in square millimeters.
        * (float): The perimeter length in millimeters.
        * (float):
          The minimum internal radius in millimeters. -1.0 means there is no internal radius.
        * (float): The minimum external radius in millimeters. 0 means that all corners are "sharp".
        """
        raise NotImplementedError(f"{type(self)}.get_geometry_info is not implemented")
# FabCircle:
@dataclass(frozen=True)
class FabCircle(FabGeometry):
    """FabCircle: A circle with a center and a radius.

    This is actually a sphere at a specified location and diameter. It gets cut into
    a circle later on.

    Attributes:
    * *Center* (Vector): The circle center.
    * *Normal* (Vector): The normal to the circle plane.
    * *Diameter* (float): The diameter in millimeters (must be positive).
    """

    Center: Vector
    Normal: Vector
    Diameter: float

    # FabCircle.__post_init__():
    def __post_init__(self) -> None:
        """Finish initializing a FabCircle.

        Raises ValueError if Diameter is not positive.
        """
        super().__post_init__()
        if self.Diameter <= 0.0:
            raise ValueError(f"Diameter ({self.Diameter}) must be positive.")
        # (Why __setattr__?)[https://stackoverflow.com/questions/53756788]
        # The dataclass is frozen, so object.__setattr__() is required to store
        # private copies of the mutable Vector arguments (callers could
        # otherwise mutate them behind our back).
        copy: Vector = Vector()
        object.__setattr__(self, "Center", self.Center + copy)  # Makes a copy.
        object.__setattr__(self, "Normal", self.Normal + copy)  # Makes a copy.

    # FabCircle.get_hash():
    def get_hash(self) -> Tuple[Any, ...]:
        """Return FabCircle hash.

        Coordinates are formatted to 6 decimal places so nearly-identical
        circles hash identically.
        """
        center: Vector = self.Center
        normal: Vector = self.Normal
        hashes: Tuple[Union[int, str, Tuple[Any, ...]], ...] = (
            "FabCircle.get_hash",
            f"{center.x:.6f}",
            f"{center.y:.6f}",
            f"{center.z:.6f}",
            f"{normal.x:.6f}",
            f"{normal.y:.6f}",
            f"{normal.z:.6f}",
            f"{self.Diameter:.6f}",
        )
        return hashes

    # FabCircle.get_geometry_info():
    def get_geometry_info(
            self, plane: Fab_Plane, tracing: str = "") -> Tuple[float, float, float, float]:
        """Return information about FabGeometry.

        Arguments:
        * *plane* (Fab_Plane): The plane to project FabGeometry onto.

        Returns:
        * (float): The circle area in square millimeters.
        * (float): The perimeter length in millimeters.
        * (float): -1 since there are no internal radius corners for a circle.
        * (float): The circle radius in millimeters.
        """
        if tracing:
            # Bug fix: this print was missing its f-string prefix.
            print(f"{tracing}=>FabCircle.get_geometry_info(*))")
        pi: float = math.pi
        radius: float = self.Diameter / 2.0
        area: float = pi * radius * radius
        perimeter: float = 2.0 * pi * radius
        minimum_internal_radius: float = -1.0
        minimum_external_radius: float = radius
        if tracing:
            # Bug fix: exit trace used "=>" instead of the "<=" convention.
            print(f"{tracing}<=FabCircle.get_geometry_info(*))=>"
                  f"({area}, {perimeter}, {minimum_internal_radius}, {minimum_external_radius})")
        return area, perimeter, minimum_internal_radius, minimum_external_radius

    # FabCircle.Box():
    @property
    def Box(self) -> FabBox:
        """Return a FabBox that encloses the FabGeometry."""
        # A perpendicular to the normal is needed:
        # https://math.stackexchange.com/questions/137362/
        # how-to-find-perpendicular-vector-to-another-vector
        # The response from <NAME> is used. There is probably an alternate solution based
        # on quaternions that is better, but the code below should be more than adequate.
        EPSILON = 1.0e-8
        copy: Vector = Vector()
        normal: Vector = self.Normal / self.Normal.Length
        nx: float = normal.x
        ny: float = normal.y
        nz: float = normal.z
        xy_close: bool = abs(nx - ny) < EPSILON
        perpendicular1: Vector = (
            Vector(-nz, 0, nx) if xy_close else Vector(-ny, nx, 0))
        perpendicular1 = perpendicular1 / perpendicular1.Length
        # normal and perpendicular1 are unit vectors, so the cross product is too.
        perpendicular2: Vector = (normal + copy).cross(perpendicular1 + copy)
        center: Vector = self.Center
        radius: float = self.Diameter / 2.0
        # The four extreme points of the circle along the two in-plane axes:
        corner1: Vector = center + radius * perpendicular1
        corner2: Vector = center + radius * perpendicular2
        corner3: Vector = center - radius * perpendicular1
        # Bug fix: corner4 previously reused perpendicular1 (duplicating
        # corner3), so the box failed to reach the +perpendicular2 side.
        corner4: Vector = center - radius * perpendicular2
        box: FabBox = FabBox()
        box.enclose((corner1, corner2, corner3, corner4))
        return box

    # FabCircle.project_to_plane():
    def project_to_plane(self, plane: Fab_Plane, tracing: str = "") -> "FabCircle":
        """Return a new FabCircle projected onto a plane.

        Arguments:
        * *plane* (Fab_Plane): Plane to project to.

        Returns:
        * (FabCircle): The newly projected FabCircle.
        """
        if tracing:
            print(f"{tracing}=>FabCircle.project_to_plane({plane})")
        center: Vector = self.Center
        new_center: Vector = plane.point_project(center)
        new_circle: "FabCircle" = FabCircle(new_center, plane.Normal, self.Diameter)
        if tracing:
            print(f"{tracing}<=FabCircle.project_to_plane({plane}) => {new_circle}")
        return new_circle

    # FabCircle.produce():
    def produce(self, geometry_context: Fab_GeometryContext, prefix: str,
                index: int, tracing: str = "") -> Tuple[Any, ...]:
        """Produce the FreeCAD objects needed for the FabCircle."""
        next_tracing: str = tracing + " " if tracing else ""
        if tracing:
            print(f"{tracing}=>FabCircle.produce()")
        geometries: Tuple[Fab_Geometry, ...] = self.get_geometries()
        geometry: Fab_Geometry
        part_geometries: List[Any] = []
        # Use a distinct loop variable so the *index* parameter is not shadowed.
        geometry_index: int
        for geometry_index, geometry in enumerate(geometries):
            part_geometry: Any = geometry.produce(
                geometry_context, prefix, geometry_index, tracing=next_tracing)
            part_geometries.append(part_geometry)
        if tracing:
            print(f"{tracing}<=FabCircle.produce()")
        return tuple(part_geometries)

    # FabCircle.get_geometries():
    def get_geometries(self) -> Tuple[Fab_Geometry, ...]:
        """Return the Fab_Geometry's that make up the FabCircle."""
        return (Fab_Circle(self.Center, self.Diameter),)

    # FabCircle._unit_tests():
    @staticmethod
    def _unit_tests(tracing: str = "") -> None:
        """Run FabCircle unit tests."""
        if tracing:
            print(f"{tracing}=>FabCircle._unit_tests()")
        normal: Vector = Vector(0, 0, 1)
        center: Vector = Vector(1, 2, 3)
        try:
            FabCircle(center, normal, 0.0)
            assert False
        except ValueError as value_error:
            assert str(value_error) == "Diameter (0.0) must be positive.", value_error
        try:
            FabCircle(center, normal, -1.0)
            assert False
        except ValueError as value_error:
            assert str(value_error) == "Diameter (-1.0) must be positive.", value_error
        circle: FabCircle = FabCircle(center, normal, 1.0)
        box: FabBox = circle.Box
        # With normal (0, 0, 1) the in-plane axes are (-1, 0, 0) and (0, -1, 0),
        # so a radius-0.5 circle at (1, 2, 3) spans x in [0.5, 1.5] and
        # y in [1.5, 2.5]. (The previous expectation of TNE y == 2.0 encoded
        # the corner4 bug fixed in the Box property.)
        assert box.TNE == Vector(1.5, 2.5, 3.0)
        assert box.BSW == Vector(0.5, 1.5, 3.0)
        if tracing:
            print(f"{tracing}<=FabCircle._unit_tests()")
# FabPolygon:
@dataclass(frozen=True)
class FabPolygon(FabGeometry):
"""FabPolygon: An immutable polygon with rounded corners.
A FabPolygon is represented as a sequence of corners (i.e. a Vector) where each corner can
optionally be filleted with a radius. In order to make it easier to use, a corner can be
specified as simple Vector or as a tuple that specifies a Vector and a radius. The radius
is in millimeters and can be provided as either a Python int or float. When an explicit
fillet radius is not specified, higher levels in the software stack will typically substitute
x in a deburr radius for external corners and an internal tool radius for internal corners.
FabPolygon's are frozen and can not be modified after creation. Since Vector's are mutable,
a copy of each vector stored inside the FabPolygon.
Attributes:
* *Corners* (Tuple[Union[Vector, Tuple[Vector, Union[int, float]]], ...]):
See description below for more on corners.
Constructor:
* FabPolygon(Corners):
Example:
```
polygon: FabPolyon = FabPolygon((
Vector(-10, -10, 0), # Lower left (no radius)
Vector(10, -10, 0), # Lower right (no radius)
(Vector(10, 10, 0), 5), # Upper right (5mm radius)
(Vector(-0, 10, 0), 5.5), # Upper right (5.5mm radius)
), "Name")
```
"""
Corners: Tuple[Union[Vector, Tuple[Vector, Union[int, float]]], ...]
Fab_Fillets: Tuple[Fab_Fillet, ...] = field(init=False, repr=False) # TODO make Private
EPSILON = 1.0e-8
# FabPolygon.__post_init__():
def __post_init__(self) -> None:
"""Verify that the corners passed in are correct."""
tracing: str = "" # Edit to enable tracing.
| |
32 bits must be in the range\n'
'\t \t * -2^31 to 2^31 - 1. See\n'
'\t \t * http://www.example.com/integers/this-website-is-too-'
'long-for-one-line\n'
'\t \t * for more details.\n'
'\t \t * \n'
'\t \t* Integers are not the same thing as natural numbers, as '
'the latter must be nonnegative.\n'
' * \t \tNor are they the same as rational numbers. Rational\n'
' * \t \tnumbers are repeating or terminating decimals, or\n'
' * \t \tequivalently, fractions.\n'
'\t \t */\n'
'\t \t int returnSomething() {\n'
'\t \t return 73 * 184 + ((1704 - 1600) * (305 - 16) / ((9 + '
'16) % 8));\n'
'\t \t }\n')
actual_text = view.substr(
Region(
comment_start_point, comment_start_point + len(expected_text)))
self.assertEqual(actual_text, expected_text)
def test_beginning_of_document(self):
    """Test word wrapping at the beginning of the document.

    Test word wrapping of a section of line comments that starts at
    the beginning of the document.
    """
    view = self._view
    self._set_up_cpp()
    view.settings().set('rulers', [60])
    # The comment starts at offset 0, so there is no preceding code or
    # whitespace for the wrapper to anchor on.
    comment_start_point = 0
    self._insert(
        comment_start_point,
        '// Lorem ipsum dolor sit amet, iudicabit interpretaris ius eu, '
        'et sit iudico aperiri scaevola. Ad solum eleifend sea, ex ius '
        'graeci alienum accusamus, diam mandamus expetenda quo ei.\n')
    # Inserting one long line should immediately re-wrap to the 60-column
    # ruler.
    expected_text = (
        '// Lorem ipsum dolor sit amet, iudicabit interpretaris ius\n'
        '// eu, et sit iudico aperiri scaevola. Ad solum eleifend\n'
        '// sea, ex ius graeci alienum accusamus, diam mandamus\n'
        '// expetenda quo ei.\n')
    actual_text = view.substr(Region(0, view.size()))
    self.assertEqual(actual_text, expected_text)
    # Insert additional text mid-comment and verify it re-wraps in place.
    point = view.find(r'aperiri scaevola\.', 0).end()
    self._insert(
        point,
        ' Vim case eros choro et, te deserunt iudicabit assentior eum, id '
        'his assum nobis primis.')
    expected_text = (
        '// Lorem ipsum dolor sit amet, iudicabit interpretaris ius\n'
        '// eu, et sit iudico aperiri scaevola. Vim case eros choro\n'
        '// et, te deserunt iudicabit assentior eum, id his assum\n'
        '// nobis primis. Ad solum eleifend sea, ex ius graeci\n'
        '// alienum accusamus, diam mandamus expetenda quo ei.\n')
    actual_text = view.substr(Region(0, view.size()))
    self.assertEqual(actual_text, expected_text)
    # Delete the original first sentence and verify the remainder re-wraps.
    point = view.find('Lorem ipsum', 0).begin()
    self._delete(point, 92)
    expected_text = (
        '// Vim case eros choro et, te deserunt iudicabit assentior\n'
        '// eum, id his assum nobis primis. Ad solum eleifend sea, ex\n'
        '// ius graeci alienum accusamus, diam mandamus expetenda quo\n'
        '// ei.\n')
    actual_text = view.substr(Region(0, view.size()))
    self.assertEqual(actual_text, expected_text)
def test_end_of_document(self):
    """Test word wrapping at the end of the document.

    Test word wrapping of a C++ block comment that isn't closed with
    a */, and so extends to the end of the document.
    """
    view = self._view
    self._set_up_cpp()
    view.settings().set('rulers', [60])
    self._append(
        '#include <iostream>\n'
        '\n'
        'using namespace std;\n'
        '\n')
    # The comment is the last thing in the buffer and is deliberately left
    # unterminated (no closing */).
    comment_start_point = view.size()
    self._insert(
        comment_start_point,
        '/**\n'
        ' * Lorem ipsum dolor sit amet, iudicabit interpretaris ius eu, '
        'et sit iudico aperiri scaevola. Ad solum eleifend sea, ex ius '
        'graeci alienum accusamus, diam mandamus expetenda quo ei.')
    expected_text = (
        '/**\n'
        ' * Lorem ipsum dolor sit amet, iudicabit interpretaris ius\n'
        ' * eu, et sit iudico aperiri scaevola. Ad solum eleifend\n'
        ' * sea, ex ius graeci alienum accusamus, diam mandamus\n'
        ' * expetenda quo ei.')
    actual_text = view.substr(Region(comment_start_point, view.size()))
    self.assertEqual(actual_text, expected_text)
    # Insert additional text mid-comment; the unterminated comment must
    # still re-wrap correctly.
    point = view.find(r'aperiri scaevola\.', 0).end()
    self._insert(
        point,
        ' Vim case eros choro et, te deserunt iudicabit assentior eum, id '
        'his assum nobis primis.')
    expected_text = (
        '/**\n'
        ' * Lorem ipsum dolor sit amet, iudicabit interpretaris ius\n'
        ' * eu, et sit iudico aperiri scaevola. Vim case eros choro\n'
        ' * et, te deserunt iudicabit assentior eum, id his assum\n'
        ' * nobis primis. Ad solum eleifend sea, ex ius graeci\n'
        ' * alienum accusamus, diam mandamus expetenda quo ei.')
    actual_text = view.substr(Region(comment_start_point, view.size()))
    self.assertEqual(actual_text, expected_text)
    # Delete the leading sentence and verify re-wrapping after deletion.
    point = view.find('Lorem ipsum', 0).begin()
    self._delete(point, 92)
    expected_text = (
        '/**\n'
        ' * Vim case eros choro et, te deserunt iudicabit assentior\n'
        ' * eum, id his assum nobis primis. Ad solum eleifend sea, ex\n'
        ' * ius graeci alienum accusamus, diam mandamus expetenda quo\n'
        ' * ei.')
    actual_text = view.substr(Region(comment_start_point, view.size()))
    self.assertEqual(actual_text, expected_text)
def test_bulk_insert_delete(self):
    """Test insertion and deletion of multiple characters at a time.

    This is meant to simulate operations such as pasting text and
    selecting multiple characters and pressing backspace.
    """
    view = self._view
    self._set_up_cpp()
    view.settings().set('rulers', [60])
    # Fixture: a small, fully formed C++ file with one block comment that
    # the bulk edits below will target.
    self._append(
        '#include <iostream>\n'
        '\n'
        'using namespace std;\n'
        '\n'
        '/**\n'
        ' * The "fibonacci" function returns the nth number in the\n'
        ' * Fibonacci sequence. The function assumes that n >= 0.\n'
        ' */\n'
        'int fibonacci(int n) {\n'
        '    // Base case\n'
        '    if (n == 0) {\n'
        '        return 0;\n'
        '    }\n'
        '\n'
        '    // Iterative implementation of "fibonacci"\n'
        '    int cur = 1;\n'
        '    int prev = 0;\n'
        '    for (int i = 1; i < n; i++) {\n'
        '        int next = cur + prev;\n'
        '        prev = cur;\n'
        '        cur = next;\n'
        '    }\n'
        '    return cur;\n'
        '}\n'
        '\n'
        'int main() {\n'
        '    cout << "The 8th Fibonacci number is " <<\n'
        '        fibonacci(8) << "\\n";\n'
        '    return 0;\n'
        '}\n')
    comment_start_point = view.find(r'/\*\*', 0).begin()
    # Simulate pasting several sentences into the middle of the comment.
    point = view.find('The function assumes', 0).begin() - 1
    self._set_selection_point(point)
    view.run_command(
        'insert', {
            'characters':
                ' The Fibonacci sequence begins with 0 as the 0th number '
                'and 1 as the first number. Every subsequent number is '
                'equal to the sum of the two previous numbers.',
        })
    expected_text = (
        '/**\n'
        ' * The "fibonacci" function returns the nth number in the\n'
        ' * Fibonacci sequence. The Fibonacci sequence begins with 0\n'
        ' * as the 0th number and 1 as the first number. Every\n'
        ' * subsequent number is equal to the sum of the two previous\n'
        ' * numbers. The function assumes that n >= 0.\n'
        ' */\n')
    actual_text = view.substr(
        Region(
            comment_start_point, comment_start_point + len(expected_text)))
    self.assertEqual(actual_text, expected_text)
    # Simulate selecting a sentence and pressing backspace.
    start_point = view.find(r'1 as the first number\.', 0).end() + 1
    end_point = view.find('The function assumes', 0).begin()
    self._set_selection_region(Region(start_point, end_point))
    view.run_command('left_delete')
    expected_text = (
        '/**\n'
        ' * The "fibonacci" function returns the nth number in the\n'
        ' * Fibonacci sequence. The Fibonacci sequence begins with 0\n'
        ' * as the 0th number and 1 as the first number. The function\n'
        ' * assumes that n >= 0.\n'
        ' */\n')
    actual_text = view.substr(
        Region(
            comment_start_point, comment_start_point + len(expected_text)))
    self.assertEqual(actual_text, expected_text)
    # Simulate typing over a selection (replace in one edit).
    start_point = view.find('begins with 0', 0).begin()
    end_point = view.find('1 as the first number', 0).end()
    self._set_selection_region(Region(start_point, end_point))
    view.run_command(
        'insert',
        {'characters': 'is the sequence 1, 1, 2, 3, 5, 8, 13, etc'})
    expected_text = (
        '/**\n'
        ' * The "fibonacci" function returns the nth number in the\n'
        ' * Fibonacci sequence. The Fibonacci sequence is the\n'
        ' * sequence 1, 1, 2, 3, 5, 8, 13, etc. The function assumes\n'
        ' * that n >= 0.\n'
        ' */\n')
    actual_text = view.substr(
        Region(
            comment_start_point, comment_start_point + len(expected_text)))
    self.assertEqual(actual_text, expected_text)
def test_wrap_entire_document(self):
"""Test a "wrap_as_you_type_sections" setting that wraps everything.
Test a "wrap_as_you_type_sections" setting that performs word
wrapping on the entire document.
"""
view = self._view
view.set_syntax_file('Packages/Text/Plain text.tmLanguage')
settings = view.settings()
settings.set(
'wrap_as_you_type_sections', [{'selector': 'source | text'}])
settings.set('rulers', [60])
self._insert(
0,
'Lorem ipsum dolor sit amet, iudicabit interpretaris ius eu, et '
'sit iudico aperiri scaevola. Ad solum eleifend sea, ex ius '
'graeci alienum accusamus, diam mandamus expetenda quo ei.\n')
expected_text = (
'Lorem ipsum dolor sit amet, iudicabit interpretaris ius eu,\n'
'et sit iudico aperiri scaevola. Ad solum eleifend sea, ex\n'
'ius graeci alienum accusamus, diam mandamus expetenda quo\n'
'ei.\n')
actual_text = view.substr(Region(0, view.size()))
self.assertEqual(actual_text, expected_text)
point = view.find(r'aperiri scaevola\.', 0).end()
self._insert(
point,
' Vim case eros choro et, te deserunt iudicabit assentior eum, id '
'his assum nobis primis.')
expected_text = (
'Lorem ipsum dolor sit amet, iudicabit interpretaris ius eu,\n'
'et sit iudico aperiri scaevola. Vim case eros choro et, te\n'
'deserunt iudicabit assentior eum, id his assum nobis primis.\n'
'Ad solum eleifend sea, ex ius graeci alienum accusamus, diam\n'
'mandamus expetenda quo ei.\n')
actual_text = view.substr(Region(0, view.size()))
self.assertEqual(actual_text, expected_text)
point = view.find('Lorem ipsum', 0).begin()
self._delete(point, 92)
expected_text = (
'Vim case eros choro et, te deserunt iudicabit | |
# Repository: YSabarad/monopyly
__author__ = 'Cedric'
# each information will be used to sort the properties for the given policy
import random
from monopyly import *
from .Memory import *
from .Policy import *
class VSSmartBuyerInvertGaussFocusAI(PlayerAIBase):
'''
'''
def __init__(self):
    '''
    ctor

    Builds the AI's memory structures, delegation policies, and a static
    per-property tuning table used by those policies.
    '''
    # memory information
    # needed_money: shortfall (debt minus cash) to raise before paying;
    # money_to_be_taken: the pending debt amount. Both reset each turn.
    self.needed_money = 0
    self.auction_memory = AuctionMemory()
    self.deal_memory = DealMemory()
    # Decision-making is delegated to policy objects (defined in .Policy).
    self.property_policy = AcquiringPolicy(self)
    self.house_policy = HousePolicy_v2(self, HousePolicy_v2.HousePolicy.ONE_COMPLETE_SET, HousePolicy_v2.RepartitionPolicy.SAME_SIZE)
    self.selling_policy = SellingPolicy(self, self.deal_memory)
    self.chance_policy = ChancePolicy(0.1)
    self.jail_policy = JailPolicy(0.8, 5, 22)
    self.money_to_be_taken = 0
    # Per-property tuning table. Each value is a 7-element list of weights
    # consumed by the policy classes above; the exact column meanings are
    # not visible in this file — presumably acquisition thresholds and
    # priority scores used to sort properties (TODO: confirm against
    # .Policy). Values are grouped by Monopoly color set.
    self.properties_information = {
        #brown
        Square.Name.OLD_KENT_ROAD: [0, 1.25, 1.25, 1.5, 50, 9, 0.75],
        Square.Name.WHITECHAPEL_ROAD: [0, 1.25, 1.25, 1.5, 0, 10, 0.75],
        #light blue
        Square.Name.THE_ANGEL_ISLINGTON: [100, 1.2, 1.2, 1.35, 50, 6, 0.75],
        Square.Name.EUSTON_ROAD: [100, 1.2, 1.2, 1.35, 50, 6, 0.75],
        Square.Name.PENTONVILLE_ROAD: [100, 1.2, 1.2, 1.35, 0, 7, 0.75],
        #rose
        Square.Name.PALL_MALL: [200, 1.1, 1.1, 1.15, 150, 4, 0.75],
        Square.Name.WHITEHALL: [200, 1.1, 1.1, 1.15, 150, 4, 0.75],
        Square.Name.NORTHUMBERLAND_AVENUE: [200, 1.1, 1.1, 1.15, 150, 5, 0.75],
        #orange
        Square.Name.BOW_STREET: [300, 1, 1, 1, 250, 3, 0.75],
        Square.Name.MARLBOROUGH_STREET: [300, 1, 1, 1, 250, 3, 0.75],
        Square.Name.VINE_STREET: [300, 1, 1, 1, 250, 4, 0.75],
        #red
        Square.Name.STRAND: [300, 1, 1, 1, 250, 3, 0.75],
        Square.Name.FLEET_STREET: [300, 1, 1, 1, 250, 3, 0.75],
        Square.Name.TRAFALGAR_SQUARE: [300, 1, 1, 1, 250, 4, 0.75],
        #yellow
        Square.Name.LEICESTER_SQUARE: [200, 1.1, 1.1, 1.15, 150, 4, 0.75],
        Square.Name.COVENTRY_STREET: [200, 1.1, 1.1, 1.15, 150, 4, 0.75],
        Square.Name.PICCADILLY: [200, 1.1, 1.1, 1.15, 150, 5, 0.75],
        #green
        Square.Name.REGENT_STREET: [100, 1.2, 1.2, 1.35, 50, 6, 0.75],
        Square.Name.OXFORD_STREET: [100, 1.2, 1.2, 1.35, 50, 6, 0.75],
        Square.Name.BOND_STREET: [100, 1.2, 1.2, 1.35, 0, 7, 0.75],
        #dark blue
        Square.Name.PARK_LANE: [0, 1.25, 1.25, 1.5, 50, 9, 0.75],
        Square.Name.MAYFAIR: [0, 1.25, 1.25, 1.5, 0, 10, 0.75],
        #station
        Square.Name.KINGS_CROSS_STATION: [0, 1.2, 1.2, 1.35, 0, -1, 0],
        Square.Name.MARYLEBONE_STATION: [0, 1.2, 1.2, 1.35, 0, -1, 0],
        Square.Name.FENCHURCH_STREET_STATION: [0, 1.2, 1.2, 1.35, 0, -1, 0],
        Square.Name.LIVERPOOL_STREET_STATION: [0, 1.2, 1.2, 1.35, 0, -1, 0],
        #company
        Square.Name.ELECTRIC_COMPANY: [150, 1, 1, 1.15, 0, -1, 0.5],
        Square.Name.WATER_WORKS: [150, 1, 1, 1.15, 0, -1, 0.5],
    }
def get_name(self):
    '''
    Return this AI's display name, as shown by the tournament runner.
    '''
    return 'VSSmartBuyerInvertGaussFocusAI'
def start_of_game(self):
    '''
    Called at the start of the game: clears the per-game money
    bookkeeping. No response is required.
    '''
    self.money_to_be_taken = 0
    self.needed_money = 0
def start_of_turn(self, game_state, player):
    '''
    Called when any AI's turn starts: clears the per-turn money
    bookkeeping. No response is required.
    '''
    self.money_to_be_taken = 0
    self.needed_money = 0
def landed_on_unowned_property(self, game_state, player, property):
    '''
    Called when the AI lands on an unowned property (active player only).

    Returns BUY or DO_NOT_BUY from the PlayerAIBase.Action enum; the
    decision is delegated to the acquisition policy.
    '''
    decision = self.property_policy.acquire_through_landing(
        game_state, player, property)
    return decision
def money_given(self, player, amount):
    '''
    Called when money has been given to the player.

    No response is required.
    '''
    # Notification-only hook: this AI does not track incoming money.
    pass
def money_will_be_taken(self, player, amount):
    '''
    Called shortly before money will be taken from the player.

    Records the pending debt in money_to_be_taken and, when the player's
    cash cannot cover it, records the shortfall in needed_money so the
    subsequent sell_houses/mortgage_properties callbacks know how much to
    liquidate.

    No response is required.
    '''
    self.money_to_be_taken = amount
    if amount > player.state.cash:
        self.needed_money = amount - player.state.cash
def money_taken(self, player, amount):
    '''
    Called when money has been taken from the player.

    No response is required.
    '''
    # Notification-only hook: the shortfall was already recorded in
    # money_will_be_taken, so there is nothing to do here.
    pass
def players_birthday(self):
    '''
    Called when a player picks up the 'It is your birthday...'
    Community Chest card.

    Must return exactly "Happy Birthday!" (same casing, with the
    exclamation mark); anything else incurs a £100 fine instead of £10.
    '''
    return "Happy Birthday!"
def pay_ten_pounds_or_take_a_chance(self, game_state, player):
    '''
    Called for the "Pay a £10 fine or take a Chance" card.

    Returns PlayerAIBase.Action.PAY_TEN_POUND_FINE or TAKE_A_CHANCE,
    as decided by the chance policy.
    '''
    return self.chance_policy.compute()
def property_offered_for_auction(self, game_state, player, property):
    '''
    Called when a property is put up for auction (all players take part).

    Returns the bid as a positive integer; zero means "not bidding"
    (not a bid of zero). The property goes to the highest bidder for £1
    more than the second-highest bid. The decision is delegated to the
    acquisition policy.
    '''
    bid = self.property_policy.acquire_through_auction(
        game_state, player, property)
    return bid
def auction_result(self, status, property, player, amount_paid):
    '''
    Called with the result of an auction. All players receive this
    notification.

    status is either AUCTION_SUCCEEDED or AUCTION_FAILED. Successful
    auctions are recorded in the auction memory so future bids can be
    informed by past sale prices; failed auctions (player is None,
    amount_paid is 0) are ignored.

    No response is required.
    '''
    # (A stray trailing `pass` was removed; it was dead code.)
    if status == PlayerAIBase.Action.AUCTION_SUCCEEDED:
        self.auction_memory.add_auction(property, player, amount_paid)
def build_houses(self, game_state, player):
    '''
    Called near the start of the player's turn to offer house building.

    Returns a list of (Property, additional_houses) tuples — e.g.
    [(park_lane, 3), (mayfair, 4)] — or an empty list to build nothing.
    Notes on the game rules: whole unmortgaged sets are required before
    building, housing must stay balanced within a set, five houses make a
    hotel, and the whole transaction rolls back if it is unbalanced or
    unaffordable. The decision is delegated to the house policy.
    '''
    return self.house_policy.compute(game_state, player)
def sell_houses(self, game_state, player):
    '''
    Gives the player the option to sell houses to raise money.

    Called when any debt, fine or rent has to be paid, just before
    mortgage_properties. Returns a list of (street, houses_to_sell)
    tuples (half purchase price is refunded per house, and sets must
    remain balanced), or an empty list when no extra money is needed.
    '''
    if self.needed_money <= 0:
        return []
    return self.selling_policy.computeHouse(game_state, player)
def mortgage_properties(self, game_state, player):
    '''
    Gives the player an option to mortgage properties.

    Called before any debt is paid (house building, rent, tax, fines).
    Half the face value is received per mortgaged property; properties
    with houses cannot be mortgaged (sell_houses runs first). Returns a
    list of Property objects to mortgage, or an empty list when no extra
    money is needed.
    '''
    if self.needed_money <= 0:
        return []
    return self.selling_policy.computeMortgage(game_state, player)
def unmortgage_properties(self, game_state, player):
'''
Called near the start of the player's turn | |
#!/usr/bin/python
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
# Usage:
#
# $ ./bike_facilities_populate.py --help
#
# Also:
#
# $ ./bike_facilities_populate.py |& tee 2013.05.11.bike_facil_pop.txt
#
# BUG nnnn: Populate new /byway/cycle_route from byways' tags and attrs.
# (I.e., this script answers yet-to-be-filed Bug nnnn.)
# *** stats: created 18904 new links total
# *** stats: created 16880 new links for attr: Bicycle Facility
# *** stats: created 2024 new links for attr: Controlled Access
# *** cnt: 14764 / facil: Bike Trail
# *** cnt: 2116 / facil: Bike Lane
#FIXME: shoulder count
#
# May-12 21:17:00 INFO script_base # Script completed in 4.98 mins.
# Script identification metadata, reported by the argument parser and logs.
script_name = ('Create and Populate New "Bike Facility" Attribute')
script_version = '1.0'
__version__ = script_version
__author__ = 'Cyclopath <<EMAIL>>'
__date__ = '2013-05-11'
# ***
# SYNC_ME: Search: Scripts: Load pyserver.
import os
import sys
sys.path.insert(0, os.path.abspath('%s/../util'
% (os.path.abspath(os.curdir),)))
import pyserver_glue
import time
import conf
import g
import logging
from util_ import logging2
from util_.console import Console
# Logging verbosity: DEBUG by default; uncomment one of the VERBOSE*
# alternatives below for progressively chattier tracing.
log_level = logging.DEBUG
#log_level = logging2.VERBOSE2
#log_level = logging2.VERBOSE4
#log_level = logging2.VERBOSE
# Route log output to the console, sized to the current terminal width.
conf.init_logging(True, True, Console.getTerminalSize()[0]-1, log_level)
log = g.log.getLogger('facils_popl')
# ***
import copy
import psycopg2
import time
import traceback
from grax.access_level import Access_Level
from grax.access_scope import Access_Scope
from grax.access_style import Access_Style
from grax.grac_manager import Grac_Manager
from grax.item_manager import Item_Manager
from grax.user import User
from gwis.query_overlord import Query_Overlord
from item import item_base
from item import item_user_access
from item import item_versioned
from item import link_value
from item.attc import attribute
from item.feat import branch
from item.feat import byway
from item.grac import group
from item.link import link_attribute
from item.util import revision
from item.util.item_query_builder import Item_Query_Builder
from item.util.item_type import Item_Type
from item.util.watcher_frequency import Watcher_Frequency
from util_ import db_glue
from util_ import geometry
from util_ import gml
from util_ import misc
from util_.log_progger import Debug_Progress_Logger
from util_.script_args import Ccp_Script_Args
from util_.script_base import Ccp_Script_Base
# *** Debug switches
# Developer switches: uncomment the alternatives below to short-circuit the
# main loop and/or skip the final database commit while testing the script.
debug_prog_log = Debug_Progress_Logger()
debug_prog_log.debug_break_loops = False
#debug_prog_log.debug_break_loops = True
#debug_prog_log.debug_break_loop_cnt = 3
##debug_prog_log.debug_break_loop_cnt = 10
debug_skip_commit = False
#debug_skip_commit = True
# This is shorthand for if one of the above is set.
debugging_enabled = ( False
                      or debug_prog_log.debug_break_loops
                      or debug_skip_commit
                      )
# Shout loudly so a debug build is never mistaken for a production run.
if debugging_enabled:
    log.warning('****************************************')
    log.warning('*                                      *')
    log.warning('*      WARNING: debugging_enabled      *')
    log.warning('*                                      *')
    log.warning('****************************************')
# ***
# 2013.05.11: [lb] doesn't quite remember when he sussed these tags out, so
# there might be some new ones we miss here... but probably not that many of
# them, it's not like people spend a lot of time tagging.
class Tag_Presumption(object):

    """Byway tag spellings used to presume each segment's bike facility.

    Each attribute is a tuple of lower-case CcpV1 tag names; a byway
    carrying any of the listed tags is presumed to have the corresponding
    facility (or access restriction). The counts in the comments are from
    a 2013 Mpls-St. Paul run.
    """

    # 2013.05.12: On Mpls-St. Paul, applied to 2024 / 156378 line segments.
    tag_prohibited = (
        'prohibited',
        'closed',
        'biking prohibited',
        'closed - permanently',
    )

    # 2013.05.12: tag_bike_path + tag_bike_lane + tag_bike_shoulder:
    #             On Mpls-St. Paul, applied to ??? / 156378 line segments.
    # 2013.05.12: trail: applied to 14764 / 156378 line segments.
    tag_bike_path = (
        'bikepath',
        'bike path',
        'combined path',
        #? 'path',
        #? 'paved path',
        #? 'wide sidewalk, bike path',
    )

    # 2013.05.12: blane: applied to 2116 / 156378 line segments.
    tag_bike_lane = (
        'bike lane',
        'bikelane',
        'bike lane one side',
        'bike lane on left',
        'bike lanes',
    )

    # FIXME:
    # 2013.05.13: shldr: applied to ??? / 156378 line segments.
    # FIXME: Sync with shoulder_dis
    tag_bike_shoulder = (
        'bike shoulder',
        'busy, good shoulder',
        'good shoulder',
        'great! recently repaved, wide shoulders',
        'nice shoulder',
        'shoulder',
        'shoulder lane',
        'striped shoulder',
        'striped shoulder on bridge',
        'wide shoulder',
        'wide shoulder path',
        'wide shoulder, safe',
    )

    # FIXME: Wire this? ...
    #        Make a boolean for unpaved??
    #        And another boolean attribute for caution?
    #        What about a temporal attribute for "xx (level?) of service"?
    tag_unpaved = (
        'dirt path',
        'Dirt path',
        'unpaved',
    )

    # Bug fix: this tuple previously ended with a stray trailing comma after
    # the closing paren, which wrapped it in an outer tuple (a 1-tuple of a
    # tuple), so `'bike route' in tag_bike_boulevard` was always False.
    tag_bike_boulevard = (
        'bike boulevard',
        'bike boulevard marked with signage',
        # FIXME: Is bike route same as bike boulevard?:
        'bike route',
    )
# *** Cli arg. parser
class ArgParser_Script(Ccp_Script_Args):

    """Command-line parser for this script.

    Adds no options beyond the Ccp_Script_Args base class; it exists so the
    base script machinery has a concrete parser to instantiate.
    """

    def __init__(self):
        Ccp_Script_Args.__init__(self, script_name, script_version)

    def prepare(self):
        """Register command-line switches (none beyond the base class's)."""
        Ccp_Script_Args.prepare(self)

    def verify_handler(self):
        """Validate parsed arguments; defers entirely to the base class."""
        return Ccp_Script_Args.verify_handler(self)
# *** Bike_Facilities_Populate
class Bike_Facilities_Populate(Ccp_Script_Base):
# *** Constructor
def __init__(self):
    """Set up script state; attribute handles are resolved later in go_main."""
    Ccp_Script_Base.__init__(self, ArgParser_Script)
    # The '/byway/cycle_facil' system attribute (looked up in go_main).
    self.attr_cycle_facil = None
    # Piggy-backed: the '/byway/no_access' attribute for controlled-access
    # roadways (highways and freeways), so a second script isn't needed.
    self.attr_no_access = None
    # 2013.06.14: cautionary facilities became their own attribute, but
    # CcpV1 has no cautionaries, so this script ignores them.
    # Run counters. NOTE: the historical 'cyle' misspelling is load-bearing;
    # it is used as a key elsewhere in the script.
    self.stats = {
        'cnt_attrs_all': 0,
        'cnt_attrs_cyle_facil': 0,
        'cnt_attrs_no_access': 0,
        'cnt_facils_kvals': {},
    }
# ***
#
def go_main(self):
    """Main entry point: lock the revision table, attach the attributes,
    save a new revision, and commit (or roll back on error/debug)."""
    # Skipping: Ccp_Script_Base.go_main(self)
    do_commit = False
    try:
        # Serialize against other writers: everything below happens inside
        # one exclusive revision lock and one database transaction.
        log.debug('go_main: getting exclusive revision lock...')
        revision.Revision.revision_lock_dance(
            self.qb.db, caller='bike_facility_populate__go_main')
        log.debug('go_main: database is locked.')
        # MAYBE: There seems to be an awful lot of boilerplate code here.
        self.qb.grac_mgr = Grac_Manager()
        self.qb.grac_mgr.prepare_mgr('user', self.qb)
        # A developer is running this script.
        g.assurt(self.qb.username
                 and (self.qb.username != conf.anonymous_username))
        self.qb.user_group_id = User.private_group_id(self.qb.db,
                                                      self.qb.username)
        # Get a new revision ID.
        self.qb.item_mgr.start_new_revision(self.qb.db)
        log.debug('Got rid_new: %d' % (self.qb.item_mgr.rid_new,))
        # Get the Bike Facility attribute.
        internal_name = '/byway/cycle_facil'
        self.attr_cycle_facil = attribute.Many.get_system_attr(
            self.qb, internal_name)
        g.assurt(self.attr_cycle_facil is not None)
        # Get the Controlled Access attribute.
        internal_name = '/byway/no_access'
        self.attr_no_access = attribute.Many.get_system_attr(
            self.qb, internal_name)
        g.assurt(self.attr_no_access is not None)
        # BUG nnnn: New Script: Populate '/byway/cycle_route' by...
        #           stack IDs? byway names? maybe easier
        #           just to do in flashclient....
        # The actual work: walk every byway and derive its attributes.
        self.byways_suss_out_facilities()
        # Save the new revision and finalize the sequence numbers.
        log.debug('go_main: saving rev # %d' % (self.qb.item_mgr.rid_new,))
        # NOTE: We're cheating here: We know only the public group needs
        #       group_revision records, since all new items were only public.
        # Either of these should be acceptable:
        #   group_names_or_ids = ['Public',]
        group_names_or_ids = [group.Many.public_group_id(self.qb),]
        # MAYBE: Give credit to user who runs this script, or _script?
        #        I.e., where is the accountability if user like/dislike
        #        application (calculation) of this new attribute?
        #
        #complain_to_this_user = 'landonb'
        #complain_to_this_user = self.qb.username
        complain_to_this_user = '_script'
        #
        changenote = ('Populated new bike facility attr. using existing '
                      + 'tags and attrs. (i.e., guessing!).')
        #
        self.finish_script_save_revision(group_names_or_ids,
                                         username=complain_to_this_user,
                                         changenote=changenote)
        self.print_stats()
        if debug_skip_commit:
            raise Exception('DEBUG: Skipping commit: Debugging')
        do_commit = True
    # NOTE: Python 2 except syntax; this script predates Python 3.
    except Exception, e:
        log.error('Exception!: "%s" / %s' % (str(e), traceback.format_exc(),))
    finally:
        # Commit only if every step above succeeded (and we're not debugging).
        self.cli_args.close_query(do_commit)
# ***
#
def byways_suss_out_facilities(self):
log.info('byways_suss_out_facilities: ready, set, suss!')
time_0 = time.time()
prog_log = Debug_Progress_Logger(copy_this=debug_prog_log)
# 2013.05.12: Weird. At first, you'll see 1250 byways being processed
# each second, but later in the processing, after 100,000 byways, you'll
# see 250 byways being processed every one or two seconds.
#prog_log.log_freq = 250
#prog_log.log_freq = 2500
prog_log.log_freq = 1000
prog_log.loop_max = None
feat_class = byway
feat_search_fcn = 'search_for_items' # E.g. byway.Many().search_for_items
processing_fcn = self.feat_suss_out_facil
self.qb.item_mgr.load_feats_and_attcs(
self.qb, feat_class, feat_search_fcn,
processing_fcn, prog_log, heavyweight=False)
log.info('... processed %d features in %s'
% (prog_log.progress,
misc.time_format_elapsed(time_0),))
# ***
#
def feat_suss_out_facil(self, qb, gf, prog_log):
bike_facil = self.byway_deduce_bike_facility(gf)
if bike_facil is not None:
self.create_link_attr_feat(qb,
self.attr_cycle_facil,
gf,
value_text=bike_facil)
self.stats['cnt_attrs_cyle_facil'] += 1
misc.dict_count_inc(self.stats['cnt_facils_kvals'], bike_facil)
travel_restricted = self.byway_deduce_restricted(gf)
if travel_restricted:
self.create_link_attr_feat(qb,
self.attr_no_access,
gf,
value_boolean=True)
self.stats['cnt_attrs_no_access'] += 1
# ***
#
   def create_link_attr_feat(self, qb,
                             attr,
                             feat,
                             value_boolean=None,
                             value_integer=None,
                             value_real=None,
                             value_text=None,
                             value_binary=None,
                             value_date=None):
      """Create and save a new link_value joining attr to the byway feat.

      Exactly one value_* kwarg is expected to carry the attribute's
      value. The link is created at version 0, granted public-editor
      access via the grac_mgr, finalized against the new revision ID,
      and saved. Increments the cnt_attrs_all stat.
      """
      g.assurt(id(qb) == id(self.qb))
      g.assurt(feat.item_type_id == Item_Type.BYWAY)
      # Fresh client-side stack ID for the new link item.
      client_id = self.qb.item_mgr.get_next_client_id()
      new_link = link_value.One(
         qb=self.qb,
         row={
            # *** from item_versioned:
            'system_id' : None, # assigned later
            'branch_id' : self.qb.branch_hier[0][0],
            'stack_id' : client_id,
            'version' : 0,
            'deleted' : False,
            'reverted' : False,
            'name' : '', # FIXME: Is this right?
            #'valid_start_rid' : # assigned by
            #'valid_until_rid' : # version_finalize_and_increment
            # NOTE: We don't set valid_start_rid any earlier, so historic
            # views obviously won't show bike facility ornamentation.
            'lhs_stack_id' : attr.stack_id,
            'rhs_stack_id' : feat.stack_id,
            # The item type IDs are saved to the group_item_access table.
            'link_lhs_type_id' : attr.item_type_id,
            'link_rhs_type_id' : feat.item_type_id,
            'value_boolean' : value_boolean,
            'value_integer' : value_integer,
            'value_real' : value_real,
            'value_text' : value_text,
            'value_binary' : value_binary,
            'value_date' : value_date,
            }
         )
      log.verbose2('create_link_facil: new_link: %s' % (new_link,))
      g.assurt(new_link.groups_access is None)
      # Swap the client-side stack ID for a permanent one.
      new_link.stack_id_correct(self.qb)
      g.assurt(new_link.fresh)
      log.verbose('create_link_facil: not clearing item_cache')
      # NO: self.qb.item_mgr.item_cache_reset()
      self.qb.item_mgr.item_cache_add(new_link, client_id)
      self.qb.item_mgr.item_cache_add(attr)
      self.qb.item_mgr.item_cache_add(feat)
      # Grant editor access before finalizing and saving.
      prepared = self.qb.grac_mgr.prepare_item(self.qb,
         new_link, Access_Level.editor, ref_item=None)
      g.assurt(prepared)
      log.verbose2(' >> prepare_item: %s' % (new_link,))
      log.verbose2(' >> groups_access/1: %s' % (new_link.groups_access,))
      new_link.version_finalize_and_increment(
         self.qb, self.qb.item_mgr.rid_new)
      log.verbose2(' >> version_finalize_and_increment: %s' % (new_link,))
      new_link.save(self.qb, self.qb.item_mgr.rid_new)
      log.verbose2(' >> saved: %s' % (new_link,))
      log.verbose2(' >> groups_access/2: %s' % (new_link.groups_access,))
      # Sanity check: only the public group should have been granted access.
      g.assurt(len(new_link.groups_access) == 1)
      try:
         group_id = group.Many.public_group_id(self.qb)
         new_link.groups_access[group_id]
      except KeyError:
         g.assurt(False) # Unexpected.
      self.stats['cnt_attrs_all'] += 1
# ***
#
def byway_deduce_bike_facility(self, bway):
bike_facil = None
# MAYBE: Make stats for each bike facility value.
# BUG nnnn: Or just calculate nightly, along with miles of road, etc.,
# I.e., a report for the agency clients. See also gnuplot.
if bway.geofeature_layer_id in (byway.Geofeature_Layer.Bike_Trail,
byway.Geofeature_Layer.Major_Trail,):
bike_facil = 'paved_trail'
# MAYBE: Also check Tag_Presumption.tag_bike_path?
| |
<gh_stars>0
# Copyright 2013, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from mock import patch
from oslo_utils import encodeutils
from oslo_utils import units
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from subject.common import exception
from subject.common import store_utils
import subject.quota
from subject.tests.unit import utils as unit_test_utils
from subject.tests import utils as test_utils
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
class FakeContext(object):
    """Minimal request-context stand-in: a fixed, non-admin owner."""
    owner = 'someone'
    is_admin = False
class FakeSubject(object):
    """Bare-bones subject double used in place of a real domain subject.

    NOTE(review): the attributes below are class-level, so they are
    shared across instances unless shadowed by per-instance assignment;
    the tests reassign them on instances as needed.
    """
    size = None
    subject_id = 'someid'
    locations = [{'url': 'file:///not/a/path', 'metadata': {}}]
    tags = set([])
    def set_data(self, data, size=None):
        # Consumes the iterable and records the total length consumed;
        # the `size` hint is intentionally ignored by this fake.
        self.size = 0
        for d in data:
            self.size += len(d)
    def __init__(self, **kwargs):
        self.extra_properties = kwargs.get('extra_properties', {})
class TestSubjectQuota(test_utils.BaseTestCase):
    """Storage-quota enforcement tests for subject.quota.SubjectProxy.

    BUG FIX: several tests assigned the proxy to a local variable named
    ``subject``, which shadowed the ``subject`` package and made the
    ``subject.quota.SubjectProxy`` lookup raise UnboundLocalError before
    the assignment ever ran. The proxy local is now named ``proxy``.
    """
    def setUp(self):
        super(TestSubjectQuota, self).setUp()
    def tearDown(self):
        super(TestSubjectQuota, self).tearDown()
    def _get_subject(self, location_count=1, subject_size=10):
        # Build a SubjectProxy backed by fake context/db/store, with
        # `location_count` active locations recorded in the fake DB.
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_subject = FakeSubject()
        base_subject.subject_id = 'xyz'
        base_subject.size = subject_size
        proxy = subject.quota.SubjectProxy(base_subject, context, db_api,
                                           store)
        locations = []
        for i in range(location_count):
            locations.append({'url': 'file:///g/there/it/is%d' % i,
                              'metadata': {}, 'status': 'active'})
        subject_values = {'id': 'xyz', 'owner': context.owner,
                          'status': 'active', 'size': subject_size,
                          'locations': locations}
        db_api.subject_create(context, subject_values)
        return proxy
    def test_quota_allowed(self):
        quota = 10
        self.config(user_storage_quota=str(quota))
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_subject = FakeSubject()
        base_subject.subject_id = 'id'
        proxy = subject.quota.SubjectProxy(base_subject, context, db_api,
                                           store)
        data = '*' * quota
        base_subject.set_data(data, size=None)
        proxy.set_data(data)
        self.assertEqual(quota, base_subject.size)
    def _test_quota_allowed_unit(self, data_length, config_quota):
        # Exercise one quota-with-unit-suffix configuration value.
        self.config(user_storage_quota=config_quota)
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_subject = FakeSubject()
        base_subject.subject_id = 'id'
        proxy = subject.quota.SubjectProxy(base_subject, context, db_api,
                                           store)
        data = '*' * data_length
        base_subject.set_data(data, size=None)
        proxy.set_data(data)
        self.assertEqual(data_length, base_subject.size)
    def test_quota_allowed_unit_b(self):
        self._test_quota_allowed_unit(10, '10B')
    def test_quota_allowed_unit_kb(self):
        self._test_quota_allowed_unit(10, '1KB')
    def test_quota_allowed_unit_mb(self):
        self._test_quota_allowed_unit(10, '1MB')
    def test_quota_allowed_unit_gb(self):
        self._test_quota_allowed_unit(10, '1GB')
    def test_quota_allowed_unit_tb(self):
        self._test_quota_allowed_unit(10, '1TB')
    def _quota_exceeded_size(self, quota, data,
                             deleted=True, size=None):
        # Expect StorageQuotaFull when setting `data` under `quota`.
        self.config(user_storage_quota=quota)
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_subject = FakeSubject()
        base_subject.subject_id = 'id'
        proxy = subject.quota.SubjectProxy(base_subject, context, db_api,
                                           store)
        if deleted:
            with patch.object(store_utils, 'safe_delete_from_backend'):
                store_utils.safe_delete_from_backend(
                    context,
                    proxy.subject_id,
                    base_subject.locations[0])
        self.assertRaises(exception.StorageQuotaFull,
                          proxy.set_data,
                          data,
                          size=size)
    def test_quota_exceeded_no_size(self):
        quota = 10
        data = '*' * (quota + 1)
        # NOTE(jbresnah) When the subject size is None it means that it is
        # not known. In this case the only time we will raise an
        # exception is when there is no room left at all, thus we know
        # it will not fit.
        # That's why 'get_remaining_quota' is mocked with return_value = 0.
        with patch.object(subject.api.common, 'get_remaining_quota',
                          return_value=0):
            self._quota_exceeded_size(str(quota), data)
    def test_quota_exceeded_with_right_size(self):
        quota = 10
        data = '*' * (quota + 1)
        self._quota_exceeded_size(str(quota), data, size=len(data),
                                  deleted=False)
    def test_quota_exceeded_with_right_size_b(self):
        quota = 10
        data = '*' * (quota + 1)
        self._quota_exceeded_size('10B', data, size=len(data),
                                  deleted=False)
    def test_quota_exceeded_with_right_size_kb(self):
        quota = units.Ki
        data = '*' * (quota + 1)
        self._quota_exceeded_size('1KB', data, size=len(data),
                                  deleted=False)
    def test_quota_exceeded_with_lie_size(self):
        quota = 10
        data = '*' * (quota + 1)
        self._quota_exceeded_size(str(quota), data, deleted=False,
                                  size=quota - 1)
    def test_append_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        subject = self._get_subject()
        pre_add_locations = subject.locations[:]
        subject.locations.append(new_location)
        pre_add_locations.append(new_location)
        self.assertEqual(subject.locations, pre_add_locations)
    def test_insert_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        subject = self._get_subject()
        pre_add_locations = subject.locations[:]
        subject.locations.insert(0, new_location)
        pre_add_locations.insert(0, new_location)
        self.assertEqual(subject.locations, pre_add_locations)
    def test_extend_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        subject = self._get_subject()
        pre_add_locations = subject.locations[:]
        subject.locations.extend([new_location])
        pre_add_locations.extend([new_location])
        self.assertEqual(subject.locations, pre_add_locations)
    def test_iadd_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        subject = self._get_subject()
        pre_add_locations = subject.locations[:]
        subject.locations += [new_location]
        pre_add_locations += [new_location]
        self.assertEqual(subject.locations, pre_add_locations)
    def test_set_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        subject = self._get_subject()
        subject.locations = [new_location]
        self.assertEqual(subject.locations, [new_location])
    def _make_subject_with_quota(self, subject_size=10, location_count=2):
        # Quota sized exactly to the subject's current usage, so any
        # additional location pushes it over.
        quota = subject_size * location_count
        self.config(user_storage_quota=str(quota))
        return self._get_subject(subject_size=subject_size,
                                 location_count=location_count)
    def test_exceed_append_location(self):
        subject = self._make_subject_with_quota()
        self.assertRaises(exception.StorageQuotaFull,
                          subject.locations.append,
                          {'url': 'file:///a/path', 'metadata': {},
                           'status': 'active'})
    def test_exceed_insert_location(self):
        subject = self._make_subject_with_quota()
        self.assertRaises(exception.StorageQuotaFull,
                          subject.locations.insert,
                          0,
                          {'url': 'file:///a/path', 'metadata': {},
                           'status': 'active'})
    def test_exceed_extend_location(self):
        subject = self._make_subject_with_quota()
        self.assertRaises(exception.StorageQuotaFull,
                          subject.locations.extend,
                          [{'url': 'file:///a/path', 'metadata': {},
                            'status': 'active'}])
    def test_set_location_under(self):
        subject = self._make_subject_with_quota(location_count=1)
        subject.locations = [{'url': 'file:///a/path', 'metadata': {},
                              'status': 'active'}]
    def test_set_location_exceed(self):
        subject = self._make_subject_with_quota(location_count=1)
        try:
            subject.locations = [{'url': 'file:///a/path', 'metadata': {},
                                  'status': 'active'},
                                 {'url': 'file:///a/path2', 'metadata': {},
                                  'status': 'active'}]
            self.fail('Should have raised the quota exception')
        except exception.StorageQuotaFull:
            pass
    def test_iadd_location_exceed(self):
        subject = self._make_subject_with_quota(location_count=1)
        try:
            subject.locations += [{'url': 'file:///a/path', 'metadata': {},
                                   'status': 'active'}]
            self.fail('Should have raised the quota exception')
        except exception.StorageQuotaFull:
            pass
    def test_append_location_for_queued_subject(self):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_subject = FakeSubject()
        base_subject.subject_id = str(uuid.uuid4())
        proxy = subject.quota.SubjectProxy(base_subject, context, db_api,
                                           store)
        self.assertIsNone(proxy.size)
        self.stubs.Set(store_api, 'get_size_from_backend',
                       unit_test_utils.fake_get_size_from_backend)
        proxy.locations.append({'url': 'file:///fake.img.tar.gz',
                                'metadata': {}})
        self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
                      proxy.locations)
    def test_insert_location_for_queued_subject(self):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_subject = FakeSubject()
        base_subject.subject_id = str(uuid.uuid4())
        proxy = subject.quota.SubjectProxy(base_subject, context, db_api,
                                           store)
        self.assertIsNone(proxy.size)
        self.stubs.Set(store_api, 'get_size_from_backend',
                       unit_test_utils.fake_get_size_from_backend)
        proxy.locations.insert(0,
                               {'url': 'file:///fake.img.tar.gz',
                                'metadata': {}})
        self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
                      proxy.locations)
    def test_set_location_for_queued_subject(self):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_subject = FakeSubject()
        base_subject.subject_id = str(uuid.uuid4())
        proxy = subject.quota.SubjectProxy(base_subject, context, db_api,
                                           store)
        self.assertIsNone(proxy.size)
        self.stubs.Set(store_api, 'get_size_from_backend',
                       unit_test_utils.fake_get_size_from_backend)
        proxy.locations = [{'url': 'file:///fake.img.tar.gz', 'metadata': {}}]
        self.assertEqual([{'url': 'file:///fake.img.tar.gz', 'metadata': {}}],
                         proxy.locations)
    def test_iadd_location_for_queued_subject(self):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_subject = FakeSubject()
        base_subject.subject_id = str(uuid.uuid4())
        proxy = subject.quota.SubjectProxy(base_subject, context, db_api,
                                           store)
        self.assertIsNone(proxy.size)
        self.stubs.Set(store_api, 'get_size_from_backend',
                       unit_test_utils.fake_get_size_from_backend)
        proxy.locations += [{'url': 'file:///fake.img.tar.gz', 'metadata': {}}]
        self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
                      proxy.locations)
class TestSubjectPropertyQuotas(test_utils.BaseTestCase):
    """Property-count quota tests for SubjectProxy / SubjectRepoProxy."""
    def setUp(self):
        super(TestSubjectPropertyQuotas, self).setUp()
        self.base_subject = FakeSubject()
        self.subject = subject.quota.SubjectProxy(self.base_subject,
                                                  mock.Mock(),
                                                  mock.Mock(),
                                                  mock.Mock())
        self.subject_repo_mock = mock.Mock()
        self.subject_repo_mock.add.return_value = self.base_subject
        self.subject_repo_mock.save.return_value = self.base_subject
        self.subject_repo_proxy = subject.quota.SubjectRepoProxy(
            self.subject_repo_mock,
            mock.Mock(),
            mock.Mock(),
            mock.Mock())
    def test_save_subject_with_subject_property(self):
        self.config(subject_property_quota=1)
        self.subject.extra_properties = {'foo': 'bar'}
        self.subject_repo_proxy.save(self.subject)
        self.subject_repo_mock.save.assert_called_once_with(self.base_subject,
                                                            from_state=None)
    def test_save_subject_too_many_subject_properties(self):
        self.config(subject_property_quota=1)
        self.subject.extra_properties = {'foo': 'bar', 'foo2': 'bar2'}
        exc = self.assertRaises(exception.SubjectPropertyLimitExceeded,
                                self.subject_repo_proxy.save, self.subject)
        self.assertIn("Attempted: 2, Maximum: 1",
                      encodeutils.exception_to_unicode(exc))
    def test_save_subject_unlimited_subject_properties(self):
        # A quota of -1 means unlimited.
        self.config(subject_property_quota=-1)
        self.subject.extra_properties = {'foo': 'bar'}
        self.subject_repo_proxy.save(self.subject)
        self.subject_repo_mock.save.assert_called_once_with(self.base_subject,
                                                            from_state=None)
    def test_add_subject_with_subject_property(self):
        self.config(subject_property_quota=1)
        self.subject.extra_properties = {'foo': 'bar'}
        self.subject_repo_proxy.add(self.subject)
        self.subject_repo_mock.add.assert_called_once_with(self.base_subject)
    def test_add_subject_too_many_subject_properties(self):
        self.config(subject_property_quota=1)
        self.subject.extra_properties = {'foo': 'bar', 'foo2': 'bar2'}
        exc = self.assertRaises(exception.SubjectPropertyLimitExceeded,
                                self.subject_repo_proxy.add, self.subject)
        self.assertIn("Attempted: 2, Maximum: 1",
                      encodeutils.exception_to_unicode(exc))
    def test_add_subject_unlimited_subject_properties(self):
        self.config(subject_property_quota=-1)
        self.subject.extra_properties = {'foo': 'bar'}
        self.subject_repo_proxy.add(self.subject)
        self.subject_repo_mock.add.assert_called_once_with(self.base_subject)
    def _quota_exceed_setup(self):
        # Start a subject with two properties under a quota of two, so a
        # later quota reduction puts it over the limit.
        self.config(subject_property_quota=2)
        self.base_subject.extra_properties = {'foo': 'bar', 'spam': 'ham'}
        self.subject = subject.quota.SubjectProxy(self.base_subject,
                                                  mock.Mock(),
                                                  mock.Mock(),
                                                  mock.Mock())
    def test_modify_subject_properties_when_quota_exceeded(self):
        # Modifying (not adding) existing properties is allowed even when
        # the subject is already over the reduced quota.
        self._quota_exceed_setup()
        self.config(subject_property_quota=1)
        self.subject.extra_properties = {'foo': 'frob', 'spam': 'eggs'}
        self.subject_repo_proxy.save(self.subject)
        self.subject_repo_mock.save.assert_called_once_with(self.base_subject,
                                                            from_state=None)
        self.assertEqual('frob', self.base_subject.extra_properties['foo'])
        self.assertEqual('eggs', self.base_subject.extra_properties['spam'])
    def test_delete_subject_properties_when_quota_exceeded(self):
        self._quota_exceed_setup()
        self.config(subject_property_quota=1)
        del self.subject.extra_properties['foo']
        self.subject_repo_proxy.save(self.subject)
        self.subject_repo_mock.save.assert_called_once_with(self.base_subject,
                                                            from_state=None)
        self.assertNotIn('foo', self.base_subject.extra_properties)
        self.assertEqual('ham', self.base_subject.extra_properties['spam'])
    def test_invalid_quota_config_parameter(self):
        # Non-numeric quota strings surface as InvalidOptionValue.
        self.config(user_storage_quota='foo')
        location = {"url": "file:///fake.img.tar.gz", "metadata": {}}
        self.assertRaises(exception.InvalidOptionValue,
                          self.subject.locations.append, location)
    def test_exceed_quota_during_patch_operation(self):
        self._quota_exceed_setup()
        self.subject.extra_properties['frob'] = 'baz'
        self.subject.extra_properties['lorem'] = 'ipsum'
        self.assertEqual('bar', self.base_subject.extra_properties['foo'])
        self.assertEqual('ham', self.base_subject.extra_properties['spam'])
        self.assertEqual('baz', self.base_subject.extra_properties['frob'])
        self.assertEqual('ipsum', self.base_subject.extra_properties['lorem'])
        del self.subject.extra_properties['frob']
        del self.subject.extra_properties['lorem']
        self.subject_repo_proxy.save(self.subject)
        call_args = mock.call(self.base_subject, from_state=None)
        self.assertEqual(call_args, self.subject_repo_mock.save.call_args)
        self.assertEqual('bar', self.base_subject.extra_properties['foo'])
        self.assertEqual('ham', self.base_subject.extra_properties['spam'])
        self.assertNotIn('frob', self.base_subject.extra_properties)
        self.assertNotIn('lorem', self.base_subject.extra_properties)
    def test_quota_exceeded_after_delete_subject_properties(self):
        self.config(subject_property_quota=3)
        self.base_subject.extra_properties = {'foo': 'bar',
                                              'spam': 'ham',
                                              'frob': 'baz'}
        self.subject = subject.quota.SubjectProxy(self.base_subject,
                                                  mock.Mock(),
                                                  mock.Mock(),
                                                  mock.Mock())
        self.config(subject_property_quota=1)
        del self.subject.extra_properties['foo']
        self.subject_repo_proxy.save(self.subject)
        self.subject_repo_mock.save.assert_called_once_with(self.base_subject,
                                                            from_state=None)
        self.assertNotIn('foo', self.base_subject.extra_properties)
        self.assertEqual('ham', self.base_subject.extra_properties['spam'])
        self.assertEqual('baz', self.base_subject.extra_properties['frob'])
class TestSubjectTagQuotas(test_utils.BaseTestCase):
    """Tag-count quota tests against SubjectProxy's tag proxy."""
    def setUp(self):
        super(TestSubjectTagQuotas, self).setUp()
        self.base_subject = mock.Mock()
        self.base_subject.tags = set([])
        self.base_subject.extra_properties = {}
        self.subject = subject.quota.SubjectProxy(self.base_subject,
                                                  mock.Mock(),
                                                  mock.Mock(),
                                                  mock.Mock())
        self.subject_repo_mock = mock.Mock()
        self.subject_repo_proxy = subject.quota.SubjectRepoProxy(
            self.subject_repo_mock,
            mock.Mock(),
            mock.Mock(),
            mock.Mock())
    def test_replace_subject_tag(self):
        self.config(subject_tag_quota=1)
        self.subject.tags = ['foo']
        self.assertEqual(1, len(self.subject.tags))
    def test_replace_too_many_subject_tags(self):
        self.config(subject_tag_quota=0)
        # NOTE(review): assertRaises returning the exception relies on the
        # testtools-style base class, not plain unittest.
        exc = self.assertRaises(exception.SubjectTagLimitExceeded,
                                setattr, self.subject, 'tags', ['foo', 'bar'])
        self.assertIn('Attempted: 2, Maximum: 0',
                      encodeutils.exception_to_unicode(exc))
        self.assertEqual(0, len(self.subject.tags))
    def test_replace_unlimited_subject_tags(self):
        # A quota of -1 means unlimited.
        self.config(subject_tag_quota=-1)
        self.subject.tags = ['foo']
        self.assertEqual(1, len(self.subject.tags))
    def test_add_subject_tag(self):
        self.config(subject_tag_quota=1)
        self.subject.tags.add('foo')
        self.assertEqual(1, len(self.subject.tags))
    def test_add_too_many_subject_tags(self):
        self.config(subject_tag_quota=1)
        self.subject.tags.add('foo')
        exc = self.assertRaises(exception.SubjectTagLimitExceeded,
                                self.subject.tags.add, 'bar')
        self.assertIn('Attempted: 2, Maximum: 1',
                      encodeutils.exception_to_unicode(exc))
    def test_add_unlimited_subject_tags(self):
        self.config(subject_tag_quota=-1)
        self.subject.tags.add('foo')
        self.assertEqual(1, len(self.subject.tags))
    def test_remove_subject_tag_while_over_quota(self):
        # Removal must still work once the quota drops below usage.
        self.config(subject_tag_quota=1)
        self.subject.tags.add('foo')
        self.assertEqual(1, len(self.subject.tags))
        self.config(subject_tag_quota=0)
        self.subject.tags.remove('foo')
        self.assertEqual(0, len(self.subject.tags))
class TestQuotaSubjectTagsProxy(test_utils.BaseTestCase):
    """Direct tests of the QuotaSubjectTagsProxy set wrapper."""
    def setUp(self):
        super(TestQuotaSubjectTagsProxy, self).setUp()
    def test_add(self):
        tags_proxy = subject.quota.QuotaSubjectTagsProxy(set())
        tags_proxy.add('foo')
        self.assertIn('foo', tags_proxy)
    def test_add_too_many_tags(self):
        self.config(subject_tag_quota=0)
        tags_proxy = subject.quota.QuotaSubjectTagsProxy(set())
        exc = self.assertRaises(exception.SubjectTagLimitExceeded,
                                tags_proxy.add, 'bar')
        self.assertIn('Attempted: 1, Maximum: 0',
                      encodeutils.exception_to_unicode(exc))
    def test_equals(self):
        tags_proxy = subject.quota.QuotaSubjectTagsProxy(set())
        self.assertEqual(set(), tags_proxy)
    def test_not_equals(self):
        tags_proxy = subject.quota.QuotaSubjectTagsProxy(set())
        self.assertNotEqual('foo', tags_proxy)
| |
''' Defines exceptions used to report errors in parsing and execution
of macros.
'''
import exceptions
from macro.interpret.txt_token import TxtToken
from macro.util import NULL_POSITION, NULL_TOKEN_ID
# Base exception class with a shared rendering method.
class MacroError():
    ''' Base class for macro exceptions. Provides the shared
    rendering and debug-string machinery.'''
    # Subclasses override this to build their own token list.
    def set_render_list(self):
        self.render = [TxtToken("Unknown Exception")]
    def get_render_list(self):
        ''' Get a list of the tokens to render for this
        exception. '''
        self.set_render_list()
        return self.render
    # Debug methods
    def get_debug_str(self):
        # Join each token's description, inserting a space after any
        # token flagged with render_space_after.
        pieces = []
        for tok in self.get_render_list():
            pieces.append(tok.get_render_desc())
            if tok.render_space_after:
                pieces.append(' ')
        return ''.join(pieces)
    def __str__(self):
        return self.get_debug_str()
    def __repr__(self):
        return self.get_debug_str()
# Exception types
class OtherError(MacroError):
    ''' Marker base class for non-lexing/parsing errors.'''
class InterpretError(MacroError):
    ''' Interpretation-specific errors.
    Attributes:
      cmd   -- Start of command causing interpretation problems.
      data1 -- Optional extra exception data.
    '''
    def __init__(self, cmd, data1=None):
        self.cmd = cmd
        self.data1 = data1
    def get_token(self):
        ''' Get the token causing the error so we can give it
        a display popup.'''
        return self.cmd
class LexerError(MacroError):
    ''' Lexer errors.
    Attributes:
      data  -- Input causing error
      start -- Start index of the offending input
      end   -- End index of the offending input
      rule  -- Rules object we tried.
    '''
    def __init__(self, data, start, end, rule):
        self.start = start
        self.end = end
        self.rule = rule
        self.data = data
    def get_start(self):
        # Index where the failed lex attempt began.
        return self.start
    def get_end(self):
        # Index where the failed lex attempt ended.
        return self.end
class ParserError(MacroError):
    ''' Parser errors.
    Attributes:
      cmd   -- Verb MacroToken
      data1 -- Exception data field 1
      data2 -- Exception data field 2
    Note that these errors can be saved as popups.
    '''
    def __init__(self, cmd, data1=None, data2=None):
        if cmd:
            cmd.render_space_after = True
        self.cmd = cmd
        self.data1 = data1
        self.data2 = data2
    def get_token(self):
        ''' Get the token causing the error so we can give it
        a display popup.'''
        # Prefer data1 when it carries a real token ID, else data2.
        tok = self.data1
        if tok is not None and tok.token_id != NULL_TOKEN_ID:
            return tok
        if self.data2 is not None:
            return self.data2
        return None
    def get_pos(self):
        # Prefer data1's position when it is real, else data2's.
        tok = self.data1
        if tok is not None and tok.start != NULL_POSITION:
            return tok.start
        if self.data2 is not None:
            return self.data2.start
        return NULL_POSITION
#
# UI Errors
#
# No input
class NoInputError(OtherError, BaseException):
    '''Raised when no macro input was supplied.'''
    def __init__(self, error="Unknown init error"):
        self.error = error
    def set_render_list(self):
        self.render = [TxtToken(self.error)]
# Init error
class InitError(OtherError, BaseException):
    '''Raised for initialization failures.'''
    def __init__(self, error="Unknown init error"):
        self.error = error
    def set_render_list(self):
        self.render = [TxtToken(self.error)]
# Search error
class InvalidSearchError(OtherError, BaseException):
    '''Somebody is doing something funny with search.'''
    def __init__(self, error="Unknown search error"):
        self.error = error
    def set_render_list(self):
        self.render = [TxtToken(self.error)]
#
# Input/Config Errors
#
# Handle config problems
class ConfigError(OtherError, BaseException):
    '''Handle configuration errors.'''
    def __init__(self, error="Unknown init error"):
        self.error = error
    def set_render_list(self):
        # BUG FIX: was TxtToken(error), a NameError -- the message lives
        # on self.error.
        self.render = [TxtToken("Configuration error: "),
                       TxtToken(self.error)]
# Bad user input independent of parsing.
class UserInputError(OtherError, BaseException):
    '''Exception raised for errors in the input independent of
    parsing.
    '''
    def __init__(self, macro=''):
        self.error = macro
    def set_render_list(self):
        # Empty input gets a friendly prompt; anything else is echoed.
        if len(self.error) > 0:
            self.render = [TxtToken("Bad user input: "),
                           TxtToken(self.error)]
        else:
            self.render = [TxtToken("Please enter a macro to parse.")]
# Too long
class MacroLenError(OtherError, BaseException):
    '''Raised when the macro exceeds the allowed length.
    '''
    def __init__(self, err=''):
        self.error = err
    def set_render_list(self):
        self.render = [TxtToken(self.error)]
#
# Lexer Errors
#
# Error in lexing--couldn't lex input into tokens.
class LexErrorNoMatchingRules(LexerError, BaseException):
    '''Raised when we cannot find a rule to lex some input.
    '''
    def set_render_list(self):
        # Deferred import avoids a module-level import cycle.
        from macro.lex.rules import get_rule_desc
        msg = "Expected %s, but got %s. Is there a typo in your macro?" \
              % (get_rule_desc(self.rule), self.data)
        self.render = [TxtToken(msg)]
# Error in lexing--required macro part not found.
class LexErrorRequiredMatchFailed(LexerError, BaseException):
    '''Raised when we expected a match for something, and did not find
    one. Example: /command-verb
    '''
    def set_render_list(self):
        # Deferred import avoids a module-level import cycle.
        from macro.lex.rules import get_rule_desc
        msg = "Could not find %s in %s." % (get_rule_desc(self.rule),
                                            self.data)
        self.render = [TxtToken(msg)]
#
# Parsing Errors
#
# Error in parsing--invalid target token
class ParseErrorInvalidTargetToken(ParserError, BaseException):
    '''A target qualifier token is misspelled or misplaced.
    The offending token is carried in cmd.
    '''
    def set_render_list(self):
        # FIX: corrected the user-facing spelling ("mispelled").
        self.render = [TxtToken("Target qualifier %s is either misspelled or in the wrong place." % self.cmd.data)]
    def get_pos(self):
        return self.cmd.start
    def get_token(self):
        return self.cmd
# Error in parsing--got unexpected token
class ParseErrorUnexpectedToken(ParserError, BaseException):
    '''Was looking for one type of token, got another.
    In this exception, data1 is expected, data2 is received.
    '''
    def set_render_list(self):
        self.render = [TxtToken("Unexpected token found. Do you have a typo in your macro?")]
    def get_pos(self):
        # Report where the unexpected (received) token starts.
        return self.data2.start
    def get_token(self):
        return self.data2
# Error in parsing--multiple targets in the same condition.
class ParseErrorMultiTargetCondition(ParserError, BaseException):
    '''Bad condition--has multiple targets.
    In this exception, data1 is the current target, data2 the new target.
    '''
    def set_render_list(self):
        self.render = [TxtToken("Can't assign multiple targets in the same condition.")]
# Error in parsing--no reset allowed for this command.
class ParseErrorNoResetAllowed(ParserError, BaseException):
    '''reset= given for a command that doesn't allow it.
    Field data1 is the reset param.
    '''
    def set_render_list(self):
        self.render = [TxtToken("Macro command %s doesn't take reset options." % self.cmd.data)]
    def get_pos(self):
        # Point at the offending reset parameter, not the command.
        return self.data1.start
    def get_token(self):
        return self.data1
# Error in parsing--reset in the wrong place
class ParseErrorResetBeforeConditions(ParserError, BaseException):
    '''reset= options must be given before conditions.
    Field data1 is the reset param.
    '''
    def set_render_list(self):
        self.render = [TxtToken("Options for resetting a sequence must be placed after any macro conditions. Example: %s [conditions] reset=..." % self.cmd.data)]
    def get_pos(self):
        # Point at the misplaced reset parameter.
        return self.data1.start
    def get_token(self):
        return self.data1
# Error in parsing--toggles aren't allowed
class ParseErrorNoTogglesAllowed(ParserError, BaseException):
    '''Command doesn't take toggled arguments.
    data1 has the offending toggle, data2 the parameter.
    '''
    def set_render_list(self):
        self.render = [TxtToken("Command %s doesn't allow toggled parameters." % self.cmd.data)]
# Error in parsing--parameter required to make a valid command.
class ParseErrorParamRequired(ParserError, BaseException):
    '''Command needs a parameter, and doesn't have one.
    No data for this error.
    '''
    def set_render_list(self):
        self.render = [TxtToken("Command %s requires a valid parameter." % self.cmd.data)]
    def get_pos(self):
        # The parameter is missing, so point just past the command.
        return self.cmd.end
    def get_token(self):
        return self.cmd
# Error in parsing--did not find an acceptable param pattern.
class ParseErrorWrongParams(ParserError, BaseException):
    '''Command was not given an acceptable set of parameters.
    The command token has the pattern required.
    data1 has the parameter types, data2 the error position.
    '''
    def set_render_list(self):
        # A command may carry its own canned error message; prefer it.
        if self.cmd.attrs.error_msg:
            self.render = [TxtToken(self.cmd.attrs.error_msg)]
            return
        # Describe every acceptable parameter pattern for the command...
        req_pats = []
        got_pats = None
        for t in self.cmd.attrs.param:
            if not t:
                req_pats.append('no value, ')
            else:
                req_pats.append('(%s)' % \
                    (', '.join(["Item, Spell or known identifier" if i is str \
                                else "Numeric Value" \
                                for i in t])))
        # ...and describe what the user actually supplied.
        if len(self.data1) > 0:
            got_pats = ('(%s)' % \
                (', '.join(["Item, Spell or known identifier" if i is str \
                            else "Numeric Value" \
                            for i in self.data1])))
        else:
            got_pats = "nothing"
        # FIX: corrected the user-facing spelling ("mispell").
        self.render = [TxtToken("Command %s" % self.cmd.data),
                       TxtToken("takes"),
                       TxtToken(' or '.join(req_pats), render_space_after=False),
                       TxtToken(". The parameter types given are"),
                       TxtToken(got_pats, render_space_after=False),
                       TxtToken(". Did you misspell or forget something?")]
    # Return the position
    def get_pos(self):
        return self.data2
    def get_token(self):
        return self.cmd
# Error in parsing--option does not allow arguments.
class ParseErrorNoArgsForOption(ParserError, BaseException):
    '''Option arguments are given for an option that does
    not allow them. Example: exists:1/2/3
    data1 has the option token, data2 the operator token.
    '''
    def set_render_list(self):
        # data1 is itself a renderable token, so it is embedded directly
        # between the two literal text tokens.
        self.render = [TxtToken("Option"),
                       self.data1,
                       TxtToken("doesn't take arguments.")]
    # Return the position of the op token.
    def get_pos(self):
        return self.data2.end
    def get_token(self):
        return self.data2
# Error in parsing--option requires arguments, and we didn't get any.
class ParseErrorReqArgsForOption(ParserError, BaseException):
    '''
    Option arguments are omitted for an option that requires them.
    data1 has the option token.
    '''
    def set_render_list(self):
        # data1 is itself a renderable token, embedded directly.
        self.render = [TxtToken("Option"),
                       self.data1,
                       TxtToken("requires option arguments.")]
    # Return the position just past the option token.
    # NOTE(review): unlike sibling error classes, no get_token() is defined
    # here -- confirm the ParserError base provides an acceptable default.
    def get_pos(self):
        return self.data1.end
# Error in parsing--non-matching negs in or list
class ParseErrorNonMatchingNegs(ParserError, BaseException):
    '''
    If we get word:arg/noword:arg form (mixing negated and non-negated
    options in one argument list), error.
    data1 has the offending negative.
    '''
    def set_render_list(self):
        # Fixed user-facing typo: "seperate" -> "separate".
        self.render = [TxtToken("Can't mix negative and non-negatives options in the same argument list. Break this into separate options.")]
    # Return the position just past the offending token.
    def get_pos(self):
        return self.data1.end
# Error in parsing--non-matching option words in or list
class ParseErrorNonMatchingOptionWords(ParserError, BaseException):
'''
If we get word:arg/word:arg form, word had better match.
cmd has the first option.
data1 has the option that doesnt match.
'''
def set_render_list(self):
self.render = [TxtToken("Option"),
TxtToken("'%s'" % self.cmd.data),
TxtToken("does not match option"),
TxtToken("'%s'" % self.data1.data),
TxtToken("in your condition. Break this | |
import numpy as np
from scipy.stats import norm, ks_2samp
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
###############################################################################
# Plot formatting
#------------------------------------------------------------------------------
#plt.rc('text', usetex=True)
plt.rc('font', size=16)#family='serif')

lwidth = 2 # Line width used in plots

# Maps a table column / parameter name to its LaTeX axis-label string; shared
# by every plotting helper in this module.
label_dict = {'Mdark_Mstar_ratio':'$M_{DM}/M_*$',
              'Mtot':'$M_{tot}$ [$M_\odot$]',
              'Mstar':'$M_*$ [$M_\odot$]',
              'rabsmag': '$M_r$',
              'Z12logOH': '12 + log(O/H)',
              'Rmax': '$R_{max}$ [kpc/h]',
              'avg_r_turn': '$R_{turn}$ [kpc/h]',
              'Rmax_Rturn_ratio': '$R_{max}$/$R_{turn}$',
              'M90_Mdisk_ratio': '$M_{90}/M_{90, disk}$',
              'M90_map': 'log($M_{90}/M_\odot$)',
              'M90_disk_map': 'log($M_{90,disk}/M_\odot$)',
              'Rturn_map': '$R_{turn}$ [kpc/h]',
              'Rdisk_map': '$R_{disk}$ [kpc/h]'}
###############################################################################
###############################################################################
def median_hist( void, wall, x_param, y_param, bins, error_linestyle=['', ':'],
                 save_fig=False, IMAGE_DIR='', IMAGE_FORMAT='eps'):
    '''
    Bin the data in x and calculate the median of the y property in each bin.
    Plot with associated y error bars, and bin width as x error bars.

    PARAMETERS
    ==========
    void : astropy table of length n
        Table of parameters for void galaxies
    wall : astropy table of length m
        Table of parameters for wall galaxies
    x_param : string
        Galaxy parameter to plot on the x-axis
    y_param : string
        Galaxy parameter to plot on the y-axis
    bins : numpy array of shape (p,)
        Bin edges in x parameter
    error_linestyle : length-2 list of strings
        Line styling for the error bars. Order is [void, wall]. Default
        styling is solid for void and dotted for wall. (The mutable default
        is only ever read, never mutated, so sharing it is safe.)
    save_fig : boolean
        Flag to determine whether or not the figure should be saved. Default
        is False (do not save).
    IMAGE_DIR : string
        Address to directory to save file. Default is current directory.
    IMAGE_FORMAT : string
        Format for saved image. Default is 'eps'
    '''
    ###########################################################################
    # Bin galaxies
    #
    # np.digitize convention: index 0 collects values below bins[0]; index
    # len(bins) collects values at/above the last edge. The loop below scans
    # indices 0..len(bins)-1, preserving the original behavior.
    #--------------------------------------------------------------------------
    i_bin_void = np.digitize(void[x_param], bins)
    i_bin_wall = np.digitize(wall[x_param], bins)

    y_void = np.asarray(void[y_param])
    y_wall = np.asarray(wall[y_param])
    ###########################################################################

    ###########################################################################
    # Calculate bin statistics
    #
    # Empty bins are left as NaN so matplotlib simply skips them; the previous
    # version raised ZeroDivisionError (std/0) and warned on the median of an
    # empty slice whenever a bin contained no galaxies.
    #--------------------------------------------------------------------------
    median_void = np.full(len(bins), np.nan)
    median_wall = np.full(len(bins), np.nan)
    std_void = np.full(len(bins), np.nan)
    std_wall = np.full(len(bins), np.nan)

    for i in range(len(bins)):
        in_bin_void = y_void[i_bin_void == i]
        in_bin_wall = y_wall[i_bin_wall == i]

        if in_bin_void.size > 0:
            median_void[i] = np.median(in_bin_void)
            # NOTE(review): kept as std/N per the original code; the standard
            # error would be std/sqrt(N) -- confirm which is intended.
            std_void[i] = np.std(in_bin_void)/in_bin_void.size

        if in_bin_wall.size > 0:
            median_wall[i] = np.median(in_bin_wall)
            std_wall[i] = np.std(in_bin_wall)/in_bin_wall.size
    ###########################################################################

    ###########################################################################
    # Plot scatter plot
    #
    # x error bars are the width of the bins
    # y error bars are the (scaled) standard deviation in each bin
    #--------------------------------------------------------------------------
    bin_width = 0.5*(bins[1] - bins[0])

    # Void galaxies (fmt='none' draws only the error bars; markers come from
    # the separate plt.plot call so they can carry the legend label).
    v = plt.errorbar(bins + bin_width, median_void,
                     xerr=bin_width, yerr=std_void,
                     marker='o', mfc='r', ms=200,
                     ecolor='r', fmt='none')
    plt.plot(bins + bin_width, median_void, 'ro', label='Void')

    if error_linestyle[0] != '':
        # errorbar returns (line, caplines, barlinecols); restyle the bars.
        v[-1][0].set_linestyle(error_linestyle[0])
        v[-1][1].set_linestyle(error_linestyle[0])

    # Wall galaxies
    w = plt.errorbar(bins + bin_width, median_wall,
                     xerr=bin_width, yerr=std_wall,
                     marker='^', mfc='k', ms=200,
                     ecolor='k', fmt='none')
    w[-1][0].set_linestyle(error_linestyle[1])
    w[-1][1].set_linestyle(error_linestyle[1])
    plt.plot(bins + bin_width, median_wall, 'k^', label='Wall')

    plt.xlabel(label_dict[x_param])
    plt.ylabel('median ' + label_dict[y_param])
    plt.legend()
    ###########################################################################

    ###########################################################################
    # Save figure?
    #--------------------------------------------------------------------------
    if save_fig:
        plt.savefig( IMAGE_DIR + '/histograms/' + x_param + '-' + y_param + '.' + IMAGE_FORMAT,
                     format=IMAGE_FORMAT)
    ###########################################################################
###############################################################################
###############################################################################
###############################################################################
def param_hist_scatter( void, wall, field, bins, save_fig=False, IMAGE_DIR='',
                        IMAGE_FORMAT='eps'):
    '''
    Scatter-plot the histogram of the specified field parameter as separated
    by environment, with error bars on both axes.

    PARAMETERS
    ==========
    void : astropy table of length n
        Table of parameters for void galaxies
    wall : astropy table of length m
        Table of parameters for wall galaxies
    field : string
        Galaxy parameter to histogram
    bins : numpy array of shape (p,)
        Histogram bin edges
    save_fig : boolean
        Flag to determine whether or not the figure should be saved. Default
        is False (do not save).
    IMAGE_DIR : string
        Address to directory to save file. Default is current directory.
    IMAGE_FORMAT : string
        Format for saved image. Default is 'eps'
    '''
    bin_step = bins[1] - bins[0]

    ###########################################################################
    # Histogram of samples
    #--------------------------------------------------------------------------
    Ntot_void = len(void)
    Ntot_wall = len(wall)

    Nv,_ = np.histogram(void[field], bins=bins)
    Nw,_ = np.histogram(wall[field], bins=bins)

    # Fraction of each sample falling in each bin
    nv = Nv/Ntot_void
    nw = Nw/Ntot_wall

    # Change integer counts to floats
    Nv = Nv.astype('float')
    Nw = Nw.astype('float')

    # Set 0-counts to infinity so the error-bar expression below yields a
    # zero-length bar for empty bins (1/inf == 0). np.infty was removed in
    # NumPy 2.0; np.inf is the canonical, numerically identical spelling.
    Nv[Nv==0] = np.inf
    Nw[Nw==0] = np.inf
    ###########################################################################

    ###########################################################################
    # Plot scatter plot of histogram
    #
    # NOTE(review): the y error 1/(Ntot*sqrt(N)) is kept from the original;
    # the Poisson error on the fraction N/Ntot would be sqrt(N)/Ntot --
    # confirm which is intended.
    #--------------------------------------------------------------------------
    # Void
    plt.plot(bins[:-1] + 0.5*bin_step, nv, 'ro', label='Void')
    plt.errorbar(bins[:-1] + 0.5*bin_step, nv, xerr=0.5*bin_step,
                 yerr=1/(Ntot_void*np.sqrt(Nv)),
                 ecolor='r', fmt='none')

    # Wall (dotted error bars to match the wall styling used elsewhere)
    plt.plot(bins[:-1] + 0.5*bin_step, nw, 'k^', label='Wall')
    w = plt.errorbar(bins[:-1] + 0.5*bin_step, nw, xerr=0.5*bin_step,
                     yerr=1/(Ntot_wall*np.sqrt(Nw)),
                     ecolor='k', fmt='none')
    w[-1][0].set_linestyle(':')
    w[-1][1].set_linestyle(':')

    plt.xlabel(label_dict[field])
    plt.ylabel('Fraction')
    plt.legend()
    ###########################################################################

    ###########################################################################
    # Save figure?
    #--------------------------------------------------------------------------
    if save_fig:
        plt.savefig( IMAGE_DIR + '/histograms/' + field + '_scatter_hist.' + IMAGE_FORMAT,
                     format=IMAGE_FORMAT)
    ###########################################################################
###############################################################################
###############################################################################
###############################################################################
def param_hist( void, wall, field, bins, save_fig=False, IMAGE_DIR='',
                IMAGE_FORMAT='eps'):
    '''
    Step-histogram the specified field parameter, separated by environment.

    PARAMETERS
    ==========
    void : astropy table of length n
        Table of parameters for void galaxies
    wall : astropy table of length m
        Table of parameters for wall galaxies
    field : string
        Galaxy parameter to histogram
    bins : numpy array of shape (p,)
        Histogram bin edges
    save_fig : boolean
        If True, save the figure to disk. Default is False (do not save).
    IMAGE_DIR : string
        Directory in which to save the file. Default is current directory.
    IMAGE_FORMAT : string
        Format for the saved image. Default is 'eps'.
    '''
    # Per-environment sample sizes and bin counts, normalized to fractions.
    n_void = len(void)
    n_wall = len(wall)

    counts_void, _ = np.histogram(void[field], bins=bins)
    counts_wall, _ = np.histogram(wall[field], bins=bins)

    # Draw both distributions as step curves (sample size in the legend).
    plt.step(bins[:-1], counts_void/n_void, 'r', where='post',
             linewidth=lwidth, label='Void: ' + str(n_void))
    plt.step(bins[:-1], counts_wall/n_wall, 'k', where='post',
             linewidth=lwidth, linestyle=':', label='Wall: ' + str(n_wall))

    # Axis labels and legend.
    plt.xlabel(label_dict[field])
    plt.ylabel('Fraction')
    plt.legend()

    # Optionally write the figure to disk.
    if save_fig:
        out_name = IMAGE_DIR + '/histograms/' + field + '_hist.' + IMAGE_FORMAT
        plt.savefig(out_name, format=IMAGE_FORMAT)
###############################################################################
###############################################################################
###############################################################################
def param_CDF( void, wall, field, field_range, save_fig=False, IMAGE_DIR='',
               IMAGE_FORMAT='eps'):
    '''
    Plot the empirical CDF of the given field for void and wall galaxies,
    annotated with the p-value of a two-sample Kolmogorov-Smirnov test.

    PARAMETERS
    ==========
    void : astropy table of length n
        Table of parameters for void galaxies
    wall : astropy table of length m
        Table of parameters for wall galaxies
    field : string
        Galaxy parameter whose CDF is plotted
    field_range : tuple
        Minimum and maximum of data values
    save_fig : boolean
        If True, save the figure to disk. Default is False (do not save).
    IMAGE_DIR : string
        Directory in which to save the file. Default is current directory.
    IMAGE_FORMAT : string
        Format for the saved image. Default is 'eps'.
    '''
    # Two-sample K-S test between the environments.
    _, p_value = ks_2samp( wall[field], void[field])

    # Draw both cumulative distributions as step curves.
    plot_specs = [(void, 'r', '-', 'Void'), (wall, 'k', ':', 'Wall')]
    for table, color, style, env_label in plot_specs:
        plt.hist( table[field], bins=1000, range=field_range, density=True,
                  cumulative=True, histtype='step', color=color,
                  linewidth=lwidth, linestyle=style, label=env_label)

    # CDF plot formatting, with the K-S p-value annotated inside the axes.
    plt.xlabel(label_dict[field])
    plt.ylabel('Fraction')
    plt.legend(loc='upper left')
    plt.annotate('p-val: {:.3f}'.format(p_value),
                 (field_range[1] * 0.65, 0.15))

    # Optionally write the figure to disk.
    if save_fig:
        plt.savefig( IMAGE_DIR + '/histograms/' + field + '_CDF.' + IMAGE_FORMAT,
                     format=IMAGE_FORMAT)
###############################################################################
###############################################################################
###############################################################################
def DM_SM_hist( void_ratios, wall_ratios, bins=None, hist_range=(0,60),
y_max=0.05, y_err=False,
plot_title='$M_{DM}$ / $M_*$ distribution',
x_label='$M_{DM}/M_*$',
save_fig=False, FILE_SUFFIX='', IMAGE_DIR='',
IMAGE_FORMAT='eps'):
'''
Histogram the dark matter to stellar mass ratios as separated by
environment
Parameters:
===========
void_ratios : numpy array of shape (n, )
Ratio of dark matter halo mass to stellar mass for void galaxies
wall_ratios : numpy array of shape (m, )
Ratio of dark matter halo mass to stellar mass for wall galaxies
bins : numpy array of shape (p, )
Histogram bin edges
hist_range : tuple
Minimum and maximum of histogram
y_max : float
Upper limit of y-axis; default value is 0.05.
y_err : boolean
Determines whether or not to plot sqrt(N) error bars on histogram.
Default value is False (no error bars).
plot_title : string
Title of plot; default is '$M_{DM}$ / $M_*$ distribution'
x_label : string
Axis label for x-axis; default is '$M_{DM}/M_*$'
save_fig : boolean
Flag to determine whether or not the figure should be saved. Default
is False (do not save).
FILE_SUFFIX : string
Additional information to include at the end of the figure file name.
Default is '' (nothing).
IMAGE_DIR : string
Address to directory to save file. Default is current directory.
IMAGE_FORMAT : string
Format for saved image. Default is 'eps'
'''
if bins is None:
bins = np.linspace(hist_range[0], | |
value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, intVector, name)
__repr__ = _swig_repr
    # --- SWIG auto-generated wrapper methods for std::vector<int> ---
    # Every method below forwards directly to the matching C function in the
    # _robotsim extension module; do not edit by hand -- regenerate via SWIG.
    def iterator(self):
        return _robotsim.intVector_iterator(self)
    def __iter__(self):
        return self.iterator()
    def __nonzero__(self):
        # Python 2 truthiness hook; __bool__ below covers Python 3.
        return _robotsim.intVector___nonzero__(self)
    def __bool__(self):
        return _robotsim.intVector___bool__(self)
    def __len__(self):
        return _robotsim.intVector___len__(self)
    def __getslice__(self, i, j):
        return _robotsim.intVector___getslice__(self, i, j)
    def __setslice__(self, *args):
        return _robotsim.intVector___setslice__(self, *args)
    def __delslice__(self, i, j):
        return _robotsim.intVector___delslice__(self, i, j)
    def __delitem__(self, *args):
        return _robotsim.intVector___delitem__(self, *args)
    def __getitem__(self, *args):
        return _robotsim.intVector___getitem__(self, *args)
    def __setitem__(self, *args):
        return _robotsim.intVector___setitem__(self, *args)
    def pop(self):
        return _robotsim.intVector_pop(self)
    def append(self, x):
        return _robotsim.intVector_append(self, x)
    # C++ std::vector API mirrored alongside the Pythonic methods above.
    def empty(self):
        return _robotsim.intVector_empty(self)
    def size(self):
        return _robotsim.intVector_size(self)
    def swap(self, v):
        return _robotsim.intVector_swap(self, v)
    def begin(self):
        return _robotsim.intVector_begin(self)
    def end(self):
        return _robotsim.intVector_end(self)
    def rbegin(self):
        return _robotsim.intVector_rbegin(self)
    def rend(self):
        return _robotsim.intVector_rend(self)
    def clear(self):
        return _robotsim.intVector_clear(self)
    def get_allocator(self):
        return _robotsim.intVector_get_allocator(self)
    def pop_back(self):
        return _robotsim.intVector_pop_back(self)
    def erase(self, *args):
        return _robotsim.intVector_erase(self, *args)
    def __init__(self, *args):
        # Standard SWIG constructor shim: attach the C-side object to self.
        this = _robotsim.new_intVector(*args)
        try:
            self.this.append(this)
        except Exception:
            self.this = this
    def push_back(self, x):
        return _robotsim.intVector_push_back(self, x)
    def front(self):
        return _robotsim.intVector_front(self)
    def back(self):
        return _robotsim.intVector_back(self)
    def assign(self, n, x):
        return _robotsim.intVector_assign(self, n, x)
    def resize(self, *args):
        return _robotsim.intVector_resize(self, *args)
    def insert(self, *args):
        return _robotsim.intVector_insert(self, *args)
    def reserve(self, n):
        return _robotsim.intVector_reserve(self, n)
    def capacity(self):
        return _robotsim.intVector_capacity(self)
    __swig_destroy__ = _robotsim.delete_intVector
    __del__ = lambda self: None

# Register the proxy class with the SWIG runtime.
intVector_swigregister = _robotsim.intVector_swigregister
intVector_swigregister(intVector)
class doubleMatrix(_object):
    """SWIG proxy for a C++ matrix of doubles.

    Auto-generated wrapper: every method forwards to the corresponding C
    function in the _robotsim extension module. Do not edit by hand --
    regenerate via SWIG.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, doubleMatrix, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, doubleMatrix, name)
    __repr__ = _swig_repr
    def iterator(self):
        return _robotsim.doubleMatrix_iterator(self)
    def __iter__(self):
        return self.iterator()
    def __nonzero__(self):
        # Python 2 truthiness hook; __bool__ below covers Python 3.
        return _robotsim.doubleMatrix___nonzero__(self)
    def __bool__(self):
        return _robotsim.doubleMatrix___bool__(self)
    def __len__(self):
        return _robotsim.doubleMatrix___len__(self)
    def __getslice__(self, i, j):
        return _robotsim.doubleMatrix___getslice__(self, i, j)
    def __setslice__(self, *args):
        return _robotsim.doubleMatrix___setslice__(self, *args)
    def __delslice__(self, i, j):
        return _robotsim.doubleMatrix___delslice__(self, i, j)
    def __delitem__(self, *args):
        return _robotsim.doubleMatrix___delitem__(self, *args)
    def __getitem__(self, *args):
        return _robotsim.doubleMatrix___getitem__(self, *args)
    def __setitem__(self, *args):
        return _robotsim.doubleMatrix___setitem__(self, *args)
    def pop(self):
        return _robotsim.doubleMatrix_pop(self)
    def append(self, x):
        return _robotsim.doubleMatrix_append(self, x)
    # C++ std::vector API mirrored alongside the Pythonic methods above.
    def empty(self):
        return _robotsim.doubleMatrix_empty(self)
    def size(self):
        return _robotsim.doubleMatrix_size(self)
    def swap(self, v):
        return _robotsim.doubleMatrix_swap(self, v)
    def begin(self):
        return _robotsim.doubleMatrix_begin(self)
    def end(self):
        return _robotsim.doubleMatrix_end(self)
    def rbegin(self):
        return _robotsim.doubleMatrix_rbegin(self)
    def rend(self):
        return _robotsim.doubleMatrix_rend(self)
    def clear(self):
        return _robotsim.doubleMatrix_clear(self)
    def get_allocator(self):
        return _robotsim.doubleMatrix_get_allocator(self)
    def pop_back(self):
        return _robotsim.doubleMatrix_pop_back(self)
    def erase(self, *args):
        return _robotsim.doubleMatrix_erase(self, *args)
    def __init__(self, *args):
        # Standard SWIG constructor shim: attach the C-side object to self.
        this = _robotsim.new_doubleMatrix(*args)
        try:
            self.this.append(this)
        except Exception:
            self.this = this
    def push_back(self, x):
        return _robotsim.doubleMatrix_push_back(self, x)
    def front(self):
        return _robotsim.doubleMatrix_front(self)
    def back(self):
        return _robotsim.doubleMatrix_back(self)
    def assign(self, n, x):
        return _robotsim.doubleMatrix_assign(self, n, x)
    def resize(self, *args):
        return _robotsim.doubleMatrix_resize(self, *args)
    def insert(self, *args):
        return _robotsim.doubleMatrix_insert(self, *args)
    def reserve(self, n):
        return _robotsim.doubleMatrix_reserve(self, n)
    def capacity(self):
        return _robotsim.doubleMatrix_capacity(self)
    __swig_destroy__ = _robotsim.delete_doubleMatrix
    __del__ = lambda self: None

# Register the proxy class with the SWIG runtime.
doubleMatrix_swigregister = _robotsim.doubleMatrix_swigregister
doubleMatrix_swigregister(doubleMatrix)
class TriangleMesh(_object):
    """
    A 3D indexed triangle mesh class.

    Attributes:
        vertices (SWIG vector of floats): a list of vertices, given as
            a flattened coordinate list [x1, y1, z1, x2, y2, ...]
        indices (SWIG vector of ints): a list of triangle vertices given
            as indices into the vertices list, i.e., [a1,b1,c1, a2,b2,c2, ...]

    Note: because the bindings are generated by SWIG, you can access the indices /
    vertices members via some automatically generated accessors / modifiers. In
    particular len(), append(), and indexing via [] are useful. Some other methods
    like resize() are also provided. However, you CANNOT set these items via
    assignment.

    Examples::

        m = TriangleMesh()
        m.vertices.append(0)
        m.vertices.append(0)
        m.vertices.append(0)
        print len(m.vertices) #prints 3
        m.vertices = [0,0,0] #this is an error
        m.vertices += [1,2,3] #this is also an error

    C++ includes: geometry.h
    """
    # SWIG attribute-dispatch machinery (auto-generated; do not edit).
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, TriangleMesh, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, TriangleMesh, name)
    __repr__ = _swig_repr
    def translate(self, t):
        """
        Translates all the vertices by v=v+t.

        Args:
            t (:obj:`list of 3 floats`)
        """
        return _robotsim.TriangleMesh_translate(self, t)
    def transform(self, R, t):
        """
        Transforms all the vertices by the rigid transform v=R*v+t.

        Args:
            R (:obj:`list of 9 floats (so3 element)`)
            t (:obj:`list of 3 floats`)
        """
        return _robotsim.TriangleMesh_transform(self, R, t)
    # Expose the C-side indices/vertices members through SWIG accessors.
    __swig_setmethods__["indices"] = _robotsim.TriangleMesh_indices_set
    __swig_getmethods__["indices"] = _robotsim.TriangleMesh_indices_get
    if _newclass:
        indices = _swig_property(_robotsim.TriangleMesh_indices_get, _robotsim.TriangleMesh_indices_set)
    __swig_setmethods__["vertices"] = _robotsim.TriangleMesh_vertices_set
    __swig_getmethods__["vertices"] = _robotsim.TriangleMesh_vertices_get
    if _newclass:
        vertices = _swig_property(_robotsim.TriangleMesh_vertices_get, _robotsim.TriangleMesh_vertices_set)
    def __init__(self):
        """
        Construct an empty 3D indexed triangle mesh.

        Returns:
            (:class:`~klampt.TriangleMesh`):

        See the class docstring for attribute details and usage examples.
        """
        # Standard SWIG constructor shim: attach the C-side object to self.
        this = _robotsim.new_TriangleMesh()
        try:
            self.this.append(this)
        except Exception:
            self.this = this
    __swig_destroy__ = _robotsim.delete_TriangleMesh
    __del__ = lambda self: None

# Register the proxy class with the SWIG runtime.
TriangleMesh_swigregister = _robotsim.TriangleMesh_swigregister
TriangleMesh_swigregister(TriangleMesh)
class PointCloud(_object):
"""
A 3D point cloud class.
Attributes:
vertices (SWIG vector of floats): a list of vertices, given as a
list [x1, y1, z1, x2, y2, ... zn]
properties (SWIG vector of floats): a list of vertex properties,
given as a list [p11, p21, ..., pk1, p12, p22, ..., pk2, ...,
p1n, p2n, ..., pkn] where each vertex has k properties. The
name of each property is given by the ``propertyNames`` member.
propertyNames (SWIG vector of strs): a list of the names of each
property
settings (SWIG map of strs to strs): a general property map .
Note: because the bindings are generated by SWIG, you can access the
vertices/properties/propertyName members via some automatically generated
accessors / modifiers. In particular len(), append(), and indexing via [] are
useful. Some other methods like resize() are also provided. However, you CANNOT
set these items via assignment.
Properties are usually lowercase but follow PCL naming convention, and often
include:
* normal_x, normal_y, normal_z: the outward normal
* rgb, rgba: integer encoding of RGB (24 bit int) or RGBA color (32 bit int)
* opacity: opacity, in range [0,1]
* c: opacity, in range [0,255]
* r,g,b,a: color channels, in range [0,1]
* u,v: texture coordinate
Settings are usually lowercase but follow PCL naming convention, and often
include:
* version: version of the PCL file, typically "0.7"
* id: integer id
* viewpoint: "ox oy oz qw qx qy qz"
Examples::
pc = PointCloud()
pc.propertyNames.append('rgb')
#add 1 point with coordinates (0,0,0) and color #000000 (black)
pc.vertices.append(0)
pc.vertices.append(0)
pc.vertices.append(0)
pc.properties.append(0)
print len(pc.vertices) #prints 3
print pc.numPoints() #prints 1
#add another point with coordinates (1,2,3)
pc.addPoint([1,2,3])
#this prints 2
print pc.numPoints()
#this prints 2, because there is 1 property category x 2 points
print len(pc.properties.size())
#this prints 0; this is the default value added when addPoint is called
print pc.getProperty(1,0)
C++ includes: geometry.h
"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, PointCloud, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, PointCloud, name)
__repr__ = _swig_repr
    # --- SWIG auto-generated accessors (class PointCloud); each forwards
    # directly to the _robotsim extension module. ---
    def numPoints(self):
        """
        Returns the number of points.

        Returns:
            (int):
        """
        return _robotsim.PointCloud_numPoints(self)
    def numProperties(self):
        """
        Returns the number of properties.

        Returns:
            (int):
        """
        return _robotsim.PointCloud_numProperties(self)
    def setPoints(self, num, plist):
        """
        Sets all the points to the given list (a 3n-list).

        Args:
            num (int): number of points
            plist (:obj:`list of floats`): flattened [x1,y1,z1, x2,y2,z2, ...]
        """
        return _robotsim.PointCloud_setPoints(self, num, plist)
    def addPoint(self, p):
        """
        Adds a point. Sets all its properties to 0. Returns the index.

        Args:
            p (:obj:`list of 3 floats`)

        Returns:
            (int): index of the newly added point
        """
        return _robotsim.PointCloud_addPoint(self, p)
def setPoint(self, index, p):
"""
Sets the position of the point at the given index to p.
Args:
index (int)
p (:obj:`list of 3 floats`)
"""
return _robotsim.PointCloud_setPoint(self, index, | |
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.468568,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 13.8133,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 2.83407e-06,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202691,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.51802e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.499313,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.805374,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.406526,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.71121,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.571068,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.94083,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 2.86787e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0209434,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.151449,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.15489,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.151452,
'Execution Unit/Register Files/Runtime Dynamic': 0.175833,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.319061,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.09463,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 3.58975,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00103549,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00103549,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000898276,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000345751,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.002225,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00519425,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0100579,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.148899,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.324101,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.505729,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96396,
'Instruction Fetch Unit/Runtime Dynamic': 0.993982,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0417221,
'L2/Runtime Dynamic': 0.0285064,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 7.96018,
'Load Store Unit/Data Cache/Runtime Dynamic': 3.26889,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.217507,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.217507,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 8.98729,
'Load Store Unit/Runtime Dynamic': 4.55906,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.536335,
'Load Store Unit/StoreQ/Runtime Dynamic': 1.07267,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.190347,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.190971,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0531408,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.883086,
'Memory Management Unit/Runtime Dynamic': 0.244111,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 27.4064,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 7.74196e-06,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0225277,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.266777,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': | |
# source repo: mycyzs/fta
# coding: utf-8
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from datetime import datetime
from sqlalchemy import (Boolean, Column, Date, DateTime, Float, ForeignKey,
Index, Integer, SmallInteger, String)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
# Declarative base shared by every model in this module; ``metadata`` is
# exposed for schema tooling (create_all, reflection, migrations).
Base = declarative_base()
metadata = Base.metadata
class AuthGroup(Base):
    """``auth_group``: a named group of users (Django auth schema)."""
    __tablename__ = 'auth_group'
    id = Column(Integer, primary_key=True)
    name = Column(String(80), nullable=False, unique=True)
class AuthGroupPermission(Base):
    """``auth_group_permissions``: group <-> permission many-to-many link."""
    __tablename__ = 'auth_group_permissions'
    __table_args__ = (
        # Each (group, permission) pair may appear only once.
        Index('group_id', 'group_id', 'permission_id', unique=True),
    )
    id = Column(Integer, primary_key=True)
    group_id = Column(ForeignKey(u'auth_group.id'), nullable=False, index=True)
    permission_id = Column(
        ForeignKey(u'auth_permission.id'), nullable=False, index=True)
    group = relationship(u'AuthGroup')
    permission = relationship(u'AuthPermission')
class AuthMessage(Base):
    """``auth_message``: per-user message rows from the Django auth schema."""
    __tablename__ = 'auth_message'
    id = Column(Integer, primary_key=True)
    user_id = Column(ForeignKey(u'auth_user.id'), nullable=False, index=True)
    message = Column(String, nullable=False)
    user = relationship(u'AuthUser')
class AuthPermission(Base):
    """``auth_permission``: a permission tied to a Django content type."""
    __tablename__ = 'auth_permission'
    __table_args__ = (
        # codename is unique within its content type.
        Index('content_type_id', 'content_type_id', 'codename', unique=True),
    )
    id = Column(Integer, primary_key=True)
    name = Column(String(50), nullable=False)
    content_type_id = Column(
        ForeignKey(u'django_content_type.id'), nullable=False, index=True)
    codename = Column(String(100), nullable=False)
    content_type = relationship(u'DjangoContentType')
class AuthUser(Base):
    """``auth_user``: Django user account."""
    __tablename__ = 'auth_user'
    id = Column(Integer, primary_key=True)
    username = Column(String(30), nullable=False, unique=True)
    first_name = Column(String(30), nullable=False)
    last_name = Column(String(30), nullable=False)
    email = Column(String(75), nullable=False)
    # Hashed password string, not plaintext (Django stores algorithm$salt$hash).
    password = Column(String(128), nullable=False)
    # Boolean flags stored as integers in the underlying schema.
    is_staff = Column(Integer, nullable=False)
    is_active = Column(Integer, nullable=False)
    is_superuser = Column(Integer, nullable=False)
    last_login = Column(DateTime, nullable=False)
    date_joined = Column(DateTime, nullable=False)
class AuthUserGroup(Base):
    """``auth_user_groups``: user <-> group many-to-many link."""
    __tablename__ = 'auth_user_groups'
    __table_args__ = (
        Index('user_id', 'user_id', 'group_id', unique=True),
    )
    id = Column(Integer, primary_key=True)
    user_id = Column(ForeignKey(u'auth_user.id'), nullable=False, index=True)
    group_id = Column(ForeignKey(u'auth_group.id'), nullable=False, index=True)
    group = relationship(u'AuthGroup')
    user = relationship(u'AuthUser')
class AuthUserUserPermission(Base):
    """``auth_user_user_permissions``: per-user permission grants."""
    __tablename__ = 'auth_user_user_permissions'
    __table_args__ = (
        Index('user_id', 'user_id', 'permission_id', unique=True),
    )
    id = Column(Integer, primary_key=True)
    user_id = Column(ForeignKey(u'auth_user.id'), nullable=False, index=True)
    permission_id = Column(
        ForeignKey(u'auth_permission.id'), nullable=False, index=True)
    permission = relationship(u'AuthPermission')
    user = relationship(u'AuthUser')
class CeleryTaskmeta(Base):
    """``celery_taskmeta``: Celery database result backend, one row per task."""
    __tablename__ = 'celery_taskmeta'
    id = Column(Integer, primary_key=True)
    task_id = Column(String(255), nullable=False, unique=True)
    status = Column(String(50), nullable=False)
    # Serialized task return value; NULL until the task finishes.
    result = Column(String)
    date_done = Column(DateTime, nullable=False)
    traceback = Column(String)
    hidden = Column(Integer, nullable=False, index=True)
    meta = Column(String)
class CeleryTasksetmeta(Base):
    """``celery_tasksetmeta``: Celery result backend rows for task sets/groups."""
    __tablename__ = 'celery_tasksetmeta'
    id = Column(Integer, primary_key=True)
    taskset_id = Column(String(255), nullable=False, unique=True)
    result = Column(String, nullable=False)
    date_done = Column(DateTime, nullable=False)
    hidden = Column(Integer, nullable=False, index=True)
class DjangoAdminLog(Base):
    """``django_admin_log``: audit trail of admin-site add/change/delete actions."""
    __tablename__ = 'django_admin_log'
    id = Column(Integer, primary_key=True)
    action_time = Column(DateTime, nullable=False)
    user_id = Column(ForeignKey(u'auth_user.id'), nullable=False, index=True)
    content_type_id = Column(ForeignKey(u'django_content_type.id'), index=True)
    object_id = Column(String)
    object_repr = Column(String(200), nullable=False)
    # Small integer action code (add/change/delete in Django's convention).
    action_flag = Column(SmallInteger, nullable=False)
    change_message = Column(String, nullable=False)
    content_type = relationship(u'DjangoContentType')
    user = relationship(u'AuthUser')
class DjangoCache(Base):
    """``django_cache``: Django database cache backend (key/value with expiry)."""
    __tablename__ = 'django_cache'
    cache_key = Column(String(255), primary_key=True)
    value = Column(String, nullable=False)
    expires = Column(DateTime, nullable=False, index=True)
class DjangoContentType(Base):
    """``django_content_type``: registry of installed models (app_label, model)."""
    __tablename__ = 'django_content_type'
    __table_args__ = (
        Index('app_label', 'app_label', 'model', unique=True),
    )
    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    app_label = Column(String(100), nullable=False)
    model = Column(String(100), nullable=False)
class DjangoSession(Base):
    """``django_session``: server-side session store keyed by session cookie."""
    __tablename__ = 'django_session'
    session_key = Column(String(40), primary_key=True)
    # Base64-encoded, signed session payload.
    session_data = Column(String, nullable=False)
    expire_date = Column(DateTime, nullable=False, index=True)
class DjangoSite(Base):
    """``django_site``: sites-framework table mapping domain to display name."""
    __tablename__ = 'django_site'
    id = Column(Integer, primary_key=True)
    domain = Column(String(100), nullable=False)
    name = Column(String(50), nullable=False)
class DjceleryCrontabschedule(Base):
    """``djcelery_crontabschedule``: crontab-style schedule (fields hold cron expressions)."""
    __tablename__ = 'djcelery_crontabschedule'
    id = Column(Integer, primary_key=True)
    minute = Column(String(64), nullable=False)
    hour = Column(String(64), nullable=False)
    day_of_week = Column(String(64), nullable=False)
    day_of_month = Column(String(64), nullable=False)
    month_of_year = Column(String(64), nullable=False)
class DjceleryIntervalschedule(Base):
    """``djcelery_intervalschedule``: run-every-N-periods schedule."""
    __tablename__ = 'djcelery_intervalschedule'
    id = Column(Integer, primary_key=True)
    every = Column(Integer, nullable=False)
    # Period unit name (e.g. 'seconds') — up to 24 chars.
    period = Column(String(24), nullable=False)
class DjceleryPeriodictask(Base):
    """``djcelery_periodictask``: a periodic task definition for celery beat.

    A row references either an interval schedule or a crontab schedule
    (presumably mutually exclusive — enforced by djcelery, not by this schema).
    """
    __tablename__ = 'djcelery_periodictask'
    id = Column(Integer, primary_key=True)
    name = Column(String(200), nullable=False, unique=True)
    # Dotted path of the task callable.
    task = Column(String(200), nullable=False)
    interval_id = Column(
        ForeignKey(u'djcelery_intervalschedule.id'), index=True)
    crontab_id = Column(ForeignKey(u'djcelery_crontabschedule.id'), index=True)
    # Serialized positional/keyword arguments passed to the task.
    args = Column(String, nullable=False)
    kwargs = Column(String, nullable=False)
    queue = Column(String(200))
    exchange = Column(String(200))
    routing_key = Column(String(200))
    expires = Column(DateTime)
    enabled = Column(Integer, nullable=False)
    last_run_at = Column(DateTime)
    total_run_count = Column(Integer, nullable=False)
    date_changed = Column(DateTime, nullable=False)
    description = Column(String, nullable=False)
    crontab = relationship(u'DjceleryCrontabschedule')
    interval = relationship(u'DjceleryIntervalschedule')
class DjceleryPeriodictasks(Base):
    """``djcelery_periodictasks``: single-row change marker for the beat scheduler."""
    __tablename__ = 'djcelery_periodictasks'
    ident = Column(SmallInteger, primary_key=True)
    last_update = Column(DateTime, nullable=False)
class DjceleryTaskstate(Base):
    """``djcelery_taskstate``: per-task execution state captured by celerycam events."""
    __tablename__ = 'djcelery_taskstate'
    id = Column(Integer, primary_key=True)
    state = Column(String(64), nullable=False, index=True)
    task_id = Column(String(36), nullable=False, unique=True)
    name = Column(String(200), index=True)
    # Timestamp of the last event recorded for this task.
    tstamp = Column(DateTime, nullable=False, index=True)
    args = Column(String)
    kwargs = Column(String)
    eta = Column(DateTime)
    expires = Column(DateTime)
    result = Column(String)
    traceback = Column(String)
    runtime = Column(Float(asdecimal=True))
    retries = Column(Integer, nullable=False)
    worker_id = Column(ForeignKey(u'djcelery_workerstate.id'), index=True)
    hidden = Column(Integer, nullable=False, index=True)
    worker = relationship(u'DjceleryWorkerstate')
class DjceleryWorkerstate(Base):
    """``djcelery_workerstate``: last-seen heartbeat per Celery worker host."""
    __tablename__ = 'djcelery_workerstate'
    id = Column(Integer, primary_key=True)
    hostname = Column(String(255), nullable=False, unique=True)
    last_heartbeat = Column(DateTime, index=True)
class FtaSolutionsAppAdvice(Base):
    """``fta_solutions_app_advice``: an advice record generated for a business.

    Aggregates a span of alarms (alarm_num over [alarm_start_time,
    alarm_end_time]) under an advice definition (advice_def_id).
    """
    __tablename__ = 'fta_solutions_app_advice'
    id = Column(Integer, primary_key=True)
    advice_def_id = Column(Integer, nullable=False, index=True)
    # CMDB business id this advice belongs to.
    cc_biz_id = Column(Integer, nullable=False)
    subject = Column(String(128), nullable=False)
    alarm_num = Column(Integer, nullable=False)
    alarm_start_time = Column(Date, nullable=False)
    alarm_end_time = Column(Date, nullable=False)
    status = Column(String(32), nullable=False)
    comment = Column(String)
    create_time = Column(DateTime, nullable=False, index=True)
    operator = Column(String(128))
    modify_time = Column(DateTime)
    advice_fta_def_id = Column(Integer, nullable=True, default=0)
    alarminstance_id = Column(Integer, nullable=True, default=0)
    # 'no' by default; exact semantics not evident from this file — see callers.
    offline_handle = Column(String(32), default='no')
class FtaSolutionsAppAdvicedef(Base):
    """``fta_solutions_app_advicedef``: rule that triggers an advice.

    Presumably fires when at least ``threshold`` matching alarms occur within
    ``interval`` — TODO confirm against the evaluator code.
    """
    __tablename__ = 'fta_solutions_app_advicedef'
    id = Column(Integer, primary_key=True)
    codename = Column(String(128), nullable=False)
    description = Column(String, nullable=False)
    is_enabled = Column(Integer, nullable=False)
    cc_biz_id = Column(Integer, nullable=False)
    subject_type = Column(String(64), nullable=False)
    check_type = Column(String(64), nullable=False)
    check_sub_type = Column(String(128), nullable=False)
    interval = Column(Integer, nullable=False)
    threshold = Column(Integer, nullable=False)
    advice_type = Column(String(64), nullable=False)
    advice = Column(String, nullable=False)
    create_time = Column(DateTime, nullable=False)
class FtaSolutionsAppAlarmdef(Base):
    """``fta_solutions_app_alarmdef``: an alarm definition and its handling policy.

    Scoping fields (module, topo_set, idc, device_class, ...) select which
    hosts/services the definition applies to; ``solution_id`` links the
    automated solution to run; ``notify`` holds notification settings.
    """
    __tablename__ = 'fta_solutions_app_alarmdef'
    id = Column(Integer, primary_key=True)
    is_enabled = Column(Integer, nullable=False)
    # Soft-delete flag: rows are flagged rather than removed.
    is_deleted = Column(Integer, nullable=False)
    category = Column(String(32), nullable=False)
    cc_biz_id = Column(Integer, nullable=False, index=True)
    alarm_type = Column(String(128), nullable=False)
    tnm_attr_id = Column(String)
    reg = Column(String(255))
    process = Column(String(255))
    module = Column(String, nullable=False)
    topo_set = Column(String, nullable=False)
    set_attr = Column(String, nullable=False)
    idc = Column(String, nullable=False)
    device_class = Column(String, nullable=False)
    responsible = Column(String(255))
    title = Column(String(128))
    description = Column(String)
    ok_notify = Column(Integer, nullable=False)
    notify = Column(String, nullable=False)
    solution_id = Column(Integer, index=True)
    timeout = Column(Integer, nullable=False)
    source_type = Column(String(32))
    alarm_attr_id = Column(String(128))
    module_names = Column(String)
    set_names = Column(String)
class FtaSolutionsAppAlarmType(Base):
    """``fta_solutions_app_alarmtype``: maps a raw alarm pattern to an alarm type.

    ``pattern``/``exclude`` select raw alarms per source; ``match_mode``'s
    encoding is not evident from this file — confirm against the matcher.
    """
    __tablename__ = 'fta_solutions_app_alarmtype'
    id = Column(Integer, primary_key=True)
    is_enabled = Column(Boolean, default=True)
    is_hidden = Column(Boolean, default=True)
    cc_biz_id = Column(Integer, nullable=False, index=True)
    source_type = Column(String(128), nullable=False, index=True)
    alarm_type = Column(String(128), nullable=False)
    pattern = Column(String(128), nullable=False)
    description = Column(String)
    exclude = Column(String, default='')
    match_mode = Column(Integer, default=0)
class FtaSolutionsAppAlarminstance(Base):
    """``fta_solutions_app_alarminstance``: one concrete alarm being processed.

    Carries the raw event payload (``raw``/``origin_alarm``), lifecycle
    timestamps (source/begin/end/finish), snapshots of the definition and
    solution taken at trigger time (``snap_alarm_def``/``snap_solution``),
    and external ticket ids (uwork_id, bpm_task_id, ...).
    """
    __tablename__ = 'fta_solutions_app_alarminstance'
    id = Column(Integer, primary_key=True)
    alarm_def_id = Column(Integer, nullable=False, index=True)
    source_type = Column(String(32))
    source_id = Column(String(255), index=True)
    # De-duplication key: one row per upstream event.
    event_id = Column(String(255), unique=True)
    ip = Column(String(30))
    raw = Column(String, nullable=False)
    status = Column(String(30), index=True)
    failure_type = Column(String(30))
    tnm_alarm = Column(String)
    tnm_alarm_id = Column(String(30), unique=True)
    inc_alarm_id = Column(String(30))
    uwork_id = Column(String(30))
    bpm_task_id = Column(String(30))
    comment = Column(String)
    source_time = Column(DateTime, index=True)
    begin_time = Column(DateTime, nullable=False)
    end_time = Column(DateTime)
    priority = Column(Integer, nullable=False, index=True, default=1)
    cc_biz_id = Column(Integer, nullable=False, index=True)
    alarm_type = Column(String(128), nullable=False, index=True)
    solution_type = Column(String(128), index=True)
    snap_alarm_def = Column(String)
    snap_solution = Column(String)
    cc_topo_set = Column(String(128), nullable=False, index=True)
    cc_app_module = Column(String(128), nullable=False, index=True)
    origin_alarm = Column(String)
    approved_user = Column(String(128))
    approved_time = Column(DateTime)
    approved_comment = Column(String(128))
    level = Column(Integer, nullable=False, index=True, default=1)
    finish_time = Column(DateTime)
class FtaSolutionsAppAlarminstanceBackup(Base):
    """``fta_solutions_app_alarminstancebackup``: archive copy of alarm instances.

    Same columns as FtaSolutionsAppAlarminstance, but note this table declares
    no Python-side defaults for ``priority``/``level`` (the live table defaults
    both to 1) — rows are presumably copied over verbatim.
    """
    __tablename__ = 'fta_solutions_app_alarminstancebackup'
    id = Column(Integer, primary_key=True)
    alarm_def_id = Column(Integer, nullable=False, index=True)
    source_type = Column(String(32))
    source_id = Column(String(255), index=True)
    event_id = Column(String(255), unique=True)
    ip = Column(String(30))
    raw = Column(String, nullable=False)
    status = Column(String(30), index=True)
    failure_type = Column(String(30))
    tnm_alarm = Column(String)
    tnm_alarm_id = Column(String(30), unique=True)
    inc_alarm_id = Column(String(30))
    uwork_id = Column(String(30))
    bpm_task_id = Column(String(30))
    comment = Column(String)
    source_time = Column(DateTime, index=True)
    begin_time = Column(DateTime, nullable=False)
    end_time = Column(DateTime)
    priority = Column(Integer, nullable=False, index=True)
    cc_biz_id = Column(Integer, nullable=False, index=True)
    alarm_type = Column(String(128), nullable=False, index=True)
    solution_type = Column(String(128), index=True)
    snap_alarm_def = Column(String)
    snap_solution = Column(String)
    cc_topo_set = Column(String(128), nullable=False, index=True)
    cc_app_module = Column(String(128), nullable=False, index=True)
    origin_alarm = Column(String)
    approved_user = Column(String(128))
    approved_time = Column(DateTime)
    approved_comment = Column(String(128))
    level = Column(Integer, nullable=False, index=True)
    finish_time = Column(DateTime)
class FtaSolutionsAppAlarminstancearchive(Base):
__tablename__ = 'fta_solutions_app_alarminstancearchive'
__table_args__ = (
Index('fta_solutions_app_alarminstancearchi_date_78c35a10ad17e6e8_uniq', 'date', 'cc_biz_id', 'biz_team',
'is_success', 'alarm_type', 'failure_type', 'solution_type', 'source_type', 'is_off_time', unique=True),
)
id = Column(Integer, primary_key=True)
date = Column(Date, nullable=False, index=True)
cc_biz_id = Column(SmallInteger, nullable=False, index=True)
biz_team = Column(String(128), nullable=False, index=True)
is_success | |
in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_mixin6" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_mixin6`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/projects/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_token(self, project, role, iat, **kwargs): # noqa: E501
"""Delete a new project token. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_token(project, role, iat, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str project: (required)
:param str role: (required)
:param str iat: (required)
:param str id:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_token_with_http_info(project, role, iat, **kwargs) # noqa: E501
def delete_token_with_http_info(self, project, role, iat, **kwargs):  # noqa: E501
    """Delete a project token.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_token_with_http_info(project, role, iat, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str project: (required)
    :param str role: (required)
    :param str iat: (required)
    :param str id:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: locals() is captured as the very first statement so it holds
    # exactly the declared parameters (project, role, iat) plus the raw
    # `kwargs` dict — creating any other local first would corrupt it.
    local_var_params = locals()

    all_params = [
        'project',
        'role',
        'iat',
        'id'
    ]
    # Per-request options accepted by every generated endpoint method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Flatten kwargs into local_var_params, rejecting unknown names.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_token" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'project' is set
    if self.api_client.client_side_validation and ('project' not in local_var_params or  # noqa: E501
                                                   local_var_params['project'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `project` when calling `delete_token`")  # noqa: E501
    # verify the required parameter 'role' is set
    if self.api_client.client_side_validation and ('role' not in local_var_params or  # noqa: E501
                                                   local_var_params['role'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `role` when calling `delete_token`")  # noqa: E501
    # verify the required parameter 'iat' is set
    if self.api_client.client_side_validation and ('iat' not in local_var_params or  # noqa: E501
                                                   local_var_params['iat'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `iat` when calling `delete_token`")  # noqa: E501

    collection_formats = {}

    # Path template parameters for
    # /api/v1/projects/{project}/roles/{role}/token/{iat}
    path_params = {}
    if 'project' in local_var_params:
        path_params['project'] = local_var_params['project']  # noqa: E501
    if 'role' in local_var_params:
        path_params['role'] = local_var_params['role']  # noqa: E501
    if 'iat' in local_var_params:
        path_params['iat'] = local_var_params['iat']  # noqa: E501

    # Optional `id` query parameter; omitted entirely when None.
    query_params = []
    if 'id' in local_var_params and local_var_params['id'] is not None:  # noqa: E501
        query_params.append(('id', local_var_params['id']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/projects/{project}/roles/{role}/token/{iat}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='object',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_mixin6(self, name, **kwargs):  # noqa: E501
    """Get returns a project by name.  # noqa: E501

    Synchronous by default; pass async_req=True to run the request in a
    background thread whose .get() yields the result.

    >>> thread = api.get_mixin6(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: (required)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :param _request_timeout: total request timeout (single number) or a
        (connection, read) tuple of timeouts.
    :return: V1alpha1AppProject
        If the method is called asynchronously, returns the request thread.
    """
    # Callers of this convenience wrapper want only the deserialized body,
    # not the (data, status, headers) tuple the *_with_http_info form returns.
    kwargs['_return_http_data_only'] = True
    return self.get_mixin6_with_http_info(name, **kwargs)  # noqa: E501
def get_mixin6_with_http_info(self, name, **kwargs):  # noqa: E501
    """Get returns a project by name  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_mixin6_with_http_info(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1alpha1AppProject, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE: locals() is captured as the very first statement so it holds
    # exactly the declared parameter (name) plus the raw `kwargs` dict.
    local_var_params = locals()

    all_params = [
        'name'
    ]
    # Per-request options accepted by every generated endpoint method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Flatten kwargs into local_var_params, rejecting unknown names.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_mixin6" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `get_mixin6`")  # noqa: E501

    collection_formats = {}

    # Path template parameter for /api/v1/projects/{name}
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/api/v1/projects/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1alpha1AppProject',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_sync_windows_state(self, name, **kwargs):  # noqa: E501
    """GetSchedulesState returns true if there are any active sync syncWindows.  # noqa: E501

    Synchronous by default; pass async_req=True to run the request in a
    background thread whose .get() yields the result.

    >>> thread = api.get_sync_windows_state(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: (required)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding the response data. Default is True.
    :param _request_timeout: total request timeout (single number) or a
        (connection, read) tuple of timeouts.
    :return: ProjectSyncWindowsResponse
        If the method is called asynchronously, returns the request thread.
    """
    # Callers of this convenience wrapper want only the deserialized body,
    # not the (data, status, headers) tuple the *_with_http_info form returns.
    kwargs['_return_http_data_only'] = True
    return self.get_sync_windows_state_with_http_info(name, **kwargs)  # noqa: E501
def get_sync_windows_state_with_http_info(self, name, **kwargs): # noqa: E501
"""GetSchedulesState returns true if there are any active sync syncWindows # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sync_windows_state_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, | |
core.internals only has to worry
# about 2D cases.
df = self.to_frame()
result = df.quantile(q=q, interpolation=interpolation, numeric_only=False)
if result.ndim == 2:
result = result.iloc[:, 0]
if is_list_like(q):
result.name = self.name
return self._constructor(result, index=Float64Index(q), name=self.name)
else:
# scalar
return result.iloc[0]
def corr(self, other, method="pearson", min_periods=None) -> float:
    """
    Compute correlation with `other` Series, excluding missing values.

    Parameters
    ----------
    other : Series
        Series with which to compute the correlation.
    method : {'pearson', 'kendall', 'spearman'} or callable
        Correlation method, or a callable taking two 1d ndarrays and
        returning a float. For a callable, the correlation matrix is
        assumed symmetric with 1s on the diagonal.
    min_periods : int, optional
        Minimum number of observations needed to have a valid result.

    Returns
    -------
    float
        Correlation with other.

    See Also
    --------
    DataFrame.corr : Compute pairwise correlation between columns.
    DataFrame.corrwith : Compute pairwise correlation with another
        DataFrame or Series.
    """
    # Restrict both operands to their shared index labels.
    aligned_left, aligned_right = self.align(other, join="inner", copy=False)
    if len(aligned_left) == 0:
        return np.nan

    is_valid_method = callable(method) or method in ("pearson", "spearman", "kendall")
    if not is_valid_method:
        raise ValueError(
            "method must be either 'pearson', "
            "'spearman', 'kendall', or a callable, "
            f"'{method}' was supplied"
        )
    return nanops.nancorr(
        aligned_left.values,
        aligned_right.values,
        method=method,
        min_periods=min_periods,
    )
def cov(
    self,
    other: Series,
    min_periods: int | None = None,
    ddof: int | None = 1,
) -> float:
    """
    Compute covariance with Series, excluding missing values.

    Parameters
    ----------
    other : Series
        Series with which to compute the covariance.
    min_periods : int, optional
        Minimum number of observations needed to have a valid result.
    ddof : int, default 1
        Delta degrees of freedom; the divisor is ``N - ddof``.

    Returns
    -------
    float
        Covariance between Series and other, normalized by N - ddof.

    See Also
    --------
    DataFrame.cov : Compute pairwise covariance of columns.
    """
    # Restrict both operands to their shared index labels.
    aligned_left, aligned_right = self.align(other, join="inner", copy=False)
    if len(aligned_left) == 0:
        return np.nan
    return nanops.nancov(
        aligned_left.values,
        aligned_right.values,
        min_periods=min_periods,
        ddof=ddof,
    )
@doc(
    klass="Series",
    extra_params="",
    other_klass="DataFrame",
    examples=dedent(
        """
    Difference with previous row

    >>> s = pd.Series([1, 1, 2, 3, 5, 8])
    >>> s.diff()
    0    NaN
    1    0.0
    2    1.0
    3    1.0
    4    2.0
    5    3.0
    dtype: float64

    Difference with 3rd previous row

    >>> s.diff(periods=3)
    0    NaN
    1    NaN
    2    NaN
    3    2.0
    4    4.0
    5    6.0
    dtype: float64

    Difference with following row

    >>> s.diff(periods=-1)
    0    0.0
    1   -1.0
    2   -1.0
    3   -2.0
    4   -3.0
    5    NaN
    dtype: float64

    Overflow in input dtype

    >>> s = pd.Series([1, 0], dtype=np.uint8)
    >>> s.diff()
    0      NaN
    1    255.0
    dtype: float64"""
    ),
)
def diff(self, periods: int = 1) -> Series:
    """
    First discrete difference of element.

    Calculates the difference of a {klass} element compared with another
    element in the {klass} (default is element in previous row).

    Parameters
    ----------
    periods : int, default 1
        Periods to shift for calculating difference, accepts negative
        values.
    {extra_params}

    Returns
    -------
    {klass}
        First differences of the Series.

    See Also
    --------
    {klass}.pct_change: Percent change over given number of periods.
    {klass}.shift: Shift index by desired number of periods with an
        optional time freq.
    {other_klass}.diff: First discrete difference of object.

    Notes
    -----
    For boolean dtypes, this uses :meth:`operator.xor` rather than
    :meth:`operator.sub`.
    The result is calculated according to current dtype in {klass},
    however dtype of the result is always float64.

    Examples
    --------
    {examples}
    """
    # Shared diff kernel over the underlying array; handles the
    # boolean-xor special case and the float64 result dtype.
    result = algorithms.diff(self._values, periods)
    return self._constructor(result, index=self.index).__finalize__(
        self, method="diff"
    )
def autocorr(self, lag=1) -> float:
    """
    Compute the lag-N autocorrelation.

    Pearson correlation between the Series and a copy of itself shifted
    by ``lag`` positions.

    Parameters
    ----------
    lag : int, default 1
        Number of lags to apply before performing autocorrelation.

    Returns
    -------
    float
        The Pearson correlation between self and self.shift(lag);
        NaN when the correlation is not well defined.

    See Also
    --------
    Series.corr : Compute the correlation between two Series.
    Series.shift : Shift index by desired number of periods.
    """
    shifted = self.shift(lag)
    return self.corr(shifted)
def dot(self, other):
    """
    Compute the dot product between the Series and the columns of other.

    Parameters
    ----------
    other : Series, DataFrame or array-like
        The other object to compute the dot product with its columns.
        Series/DataFrame operands must cover the same index labels as
        this Series.

    Returns
    -------
    scalar, Series or numpy.ndarray
        A scalar if other is a Series, a Series (indexed by other's
        columns) if other is a DataFrame, and an ndarray otherwise.

    See Also
    --------
    DataFrame.dot: Compute the matrix product with the DataFrame.
    Series.mul: Multiplication of series and other, element-wise.
    """
    if isinstance(other, (Series, ABCDataFrame)):
        # Labeled operand: indexes must match exactly once unioned.
        shared_index = self.index.union(other.index)
        if len(shared_index) > len(self.index) or len(shared_index) > len(other.index):
            raise ValueError("matrices are not aligned")
        lvals = self.reindex(index=shared_index, copy=False).values
        rvals = other.reindex(index=shared_index, copy=False).values
    else:
        lvals = self.values
        rvals = np.asarray(other)

    if lvals.shape[0] != rvals.shape[0]:
        raise Exception(
            f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
        )

    # Dispatch on the operand type to shape the result appropriately.
    if isinstance(other, ABCDataFrame):
        return self._constructor(
            np.dot(lvals, rvals), index=other.columns
        ).__finalize__(self, method="dot")
    if isinstance(other, Series):
        return np.dot(lvals, rvals)
    if isinstance(rvals, np.ndarray):
        return np.dot(lvals, rvals)
    raise TypeError(f"unsupported type: {type(other)}")  # pragma: no cover
def __matmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(np.transpose(other))
@doc(base.IndexOpsMixin.searchsorted, klass="Series")
def searchsorted(self, value, side="left", sorter=None) -> np.ndarray:
    # Docstring is injected by @doc from IndexOpsMixin.searchsorted;
    # delegate to the shared algorithm over the underlying array.
    return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
# -------------------------------------------------------------------
# Combination
def append(
self, to_append, ignore_index: bool = False, verify_integrity: bool = False
):
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
Series to append with self.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
verify_integrity : bool, default False
If True, raise Exception on creating index with duplicates.
Returns
-------
Series
Concatenated Series.
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
Iteratively appending to a Series can be more computationally intensive
than a single concatenate. A better solution is to append values to a
list and then concatenate the list with the original Series all at
once.
Examples
--------
>>> s1 = pd.Series([1, 2, 3])
>>> s2 = pd.Series([4, 5, 6])
>>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5])
>>> s1.append(s2)
0 1
1 2
| |
# Copyright (C) 2018-2021
# Author: <NAME>
# Contact: <EMAIL>
"""Tag Functions.
The following functions give you access to interact with Ignition Tags.
"""
from __future__ import print_function
__all__ = [
"browse",
"browseHistoricalTags",
"browseTags",
"browseTagsSimple",
"configure",
"copy",
"deleteAnnotations",
"deleteTags",
"exists",
"exportTags",
"getConfiguration",
"importTags",
"isOverlaysEnabled",
"move",
"queryAnnotations",
"queryTagCalculations",
"queryTagDensity",
"queryTagHistory",
"read",
"readAll",
"readAsync",
"readBlocking",
"rename",
"requestGroupExecution",
"setOverlaysEnabled",
"storeAnnotations",
"storeTagHistory",
"write",
"writeAll",
"writeAsync",
"writeBlocking",
]
import warnings
import system.date
from java.lang import Object
from java.util import Date
class BrowseTag(Object):
    """Mock of Ignition's BrowseTag object.

    Carries the basic metadata reported for a single tag during a
    browse; the ``is*`` predicates are stubs that print the instance
    and always answer True.
    """

    def __init__(
        self,
        name=None,
        path=None,
        fullPath=None,
        type=None,
        valueSource=None,
        dataType=None,
    ):
        self.name = name
        self.path = path
        self.fullPath = fullPath
        self.type = type
        self.valueSource = valueSource
        self.dataType = dataType

    def getDataType(self):
        """Return the tag's data type."""
        return self.dataType

    def getFullPath(self):
        """Return the tag's full path."""
        return self.fullPath

    def getPath(self):
        """Return the tag's path relative to its parent."""
        return self.path

    def getTagType(self):
        """Return the tag's type."""
        return self.type

    def getValueSource(self):
        """Return the tag's value source."""
        return self.valueSource

    def isDB(self):
        """Stub: whether this is a database tag."""
        print(self)
        return True

    def isExpression(self):
        """Stub: whether this is an expression tag."""
        print(self)
        return True

    def isFolder(self):
        """Stub: whether this entry is a folder."""
        print(self)
        return True

    def isMemory(self):
        """Stub: whether this is a memory tag."""
        print(self)
        return True

    def isOPC(self):
        """Stub: whether this is an OPC tag."""
        print(self)
        return True

    def isQuery(self):
        """Stub: whether this is a query tag."""
        print(self)
        return True

    def isUDT(self):
        """Stub: whether this is a UDT instance."""
        print(self)
        return True
class QualifiedValue(object):
    """Mock of Ignition's QualifiedValue: a value with a DataQuality and
    a timestamp attached to it."""

    def __init__(self, value=None, quality=None, timestamp=None):
        self._value = value
        self._quality = quality
        self._timestamp = timestamp

    @property
    def value(self):
        """String representation of the wrapped value."""
        return str(self._value)

    def derive(self, diagnosticMessage):
        """Stub: derive a new qualified value with a diagnostic message."""

    def equals(self, value, includeTimestamp):
        """Stub: compare against another value, optionally by timestamp."""

    def getQuality(self):
        """Stub: return the attached quality."""

    def getTimestamp(self):
        """Stub: return the attached timestamp."""

    def getValue(self):
        """Stub: return the raw wrapped value."""
class QualityCode(Object):
    """QualityCode contains a 32-bit integer code and optionally a
    diagnostic string.

    Mock implementation: the ``is*`` predicates print the instance and
    report a fixed "good" quality; the remaining methods are stubs.
    """

    def getCode(self):
        """Stub: return the 32-bit integer quality code."""
        pass

    def getCodeName(self, arg):
        """Stub: return the symbolic name for ``arg``."""
        pass

    def getCodesJson(self):
        """Stub: return all known codes as JSON."""
        pass

    def getDiagnosticMessage(self):
        """Stub: return the optional diagnostic string."""
        pass

    def getLevel(self, arg):
        """Stub: return the quality level of ``arg``."""
        pass

    def getName(self):
        """Stub: return this code's name."""
        pass

    def getQualityFor(self, arg):
        """Stub: return the quality for ``arg``."""
        pass

    def isBad(self):
        """Whether the quality is Bad (always False in this mock)."""
        print(self)
        return False

    def isBadOrError(self):
        """Whether the quality is Bad or Error (always False here)."""
        print(self)
        return False

    def isError(self):
        """Whether the quality is Error (always False here)."""
        print(self)
        return False

    def isGood(self):
        """Whether the quality is Good (always True here)."""
        print(self)
        return True

    def isNot(self, arg):
        """Whether the quality differs from ``arg`` (always True here)."""
        print(self, arg)
        return True

    def isNotGood(self):
        """Whether the quality is not Good (always False here)."""
        print(self)
        return False

    def isUncertain(self):
        """Whether the quality is Uncertain (always False here)."""
        print(self)
        return False

    def toValue(self):
        """Stub: convert this code to a value."""
        pass

    @staticmethod
    def values():
        """Stub: return all known QualityCode values.

        BUGFIX: this was previously declared ``@staticmethod`` with a
        ``self`` parameter, so ``QualityCode.values()`` raised
        TypeError; static methods take no instance argument.
        """
        pass

    @staticmethod
    def worstOf(a, b):
        """Stub: return the worse of two quality codes."""
        pass

    @staticmethod
    def worstOfAll(*args):
        """Stub: return the worst of all given quality codes."""
        pass
class Results(Object):
    """The results of a browse operation.

    May only represent a partial result set, which can be determined by
    comparing the Total Available Size to the Returned Size. If there is
    a mismatch, the continuation point should be non-null and can be
    used in constructing the subsequent BrowseFilter to continue the
    browse.
    """

    def error(self, result):
        """Stub: mark this result set as an error."""
        pass

    def getContinuationPoint(self):
        """Stub: return the continuation point of a partial browse."""
        pass

    def getResultQuality(self):
        """Stub: return the overall quality of the browse result."""
        pass

    def getResults(self):
        """Stub: return the list of browsed items."""
        pass

    def getReturnedSize(self):
        """Stub: return the number of items in this result set."""
        pass

    def getTotalAvailableSize(self):
        """Stub: return the total number of items available."""
        pass

    def of(self, arg):
        """Stub: build a Results instance from ``arg``."""
        pass

    def setContinuationPoint(self, continuationPoint):
        """Stub: set the continuation point."""
        pass

    def setResultQuality(self, value):
        """Stub: set the overall result quality."""
        pass

    def setResults(self, results):
        """Stub: set the list of browsed items."""
        pass

    def setTotalAvailableResults(self, totalAvailableResults):
        """Stub: set the total available result count."""
        pass
def browse(path, filter=None):
    """Return a list of tags found at the specified tag path.

    Args:
        path (str): The path that will be browsed, typically to a folder
            or UDT instance.
        filter (dict): A dictionary of browse filter parameters: name
            (with ``*`` wildcards), dataType, valueSource, tagType,
            typeId, quality (Good or Bad), and maxResults. The wildcard
            is only honored for the name parameter.

    Returns:
        Results: A Results object wrapping one dictionary per tag found.
            Use .getResults() for the list of tag dictionaries and
            .getReturnedSize() for the number of items returned.
    """
    print(path, filter)
    browse_results = Results()
    return browse_results
def browseHistoricalTags(
    path, nameFilters=None, maxSize=None, continuationPoint=None
):
    """Browse for historical tags at the provided historical path.

    Only the given path level is inspected; children are not descended
    into.

    Args:
        path (str): The Historical Tag Path to browse (see the Tag
            Export documentation for how such paths are constructed).
        nameFilters (list[str]): Optional name filters applied to the
            result set.
        maxSize (int): Optional maximum size of the result set.
        continuationPoint (object): Optional continuation point from a
            previously limited browse (obtained via
            .getContinuationPoint() on its Results).

    Returns:
        Results: A Results object wrapping one dictionary per tag found.
    """
    print(path, nameFilters, maxSize, continuationPoint)
    return Results()
def browseTags(
    parentPath,
    tagPath=None,
    tagType=None,
    dataType=None,
    udtParentType=None,
    recursive=False,
    sort="ASC",
):
    """Return an array of tags from a specific folder (deprecated).

    Supports filtering and recursion; leave the filters blank to return
    every tag. In the gateway scope a Tag Provider must be specified.

    Args:
        parentPath (str): The parent folder path; blank means the root
            folder. A tag provider may be named in square brackets, e.g.
            "[myTagProvider]MyTagsFolder"; otherwise the project default
            provider is used.
        tagPath (str): Optional tag-path filter (``*`` matches any run
            of characters, ``?`` a single character).
        tagType (str): Optional tag-type filter: OPC, MEMORY, DB, QUERY,
            Folder, DERIVED or UDT_INST.
        dataType (str): Optional data type (ignored for UDT instances
            and folders): Int1, Int2, Int4, Int8, Float4, Float8,
            Boolean, String or DateTime.
        udtParentType (str): Optional name of the parent UDT.
        recursive (bool): Whether to descend into sub-folders; keeping
            this False is recommended to avoid server timeouts.
        sort (str): Sort order over the full tag path, ASC or DESC.

    Returns:
        list[BrowseTag]: One BrowseTag per matching tag.
    """
    warnings.warn(
        "browseTags is deprecated, use browse instead.", DeprecationWarning
    )
    print(
        parentPath,
        tagPath,
        tagType,
        dataType,
        udtParentType,
        recursive,
        sort,
    )
    return [BrowseTag()]
def browseTagsSimple(parentPath, sort):
    """Return a sorted array of tags from a specific folder (deprecated).

    Args:
        parentPath (str): The parent folder path; blank means the root
            folder. A tag provider may be named in square brackets, e.g.
            "[myTagProvider]MyTagsFolder"; otherwise the project default
            provider is used.
        sort (str): Sort order, ASC or DESC.

    Returns:
        list[BrowseTag]: One BrowseTag per tag in the folder.
    """
    warnings.warn(
        "browseTagsSimple is deprecated, use browse instead.",
        DeprecationWarning,
    )
    print(parentPath, sort)
    return [BrowseTag()]
def configure(basePath, tags, collisionPolicy="o"):
"""Creates Tags from a given list of Python dictionaries or from a
JSON source string.
Can be used to overwrite a current Tag's configuration.
When utilizing this function, the tag definitions must specify the
names of properties with their scripting/JSON name. A reference of
these properties can be found on the Tag Properties and Tag Alarm
Properties pages.
Args:
basePath (str): The starting point where the new Tags will be
created. When making changes to existing tags with this
function, you want to set the path to the parent folder of
the exiting tag(s), not the tag(s) themselves.
tags (list[object]): A list of Tag definitions, where each Tag
definition is a Python dictionary. Alternately, a JSON
source string may be passed to this parameter. When editing
existing tags, it is generally easier to retrieve the tag
configurations with system.tag.getConfiguration, modify the
results of the getConfiguration call, and then write the new
configuration to the parent folder of the existing tag(s).
collisionPolicy (str): The action to take when a tag or folder
with the same path and name is encountered. Possible values
include:
a - Abort and throw an exception
o - Overwrite and replace existing Tag's configuration
i - Ignore that item in the list
m - Merge, modifying values that are specified in the
definition, without impacting values that aren't defined
in the | |
config.MODEL_DIR + 'ft_{0}_sg_300_neg5_it2.model'.format(config.MODEL_NAME)
self.ft_model = self.load_ft_model(self.ft_fname)
def load_ft_model(self, fname):
    """Load a saved FastText model from ``fname``.

    If no saved model exists, train a new one on the wiki corpus with
    DEFAULT_ARGUMENTS_FT and persist it to ``fname`` before returning.

    :param fname: path of the FastText model save file.
    :return: the loaded (or freshly trained) FastText model.
    """
    print('Loading Fasttext Model... in {0:.2f} seconds'.format(time.time() - start_time))
    try:
        model = FastText.load(fname)
        print(model)
    except IOError:
        # Missing save file: fall back to training from scratch.
        print('No existed model. Training Ft Model... in {0:.2f} seconds'.format(time.time() - start_time))
        corpus = config.WikiCorpus()
        model = FastText(corpus, **DEFAULT_ARGUMENTS_FT)
        model.save(fname)
    print('Success to load Fasttext Model... in {0:.2f} seconds'.format(time.time() - start_time))
    return model
def test(self):
    """Run the word-analogy benchmark (questions-words) and the
    WordSim-353 word-pair similarity benchmark on the FastText vectors."""
    vectors = self.ft_model.wv
    vectors.accuracy(SOURCE_DIR + 'questions-words.txt')
    vectors.evaluate_word_pairs(datapath('wordsim353.tsv'))
class MyModel(object):
def __init__(self, threshold=None, space_order=None):
    """Load the mitigated embedding and prepare modulation state.

    :param threshold: optional entity-axis threshold; when set,
        modulate_sentiment only rescales words beyond it.
    :param space_order: two-element list giving the orientation (+1/-1)
        of the sentiment and entity axes; defaults to [1, 1].
    """
    # BUGFIX: the default was the mutable literal [1, 1], shared across
    # every instance constructed without an explicit space_order; a
    # fresh list is now created per instance.
    if space_order is None:
        space_order = [1, 1]
    # embedding models
    self.my_fname = config.MITIGATED_EMBEDDING_NAME
    self.my_model = self.load_w2v_model(self.my_fname)
    # Embedding dimensionality, used as the initial scaling divisor.
    self.init_modulate = np.shape(self.my_model.syn0)[1]
    self._modulate_vector_linalg(dim=1, dim2=1)
    self.threshold = threshold
    self.space_order = space_order
    # Running sum of the intensities applied so far.
    self.modulated_number = 0
def load_w2v_model(self, fname, arranged_savfile=True):
    """Load the mitigated word2vec embedding from ``fname``.

    :param fname: path of the saved embedding file.
    :param arranged_savfile: if False, first rewrite the save file into
        word2vec text format (rebuilding the vocab index) before loading.
    :return: the loaded KeyedVectors model; exits the process if the
        file does not exist.
    """
    try:
        print('Loading My Model... in {0:.2f} seconds'.format(time.time() - start_time))
        if not arranged_savfile:
            # Rebuild the vocab with contiguous indices, then re-save in
            # plain word2vec text format so it can be loaded below.
            w2v_model = gensim.models.KeyedVectors.load(fname)
            wi = {w: i for i, w in enumerate(w2v_model.index2word)}
            # NOTE(review): `count` here is the existing vocab entry, not
            # an int — presumably config.Vocab accepts it; confirm.
            w2v_model.vocab = {word: config.Vocab(count=count, index=wi[word]) for word, count in w2v_model.vocab.items()}
            w2v_model.save_word2vec_format(fname, binary=False)
        my_model = word2vec.Word2VecKeyedVectors.load_word2vec_format(fname, binary=False)
        #my_model = word2vec.Word2Vec.load(fname + 'w2vf')
        print(my_model)
    except IOError:
        # No pre-trained mitigated embedding: bail out rather than
        # continue with an unusable model.
        print('No existed model. Training My Model... in {0:.2f} seconds'.format(time.time() - start_time))
        print("constructing")
        exit()
    print('Success to load My Model... in {0:.2f} seconds'.format(time.time() - start_time))
    return my_model
def _modulate_vector_linalg(self, dim=1, dim2=1):
    """Shrink the first ``dim + dim2`` columns (sentiment + entity axes)
    of the embedding matrix by the initial modulation factor."""
    leading = dim + dim2
    rescaled = self.my_model.syn0[:, :leading] / self.init_modulate
    self.my_model.syn0[:, :leading] = rescaled
def modulate_sentiment(self, dim=1, dim2=1, intensity=1):
    """Scale the sentiment columns (the first ``dim``) of the word
    vectors by ``intensity``.

    When ``self.threshold`` is set, only rows whose entity columns
    (``dim:dim + dim2``) lie beyond the threshold — on the side selected
    by ``space_order[1]`` — are scaled; otherwise every row is scaled.

    :param dim: number of leading sentiment dimensions.
    :param dim2: number of entity dimensions following the sentiment ones.
    :param intensity: multiplicative factor applied to the sentiment axis.
    """
    # NOTE(review): the assert accepts any length < 3 (including 0/1),
    # not exactly 2 as the message suggests — confirm intended.
    assert len(self.space_order) < 3, "please set space_order with type 'list' (e.g. [1, 1])."
    if self.threshold and self.space_order[1] == 1:  # modulate sentiment only for entity words
        # Threshold is compared in the rescaled space, hence the division
        # by init_modulate (see _modulate_vector_linalg).
        self.my_model.syn0[:, :dim] = np.multiply(self.my_model.syn0[:, :dim],
                                                  np.where(self.my_model.syn0[:, dim:dim + dim2] >= (self.threshold / self.init_modulate),
                                                           intensity, 1))
    elif self.threshold and self.space_order[1] == -1:  # modulate sentiment only for entity words
        self.my_model.syn0[:, :dim] = np.multiply(self.my_model.syn0[:, :dim],
                                                  np.where(self.my_model.syn0[:, dim:dim + dim2] <= -(self.threshold / self.init_modulate),
                                                           intensity, 1))
    else:  # modulate sentiment for entire words
        self.my_model.syn0[:, :dim] = self.my_model.syn0[:, :dim] * intensity
    # Re-derive the unit-norm vectors so similarity queries see the change.
    self.my_model.syn0norm = (self.my_model.syn0 / np.sqrt((self.my_model.syn0 ** 2).sum(-1))[..., np.newaxis]).astype(float)
    self.modulated_number += intensity*1
    # self.my_model.init_sims(replace=True)
    # it makes syn0 and vectors to be also normalized (same as syn0norm and vectors_norm)
def modulate_all(self, dim=1, dim2=1, intensity=1):
if intensity < 1:
assert len(self.space_order) < 3, "please set space_order with type 'list' (e.g. [1, 1])."
self.my_model.syn0[:, :dim+dim2] = self.my_model.syn0[:, :dim+dim2] * intensity
# self.my_model.init_sims(replace=True)
# it makes syn0 and vectors to be also normalized (same as syn0norm and vectors_norm)
self.my_model.syn0norm = (
self.my_model.syn0 / np.sqrt((self.my_model.syn0 ** 2).sum(-1))[..., np.newaxis]).astype(float)
def test(self, uci_dataset, intensity_order=1):
for i, intensity in enumerate([1, 10]): #, 10, 10]):
if i == 0 and intensity_order < 0:
continue
print("Model with intensity 10^{}, threshold {}".format(i*intensity_order, self.threshold))
self.modulate_sentiment(intensity=intensity**intensity_order)
#self.test_analogy()
self.test_UCI(uci_dataset)
self.test_intrinsic()
#self.show_vocab_tsnescatterplot()
#self.show_topn_embedding()
print("Model with intensity 0, threshold {}".format(self.threshold))
self.modulate_sentiment(intensity=0)
#self.test_analogy()
self.test_UCI(uci_dataset)
self.test_intrinsic()
def test_intrinsic(self):
self.my_model.accuracy(SOURCE_DIR + 'questions-words.txt', restrict_vocab=300000)
similarities = self.my_model.evaluate_word_pairs(datapath('wordsim353.tsv'), restrict_vocab=300000)
print(similarities)
def test_analogy(self):
for w1, w2 in sensitive_pair:
for word in neutral_word_list:
try:
print('{}:{} = {}:{}'.format(
w1, w2, word, self.my_model.most_similar(positive=[w2, word], negative=[w1], topn=10)))
except Exception as e:
continue
def test_UCI(self, uci_dataset, small_train=True):
(_X_train, _y_train), (_X_test, _y_test) = uci_dataset
test_male_index, test_female_index = identify_index_by_gender(_X_test, _y_test)
(X_train, y_train), (X_test, y_test) = word2rep(_X_train, _y_train, self.my_model), word2rep(_X_test, _y_test,
self.my_model)
assert len(X_train) == len(y_train)
assert len(X_test) == len(y_test)
print("num of tests / num of labels: {} {} / {} {} in {:.2f} sec".format(
len(X_train), len(X_test), len(set(y_train)), len(set(y_test)), time.time() - start_time))
for c in SVM_Cs:
clf = svm.SVC(C=c)
if small_train:
clf.fit(X_train[:SMALL_UCI_NUM], y_train[:SMALL_UCI_NUM])
else:
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
with codecs.open(SOURCE_DIR + 'pred_UCI\\my' + str(self.modulated_number) + '_' + config.MODEL_NAME + str(c) + '_pred.txt', 'w', encoding='utf-8', errors='ignore') as f:
for tokens, label in zip(_X_test, pred):
f.write('\t'.join(tokens) + '\t' + str(label) + '\n')
print_result(y_test, pred, test_male_index, test_female_index)
return 0
def show_topn_affect(self, dim=1, dim2=1, topn=50):
sort_index_sum = np.ndarray.flatten(self.my_model.vectors[:, :dim]).argsort()
sort_index = np.prod(self.my_model.vectors[:, :dim+dim2], axis=1).argsort()
cond = np.ndarray.flatten(self.my_model.vectors[sort_index, dim:dim+dim2]) >= (
self.threshold / self.init_modulate) if self.space_order[1] == 1 else \
np.ndarray.flatten(self.my_model.vectors[sort_index, dim:dim+dim2]) <= -(
self.threshold / self.init_modulate)
print("< top {} positive stereotypes >".format(topn))
if self.space_order[0] * self.space_order[1] == 1:
for index in sort_index[cond][:-1-topn:-1]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
else:
for index in sort_index[cond][:topn]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
print("< top {} negative stereotypes >".format(topn))
if self.space_order[0] * self.space_order[1] == 1:
for index in sort_index[cond][:topn]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
else:
for index in sort_index[cond][:-1-topn:-1]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
def show_vocab_tsnescatterplot(self, dim=1, dim2=1, shown_word=60, top=False):
sort_index = np.prod(self.my_model.vectors[:, :dim + dim2], axis=1).argsort()
cond = np.ndarray.flatten(self.my_model.vectors[sort_index, dim:dim + dim2]) >= (
self.threshold / self.init_modulate) if self.space_order[1] == 1 else \
np.ndarray.flatten(self.my_model.vectors[sort_index, dim:dim + dim2]) <= -(
self.threshold / self.init_modulate)
# get random words
# close_words = model.similar_by_word(word)
if top:
entity_words = list(sort_index[cond][::self.space_order[1]])[:int(shown_word / 2)]
notity_words = list(sort_index[np.logical_not(cond)][::-self.space_order[1]])[:int(shown_word / 2)]
else:
entity_words = random.sample(list(sort_index[cond]), int(shown_word / 2))
notity_words = random.sample(list(sort_index[np.logical_not(cond)]), int(shown_word / 2))
# add the vector for each of the closest words to the array
arr, word_labels = np.empty((0, 300), dtype='f'), []
for index in entity_words + notity_words:
wrd_vector = self.my_model.syn0norm[index]
word_labels.append(self.my_model.index2word[index])
arr = np.append(arr, np.array([wrd_vector]), axis=0)
# find tsne coords for 1 dimensions
tsne = TSNE(n_components=1, random_state=0)
np.set_printoptions(suppress=True)
x_coords = arr[:, 1]
y_coords = arr[:, 0]
# display scatter plot
plt.scatter(x_coords, y_coords)
for label, x, y in zip(word_labels, x_coords, y_coords):
plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')
plt.xlim(x_coords.min() + 0.05, x_coords.max() + 0.05)
plt.ylim(y_coords.min() + 0.05, y_coords.max() + 0.05)
plt.show()
def show_topn_embedding(self, dim=1, dim2=1, topn=30):
sort_index_sent = np.sum(self.my_model.vectors[:, :dim], axis=1).argsort()
if self.space_order[0] == -1:
print("< top {} positive stereotypes >".format(topn))
for index in sort_index_sent[:topn]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
print("< top {} negative stereotypes >".format(topn))
for index in sort_index_sent[:-1-topn:-1]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
else:
print("< top {} positive stereotypes >".format(topn))
for index in sort_index_sent[:-1-topn:-1]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
print("< top {} negative stereotypes >".format(topn))
for index in sort_index_sent[:topn]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
sort_index_sent = np.sum(self.my_model.vectors[:, dim:dim+dim2], axis=1).argsort()
if self.space_order[1] == -1:
print("< top {} entity stereotypes >".format(topn))
for index in sort_index_sent[:topn]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
print("< top {} notity stereotypes >".format(topn))
for index in sort_index_sent[:-1-topn:-1]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
else:
print("< top {} entity stereotypes >".format(topn))
for index in sort_index_sent[:-1 - topn:-1]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim + dim2])
print("< top {} notity stereotypes >".format(topn))
for index in sort_index_sent[:topn]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim + dim2])
class DebiasModel(object):
def __init__(self, bias_model, same_env=True):
"""
:param is_selected_gender_vocab: 'True' means selected_gender_vocab is prepared.
:param remove_oov: remove words not in w2v.model vocab.
"""
# embedding models
print("same_env: {}".format(same_env))
if same_env:
self.model = self.debias_we_same_env(bias_model)
else:
self.model = self.debias_we(bias_model)
def debias_we(self, E):
print('Loading Debias Model... in {0:.2f} seconds'.format(time.time() - start_time))
with open(SOURCE_DIR + 'definitional_pairs.json', "r") as f:
definitional = json.load(f)
with open(SOURCE_DIR + 'equalize_pairs.json', "r") as f:
equalize = json.load(f)
with open(SOURCE_DIR + 'gender_specific_seed.json', "r") as f:
gender_specific_words = json.load(f)
tmp_w2v = gensim.models.KeyedVectors(vector_size=300)
tmp_w2v.index2word = E.index2word
tmp_w2v.vocab = E.vocab
tmp_w2v.syn0 = E.syn0
tmp_w2v.syn0norm = (E.syn0 / np.sqrt((E.syn0 ** 2).sum(-1))[..., np.newaxis]).astype(float)
gender_direction = self.doPCA(definitional, tmp_w2v).components_[0]
specific_set = set(gender_specific_words)
for i, w in enumerate(tmp_w2v.index2word):
if w not in specific_set:
tmp_w2v.syn0[i] = self.drop(tmp_w2v.syn0[i], gender_direction)
tmp_w2v.syn0norm = (tmp_w2v.syn0 / np.sqrt((tmp_w2v.syn0 ** 2).sum(-1))[..., np.newaxis]).astype(float)
candidates = {x for e1, e2 in equalize for x in [(e1.lower(), e2.lower()),
(e1.title(), e2.title()),
(e1.upper(), e2.upper())]}
print(candidates)
for (a, b) in candidates:
if (a in tmp_w2v.index2word and b in tmp_w2v.index2word):
y = self.drop((tmp_w2v[a] + tmp_w2v[b]) / 2, gender_direction)
z = np.sqrt(1 - np.linalg.norm(y) ** 2)
if (tmp_w2v[a] - tmp_w2v[b]).dot(gender_direction) < 0:
z = -z
tmp_w2v.syn0[tmp_w2v.vocab[a].index] = z * gender_direction + y
tmp_w2v.syn0[tmp_w2v.vocab[b].index] = -z * gender_direction + y
tmp_w2v.syn0norm = (tmp_w2v.syn0 / np.sqrt((tmp_w2v.syn0 ** 2).sum(-1))[..., np.newaxis]).astype(float)
print('Success to load Debias Model... in {0:.2f} seconds'.format(time.time() - start_time))
return | |
bool, default = False
Determines whether NaN values are allowed
in the input/output of the transformer.
random_state : int, RandomState instance, default=None
The seed of the pseudo random number generator.
Random number generator is not directly used by the Deduper;
however, the parameter is present for compatibility with PEstimator
class. When RandomState is set to None,
it disables file caching functionality
(see documentation for PickleCache for details).
Attributes:
----------
columns_to_keep_ : list of str
list of column names to keep
columns_to_drop_ : list of str
list of column names to delete
"""
keep: str
allow_nans: bool
columns_to_keep_: List[str]
columns_to_drop_: List[str]
def __init__(self
, keep: str = "first"
, allow_nans: bool = False
, random_state = None
, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.set_params(keep=keep
, allow_nans=allow_nans
, random_state=random_state)
def get_params(self, deep=True):
params = dict(keep=self.keep
, allow_nans=self.allow_nans
, random_state = self.random_state)
return params
def set_params(self , *
, keep = None
, allow_nans = None
, random_state = None
,**kwargs
) -> PFeatureMaker:
self.keep = keep
self.allow_nans = allow_nans
self.random_state = random_state
self.columns_to_keep_ = []
self.columns_to_drop_ = []
return self
@property
def is_fitted_(self) -> bool:
return bool(len(self.columns_to_keep_))
@property
def input_can_have_nans(self) -> bool:
return self.allow_nans
@property
def output_can_have_nans(self) -> bool:
return self.allow_nans
@property
def input_columns_(self) -> List[str]:
assert self.is_fitted_
return sorted(self.columns_to_keep_ + self.columns_to_drop_)
@property
def output_columns_(self) -> List[str]:
return sorted(self.columns_to_keep_)
def fit_transform(self
, X: pd.DataFrame
, y: Optional[pd.Series] = None
) -> pd.DataFrame:
assert self.keep in {"first", "last"}
X, y = self.start_fitting(X, y)
X_dd = X.T.drop_duplicates(keep=self.keep).T
self.columns_to_keep_ = list(X_dd.columns)
self.columns_to_drop_ = list(set(X.columns) - set(X_dd.columns))
log_message = f"{len(self.columns_to_drop_)}"
log_message += f" duplicate features have been removed, "
log_message += f"{len(self.columns_to_keep_)} unique features left."
self.info(log_message)
return self.finish_transforming(X_dd)
def transform(self
, X: pd.DataFrame
) -> pd.DataFrame:
X = self.start_transforming(X)
log_message = f"{len(self.columns_to_drop_)}"
log_message += f" duplicate features have been removed, "
log_message += f"{len(self.columns_to_keep_)} unique features left."
self.info(log_message)
return self.finish_transforming(X[self.output_columns_])
class NumericImputer(PFeatureMaker):
"""A transformer that creates NaN-less versions of numeric columns"""
imputation_aggr_funcs: Optional[List[Any]]
fill_values_: Optional[pd.DataFrame]
def __init__(self, *
, imputation_aggr_funcs= (
np.min, np.max, percentile50, minmode, maxmode)
, random_state = None
, **kwargs
) -> None:
super().__init__(**kwargs)
self.set_params(
imputation_aggr_funcs = imputation_aggr_funcs
, random_state = random_state
, **kwargs)
def get_params(self, deep=True):
params = dict(imputation_aggr_funcs=self.imputation_aggr_funcs
, random_state = self.random_state)
return params
def set_params(self, *
, imputation_aggr_funcs = None
, random_state = None
, **kwargs
) -> PFeatureMaker:
self.imputation_aggr_funcs = imputation_aggr_funcs
self.random_state = random_state
self.fill_values_ = None
return self
@property
def is_fitted_(self) -> bool:
return self.fill_values_ is not None
@property
def input_can_have_nans(self) -> bool:
return True
@property
def output_can_have_nans(self) -> bool:
return False
@property
def input_columns_(self) -> List[str]:
assert self.is_fitted_
return sorted(self.fill_values_.columns)
@property
def output_columns_(self) -> List[str]:
all_columns = []
for col in self.input_columns_:
for f in self.imputation_aggr_funcs:
label = f.__name__
column_name = "fillna_" + label + "(" + col + ")"
all_columns += [column_name]
return sorted(all_columns)
def fit_transform(self
, X: pd.DataFrame
, y: Optional[pd.Series] = None
) -> pd.DataFrame:
for f in self.imputation_aggr_funcs:
assert callable(f)
type_of_X = type(X).__name__
X, y = self.start_fitting(X, y, write_to_log=False)
X_num = X.select_dtypes(include="number")
num_nans = int(X_num.isna().sum().sum())
aggr_func_names = [f.__name__ for f in self.imputation_aggr_funcs]
n_func = len(aggr_func_names)
log_message = f"==> Starting removing NaNs from "
log_message += f"{len(X_num.columns)} numeric columns of a {type_of_X}"
log_message += " named < " + NeatStr.object_names(X, div_ch=" / ")
log_message += f" > with shape {X.shape}. "
log_message += f"Currently, the numeric columns of a dataset"
log_message += f" contain {num_nans} NaN-s. "
log_message += f"Each numeric columns will be replaced with "
log_message += f"{n_func} new ones, with imputation performed "
log_message += f"using the following functions: {aggr_func_names}."
self.info(log_message)
aggregations = {}
for col in X_num:
aggregations[col] = [f(X_num[col]) for f in self.imputation_aggr_funcs]
self.fill_values_ = pd.DataFrame(
data=aggregations, index=aggr_func_names)
self.log_df_ = self.fill_values_
return self.transform(X_num)
def transform(self
, X: pd.DataFrame
) -> pd.DataFrame:
X = self.start_transforming(X, write_to_log=False)
X_num = X.select_dtypes(include="number")[self.input_columns_]
num_nans = X_num.isna().sum().sum()
all_columns = []
for col in X_num.columns:
for f in self.imputation_aggr_funcs:
label = f.__name__
f_val = self.fill_values_.at[label, col]
filled_column = X_num[col].fillna(value=f_val)
filled_column.name = "fillna_" + label + "(" + col + ")"
all_columns += [filled_column]
result = pd.concat(all_columns, axis=1)
log_message = f"<== Returning a new, numeric-only dataframe"
log_message += f" with shape {result.shape}."
log_message += f" {num_nans} original NaN-s were removed"
log_message += f" by applying {len(self.imputation_aggr_funcs)}"
log_message += f" imputation functions."
self.info(log_message)
return self.finish_transforming(result, write_to_log=False)
class NumericFuncTransformer(PFeatureMaker):
"""A transformer that applies math functions to numeric features"""
columns_to_a_transform_: Optional[List[str]]
columns_to_p_transform_: Optional[List[str]]
positive_arg_num_functions: List[Any]
any_arg_num_functions: List[Any]
def __init__(self, *
, positive_arg_num_functions=(power_m1_1p, np.log1p, root_2, power_2)
, any_arg_num_functions=(passthrough, power_3)
, random_state = None
, **kwargs) -> None:
super().__init__(**kwargs)
self.set_params(
positive_arg_num_functions, any_arg_num_functions, random_state,**kwargs)
def get_params(self, deep=True):
params = dict(positive_arg_num_functions=self.positive_arg_num_functions
, any_arg_num_functions=self.any_arg_num_functions
, random_state = self.random_state)
return params
def set_params(self
, positive_arg_num_functions = None
, any_arg_num_functions = None
, random_state = None
, **kwargs
) -> PFeatureMaker:
if positive_arg_num_functions is not None:
for f in positive_arg_num_functions + any_arg_num_functions:
assert callable(f)
self.positive_arg_num_functions = positive_arg_num_functions
self.any_arg_num_functions = any_arg_num_functions
self.random_state = random_state
self.columns_to_p_transform_ = None
self.columns_to_a_transform_ = None
return self
@property
def is_fitted_(self):
result = (self.columns_to_a_transform_ is not None
and self.columns_to_p_transform_ is not None)
return result
@property
def input_can_have_nans(self) -> bool:
return True
@property
def output_can_have_nans(self) -> bool:
return True
@property
def input_columns_(self) -> List[str]:
assert self.is_fitted_
return sorted(set(self.columns_to_a_transform_)
| set(self.columns_to_p_transform_))
@property
def output_columns_(self) -> List[str]:
all_columns = []
for a_func in self.any_arg_num_functions:
f_columns = [a_func.__name__ + "(" + c + ")"
for c in self.columns_to_a_transform_]
all_columns += f_columns
for p_func in self.positive_arg_num_functions:
f_columns = [p_func.__name__ + "(" + c + ")"
for c in self.columns_to_p_transform_]
all_columns += f_columns
return sorted(all_columns)
def fit_transform(self
, X: pd.DataFrame
, y: Optional[pd.Series] = None
) -> pd.DataFrame:
(X, y) = self.start_fitting(X, y)
self.columns_to_p_transform_ = None
self.columns_to_a_transform_ = None
X_numbers = X.select_dtypes(include="number")
assert len(X_numbers.columns)
self.columns_to_a_transform_ = list(X_numbers.columns)
feature_mins = X_numbers.min()
p_transformable_features = feature_mins[feature_mins >= 0]
self.columns_to_p_transform_ = list(p_transformable_features.index)
result = self.transform(X)
return result
def transform(self
, X: pd.DataFrame
) -> pd.DataFrame:
all_funcs = self.positive_arg_num_functions + self.any_arg_num_functions
all_funcs = [f.__name__ for f in all_funcs]
X_numbers = self.start_transforming(
X, write_to_log=False).select_dtypes("number")
log_message = f"==> Starting generating features "
log_message += f"using a {type(X).__name__} named < "
log_message += NeatStr.object_names(X, div_ch=" / ")
log_message += f" > with the shape {X.shape} and the following "
log_message += f"{len(all_funcs)} functions: {all_funcs}."
self.info(log_message)
all_transformations = []
for a_func in self.any_arg_num_functions:
X_new = a_func(X_numbers)
X_new.columns = [a_func.__name__ + "(" + c + ")" for c in X_new]
all_transformations += [X_new]
if len(self.columns_to_p_transform_):
X_positive_numbers = deepcopy(
X_numbers[self.columns_to_p_transform_])
negative_flags = (X_positive_numbers < 0)
below_zero = negative_flags.sum().sum()
X_positive_numbers[negative_flags] = 0
if below_zero > 0:
log_message = f"{below_zero} negative values were found in "
log_message += "the features, scheduled for transformation "
log_message += "via functions that expect positive input "
log_message += "values. Negatives will be replaced "
log_message += "with zeros."
self.warning(log_message)
for p_func in self.positive_arg_num_functions:
X_new = p_func(X_positive_numbers)
X_new.columns = [p_func.__name__ + "(" + c + ")" for c in
X_new]
all_transformations += [X_new]
result = pd.concat(all_transformations, axis=1)
return self.finish_transforming(result)
class CatSelector(PFeatureMaker):
""" Abstract base class that finds categorical features.
Warning: This class should not be used directly. Use derived classes
instead.
"""
min_cat_size: int
max_uniques_per_cat: int
cat_columns_: Optional[Set[str]]
cat_values_: Optional[Dict[str, Set[str]]]
def __init__(self
, *
, min_cat_size: int = 20
, max_uniques_per_cat: int = 100
, random_state = None
, **kwargs) -> None:
super().__init__( **kwargs)
self.set_params(min_cat_size=min_cat_size
, max_uniques_per_cat=max_uniques_per_cat
, random_state = random_state)
def get_params(self, deep=True):
params = dict(min_cat_size = self.min_cat_size
, max_uniques_per_cat = self.max_uniques_per_cat
, random_state = self.random_state)
return params
def set_params(self, *
, min_cat_size = None
, max_uniques_per_cat = None
, random_state = None
, **kwards) -> PFeatureMaker:
self.min_cat_size = min_cat_size
self.max_uniques_per_cat = max_uniques_per_cat
self.random_state = random_state
self.cat_columns_ = None
self.cat_values_ = None
return self
def start_fitting(self
, X: Any
, y: Any
, write_to_log: bool = True
) -> Tuple[pd.DataFrame,pd.Series]:
X, y = super().start_fitting(X, y, write_to_log)
uniques = X.nunique()
uniques = uniques[uniques <= self.max_uniques_per_cat]
self.cat_columns_ = set(uniques.index)
self.cat_values_ = dict()
for c in self.cat_columns_:
uniques = X[c].value_counts()
uniques | |
= ' \\multicolumn{2}{c||}{Map Error} '
# for seqId in sequences:
# rowString += ' & {0:.2f}m '.format(1000.0*seqStats[seqId]['mapErr'])
# rowString += ' & {0:.2f}m \\\\\n'.format(1000.0*seqErrAvg)
# fid.write(rowString)
for (mmI,mmName) in enumerate(motionModels):
fid.write('\\hline\n'*2)
if len(motionModels) > 1:
fid.write(' & & \multicolumn{{{0}}}{{|c|}}{{{1}}} '.format(len(sequences)+1, dynNameMap[mmName]))
fid.write(' \\\\\n')
fid.write('\\hline\n'*2)
cSources = sources + ['Oracle']
for (srcI,sourceId) in enumerate(cSources):
if srcI == 0:
rowString = ' \\multirow{{{0}}}{{*}}{{Position Error}} '.format(len(cSources))
else:
rowString = ' '
rowString += ' & ' + sourceNameMap[sourceId] + ' '
if srcI < len(sources):
for seqId in sequences:
cErr = tblStats[seqId][sourceId][mmName]['map_localize_error_mean']
if np.isfinite(cErr[0]):
rowString += '& {0:.1f}m '.format(1000.0*cErr[0])
else:
rowString += '& * '
cErr = tblStats['sum'][sourceId][mmName]['map_localize_error_sum'] / tblStats['sum'][sourceId][mmName]['nframes_localize']
rowString += '& {0:.1f}m '.format(1000.0*cErr[0])
else:
for seqId in sequences:
rowString += '& {0:.1f}m '.format(1000.0*seqStats[seqId]['mapErr'])
rowString += ' & {0:.2f}m '.format(1000.0*seqErrAvg)
rowString += ' \\\\\n'
fid.write(rowString)
fid.write('\\hline\n')
for (srcI,sourceId) in enumerate(sources):
if srcI == 0:
rowString = ' \\multirow{{{0}}}{{*}}{{Heading Error}} '.format(len(sources))
else:
rowString = ' '
rowString += ' & ' + sourceNameMap[sourceId] + ' '
for seqId in sequences:
cErr = tblStats[seqId][sourceId][mmName]['map_localize_error_mean']
if np.isfinite(cErr[1]):
rowString += '& {0:.1f}$^\\circ$ '.format((180.0/np.pi)*cErr[1])
else:
rowString += '& * '
cErr = tblStats['sum'][sourceId][mmName]['map_localize_error_sum'] / tblStats['sum'][sourceId][mmName]['nframes_localize']
rowString += '& {0:.1f}$^\\circ$ '.format((180.0/np.pi)*cErr[1])
rowString += ' \\\\\n'
fid.write(rowString)
# fid.write('\\hline\n')
#
# for (srcI,sourceId) in enumerate(sources):
# if srcI == 0:
# rowString = ' \\multirow{{{0}}}{{*}}{{Localization Time}} '.format(len(sources))
# else:
# rowString = ' '
# rowString += ' & ' + sourceNameMap[sourceId] + ' '
# for seqId in sequences:
# cTime = tblStats[seqId][sourceId][mmName]['localize_time']
# if np.isfinite(cTime):
# rowString += '& {0:.0f}s '.format(cTime)
# else:
# rowString += '& * '
#
# cTime = tblStats['sum'][sourceId][mmName]['not_nan_localize_time'] / tblStats['sum'][sourceId][mmName]['localized']
# rowString += '& {0:.2f}s '.format(cTime)
# rowString += ' \\\\\n'
# fid.write(rowString)
fid.write('\\hline\n')
fid.write('\\end{tabular}\n')
# fid.write('\\end{table*}\n\n')
fid.close()
def plot_frame_errors(options,args,displayErrN):
import matplotlib
if options.img_format == 'pdf':
matplotlib.use('pdf')
else:
matplotlib.use('agg')
import matplotlib.pyplot as plt
load_config_file(options,args)
assert options.gps_data != None, 'GPS data is required to compute errors'
errorInfo = pickle.load(open(get_outfile_path(options,'errors.p'),'rb'))
topErrN = errorInfo['topErrN']
errors = errorInfo['errors']
topErrs = errors['absGPSErr']
localizedInd = errorInfo['localizedInd']
modes = errorInfo['modes']
localizedFlags = errorInfo['localizedFlags']
localizedTime = errorInfo['localizedTime']
dataTimes = errorInfo['dataTimes']
fig = plt.figure(figsize=(plotFigSize[0],2*plotFigSize[1]),dpi=plotDPI)
fig.clf()
ax = fig.add_subplot(2,1,1)
for (i,(cN,cErrList)) in enumerate(izip(topErrN,topErrs)):
if cN not in displayErrN: continue
cErrs = np.array(cErrList)
if cN == 1:
clabel = 'MAP Error'
else:
clabel = '{0} best error'.format(cN)
ax.plot(dataTimes,1000.0*cErrs[:,0], label = clabel)
ax.set_ylabel('Error (meters)')
ax.set_title('Positional Error')
ax.legend()
ax = fig.add_subplot(2,1,2)
for (i,(cN,cErrList)) in enumerate(izip(topErrN,topErrs)):
if cN not in displayErrN: continue
cErrs = np.array(cErrList)
if cN == 1:
clabel = 'MAP Error'
else:
clabel = '{0} best error'.format(cN)
ax.plot(dataTimes,(180.0/np.pi)*cErrs[:,1], label = clabel)
ax.set_xlabel('Data time (seconds)')
ax.set_ylabel('Error (degrees)')
ax.set_title('Heading Error')
ax.legend()
fname = get_outfile(options,'error.{0}'.format(options.img_format))
fig.savefig(fname,dpi=plotDPI)
fig.clf()
ax = fig.add_subplot(2,1,1)
for (i,(cN,cErrList)) in enumerate(izip(topErrN,topErrs)):
if cN not in displayErrN: continue
cErrs = np.array(cErrList)
if cN == 1:
clabel = 'MAP Error'
else:
clabel = '{0} best error'.format(cN)
ax.plot(dataTimes,1000.0*cErrs[:,0], label = clabel)
ax.set_ylabel('Error (meters)')
ax.set_title('Positional Error')
ax.set_yscale('log')
ax.legend()
ax = fig.add_subplot(2,1,2)
for (i,(cN,cErrList)) in enumerate(izip(topErrN,topErrs)):
if cN not in displayErrN: continue
cErrs = np.array(cErrList)
if cN == 1:
clabel = 'MAP Error'
else:
clabel = '{0} best error'.format(cN)
ax.plot(dataTimes,(180.0/np.pi)*cErrs[:,1], label = clabel)
ax.set_xlabel('Data time (seconds)')
ax.set_ylabel('Error (degrees)')
ax.set_title('Heading Error')
ax.set_yscale('log')
ax.legend()
fname = get_outfile(options,'logerror.{0}'.format(options.img_format))
fig.savefig(fname,dpi=plotDPI)
### Now output the localized error plots ###
fig.clf()
ax = fig.add_subplot(2,1,1)
for (i,(cN,cErrList)) in enumerate(izip(topErrN,topErrs)):
if cN not in displayErrN: continue
cErrs = np.array(cErrList)
if cN == 1:
clabel = 'MAP Error'
else:
clabel = '{0} best error'.format(cN)
ax.plot(dataTimes[localizedInd:],1000.0*cErrs[localizedInd:,0], label = clabel)
ax.set_ylabel('Error (meters)')
ax.set_title('Positional Error')
ax.legend()
ax = fig.add_subplot(2,1,2)
for (i,(cN,cErrList)) in enumerate(izip(topErrN,topErrs)):
if cN not in displayErrN: continue
cErrs = np.array(cErrList)
if cN == 1:
clabel = 'MAP Error'
else:
clabel = '{0} best error'.format(cN)
ax.plot(dataTimes[localizedInd:],(180.0/np.pi)*cErrs[localizedInd:,1], label = clabel)
ax.set_xlabel('Data time (seconds)')
ax.set_ylabel('Error (degrees)')
ax.set_title('Heading Error')
ax.legend()
fname = get_outfile(options,'localized-error.{0}'.format(options.img_format))
fig.savefig(fname,dpi=plotDPI)
fig.clf()
ax = fig.add_subplot(2,1,1)
for (i,(cN,cErrList)) in enumerate(izip(topErrN,topErrs)):
if cN not in displayErrN: continue
cErrs = np.array(cErrList)
if cN == 1:
clabel = 'MAP Error'
else:
clabel = '{0} best error'.format(cN)
ax.plot(dataTimes[localizedInd:],1000.0*cErrs[localizedInd:,0], label = clabel)
ax.set_ylabel('Error (meters)')
ax.set_title('Positional Error')
ax.set_yscale('log')
ax.legend()
ax = fig.add_subplot(2,1,2)
for (i,(cN,cErrList)) in enumerate(izip(topErrN,topErrs)):
if cN not in displayErrN: continue
cErrs = np.array(cErrList)
if cN == 1:
clabel = 'MAP Error'
else:
clabel = '{0} best error'.format(cN)
ax.plot(dataTimes[localizedInd:],(180.0/np.pi)*cErrs[localizedInd:,1], label = clabel)
ax.set_xlabel('Data time (seconds)')
ax.set_ylabel('Error (degrees)')
ax.set_title('Heading Error')
ax.set_yscale('log')
ax.legend()
fname = get_outfile(options,'localized-logerror.{0}'.format(options.img_format))
fig.savefig(fname,dpi=plotDPI)
return (modes,errors,localizedFlags,localizedInd,localizedTime,topErrN)
def do_plot_errors(options,args):
import matplotlib
if options.img_format == 'pdf':
matplotlib.use('pdf')
else:
matplotlib.use('agg')
import matplotlib.pyplot as plt
if options.mpi:
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
else:
size = 1
rank = 0
if args[0].endswith('.dcfg'):
srcName = 'default'
else:
srcName = args[0]
args = args[1:]
modeInfo = {}
for i in range(rank,len(args)+size,size):
if i >= len(args):
break
modeInfo[args[i]] = plot_frame_errors(copy(options),args[i],displayErrNs)
if options.mpi:
allModeInfo = comm.gather(modeInfo)
if rank == 0:
for cmode in allModeInfo:
modeInfo.update(cmode)
if rank == 0:
seqErrs = np.empty((len(topErrNs),len(args)+1,2,2))
gpsErrs = np.empty((len(args)+1,))
nFrames = np.empty((len(args),))
localizeTimes = np.empty((len(args)+1,))
localizedFlags = []
totalLocalizedErrs = 0
totalLocalizedFrames = 0
totalErrs = 0
totalFrames = 0
for (cfgI,cfgFile) in enumerate(args):
if modeInfo[cfgFile][1] == None:
print 'No GPS data for {0}.'.format(cfgFile)
continue
print '{0}:'.format(cfgFile)
currErrs = np.array(modeInfo[cfgFile][1]['absGPSErr'])
currGPSErrs = np.array(modeInfo[cfgFile][1]['gpsErr'])
localizedFrameFlags = np.array(modeInfo[cfgFile][2])
for (i,cN) in enumerate(topErrNs):
posErr,angErr = 1000.0*np.mean(currErrs[i,:,0]), (180.0/np.pi)*np.mean(currErrs[i,:,1])
posLocalErr,angLocalErr = 1000.0*np.mean(currErrs[i,localizedFrameFlags,0]), (180.0/np.pi)*np.mean(currErrs[i,localizedFrameFlags,1])
seqErrs[i,cfgI,0,0] = posErr
seqErrs[i,cfgI,1,0] = angErr
seqErrs[i,cfgI,0,1] = posLocalErr
seqErrs[i,cfgI,1,1] = angLocalErr
totalErrs += np.sum(currErrs,1)
totalFrames += currErrs.shape[1]
totalLocalizedErrs += np.sum(currErrs[:,localizedFrameFlags,:],1)
totalLocalizedFrames += np.sum(localizedFrameFlags)
localizeTimes[cfgI] = modeInfo[cfgFile][4]
localizedFlags.append(np.isfinite(modeInfo[cfgFile][4]))
gpsErrs[cfgI] = 1000.0*np.mean(currGPSErrs)
nFrames[cfgI] = currGPSErrs.shape[0]
localizedFlags.append(False)
localizedFlags = np.array(localizedFlags)
localizeTimes[len(args)] = np.mean(localizeTimes[localizedFlags])
gpsErrs[len(args)] = np.sum(np.multiply(nFrames/np.sum(nFrames),gpsErrs[0:len(args)]))
for (i,cN) in enumerate(topErrNs):
posErr,angErr = 1000.0*(totalErrs[i,0]/totalFrames), (180.0/np.pi)*(totalErrs[i,1]/totalFrames)
posLocalErr,angLocalErr = 1000.0*(totalLocalizedErrs[i,0]/totalLocalizedFrames), (180.0/np.pi)*(totalLocalizedErrs[i,1]/totalLocalizedFrames)
seqErrs[i,len(args),0,0] = posErr
seqErrs[i,len(args),1,0] = angErr
seqErrs[i,len(args),0,1] = posLocalErr
seqErrs[i,len(args),1,1] = angLocalErr
rowLabels = []
for cfgFile in args:
rowLabels.append(os.path.basename(cfgFile).split('.')[0])
rowLabels.append('Average')
csvColLabels = []
csvColLabels.append('Time to Localize (s)')
csvColLabels.append('GPS Error (m)')
for (i,cN) in enumerate(topErrNs):
if cN == 1:
errName = 'MAP'
else:
errName = 'Top {0}'.format(cN)
csvColLabels.append(errName + ' Position Error (m)')
csvColLabels.append(errName + ' Heading Error (deg)')
locErrTypes = ['Overall','Localized']
for (locTypeI,locType) in enumerate(locErrTypes):
csvFID = open(os.path.join(options.out_dir,'{1}-{0}-ErrorTable.csv'.format(locType,srcName)),'wt')
csvWriter = csv.writer(csvFID)
csvWriter.writerow([''] + csvColLabels)
for i in range(seqErrs.shape[1]):
csvWriter.writerow( [rowLabels[i]] + [localizeTimes[i]] + [e for e in seqErrs[:,i,:,locTypeI].reshape(-1)])
csvFID.close()
dispColLabels = []
for (i,cN) in enumerate(displayErrNs):
if cN == 1:
errName = 'MAP'
else:
errName = 'Top {0}'.format(cN)
dispColLabels.append(errName)
dispColLabels.append('Time to Localize (s)')
fig = plt.figure(figsize=(11,8.5),dpi=plotDPI)
errTypes = [('Position','meters','m'),('Heading','degrees', 'deg')];
for (locTypeI,locType) in enumerate(locErrTypes):
fig.clf()
for (errTypeI,errType) in enumerate(errTypes):
cellText = []
for i in range(seqErrs.shape[1]):
cellText.append(['{0:.2f}'.format(e) for (e,cN) in izip(seqErrs[:,i,errTypeI,locTypeI],topErrNs) if cN in displayErrNs ] + ['{0:.2f}'.format(localizeTimes[i])])
ax = fig.add_subplot(2,1,errTypeI+1)
ax.table(cellText = cellText, colLabels = dispColLabels, rowLabels = rowLabels, loc = 'center right')
ax.axis('off')
ax.set_title('{3} {2} {0} Error ({1})'.format(errType[0],errType[1],srcName,locType))
fname = os.path.join(options.out_dir,'{1}-{0}ErrorTable.pdf'.format(locType,srcName))
fig.savefig(fname,dpi=plotDPI)
fig.clf()
for (errTypeI,errType) in enumerate(errTypes):
ax = fig.add_subplot(2,1,errTypeI+1)
ax.plot(topErrNs,seqErrs[:,-1,errTypeI,locTypeI])
ax.set_ylabel('Best Error ({0})'.format(errType[1]))
ax.set_xlabel('Number of Modes')
ax.set_xlim([np.min(plotErrNs),np.max(plotErrNs)])
ax.set_title('{3} {2} {0} Error'.format(errType[0],errType[1],srcName,locType))
fname = os.path.join(options.out_dir,'{1}-{0}AverageErrorPlot.{2}'.format(locType,srcName,options.img_format))
fig.savefig(fname,dpi=plotDPI)
def do_extract_modes(options,args):
nmsNumModes = 200 # Maximum number of modes to return (may actually return fewer)
nmsSegmentLength = 5.0/1000.0 # Segment length to integrate the CDF over for finding initial mode candidates
nmsRadius = 30.0/1000.0 # Any smaller modes within this range are suppressed
nmsCutoffPct = 0.01 # All modes must be within this pct of the top mode
nmsParams = {'nmsNumModes':nmsNumModes, 'nmsSegmentLength':nmsSegmentLength, 'nmsRadius':nmsRadius, 'nmsCutoffPct':nmsCutoffPct}
load_config_file(options,args)
if options.mpi:
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
else:
size = 1
rank = 0
# Load the mapgraph
mapgraph,mapVersion = load_mapgraph(options)
# Get data file and statistics
baseData = pickle.load(open(get_datafile_path(options),'rb'))
mapdynamics = baseData['mapdynamics']
start_frame = baseData['start_frame']
stats_pkl_file = open(get_outfile_path(options,'stats.p'),'rb')
# Open the file with the posteriors
if baseData['mpi']:
post_pkl_files = [ open(get_outfile_path(options,'posteriors-rank{0:03}.p'.format(crank)),'rb') for crank in range(baseData['size']) ]
post = None
for pfile in post_pkl_files:
if post == None:
post = pickle.load(pfile)
else:
post.update(pickle.load(pfile))
else:
post_pkl_file = open(get_outfile_path(options,'posteriors.p'),'rb')
post = pickle.load(post_pkl_file)
# Open output file
modes_pkl_file = open(get_outfile_path(options,'modes.p'),'wb')
pickle.dump(nmsParams,modes_pkl_file)
firstReadDone = False
for curr_obs in baseData['obs']:
t = curr_obs['t']
tInd = curr_obs['tInd']
curr_data_dt = curr_obs['dt']
yt = curr_obs['obs']
if tInd < start_frame:
continue
try:
stepData = pickle.load(stats_pkl_file)
except:
print 'Failed to | |
glob-patterns.
rmany:a.csv,b*.csv - read a.csv and all b-csv's into 1 dataframe.
rmany:a,b* - same thing, .csv extension is tested for as well
Note that strange things may happen if the csv-files have different
column names.
For convenience, the abbreviation r: is an alias for rmany.
'''
# sanity check lhs, rhs
errors = []
if len(lhs) != 0:
errors.append('no lhs-fields allowed')
if len(rhs) < 1:
errors.append('need 1+ rhs-field')
import glob
fnames = []
for fname in rhs:
flist = glob.glob(fname) or glob.glob('{}.csv'.format(fname))
if len(flist) == 0:
errors.append('cant read {}'.format(fname))
else:
fnames.extend(flist)
self.fatal(errors, lhs, rhs)
try:
self.dfm = pd.DataFrame()
dflist = []
for fname in fnames:
df = self.load_csv(fname)
log.info('read {}'.format(fname))
log.info('rows, columns is {}'.format(df.shape))
log.info('column names: {}'.format(df.columns.values))
dflist.append(df)
self.dfm = pd.concat(dflist)
log.info('rows, columns is {}'.format(self.dfm.shape))
log.info('column names: {}'.format(self.dfm.columns.values))
except Exception as e:
self.fatal(['runtime error: {!r}'.format(e)], lhs, rhs)
self.saved = False
return self
def cmd_w(self, lhs, rhs):
    '''
    syntax: w:[filename]
    info: w is short for write
    descr:
        Convenience alias: behaves exactly like the write command.
        See write.
    '''
    # Delegate straight to the canonical implementation.
    return self.cmd_write(lhs, rhs)
def cmd_write(self, lhs, rhs):
    '''
    syntax: write:[filename]
    info: write any existing dataframe in csv-format out to filename/stdout
    descr:
        Usually the last command in a stream that loads a dataset. But can
        also be used to store/show the dataframe in the various stages of
        processing.
        If no filename is given, the dataframe is written to stdout.
        For convenience, the abbreviation w:[filename] is an alias for write:
    '''
    # validate arguments: no lhs-fields, at most one rhs-field (the filename)
    problems = []
    if len(lhs) != 0:
        problems.append('no lhs-fields allowed')
    if len(rhs) == 0:
        target = sys.stdout          # no filename given -> dump to stdout
    elif len(rhs) == 1:
        target = rhs[0]
    else:
        problems.append('need exactly 0 or 1 rhs-field')
    self.fatal(problems, lhs, rhs)   # bails out when problems is non-empty
    try:
        self.to_csv(target)
        if target is not sys.stdout:
            log.info('wrote {!r}'.format(target))
            log.info('rows, columns is {}'.format(self.dfm.shape))
    except Exception as exc:
        self.fatal(['runtime error: {!r}'.format(exc)], lhs, rhs)
    self.saved = True                # dataframe has been persisted
    return self
def _dot_hierarchy(self, fields, fh):
    'create nodes in a hierarchy in dot, if fields > 1'
    # Only emit clustered subgraphs when there is an actual hierarchy
    # (2+ fields); with a single field the plain nodes come from the
    # edge statements instead.
    if len(fields) > 1:
        # create hierarchy of subgraphs (clusters)
        df = self.dfm[fields].sort_values(fields, axis=0)
        df = df.drop_duplicates(keep='first')
        # Index on all but the last field: the index values name the
        # enclosing cluster(s); the one remaining column is the node name.
        df.set_index(fields[:-1], inplace=True)
        prev, idx = (), ()
        # NOTE(review): unpacking itertuples() into (idx, row) relies on
        # exactly one non-index column remaining after set_index -- true
        # here by construction, but fragile if columns are ever added.
        for idx, row in df.itertuples():
            # normalize scalar index (2-field case) to a tuple
            idx = idx if isinstance(idx, tuple) else tuple([idx])
            if prev != idx:
                # close clusters we have left...
                for g in prev:
                    if g not in idx:
                        print('}', file=fh)
                # ...and open the ones we are entering, indented one
                # space per nesting level
                for n,g in enumerate(idx):
                    if g not in prev:
                        print('{}subgraph "cluster_{}" {}'.format(' '*(1+n) ,g, '{'), file=fh)
                        print('{}label="{}";'.format(' '*(1+n), g), file=fh)
                prev = idx
            # the node itself, inside the currently open cluster(s)
            print('"{}";'.format(row), file=fh)
        # close whatever clusters are still open after the last row
        for g in idx:
            print('}', file=fh)
    return 0
def _dot_edges(self, src, dst, label, attrs, fh):
'print edges to fh, possibly with edge attributes'
# edge = [label, attr1, attr2, ..]
# -> turns into [label=label attr1 attr2 ..]
attrs = [] if attrs is None else attrs
label = '' if label is None else label
cols = [src, dst]
if len(label):
cols.append(label)
if len(attrs):
cols.extend(attrs)
df = self.dfm[cols]
df = df.sort_values(cols, axis=0)
df = df.drop_duplicates(keep='first')
for idx, row in df.iterrows():
pattrs = []
if label is not None and len(label):
pattrs.append('label="{}"'.format(row[label]))
for attr in attrs:
pattrs.append(row[attr])
pedge = '[{}]'.format(' '.join(pattrs)) if len(pattrs) else ''
print('"{}" -> "{}"{};'.format(row[src], row[dst], pedge), file=fh)
def cmd_dotify(self, lhs, rhs):
    '''
    syntax: fname[,title]=dotify:srcs,dsts[,attrs]
    info: write a dotfile to file 'fname' using src,dst fields
    descr:
        'dotify:' will write a 'fname' file using srcs,dsts to define the
        graph, possibly using attrs to decorate edges.
        `srcs`=[fx^[fy]]^fz is split on '^' to find 1 or more fields to use
        as source nodes of the graph. The last field will create the actual
        node, any preceding fields will be used to encapsulate the node in a
        subgraph named cluster_fy, which is then enclosed in cluster_fx and so
        on. All fields used in `srcs` must exist in the dataframe.
        `dsts`=[fb^[fc^]]fd is treated likewise, but for destination nodes.
        `attrs` is also split on '^' and should list label and/or edge
        attributes.
        Example:
        apps.dot,web-traffic=dot:sorg^svpn^src_net,dorg^dvpn^dst_net,service^edge
        This would create source nodes from the src_net df-column, using
        the svpn and sorg for enveloping. Likewise for dst_net, dvpn and
        dorg. The attrs 'service^edge' takes the 'service' column as label
        for edges and puts the 'edge' field as edge attributes, so it should
        contain values like 'color=blue', i.e. a string listing valid dot
        edge attributes separated by spaces.
    '''
    # sanity check lhs, rhs
    errors = []
    if len(lhs) not in (1, 2):
        errors.append('need 1 or 2 lhs fields')
    if len(rhs) not in (2, 3):
        errors.append('need 2 or 3 rhs fields')
    self.fatal(errors, lhs, rhs)
    # decompose rhs and check for errors
    srcs = list(filter(None, rhs[0].split('^')))
    self.check_fields(errors, srcs)
    dsts = list(filter(None, rhs[1].split('^')))
    self.check_fields(errors, dsts)
    # BUG FIX: `attrs` used to be assigned only inside the len(rhs)==3
    # branch, so a 2-field rhs crashed with NameError at the _dot_edges
    # call below.  Initialize both defaults up front.
    label = ''
    attrs = None
    if len(rhs) == 3:
        label, *attrs = rhs[2].split('^')
        if len(label):
            self.check_fields(errors, [label])
        else:
            label = None
        attrs = list(filter(None, attrs))
        if len(attrs):
            self.check_fields(errors, attrs)
        else:
            attrs = None
    self.fatal(errors, lhs, rhs)
    # fix: format string has one placeholder; the extra rhs[0] argument of
    # the original call was silently dropped
    log.debug('writing dot file {!r}'.format(lhs))
    log.debug('source columns {}'.format(srcs))
    log.debug('destination columns {}'.format(dsts))
    fname = lhs[0]
    title = None if len(lhs) == 1 else lhs[1]
    fh = open(fname, 'wt') if len(fname) else sys.stdout
    try:
        # standard header
        print('digraph dfm {', file=fh)
        print(' overlap=scale;', file=fh)
        print(' ranksep="1.5 equally";', file=fh)
        print(' rankdir=LR;', file=fh)
        if title is not None:
            print(' labelloc="t";', file=fh)
            print(' label="{}";'.format(title), file=fh)
        # cluster hierarchies first, then the edges between the leaf fields
        self._dot_hierarchy(srcs, fh)
        self._dot_hierarchy(dsts, fh)
        self._dot_edges(srcs[-1], dsts[-1], label, attrs, fh)
        print('}', file=fh)  # close the digraph
    finally:
        # fix: close the dot file even when writing fails (the original
        # leaked fh on error, compared with != instead of identity, and
        # carried a dead `try: pass / except` block that could never
        # catch anything)
        if fh is not sys.stdout:
            fh.close()
    return self
def cmd_help(self, lhs, rhs):
    '''
    syntax: help:[cmd,..]
    info: print help documentation and exit
    descr:
        'help:' will print documentation for all available commands, or just
        for those listed. This command takes precedence over all other
        commands in the command stream on the cli. That is, if given only
        this command will run and the program terminates with exit(0).
    '''
    # force plain INFO-level output so the help text always reaches the user
    log_switch(logging.INFO, logging.INFO,
               logging.Formatter('%(message)s'))
    log.info('\n')
    if len(lhs):
        log.info('ignoring {}'.format(lhs))
        log.info('see help syntax')
    # no explicit topics means: document every known command
    topics = rhs if len(rhs) else list(self.hlp)
    for name in topics:
        # accept both 'write' and 'cmd_write' spellings
        entry = self.hlp.get(name, None) or self.hlp.get('cmd_{}'.format(name), None)
        if entry is None:
            # unknown command: list everything that is available
            log.info('{!r} not a command, available are:'.format(name))
            for key, known in self.hlp.items():
                log.info(' - {}: {}'.format(key.replace('cmd_', ''), known['info']))
            continue
        log.info('### {:.9} - {}'.format(name.replace('cmd_', ''), entry['syntax']))
        log.info('')
        log.info('information:\n {}'.format(entry['info']))
        log.info('\ndescription:\n')
        for line in entry['descr'].splitlines():
            log.info(' {}'.format(line))
    log.info('\n')
    sys.exit(0)
def cmd_info(self, lhs, rhs):
    '''
    syntax: info:
    info: prints information about current invocation and exits
    descr:
        When called, 'info:' will print out information about how the
        program was called and what the command stream looks like. This may
        be helpful to check how a command line invocation is being
        interpreted. Note that this command takes precedence over all other
        commands except 'help'. 'info:' also terminates with exit(0).
    '''
    log.info('\n' + '-'*30)
    # switch loglevels so all log.info always show up on console
    # NOTE(review): org_levels is never used afterwards -- presumably the
    # original levels were meant to be restored, but sys.exit(0) below makes
    # that moot; confirm before removing.
    org_levels = log_switch(logging.INFO, logging.INFO,
                            logging.Formatter('%(message)s'))
    log.info(args.prog)
    log.info( '\nflags:')
    log.info( ' log level (-v or -d) {}'.format(
        logging.getLevelName(args.log_level)))
    if len(args.command):
        log.info( '\ncli command stream:')
        # width of the widest original command string, for aligned output
        maxl = max(len(c) for c in args.command)
        for idx, (org, cmd, lhs, rhs) in enumerate(args.cmds):
            # NOTE(review): the trailing ') ' in this format string has no
            # matching '(' in the output -- looks like a leftover; confirm.
            log.info(' cmd {:02} '.format(idx) +
                     '{:{w}} => {}={}:{}) '.format(org, lhs, cmd, rhs,
                                                   w=maxl))
    log.info( '\navailable cmds:')
    for k, v in sorted(self.hlp.items()):
        # k is 'cmd_<name>'; k[4:] strips the 'cmd_' prefix
        log.info('{} - {}'.format(k[4:], v['info']))
    if self.dfm is not None:
        log.info( '\nCurrent DataFrame:')
        log.info( ' {} rows by {} columns'.format(*self.dfm.shape))
        maxl = max(len(c) for c in self.dfm.columns.values)+2 # +2 for !r quotes
        log.info( ' {:{w}} - {}'.format('Column', 'DataType', w=maxl))
        for col in self.dfm.columns:
            log.info(' {!r:{w}} {}'.format(col, self.dfm[col].dtype, w=maxl))
        log.info(' {:{w}} {}'.format('<index>', self.dfm.index.dtype, w=maxl))
        log.info('\nFirst 3 rows:')
        log.info(self.dfm.head(3))
    log.info( '\n' + '-'*60 + 'info end\n')
    sys.exit(0)
def cmd_show(self, lhs, rhs):
'''
syntax: show:[start,stop]
info: prints dataframe information and some rows to stderr
descr:
Each time show: is used in a command stream, it will log information
about the dataframe in its current state to stderr. This may be
helpful when analyzing what a series of commands is doing with the
dataframe.
Sample rows are printed via df.iloc[start:stop], where
start,stop = 0,5 if not given. If only 1 number is given, shows
that many lines from | |
2.3960170377866*m.b557 >= -3.49462932645471)
m.c1732 = Constraint(expr= m.x106 - m.x107 - 2.3960170377866*m.b558 >= -3.49462932645471)
m.c1733 = Constraint(expr= m.x107 - m.x108 - 2.3960170377866*m.b559 >= -3.49462932645471)
m.c1734 = Constraint(expr= m.x109 - m.x110 - 2.28868218782505*m.b549 >= -3.38729447649316)
m.c1735 = Constraint(expr= m.x110 - m.x111 - 2.28868218782505*m.b550 >= -3.38729447649316)
m.c1736 = Constraint(expr= m.x111 - m.x112 - 2.28868218782505*m.b551 >= -3.38729447649316)
m.c1737 = Constraint(expr= m.x112 - m.x113 - 2.28868218782505*m.b552 >= -3.38729447649316)
m.c1738 = Constraint(expr= m.x113 - m.x114 - 2.28868218782505*m.b553 >= -3.38729447649316)
m.c1739 = Constraint(expr= m.x114 - m.x115 - 2.28868218782505*m.b554 >= -3.38729447649316)
m.c1740 = Constraint(expr= m.x115 - m.x116 - 2.28868218782505*m.b555 >= -3.38729447649316)
m.c1741 = Constraint(expr= m.x116 - m.x117 - 2.28868218782505*m.b556 >= -3.38729447649316)
m.c1742 = Constraint(expr= m.x117 - m.x118 - 2.28868218782505*m.b557 >= -3.38729447649316)
m.c1743 = Constraint(expr= m.x118 - m.x119 - 2.28868218782505*m.b558 >= -3.38729447649316)
m.c1744 = Constraint(expr= m.x119 - m.x120 - 2.28868218782505*m.b559 >= -3.38729447649316)
m.c1745 = Constraint(expr= m.x121 - m.x122 - 2.32790290097834*m.b549 >= -3.42651518964645)
m.c1746 = Constraint(expr= m.x122 - m.x123 - 2.32790290097834*m.b550 >= -3.42651518964645)
m.c1747 = Constraint(expr= m.x123 - m.x124 - 2.32790290097834*m.b551 >= -3.42651518964645)
m.c1748 = Constraint(expr= m.x124 - m.x125 - 2.32790290097834*m.b552 >= -3.42651518964645)
m.c1749 = Constraint(expr= m.x125 - m.x126 - 2.32790290097834*m.b553 >= -3.42651518964645)
m.c1750 = Constraint(expr= m.x126 - m.x127 - 2.32790290097834*m.b554 >= -3.42651518964645)
m.c1751 = Constraint(expr= m.x127 - m.x128 - 2.32790290097834*m.b555 >= -3.42651518964645)
m.c1752 = Constraint(expr= m.x128 - m.x129 - 2.32790290097834*m.b556 >= -3.42651518964645)
m.c1753 = Constraint(expr= m.x129 - m.x130 - 2.32790290097834*m.b557 >= -3.42651518964645)
m.c1754 = Constraint(expr= m.x130 - m.x131 - 2.32790290097834*m.b558 >= -3.42651518964645)
m.c1755 = Constraint(expr= m.x131 - m.x132 - 2.32790290097834*m.b559 >= -3.42651518964645)
m.c1756 = Constraint(expr= m.x133 - m.x134 - 2.17630492708372*m.b549 >= -3.27491721575183)
m.c1757 = Constraint(expr= m.x134 - m.x135 - 2.17630492708372*m.b550 >= -3.27491721575183)
m.c1758 = Constraint(expr= m.x135 - m.x136 - 2.17630492708372*m.b551 >= -3.27491721575183)
m.c1759 = Constraint(expr= m.x136 - m.x137 - 2.17630492708372*m.b552 >= -3.27491721575183)
m.c1760 = Constraint(expr= m.x137 - m.x138 - 2.17630492708372*m.b553 >= -3.27491721575183)
m.c1761 = Constraint(expr= m.x138 - m.x139 - 2.17630492708372*m.b554 >= -3.27491721575183)
m.c1762 = Constraint(expr= m.x139 - m.x140 - 2.17630492708372*m.b555 >= -3.27491721575183)
m.c1763 = Constraint(expr= m.x140 - m.x141 - 2.17630492708372*m.b556 >= -3.27491721575183)
m.c1764 = Constraint(expr= m.x141 - m.x142 - 2.17630492708372*m.b557 >= -3.27491721575183)
m.c1765 = Constraint(expr= m.x142 - m.x143 - 2.17630492708372*m.b558 >= -3.27491721575183)
m.c1766 = Constraint(expr= m.x143 - m.x144 - 2.17630492708372*m.b559 >= -3.27491721575183)
m.c1767 = Constraint(expr= m.x145 - m.x146 - 2.08253202659828*m.b549 >= -3.18114431526639)
m.c1768 = Constraint(expr= m.x146 - m.x147 - 2.08253202659828*m.b550 >= -3.18114431526639)
m.c1769 = Constraint(expr= m.x147 - m.x148 - 2.08253202659828*m.b551 >= -3.18114431526639)
m.c1770 = Constraint(expr= m.x148 - m.x149 - 2.08253202659828*m.b552 >= -3.18114431526639)
m.c1771 = Constraint(expr= m.x149 - m.x150 - 2.08253202659828*m.b553 >= -3.18114431526639)
m.c1772 = Constraint(expr= m.x150 - m.x151 - 2.08253202659828*m.b554 >= -3.18114431526639)
m.c1773 = Constraint(expr= m.x151 - m.x152 - 2.08253202659828*m.b555 >= -3.18114431526639)
m.c1774 = Constraint(expr= m.x152 - m.x153 - 2.08253202659828*m.b556 >= -3.18114431526639)
m.c1775 = Constraint(expr= m.x153 - m.x154 - 2.08253202659828*m.b557 >= -3.18114431526639)
m.c1776 = Constraint(expr= m.x154 - m.x155 - 2.08253202659828*m.b558 >= -3.18114431526639)
m.c1777 = Constraint(expr= m.x155 - m.x156 - 2.08253202659828*m.b559 >= -3.18114431526639)
m.c1778 = Constraint(expr= m.x157 - m.x158 - 1.88519141402061*m.b549 >= -2.98380370268872)
m.c1779 = Constraint(expr= m.x158 - m.x159 - 1.88519141402061*m.b550 >= -2.98380370268872)
m.c1780 = Constraint(expr= m.x159 - m.x160 - 1.88519141402061*m.b551 >= -2.98380370268872)
m.c1781 = Constraint(expr= m.x160 - m.x161 - 1.88519141402061*m.b552 >= -2.98380370268872)
m.c1782 = Constraint(expr= m.x161 - m.x162 - 1.88519141402061*m.b553 >= -2.98380370268872)
m.c1783 = Constraint(expr= m.x162 - m.x163 - 1.88519141402061*m.b554 >= -2.98380370268872)
m.c1784 = Constraint(expr= m.x163 - m.x164 - 1.88519141402061*m.b555 >= -2.98380370268872)
m.c1785 = Constraint(expr= m.x164 - m.x165 - 1.88519141402061*m.b556 >= -2.98380370268872)
m.c1786 = Constraint(expr= m.x165 - m.x166 - 1.88519141402061*m.b557 >= -2.98380370268872)
m.c1787 = Constraint(expr= m.x166 - m.x167 - 1.88519141402061*m.b558 >= -2.98380370268872)
m.c1788 = Constraint(expr= m.x167 - m.x168 - 1.88519141402061*m.b559 >= -2.98380370268872)
m.c1789 = Constraint(expr= m.x169 - m.x170 - 2.45174823131027*m.b549 >= -3.55036051997838)
m.c1790 = Constraint(expr= m.x170 - m.x171 - 2.45174823131027*m.b550 >= -3.55036051997838)
m.c1791 = Constraint(expr= m.x171 - m.x172 - 2.45174823131027*m.b551 >= -3.55036051997838)
m.c1792 = Constraint(expr= m.x172 - m.x173 - 2.45174823131027*m.b552 >= -3.55036051997838)
m.c1793 = Constraint(expr= m.x173 - m.x174 - 2.45174823131027*m.b553 >= -3.55036051997838)
m.c1794 = Constraint(expr= m.x174 - m.x175 - 2.45174823131027*m.b554 >= -3.55036051997838)
m.c1795 = Constraint(expr= m.x175 - m.x176 - 2.45174823131027*m.b555 >= -3.55036051997838)
m.c1796 = Constraint(expr= m.x176 - m.x177 - 2.45174823131027*m.b556 >= -3.55036051997838)
m.c1797 = Constraint(expr= m.x177 - m.x178 - 2.45174823131027*m.b557 >= -3.55036051997838)
m.c1798 = Constraint(expr= m.x178 - m.x179 - 2.45174823131027*m.b558 >= -3.55036051997838)
m.c1799 = Constraint(expr= m.x179 - m.x180 - 2.45174823131027*m.b559 >= -3.55036051997838)
m.c1800 = Constraint(expr= m.x181 - m.x182 - 2.83303050362969*m.b549 >= -3.9316427922978)
m.c1801 = Constraint(expr= m.x182 - m.x183 - 2.83303050362969*m.b550 >= -3.9316427922978)
m.c1802 = Constraint(expr= m.x183 - m.x184 - 2.83303050362969*m.b551 >= -3.9316427922978)
m.c1803 = Constraint(expr= m.x184 - m.x185 - 2.83303050362969*m.b552 >= -3.9316427922978)
m.c1804 = Constraint(expr= m.x185 - m.x186 - 2.83303050362969*m.b553 >= -3.9316427922978)
m.c1805 = Constraint(expr= m.x186 - m.x187 - 2.83303050362969*m.b554 >= -3.9316427922978)
m.c1806 = Constraint(expr= m.x187 - m.x188 - 2.83303050362969*m.b555 >= -3.9316427922978)
m.c1807 = Constraint(expr= m.x188 - m.x189 - 2.83303050362969*m.b556 >= -3.9316427922978)
m.c1808 = Constraint(expr= m.x189 - m.x190 - 2.83303050362969*m.b557 >= -3.9316427922978)
m.c1809 = Constraint(expr= m.x190 - m.x191 - 2.83303050362969*m.b558 >= -3.9316427922978)
m.c1810 = Constraint(expr= m.x191 - m.x192 - 2.83303050362969*m.b559 >= -3.9316427922978)
m.c1811 = Constraint(expr= m.x193 - m.x194 - 1.83716337325342*m.b549 >= -2.93577566192153)
m.c1812 = Constraint(expr= m.x194 - m.x195 - 1.83716337325342*m.b550 >= -2.93577566192153)
m.c1813 = Constraint(expr= m.x195 - m.x196 - 1.83716337325342*m.b551 >= -2.93577566192153)
m.c1814 = Constraint(expr= m.x196 - m.x197 - 1.83716337325342*m.b552 >= -2.93577566192153)
m.c1815 = Constraint(expr= m.x197 - m.x198 - 1.83716337325342*m.b553 >= -2.93577566192153)
m.c1816 = Constraint(expr= m.x198 - m.x199 - 1.83716337325342*m.b554 >= -2.93577566192153)
m.c1817 = Constraint(expr= m.x199 - m.x200 - 1.83716337325342*m.b555 >= -2.93577566192153)
m.c1818 = Constraint(expr= m.x200 - m.x201 - 1.83716337325342*m.b556 >= -2.93577566192153)
m.c1819 = Constraint(expr= m.x201 - m.x202 - 1.83716337325342*m.b557 >= -2.93577566192153)
m.c1820 = Constraint(expr= m.x202 - m.x203 - 1.83716337325342*m.b558 >= -2.93577566192153)
m.c1821 = Constraint(expr= m.x203 - m.x204 - 1.83716337325342*m.b559 >= -2.93577566192153)
m.c1822 = Constraint(expr= m.x205 - m.x206 - 1.39146570952238*m.b549 >= -2.49007799819049)
m.c1823 = Constraint(expr= m.x206 - m.x207 - 1.39146570952238*m.b550 >= -2.49007799819049)
m.c1824 = Constraint(expr= m.x207 - m.x208 - 1.39146570952238*m.b551 >= -2.49007799819049)
m.c1825 = Constraint(expr= m.x208 - m.x209 - 1.39146570952238*m.b552 >= -2.49007799819049)
m.c1826 = Constraint(expr= m.x209 - m.x210 - 1.39146570952238*m.b553 >= -2.49007799819049)
m.c1827 = Constraint(expr= m.x210 - m.x211 - 1.39146570952238*m.b554 >= -2.49007799819049)
m.c1828 = Constraint(expr= m.x211 - m.x212 - 1.39146570952238*m.b555 >= -2.49007799819049)
m.c1829 = Constraint(expr= m.x212 - m.x213 - 1.39146570952238*m.b556 >= -2.49007799819049)
m.c1830 = Constraint(expr= m.x213 - m.x214 - 1.39146570952238*m.b557 >= -2.49007799819049)
m.c1831 = Constraint(expr= m.x214 - m.x215 - 1.39146570952238*m.b558 >= -2.49007799819049)
m.c1832 = Constraint(expr= m.x215 - m.x216 - 1.39146570952238*m.b559 >= -2.49007799819049)
m.c1833 = Constraint(expr= m.x217 - m.x218 - 2.3359350726756*m.b549 >= -3.43454736134371)
m.c1834 = Constraint(expr= m.x218 - m.x219 - 2.3359350726756*m.b550 >= -3.43454736134371)
m.c1835 = Constraint(expr= m.x219 - m.x220 - 2.3359350726756*m.b551 >= -3.43454736134371)
m.c1836 = Constraint(expr= m.x220 - m.x221 - 2.3359350726756*m.b552 >= -3.43454736134371)
m.c1837 = Constraint(expr= m.x221 - m.x222 - 2.3359350726756*m.b553 >= -3.43454736134371)
m.c1838 = Constraint(expr= m.x222 - m.x223 - 2.3359350726756*m.b554 >= -3.43454736134371)
m.c1839 = Constraint(expr= m.x223 - m.x224 - 2.3359350726756*m.b555 >= -3.43454736134371)
m.c1840 = Constraint(expr= m.x224 - m.x225 - 2.3359350726756*m.b556 >= -3.43454736134371)
m.c1841 = Constraint(expr= m.x225 - m.x226 - 2.3359350726756*m.b557 >= -3.43454736134371)
m.c1842 = Constraint(expr= m.x226 - m.x227 - 2.3359350726756*m.b558 >= -3.43454736134371)
m.c1843 = Constraint(expr= m.x227 - m.x228 - 2.3359350726756*m.b559 >= -3.43454736134371)
m.c1844 = Constraint(expr= m.x229 - m.x230 - 1.66080639761833*m.b549 >= -2.75941868628644)
m.c1845 = Constraint(expr= m.x230 - m.x231 - 1.66080639761833*m.b550 >= -2.75941868628644)
m.c1846 = Constraint(expr= m.x231 - m.x232 - 1.66080639761833*m.b551 >= -2.75941868628644)
m.c1847 = Constraint(expr= m.x232 - m.x233 - 1.66080639761833*m.b552 >= -2.75941868628644)
m.c1848 = Constraint(expr= m.x233 - m.x234 - 1.66080639761833*m.b553 >= -2.75941868628644)
m.c1849 = Constraint(expr= m.x234 - m.x235 - 1.66080639761833*m.b554 >= -2.75941868628644)
m.c1850 = Constraint(expr= m.x235 - m.x236 - 1.66080639761833*m.b555 >= -2.75941868628644)
m.c1851 = Constraint(expr= m.x236 - m.x237 - 1.66080639761833*m.b556 >= -2.75941868628644)
m.c1852 = Constraint(expr= m.x237 - m.x238 - 1.66080639761833*m.b557 >= -2.75941868628644)
m.c1853 = Constraint(expr= m.x238 - m.x239 - 1.66080639761833*m.b558 >= -2.75941868628644)
m.c1854 = Constraint(expr= m.x239 - m.x240 - 1.66080639761833*m.b559 >= -2.75941868628644)
m.c1855 = Constraint(expr= m.x241 - m.x242 - 3.57341020430782*m.b549 >= -4.67202249297593)
m.c1856 = Constraint(expr= m.x242 - m.x243 - 3.57341020430782*m.b550 >= -4.67202249297593)
m.c1857 = Constraint(expr= m.x243 - m.x244 - 3.57341020430782*m.b551 >= -4.67202249297593)
m.c1858 = Constraint(expr= m.x244 - m.x245 - 3.57341020430782*m.b552 >= -4.67202249297593)
m.c1859 = Constraint(expr= m.x245 - m.x246 - 3.57341020430782*m.b553 >= -4.67202249297593)
m.c1860 = Constraint(expr= m.x246 - m.x247 - 3.57341020430782*m.b554 >= -4.67202249297593)
m.c1861 = Constraint(expr= m.x247 - m.x248 - 3.57341020430782*m.b555 >= -4.67202249297593)
m.c1862 = Constraint(expr= m.x248 - m.x249 - 3.57341020430782*m.b556 >= -4.67202249297593)
m.c1863 = Constraint(expr= m.x249 - m.x250 - 3.57341020430782*m.b557 >= -4.67202249297593)
m.c1864 = Constraint(expr= m.x250 - m.x251 - 3.57341020430782*m.b558 >= -4.67202249297593)
m.c1865 = Constraint(expr= m.x251 - m.x252 - 3.57341020430782*m.b559 >= -4.67202249297593)
m.c1867 = Constraint(expr= m.x278 - 18.8261458520605*m.b549 <= -9.21034037197618)
m.c1868 = Constraint(expr= m.x279 - 18.8261458520605*m.b550 <= -9.21034037197618)
m.c1869 = Constraint(expr= m.x280 - 18.8261458520605*m.b551 <= -9.21034037197618)
m.c1870 = Constraint(expr= m.x281 - 18.8261458520605*m.b552 <= -9.21034037197618)
m.c1871 = Constraint(expr= m.x282 - 18.8261458520605*m.b553 <= -9.21034037197618)
m.c1872 = Constraint(expr= m.x283 - 18.8261458520605*m.b554 <= -9.21034037197618)
m.c1873 = Constraint(expr= m.x284 - 18.8261458520605*m.b555 <= -9.21034037197618)
m.c1874 = Constraint(expr= m.x285 - 18.8261458520605*m.b556 <= -9.21034037197618)
m.c1875 = Constraint(expr= m.x286 - 18.8261458520605*m.b557 <= -9.21034037197618)
m.c1876 = Constraint(expr= m.x287 - 18.8261458520605*m.b558 <= -9.21034037197618)
m.c1877 = | |
<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import pprint
from subprocess import Popen, PIPE
import os
from future.tests.base import CodeHandler, unittest
class TestFuturizeSimple(CodeHandler):
    """
    This class contains snippets of Python 2 code (invalid Python 3) and
    tests for whether they can be passed to ``futurize`` and immediately
    run under both Python 2 again and Python 3.
    """

    @unittest.expectedFailure
    def test_problematic_string(self):
        """ This string generates a SyntaxError on Python 3 unless it has
        an r prefix.
        """
        before = r"""
        s = 'The folder is "C:\Users"'.
        """
        after = r"""
        s = r'The folder is "C:\Users"'.
        """
        self.convert_check(before, after)

    def test_import_builtins(self):
        # Py2 builtin usages should gain the appropriate future.builtins
        # imports and call rewrites (raw_input -> input, xrange -> range,
        # filter/map results wrapped in list(), ...).
        before = """
        a = raw_input()
        b = open(a, b, c)
        c = filter(a, b)
        d = map(a, b)
        e = isinstance(a, str)
        f = bytes(a, encoding='utf-8')
        for g in xrange(10**10):
            pass
        super(MyClass, self)
        """
        after = """
        from __future__ import unicode_literals
        from future.builtins import bytes
        from future.builtins import filter
        from future.builtins import input
        from future.builtins import map
        from future.builtins import open
        from future.builtins import range
        from future.builtins import super
        a = input()
        b = open(a, b, c)
        c = list(filter(a, b))
        d = list(map(a, b))
        e = isinstance(a, str)
        f = bytes(a, encoding='utf-8')
        for g in range(10**10):
            pass
        super(MyClass, self)
        """
        self.convert_check(before, after, ignore_imports=False, run=False)

    def test_xrange(self):
        # Only checks that conversion succeeds, not the converted output.
        code = '''
        for i in xrange(10):
            pass
        '''
        self.convert(code)

    @unittest.expectedFailure
    def test_source_coding_utf8(self):
        """
        Tests to ensure that the source coding line is not corrupted or
        removed. It must be left as the first line in the file (including
        before any __future__ imports). Also tests whether the unicode
        characters in this encoding are parsed correctly and left alone.
        """
        code = """
        # -*- coding: utf-8 -*-
        icons = [u"◐", u"◓", u"◑", u"◒"]
        """
        self.unchanged(code)

    def test_exception_syntax(self):
        """
        Test of whether futurize handles the old-style exception syntax
        """
        before = """
        try:
            pass
        except IOError, e:
            val = e.errno
        """
        after = """
        try:
            pass
        except IOError as e:
            val = e.errno
        """
        self.convert_check(before, after)

    def test_super(self):
        """
        This tests whether futurize keeps the old two-argument super() calls the
        same as before. It should, because this still works in Py3.
        """
        code = '''
        class VerboseList(list):
            def append(self, item):
                print('Adding an item')
                super(VerboseList, self).append(item)
        '''
        self.unchanged(code)

    @unittest.expectedFailure
    def test_file(self):
        """
        file() as a synonym for open() is obsolete and invalid on Python 3.
        """
        before = '''
        f = file(__file__)
        data = f.read()
        f.close()
        '''
        after = '''
        f = open(__file__)
        data = f.read()
        f.close()
        '''
        self.convert_check(before, after)

    def test_apply(self):
        # apply() was removed in Py3; futurize should unpack the args inline.
        before = '''
        def addup(*x):
            return sum(x)
        assert apply(addup, (10,20)) == 30
        '''
        after = """
        def addup(*x):
            return sum(x)
        assert addup(*(10,20)) == 30
        """
        self.convert_check(before, after)

    @unittest.skip('not implemented yet')
    def test_download_pypi_package_and_test(self, package_name='future'):
        # Would download a package's metadata from PyPI and smoke-test it
        # (network access -- hence skipped).
        URL = 'http://pypi.python.org/pypi/{0}/json'
        import requests
        r = requests.get(URL.format(package_name))
        pprint.pprint(r.json())
        download_url = r.json()['urls'][0]['url']
        filename = r.json()['urls'][0]['filename']
        # r2 = requests.get(download_url)
        # with open('/tmp/' + filename, 'w') as tarball:
        #     tarball.write(r2.content)

    def test_raw_input(self):
        """
        Passes in a string to the waiting input() after futurize
        conversion.
        The code is the first snippet from these docs:
        http://docs.python.org/2/library/2to3.html
        """
        before = """
        def greet(name):
            print "Hello, {0}!".format(name)
        print "What's your name?"
        name = raw_input()
        greet(name)
        """
        desired = """
        def greet(name):
            print("Hello, {0}!".format(name))
        print("What's your name?")
        name = input()
        greet(name)
        """
        self.convert_check(before, desired, run=False)
        # run the converted script under each interpreter and feed it input
        for interpreter in self.interpreters:
            p1 = Popen([interpreter, self.tempdir + 'mytestscript.py'],
                       stdout=PIPE, stdin=PIPE, stderr=PIPE, env=self.env)
            (stdout, stderr) = p1.communicate(b'Ed')
            self.assertEqual(stdout, b"What's your name?\nHello, Ed!\n")

    def test_literal_prefixes_are_not_stripped(self):
        """
        Tests to ensure that the u'' and b'' prefixes on unicode strings and
        byte strings are not removed by the futurize script. Removing the
        prefixes on Py3.3+ is unnecessary and loses some information -- namely,
        that the strings have explicitly been marked as unicode or bytes,
        rather than just e.g. a guess by some automated tool about what they
        are.
        """
        code = '''
        s = u'unicode string'
        b = b'byte string'
        '''
        self.unchanged(code)

    @unittest.expectedFailure
    def test_division(self):
        """
        TODO: implement this!
        """
        before = """
        x = 1 / 2
        """
        after = """
        from future.utils import old_div
        x = old_div(1, 2)
        """
        self.convert_check(before, after, stages=[1])
class TestFuturizeRenamedStdlib(CodeHandler):
    # Checks the stage-2 renaming of Py2 stdlib modules to their Py3 names.

    def test_renamed_modules(self):
        # ConfigParser/copy_reg/cPickle/cStringIO -> configparser/copyreg/
        # pickle/io, including call-site rewrites.
        before = """
        import ConfigParser
        import copy_reg
        import cPickle
        import cStringIO
        s = cStringIO.StringIO('blah')
        """
        after = """
        import configparser
        import copyreg
        import pickle
        import io
        s = io.StringIO('blah')
        """
        self.convert_check(before, after)

    @unittest.expectedFailure
    def test_urllib_refactor(self):
        # Code like this using urllib is refactored by futurize --stage2 to use
        # the new Py3 module names, but ``future`` doesn't support urllib yet.
        before = """
        import urllib
        URL = 'http://pypi.python.org/pypi/future/json'
        package_name = 'future'
        r = urllib.urlopen(URL.format(package_name))
        data = r.read()
        """
        after = """
        import urllib.request
        URL = 'http://pypi.python.org/pypi/future/json'
        package_name = 'future'
        r = urllib.request.urlopen(URL.format(package_name))
        data = r.read()
        """
        self.convert_check(before, after)

    def test_renamed_copy_reg_and_cPickle_modules(self):
        """
        Example from docs.python.org/2/library/copy_reg.html
        """
        before = """
        import copy_reg
        import copy
        import cPickle
        class C(object):
            def __init__(self, a):
                self.a = a
        def pickle_c(c):
            print('pickling a C instance...')
            return C, (c.a,)
        copy_reg.pickle(C, pickle_c)
        c = C(1)
        d = copy.copy(c)
        p = cPickle.dumps(c)
        """
        after = """
        import copyreg
        import copy
        import pickle
        class C(object):
            def __init__(self, a):
                self.a = a
        def pickle_c(c):
            print('pickling a C instance...')
            return C, (c.a,)
        copyreg.pickle(C, pickle_c)
        c = C(1)
        d = copy.copy(c)
        p = pickle.dumps(c)
        """
        self.convert_check(before, after)

    @unittest.expectedFailure
    def test_Py2_StringIO_module(self):
        """
        Ideally, there would be a fixer for this. For now:
        TODO: add the Py3 equivalent for this to the docs
        """
        before = """
        import cStringIO
        s = cStringIO.StringIO('my string')
        assert isinstance(s, cStringIO.InputType)
        """
        after = """
        import io
        s = io.StringIO('my string')
        # assert isinstance(s, io.InputType)
        # There is no io.InputType in Python 3. What should we change this to
        # instead?
        """
        self.convert_check(before, after)
class TestFuturizeStage1(CodeHandler):
"""
Tests "stage 1": safe optimizations: modernizing Python 2 code so that it
uses print functions, new-style exception syntax, etc.
The behaviour should not change and this should introduce no dependency on
the ``future`` package. It produces more modern Python 2-only code. The
goal is to reduce the size of the real porting patch-set by performing
the uncontroversial patches first.
"""
def test_apply(self):
    """
    apply() should be changed by futurize --stage1
    """
    # apply(f, args) is expected to become plain argument unpacking.
    legacy = '''
def f(a, b):
return a + b
args = (1, 2)
assert apply(f, args) == 3
assert apply(f, ('a', 'b')) == 'ab'
'''
    modern = '''
def f(a, b):
return a + b
args = (1, 2)
assert f(*args) == 3
assert f(*('a', 'b')) == 'ab'
'''
    self.convert_check(legacy, modern, stages=[1])
def test_xrange(self):
    """
    xrange should not be changed by futurize --stage1
    """
    # Stage 1 is Py2-safe, so Py2-only builtins must survive untouched.
    snippet = '''
for i in xrange(10):
pass
'''
    self.unchanged(snippet, stages=[1])
@unittest.expectedFailure
def test_absolute_import_changes(self):
    """
    Implicit relative imports should be converted to absolute or explicit
    relative imports correctly.

    Issue #16 (with porting bokeh/bbmodel.py)
    """
    # NOTE(review): specialmodels.py is written to the CWD and never
    # removed here — presumably CodeHandler cleans up the working
    # directory; verify, otherwise the file leaks between tests.
    with open('specialmodels.py', 'w') as f:
        f.write('pass')
    before = """
import specialmodels.pandasmodel
specialmodels.pandasmodel.blah()
"""
    after = """
from __future__ import absolute_import
from .specialmodels import pandasmodel
pandasmodel.blah()
"""
    self.convert_check(before, after, stages=[1])
def test_safe_futurize_imports(self):
    """
    The standard library module names should not be changed until stage 2
    """
    # Old-style stdlib names (ConfigParser, HTMLParser) must pass
    # through stage 1 unmodified.
    stage1_input = """
import ConfigParser
import HTMLParser
import collections
ConfigParser.ConfigParser
HTMLParser.HTMLParser
d = collections.OrderedDict()
"""
    self.unchanged(stage1_input, stages=[1])
def test_print(self):
    """
    print statements become print() calls under futurize --stage1.
    """
    # (legacy, expected) pairs; the second case covers stream redirection.
    cases = [
        (
            """
print 'Hello'
""",
            """
print('Hello')
""",
        ),
        (
            """
import sys
print >> sys.stderr, 'Hello', 'world'
""",
            """
import sys
print('Hello', 'world', file=sys.stderr)
""",
        ),
    ]
    for legacy, expected in cases:
        self.convert_check(legacy, expected, stages=[1])
def test_print_already_function(self):
    """
    Running futurize --stage1 should not add a second set of parentheses
    """
    # Already-valid print() calls must be left exactly as they are.
    before = """
print('Hello')
"""
    self.unchanged(before, stages=[1])
@unittest.expectedFailure
def test_print_already_function_complex(self):
"""
Running futurize --stage1 does add a second second set of parentheses
in this case. This is because the underlying lib2to3 has two distinct
grammars -- with a print statement and with a print function -- and,
when going forwards (2 to both), futurize assumes print is a statement,
which raises a ParseError.
"""
before = """
| |
body)
self.assertIn(b"last project", body)
class SeleniumTest(MyHTTPTestCase):
    """Base class for tests driven through a real browser via Selenium.

    The browser is selected with the TAGUETTE_TEST_WEBDRIVER environment
    variable ('firefox' or 'chromium'). All blocking WebDriver calls are
    funneled through a single-thread executor so they can be awaited from
    the async test bodies without blocking the event loop.
    """
    def setUp(self):
        super(SeleniumTest, self).setUp()
        from selenium import webdriver
        if os.environ['TAGUETTE_TEST_WEBDRIVER'] == 'firefox':
            self.driver = webdriver.Firefox()
        elif os.environ['TAGUETTE_TEST_WEBDRIVER'] == 'chromium':
            from selenium.webdriver.chrome.options import Options
            options = Options()
            # Flags typically needed to run Chrome inside CI containers.
            options.add_argument('--disable-gpu')
            options.add_argument('--disable-dev-shm-usage')
            options.add_argument('--no-sandbox')
            self.driver = webdriver.Chrome(options=options)
        else:
            # TAGUETTE_TEST_WEBDRIVER set to an unsupported value
            raise EnvironmentError
        self.driver.set_window_size(1024, 768)
        # Single worker: serializes every WebDriver call made by the
        # s_* async helpers below.
        self.driver_pool = concurrent.futures.ThreadPoolExecutor(1)
        # Browser console entries accumulated by store_logs()
        self.logs = []

    def tearDown(self):
        super(SeleniumTest, self).tearDown()
        # Any console error captured during the test fails it here.
        if self.logs:
            raise ValueError("Error in browser console: %s"
                             % self.logs[0]['message'])
        self.driver.quit()

    @staticmethod
    def extract_path(url):
        """Strip the scheme/host/port prefix and return the path part."""
        m = re.match('^http://127.0.0.1:[0-9]+(.+)$', url)
        return m.group(1)

    @property
    def s_path(self):
        # Path of the URL the browser is currently showing.
        return self.extract_path(self.driver.current_url)

    async def s_get(self, url):
        """Navigate the browser to *url* (app-relative) off the event loop."""
        self.store_logs()
        url = self.get_url(url)
        await asyncio.get_event_loop().run_in_executor(
            self.driver_pool,
            lambda: self.driver.get(url),
        )
        await asyncio.sleep(0.2)  # give the page a moment to settle

    async def s_click(self, element):
        """Click *element* on the executor thread."""
        self.store_logs()
        await asyncio.get_event_loop().run_in_executor(
            self.driver_pool,
            lambda: element.click(),
        )
        await asyncio.sleep(0.2)

    async def s_click_button(self, text, tag='button', parent=None):
        """Click the single element of *tag* under *parent* whose text is *text*.

        The one-element unpacking raises ValueError if zero or more than
        one element matches.
        """
        await asyncio.sleep(0.2)
        if parent is None:
            parent = self.driver
        buttons = parent.find_elements_by_tag_name(tag)
        correct_button, = [
            button for button in buttons
            if button.text == text
        ]
        await self.s_click(correct_button)

    async def s_perform_action(self, action):
        """Run a selenium ActionChains sequence on the executor thread."""
        self.store_logs()
        await asyncio.get_event_loop().run_in_executor(
            self.driver_pool,
            lambda: action.perform(),
        )
        await asyncio.sleep(0.2)

    def _filter_logs(self, logs):
        # Drop the recurring 'Polling failed' noise so it doesn't fail
        # tearDown's console-error check.
        return [
            line
            for line in logs
            if 'Polling failed:' not in line['message']
        ]

    def store_logs(self):
        """Append newly seen browser console entries to self.logs.

        Only the Chrome driver exposes get_log(). If the fetched batch
        overlaps entries already stored, only the entries after the last
        stored one are appended, avoiding duplicates.
        """
        from selenium import webdriver
        if isinstance(self.driver, webdriver.Chrome):
            logs = self.driver.get_log('browser')
            logs = self._filter_logs(logs)
            if self.logs:
                # Scan backwards for the last entry we already have.
                for i in reversed(range(len(logs))):
                    if logs[i] == self.logs[-1]:
                        self.logs.extend(logs[i + 1:])
                        return
            self.logs.extend(logs)

    def get_logs(self):
        """Return the accumulated console log and reset it.

        Returns None when the driver does not support log retrieval
        (anything other than Chrome).
        """
        from selenium import webdriver
        if isinstance(self.driver, webdriver.Chrome):
            self.store_logs()
            logs = self.logs
            self.logs = []
            return logs
        else:
            return None
@unittest.skipUnless(
os.environ.get('TAGUETTE_TEST_WEBDRIVER', ''),
"TAGUETTE_TEST_WEBDRIVER not set",
)
class TestSeleniumMultiuser(SeleniumTest):
def get_app(self):
    """Build the Tornado application under test, in multi-user mode."""
    # _set_password is replaced with set_dumb_password — presumably to
    # skip expensive password hashing during tests; confirm against its
    # definition elsewhere in this module.
    with mock.patch.object(web.Application, '_set_password',
                           new=set_dumb_password):
        self.application = web.make_app(dict(
            main.DEFAULT_CONFIG,
            NAME="Test Taguette instance", PORT=7465,
            DATABASE=DATABASE_URI,
            REDIS_SERVER=REDIS,
            TOS_FILE=None,
            EMAIL='<EMAIL>',
            MAIL_SERVER={'host': 'localhost', 'port': 25},
            COOKIES_PROMPT=True,
            MULTIUSER=True,
            SECRET_KEY='<KEY>',
        ))
    return self.application
@gen_test(timeout=120)
async def test_login(self):
    """End-to-end account flow: register, create a project, log out,
    hit a 403 on the API, log back in, and check the password-change
    redirect."""
    # Fetch index, should have welcome message and register link
    await self.s_get('/')
    self.assertEqual(self.driver.title, 'Welcome | Taguette')
    self.assertEqual(
        [el.text for el in self.driver.find_elements_by_tag_name('h1')],
        ['Welcome'],
    )
    self.assertIn(
        'Register now',
        [el.text for el in self.driver.find_elements_by_tag_name('a')],
    )
    # Only admin so far
    db = self.application.DBSession()
    self.assertEqual([user.login
                      for user in db.query(database.User).all()],
                     ['admin'])
    # Fetch registration page, should hit cookies prompt
    await self.s_get('/register')
    self.assertEqual(self.s_path, '/cookies?next=%2Fregister')
    # Accept cookies
    await self.s_click_button('Accept cookies')
    self.assertEqual(self.s_path, '/register')
    # Register
    elem = self.driver.find_element_by_id('register-login')
    elem.send_keys('Tester')
    elem = self.driver.find_element_by_id('register-password1')
    elem.send_keys('<PASSWORD>')
    elem = self.driver.find_element_by_id('register-password2')
    elem.send_keys('<PASSWORD>')
    await self.s_click_button('Register')
    # User exists in database; note 'Tester' was stored as 'tester',
    # i.e. logins come back lowercased
    db = self.application.DBSession()
    self.assertEqual([user.login
                      for user in db.query(database.User).all()],
                     ['admin', 'tester'])
    # Fetch index, should have project list
    await self.s_get('/')
    self.assertEqual(
        [el.text for el in self.driver.find_elements_by_tag_name('h1')],
        ['Welcome tester'],
    )
    self.assertIn(
        'Here are your projects:',
        [el.text for el in self.driver.find_elements_by_tag_name('p')],
    )
    # Fetch project creation page
    await self.s_get('/project/new')
    # Create a project
    elem = self.driver.find_element_by_id('project-name')
    elem.send_keys('test project')
    await self.s_click_button('Create')
    self.assertEqual(self.s_path, '/project/1')
    # Log out
    await self.s_get('/logout')
    self.assertEqual(self.s_path, '/')
    # Hit error page: API access while logged out must return an error body
    await self.s_get('/api/project/1/highlights/')
    self.assertNotEqual(
        self.driver.find_element_by_tag_name('body').text.find(
            '"Not logged in"',
        ),
        -1,
    )
    # Console logs are only retrievable on Chrome; skip the check otherwise
    logs = self.get_logs()
    if logs is not None:
        self.assertEqual(len(logs), 1)
        self.assertTrue(re.search(
            r'Failed to load resource: the server responded with a status '
            + r'of 403 \(Forbidden\)',
            logs[0]['message'],
        ))
    # Login
    await self.s_get('/login?' + urlencode(dict(next='/project/1')))
    elem = self.driver.find_element_by_id('log-in-login')
    elem.send_keys('Tester')
    elem = self.driver.find_element_by_id('log-in-password')
    elem.send_keys('hack<PASSWORD>')
    await self.s_click_button('Log in')
    self.assertEqual(self.s_path, '/project/1')
    # Check redirect to account
    await self.s_get('/.well-known/change-password')
    self.assertEqual(self.s_path, '/account')
def get_highlight_add_tags(self):
    """Read the tag checkboxes of the highlight modal.

    Returns a dict mapping tag id (int) to whether its checkbox is
    currently checked, for every checkbox input in the
    'highlight-add-form' element.
    """
    # Fix: derive the slice length from the id prefix instead of the
    # magic constant 19, and stop shadowing the builtin `id`.
    prefix = 'highlight-add-tags-'
    tags = {}
    form = self.driver.find_element_by_id('highlight-add-form')
    for elem in form.find_elements_by_tag_name('input'):
        if elem.get_attribute('type') == 'checkbox':
            elem_id = elem.get_attribute('id')
            self.assertTrue(elem_id.startswith(prefix))
            tag_id = int(elem_id[len(prefix):])
            tags[tag_id] = elem.get_property('checked')
    return tags
@gen_test(timeout=120)
async def test_projects(self):
# project 1
# ---------
# create
# (tag 1 'interesting')
# tag 2 'people'
# doc 1
# change project metadata
# doc 2
# hl 1 doc=1 tags=[1]
# hl 2 doc=1 tags=[1, 2]
# hl 3 doc=2 tags=[2]
# tag 3 'interesting.places'
# hl 1 doc=1 tags=[3] (edit)
# highlights 'people*': [2, 3]
# highlights 'interesting.places*': [1]
# highlights 'interesting*': [1, 2]
# highlights: [1, 2, 3]
# export doc 1
# merge tag 2 -> 1
# highlights: [1, 2, 3]
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
# Accept cookies
await self.s_get('/cookies')
await self.s_click_button('Accept cookies')
self.assertEqual(self.s_path, '/')
# Log in
await self.s_get('/login')
elem = self.driver.find_element_by_id('log-in-login')
elem.send_keys('admin')
elem = self.driver.find_element_by_id('log-in-password')
elem.send_keys('<PASSWORD>')
await self.s_click_button('Log in')
self.assertEqual(self.s_path, '/')
# Create project 1
await self.s_get('/project/new')
elem = self.driver.find_element_by_id('project-name')
elem.send_keys('one')
elem = self.driver.find_element_by_id('project-description')
elem.send_keys("Remi's project")
await self.s_click_button('Create')
self.assertEqual(self.s_path, '/project/1')
# Check project page
expected = (
' var user_login = "admin";\n'
' var project_id = 1;\n'
' var last_event = -1;\n'
' var documents = {};\n'
' var highlights = {};\n'
' var tags = %s;\n'
' var members = {"admin": {"privileges": "ADMIN"}};\n'
' var version = \'%s\';\n'
'' % (
json.dumps(
{
"1": {
"count": 0, "id": 1, "path": "interesting",
"description": "Further review required",
},
},
sort_keys=True,
),
exact_version(),
)
)
self.assertTrue(any(
expected.strip() == script.get_property('textContent').strip()
for script in self.driver.find_elements_by_tag_name('script')
))
# Create tag 2 in project 1
await self.s_click(self.driver.find_element_by_id('tags-tab'))
await self.s_click_button('Create a tag', tag='a')
elem = self.driver.find_element_by_id('tag-add-path')
elem.send_keys('people')
elem = self.driver.find_element_by_id('tag-add-description')
elem.send_keys('People of interest')
await self.s_click_button('Save & Close')
# Check tags
tag_links = (
self.driver.find_element_by_id('tags-list')
.find_elements_by_class_name('tag-name')
)
self.assertEqual(
[link.text for link in tag_links],
['interesting', 'people'],
)
# Create document 1 in project 1
await self.s_click(self.driver.find_element_by_id('documents-tab'))
await self.s_click_button('Add a document', tag='a')
elem = self.driver.find_element_by_id('document-add-name')
elem.send_keys('otherdoc')
elem = self.driver.find_element_by_id('document-add-file')
with tempfile.NamedTemporaryFile('wb', suffix='.html') as tmp:
tmp.write(b'different content')
tmp.flush()
elem.send_keys(tmp.name)
await self.s_click_button('Import')
db = self.application.DBSession()
doc = db.query(database.Document).get(1)
self.assertEqual(doc.name, 'otherdoc')
self.assertEqual(doc.description, '')
self.assertEqual(doc.text_direction,
database.TextDirection.LEFT_TO_RIGHT)
self.assertTrue(doc.filename, os.path.basename(tmp.name))
# Change project 2 metadata
self.assertEqual(
self.driver.find_element_by_class_name('project-name').text,
'one',
)
await self.s_click(self.driver.find_element_by_id('project-tab'))
elem = self.driver.find_element_by_id('project-name')
await self.s_perform_action(
ActionChains(self.driver)
.click(elem)
.key_down(Keys.CONTROL).send_keys('a').key_up(Keys.CONTROL)
.send_keys('new project')
)
elem = self.driver.find_element_by_id('project-description')
elem.click()
await asyncio.sleep(0.5)
await self.s_perform_action(
ActionChains(self.driver)
.key_down(Keys.CONTROL).send_keys('a').key_up(Keys.CONTROL)
)
await asyncio.sleep(0.5)
elem.send_keys('<PASSWORD>')
await self.s_click(self.driver.find_element_by_id('document-contents'))
await asyncio.sleep(1) # Wait for XHR
self.assertEqual(
self.driver.find_element_by_class_name('project-name').text,
'new project',
)
db = self.application.DBSession()
proj = db.query(database.Project).get(1)
self.assertEqual(proj.name, 'new project')
self.assertEqual(proj.description, 'Meaningful')
# Create document 2 in project 1
await self.s_click(self.driver.find_element_by_id('documents-tab'))
await self.s_click_button('Add a document', tag='a')
elem = self.driver.find_element_by_id('document-add-name')
elem.send_keys('third')
elem = self.driver.find_element_by_id('document-add-description')
elem.send_keys('Last one')
await self.s_click_button('Right to left', tag='label')
elem = self.driver.find_element_by_id('document-add-file')
with tempfile.NamedTemporaryFile('wb', suffix='.html') as tmp:
tmp.write(b'<strong>Opinions</strong> and <em>facts</em>!')
tmp.flush()
elem.send_keys(tmp.name)
await self.s_click_button('Import')
db = self.application.DBSession()
doc = db.query(database.Document).get(2)
self.assertEqual(doc.name, 'third')
self.assertEqual(doc.description, 'Last one')
self.assertEqual(doc.text_direction,
database.TextDirection.RIGHT_TO_LEFT)
self.assertTrue(doc.filename, os.path.basename(tmp.name))
# Create highlight 1 in document 1
await self.s_click(
self.driver.find_element_by_id('document-link-1')
.find_element_by_class_name('document-link-a')
)
self.driver.execute_script('restoreSelection([0, 4]);')
await self.s_click_button('new highlight\n(shortcut: n)', tag='a')
self.assertEqual(
self.get_highlight_add_tags(),
{1: False, 2: False},
)
await self.s_click(
self.driver.find_element_by_id('highlight-add-tags-1'),
)
await self.s_click_button('Save & Close')
# Create highlight 2 in document 1
self.driver.execute_script('restoreSelection([13, 17]);')
await self.s_click_button('new highlight\n(shortcut: n)', tag='a')
self.assertEqual(
self.get_highlight_add_tags(),
{1: False, 2: False},
)
await self.s_click(
self.driver.find_element_by_id('highlight-add-tags-1'),
)
await self.s_click(
self.driver.find_element_by_id('highlight-add-tags-2'),
)
await self.s_click_button('Save & Close')
# Edit highlight 1 in document 1
hl, = self.driver.find_elements_by_class_name('highlight-1')
await self.s_click(hl)
self.assertEqual(
self.get_highlight_add_tags(),
{1: True, 2: False},
)
# Create tag 3 in project 1
await self.s_click_button('Create a tag', tag='a')
elem = self.driver.find_element_by_id('tag-add-path')
elem.send_keys('interesting.places')
await self.s_click_button(
'Save & Close',
parent=self.driver.find_element_by_id('tag-add-form'),
)
# Finish editing highlight 1 in document 1
self.assertEqual(
self.get_highlight_add_tags(),
# TODO: 3 should be selected, issue #135
{1: True, 2: False, 3: False},
)
await self.s_click(
self.driver.find_element_by_id('highlight-add-tags-1')
)
await self.s_click(
self.driver.find_element_by_id('highlight-add-tags-3')
)
self.assertEqual(
self.get_highlight_add_tags(),
{1: False, 2: False, 3: True},
)
await self.s_click_button('Save & Close')
# Check tags
await self.s_click(self.driver.find_element_by_id('tags-tab'))
tag_links = (
self.driver.find_element_by_id('tags-list')
.find_elements_by_class_name('tag-name')
)
self.assertEqual(
[link.text for link in tag_links],
['interesting', 'interesting.places', 'people'],
)
# Create highlight 3 in document 2
await self.s_click(self.driver.find_element_by_id('documents-tab'))
await self.s_click(
self.driver.find_element_by_id('document-link-2')
.find_element_by_class_name('document-link-a')
)
self.driver.execute_script('restoreSelection([0, 7]);')
await self.s_click_button('new highlight\n(shortcut: n)', tag='a')
self.assertEqual(
self.get_highlight_add_tags(),
{1: False, 2: False, 3: False},
)
await self.s_click(
self.driver.find_element_by_id('highlight-add-tags-2'),
)
await self.s_click_button('Save & Close')
# List highlights in project 1 under 'people'
await self.s_click(self.driver.find_element_by_id('tags-tab'))
await self.s_click(self.driver.find_element_by_id('tag-link-2'))
self.assertEqual(self.s_path, '/project/1/highlights/people')
self.assertEqual(
self.driver.find_element_by_id('document-contents').text,
'tent\notherdoc interesting people\nOpinion\nthird people',
)
# List highlights in project 1 under 'interesting.places'
await self.s_get('/project/1/highlights/interesting.places')
await asyncio.sleep(1) # Wait for XHR
self.assertEqual(
self.driver.find_element_by_id('document-contents').text,
'diff\notherdoc interesting.places',
)
# List highlights in project 1 under 'interesting'
await self.s_get('/project/1/highlights/interesting')
await asyncio.sleep(1) # Wait for XHR
self.assertEqual(
self.driver.find_element_by_id('document-contents').text,
('diff\notherdoc interesting.places\n'
'tent\notherdoc interesting people'),
)
# List all highlights in project 1
await self.s_click(self.driver.find_element_by_id('tags-tab'))
await self.s_click_button('See all highlights', tag='a')
await asyncio.sleep(1) # Wait for XHR
self.assertEqual(self.s_path, '/project/1/highlights/')
self.assertEqual(
self.driver.find_element_by_id('document-contents').text,
('diff\notherdoc interesting.places\n'
'tent\notherdoc interesting people\n'
'Opinion\nthird people'),
)
# Check export options for document 1
await self.s_get('/project/1/document/1')
await asyncio.sleep(1) # Wait for XHR
await self.s_click_button('Export this | |
'</td>' + \
'<td>' + r13c17 + '</td>' + \
'<td>' + r13c18 + '</td>' + \
'<td>' + r13c19 + '</td>' + \
'<td>' + r13c20 + '</td>' + \
'<td>' + r13c21 + '</td>' + \
'<td>' + r13c22 + '</td>' + \
'<td>' + r13c23 + '</td>' + \
'<td>' + r13c24 + '</td>' + \
'<td>' + r13c25 + '</td>' + \
'<td>' + r13c26 + '</td>' + \
'<td>' + r13c27 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Other</td>' + \
'<td>' + r14c1 + '</td>' + \
'<td>' + r14c2 + '</td>' + \
'<td>' + r14c3 + '</td>' + \
'<td>' + r14c4 + '</td>' + \
'<td>' + r14c5 + '</td>' + \
'<td>' + r14c6 + '</td>' + \
'<td>' + r14c7 + '</td>' + \
'<td>' + r14c8 + '</td>' + \
'<td>' + r14c9 + '</td>' + \
'<td>' + r14c10 + '</td>' + \
'<td>' + r14c11 + '</td>' + \
'<td>' + r14c12 + '</td>' + \
'<td>' + r14c13 + '</td>' + \
'<td>' + r14c14 + '</td>' + \
'<td>' + r14c15 + '</td>' + \
'<td>' + r14c16 + '</td>' + \
'<td>' + r14c17 + '</td>' + \
'<td>' + r14c18 + '</td>' + \
'<td>' + r14c19 + '</td>' + \
'<td>' + r14c20 + '</td>' + \
'<td>' + r14c21 + '</td>' + \
'<td>' + r14c22 + '</td>' + \
'<td>' + r14c23 + '</td>' + \
'<td>' + r14c24 + '</td>' + \
'<td>' + r14c25 + '</td>' + \
'<td>' + r14c26 + '</td>' + \
'<td>' + r14c27 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Total budget</td>' + \
'<td>' + r15c1 + '</td>' + \
'<td>' + r15c2 + '</td>' + \
'<td>' + r15c3 + '</td>' + \
'<td>' + r15c4 + '</td>' + \
'<td>' + r15c5 + '</td>' + \
'<td>' + r15c6 + '</td>' + \
'<td>' + r15c7 + '</td>' + \
'<td>' + r15c8 + '</td>' + \
'<td>' + r15c9 + '</td>' + \
'<td>' + r15c10 + '</td>' + \
'<td>' + r15c11 + '</td>' + \
'<td>' + r15c12 + '</td>' + \
'<td>' + r15c13 + '</td>' + \
'<td>' + r15c14 + '</td>' + \
'<td>' + r15c15 + '</td>' + \
'<td>' + r15c16 + '</td>' + \
'<td>' + r15c17 + '</td>' + \
'<td>' + r15c18 + '</td>' + \
'<td>' + r15c19 + '</td>' + \
'<td>' + r15c20 + '</td>' + \
'<td>' + r15c21 + '</td>' + \
'<td>' + r15c22 + '</td>' + \
'<td>' + r15c23 + '</td>' + \
'<td>' + r15c24 + '</td>' + \
'<td>' + r15c25 + '</td>' + \
'<td>' + r15c26 + '</td>' + \
'<td>' + r15c27 + '</td>' + \
'</tr>' + \
'</tbody>' + \
'</table>'
body += '<br>' + \
'</div>' + \
'</div>' + \
'</div>' + \
'<br>' + \
'<script src="https://code.jquery.com/jquery-3.5.1.slim.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'<script src="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/js/bootstrap.bundle.min.js"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous"></script>' + \
'</body>' + \
'</html>'
options = {
'page-size': 'A4',
'orientation': 'landscape',
'header-center': 'Month operating budget variance',
'footer-left': 'Company : ' + company_name + ' [' + establishment_number + ']',
'footer-right': '[page] sur [topage]',
'encoding': 'UTF-8',
'no-outline': None,
'custom-header': [
('Accept-Encoding', 'pdf')
]
}
# path_wkthmltopdf = 'static/reporting/static/wkhtmltopdf.exe'
# config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf)
# output = pdfkit.from_string(body, output_path=False, configuration=config, options=options)
output = pdfkit.from_string(body, output_path=False, options=options)
response = HttpResponse(output, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="month_operating_budget_variance.pdf"'
return response
def static_budget_variance(request):
    """Render the blank 'static budget variance' input form page."""
    return render(request, 'reporting/static_budget_variance.html')
def generate_html_to_pdf_static_budget_variance(request):
company_name = request.POST.get('company_name').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
share_capital = request.POST.get('share_capital').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
head_office_address = request.POST.get('head_office_address').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
establishment_number = request.POST.get('establishment_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
register_of_trade_and_companies = request.POST.get('register_of_trade_and_companies').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
main_activities = request.POST.get('main_activities').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
activity_number = request.POST.get('activity_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
intra_community_vat_number = request.POST.get('intra_community_vat_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
president = request.POST.get('president').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
registration_date = request.POST.get('registration_date').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c1 = request.POST.get('r1c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c2 = request.POST.get('r1c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c3 = request.POST.get('r1c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c4 = request.POST.get('r1c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c5 = request.POST.get('r1c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c6 = request.POST.get('r1c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c1 = request.POST.get('r2c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c2 = request.POST.get('r2c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c3 = request.POST.get('r2c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c4 = request.POST.get('r2c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c5 = request.POST.get('r2c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c6 = request.POST.get('r2c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c1 = request.POST.get('r3c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c2 = request.POST.get('r3c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c3 = request.POST.get('r3c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c4 = request.POST.get('r3c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c5 = request.POST.get('r3c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c6 = request.POST.get('r3c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c1 = request.POST.get('r4c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c2 = request.POST.get('r4c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c3 = request.POST.get('r4c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c4 = request.POST.get('r4c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c5 = request.POST.get('r4c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c6 = request.POST.get('r4c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c1 = request.POST.get('r5c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c2 = request.POST.get('r5c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c3 = request.POST.get('r5c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c4 = request.POST.get('r5c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c5 = request.POST.get('r5c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c6 = request.POST.get('r5c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c1 = request.POST.get('r6c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c2 = request.POST.get('r6c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c3 = request.POST.get('r6c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c4 = request.POST.get('r6c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c5 = request.POST.get('r6c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c6 = request.POST.get('r6c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c1 = request.POST.get('r7c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c2 = request.POST.get('r7c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c3 = request.POST.get('r7c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c4 = request.POST.get('r7c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c5 = request.POST.get('r7c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c6 = request.POST.get('r7c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c1 = request.POST.get('r8c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c2 = request.POST.get('r8c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c3 = request.POST.get('r8c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c4 = request.POST.get('r8c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c5 = request.POST.get('r8c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c6 = request.POST.get('r8c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c1 = request.POST.get('r9c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c2 = request.POST.get('r9c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c3 = request.POST.get('r9c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c4 = request.POST.get('r9c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c5 = request.POST.get('r9c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c6 = request.POST.get('r9c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
body = '<!doctype html>' + \
'<html lang="en">' + \
'<head>' + \
'<meta charset="utf-8">' + \
'<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">' + \
'<link rel="stylesheet"' + \
'href="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/css/bootstrap.min.css"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous">' + \
'<title>Static budget variance</title>' + \
'</head>' + \
'<body>' + \
'<div class="container">' + \
'<div class="card text-center">' + \
'<div class="card-header text-center">Static budget variance</div>' + \
'<div class="card-body">'
body += '<h6>Comapny name : ' + company_name + '</h6>' + \
'<h6>Share capital : ' + share_capital + '</h6>' + \
'<h6>Head office address : ' + head_office_address + '</h6>' + \
'<h6>Establishment number : ' + establishment_number + '</h6>' + \
'<h6>Register of Trade and Companies : ' + register_of_trade_and_companies + '</h6>' + \
'<h6>Main activities : ' + main_activities + '</h6>' + \
'<h6>Activity number : ' + activity_number + '</h6>' + \
'<h6>Intra-community VAT number : ' + intra_community_vat_number + '</h6>' + \
'<h6>President : ' + president + '</h6>' + \
'<h6>Registration date : ' + registration_date + '</h6>' + \
'<br>'
body += '<br>'
body += '<table class="table table-striped table-bordered">' + \
'<thead>' + \
'<tr>' + \
'<th scope="col">Details</th>' + \
'<th scope="col">Last year budget</th>' + \
'<th scope="col">Increase amount</th>' + \
'<th scope="col">Current budget</th>' + \
'<th scope="col">Actual cost</th>' + \
'<th scope="col">Variance</th>' + \
'<th scope="col">Rate</th>' + \
'</tr>' + \
'</thead>' + \
'<tbody>' + \
'<tr>' + \
'<td>Salaries</td>' + \
'<td>' + r1c1 + '</td>' + \
'<td>' + r1c2 + '</td>' + \
'<td>' + r1c3 + '</td>' + \
'<td>' + r1c4 + '</td>' + \
'<td>' + r1c5 + '</td>' + \
'<td>' + r1c6 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Rent</td>' + \
'<td>' + r2c1 + '</td>' + \
'<td>' + r2c2 + '</td>' + \
'<td>' + r2c3 + '</td>' + \
'<td>' + r2c4 + '</td>' + \
'<td>' + r2c5 + '</td>' + \
'<td>' + r2c6 + '</td>' + \
'</tr>' + \
'<tr>' | |
RfmtFormatter(CodeFormatter):
"""Formatter for:
rfmt: A code formatter for R.
(https://github.com/google/rfmt)
"""
shortname = 'rfmt'
configfilename = '.rfmtrc'
language_exts = [['R', REXTS]]
def __init__(self, exe, cache=None):
    # No rfmt-specific state; delegate straight to the base formatter.
    super(RfmtFormatter, self).__init__(exe, cache=cache)
def register_options(self):
    # type: () -> None
    """Declare the rfmt tuning knobs and the candidate values to try."""
    # (name, type, candidate configurations) for every supported option.
    optspecs = [
        ('margin0', 'int', [0]),
        ('margin1', 'int', [80]),
        ('cost0', 'float', [0.01, 0.05, 0.1]),
        ('cost1', 'float', [10, 100, 300]),
        ('costb', 'float', [1, 2, 3, 4]),
        ('indent', 'int', [1, 2, 3, 4]),
        ('force_brace', 'bool', [True, False]),
        ('space_arg_eq', 'bool', [True, False]),
        ('adj_comment', 'float', [0.01, 0.5, 100]),
        ('adj_flow', 'float', [0.001, 0.3, 1000.0]),
        ('adj_call', 'float', [0.001, 0.01, 0.5, 1000]),
        ('adj_arg', 'float', [0.01, 1, 5, 10]),
        ('cpack', 'float', [0.0001, 0.001, 0.01, 25]),
    ]
    self.styledefinition = styledef_make(
        [option_make(name, typ, configs) for name, typ, configs in optspecs])
def styletext(self, style):
    # type: (Style) -> str
    """Serialize *style* as one 'name=value' line per option,
    with a trailing newline."""
    lines = ['%s=%s' % (name, textrepr(value))
             for name, value in self.sorted_style(style).items()]
    return '\n'.join(lines) + '\n'
def inlinestyletext(self, style):
    # type: (Style) -> str
    # rfmt has no distinct inline representation; reuse the config text.
    return self.styletext(style)
def cmdargs_for_style(self, formatstyle, filename=None):
    # type: (Style, Optional[str]) -> List[str]
    """Translate *formatstyle* into rfmt command-line arguments."""
    assert isinstance(formatstyle, Style)
    # --quiet suppresses rfmt's informational output.
    cmdargs = ['--quiet', 'true']  # type: List[str]
    for optname, value in self.sorted_style(formatstyle).items():
        cmdargs.extend(self.cmdlineopts(optname, value))
    return cmdargs
def cmdlineopts(self, optionname, value):
    # type: (str, OptionValue) -> List[str]
    # (fixed type comment: it previously listed an extra str parameter)
    # One option becomes a '--name value' argument pair.
    return ['--' + optionname, textrepr(value)]
def should_report_error(self, job, jobres):
    # type: (ExeCall, ExeResult) -> bool
    """A run is worth reporting when it raised an error or exited non-zero."""
    return jobres.error is not None or jobres.returncode != 0
def valid_job_result(self, job, jobres):
    # type: (ExeCall, ExeResult) -> bool
    """A result is usable only if the run neither errored nor exited non-zero."""
    return jobres.error is None and jobres.returncode == 0
def variants_for(self, option):
    # type: (Option) -> List[Style]
    """Return one single-option style per candidate value of *option*,
    or an empty list when the option declares no candidates."""
    configs = option_configs(option)
    if not configs:
        return []
    return stylevariants(option_name(option), configs)
def reformat(self, sourcefile, destfile, configfile):
    # type: (str, str, str) -> None
    """Format *sourcefile* using the style read from *configfile* and
    write the result to *destfile*."""
    formatstyle = style_make()
    # The config file holds 'name=value' lines; '#' lines are comments.
    with open(configfile) as fp:
        for rawline in fp:
            entry = rawline.rstrip()
            if entry.startswith('#'):
                continue
            parts = entry.split('=')
            # Lines without exactly one '=' are silently skipped.
            if len(parts) == 2:
                optionname, value = parts
                set_option(formatstyle, optionname, value)
    sourcedata = readbinary(sourcefile)
    data = self.formatcode(formatstyle, sourcedata, filename=sourcefile)
    # A failed format run yields None; write an empty file in that case.
    if data is None:
        data = b''
    writebinary(destfile, data)
# ----------------------------------------------------------------------
class RustfmtFormatter(CodeFormatter):
    """Formatter for:
    rustfmt: A tool for formatting Rust code according to style guidelines.
    (https://github.com/rust-lang-nursery/rustfmt)
    """

    shortname = 'rustfmt'
    configfilename = 'rustfmt.toml'
    language_exts = [['Rust', RUSTEXTS]]

    def __init__(self, exe, cache=None):
        super(RustfmtFormatter, self).__init__(exe, cache=cache)

    def register_options(self):
        # type: () -> None
        """Parse options from text like this:
        Configuration Options:
        verbose <boolean> Default: false
        Use verbose output

        skip_children <boolean> Default: false
        Don't reformat out of line modules

        max_width <unsigned integer> Default: 100
        Maximum width of each line
        """
        exeresult = run_executable(self.exe, ['--config-help'], cache=self.cache)
        options = []
        text = unistr(exeresult.stdout)
        # Each help entry looks like "name <type> Default: value".
        for m in re.finditer(r'^\s*([a-z_]+)\s+(.*) Default: (\w+)', text, re.MULTILINE):
            optionname, typedesc, default = m.groups()
            configs = []  # type: ignore
            # Diagnostics-only options do not affect formatting output.
            if optionname in ['verbose', 'report_todo', 'report_fixme']:
                continue
            if typedesc == '<boolean>':
                optiontype = 'bool'
                configs = [True, False]
            elif typedesc[:1] + typedesc[-1:] == '[]':
                # "[a|b|c]" describes an enumeration of allowed values.
                optiontype = 'enum'
                configs = typedesc[1:-1].split('|')
            elif typedesc in ['<unsigned integer>', '<signed integer>']:
                optiontype = 'int'
                if optionname == 'ideal_width':
                    # Let's leave ideal_width (default 80) and only tweak max_width.
                    continue
                # Hand-picked search ranges for the numeric options.
                if optionname == 'max_width':
                    configs = list(inclusiverange(80, 100))
                elif optionname == 'tab_spaces':
                    configs = list(inclusiverange(1, 8))
                elif optionname == 'fn_call_width':
                    configs = list(inclusiverange(60, 90))
                elif optionname == 'struct_lit_width':
                    configs = list(inclusiverange(8, 20))
                elif optionname == 'closure_block_indent_threshold':
                    configs = [-1] + list(inclusiverange(1, 10))
            if not configs:
                continue
            options.append(option_make(optionname, optiontype, configs))
        self.styledefinition = styledef_make(options)

    def styletext(self, style):
        # type: (Style) -> str
        """Render *style* as rustfmt.toml text, one 'name = value' per line."""
        fragments = []
        for optionname, value in self.sorted_style(style).items():
            fragments.append('%s = %s' % (optionname, textrepr(value)))
        return '\n'.join(fragments) + '\n'

    def inlinestyletext(self, style):
        # type: (Style) -> str
        return self.styletext(style)

    def cmdargs_for_style(self, formatstyle, filename=None):
        # type: (Style, Optional[str]) -> List[str]
        """Materialize the style as a temp rustfmt.toml and point rustfmt at it.

        rustfmt has no inline style switch, so the config is written into a
        content-addressed temp directory (hash of the config bytes) and
        reused across calls with the same style.
        """
        assert isinstance(formatstyle, Style)
        configdata = bytestr(self.styletext(formatstyle))
        sha = shahex(configdata)
        cfg = os.path.join(tempfile.gettempdir(),
                           'whatstyle_rustfmt_%s/%s' % (sha, self.configfilename))
        try:
            dirpath = os.path.dirname(cfg)
            os.makedirs(dirpath)
            self.add_tempfile(dirpath)
        except OSError as exc:
            # The directory may already exist from a previous call.
            if exc.errno != errno.EEXIST:
                raise
        if not self.tempfile_exists(cfg):
            writebinary(cfg, configdata)
            self.add_tempfile(cfg)
        cmdargs = ['--config-path', cfg]
        return cmdargs

    def should_report_error(self, job, jobres):
        # type: (ExeCall, ExeResult) -> bool
        if jobres.error is not None:
            return True
        return jobres.returncode != 0

    def valid_job_result(self, job, jobres):
        # type: (ExeCall, ExeResult) -> bool
        if jobres.error is not None:
            return False
        if jobres.returncode != 0:
            return False
        return True

    def variants_for(self, option):
        # type: (Option) -> List[Style]
        stylename = option_name(option)
        configs = option_configs(option)

        def kvpairs(vs):
            # type: (Iterable[OptionValue]) -> List[Style]
            return stylevariants(stylename, vs)

        if configs:
            return kvpairs(configs)
        return []

    def reformat(self, sourcefile, destfile, configfile):
        # type: (str, str, str) -> None
        """Reformat *sourcefile* into *destfile* using the style in *configfile*.

        Config lines are 'name = value' pairs; '#' lines are comments.
        """
        formatstyle = style_make()
        with open(configfile) as fp:
            for line in fp.readlines():
                line = line.rstrip()
                if line.startswith('#'):
                    continue
                parts = re.split(r'\s+=\s+', line)
                if len(parts) == 2:
                    optionname, value = parts
                    set_option(formatstyle, optionname, value)
        sourcedata = readbinary(sourcefile)
        data = self.formatcode(formatstyle, sourcedata, filename=sourcefile)
        if data is None:
            data = b''
        writebinary(destfile, data)
# ----------------------------------------------------------------------
# Functions for the in-memory cache
FILECACHE = {} # type: Dict[str, bytes]
NUMLINESCACHE = {} # type: Dict[str, int]
def readbinary(filename):
    # type: (str) -> bytes
    """Return the complete binary contents of *filename*."""
    with open(filename, 'rb') as stream:
        contents = stream.read()
    return contents
def writebinary(filename, data):
    # type: (str, bytes) -> None
    """Write *data* to *filename*, replacing any existing contents."""
    with open(filename, 'wb') as stream:
        stream.write(data)
def get_cache_value(key, func, cachevar):
    # type: (str, Callable[[str], Any], Dict[str, Any]) -> Any
    """Return cachevar[key], computing and storing func(key) on a miss.

    A stored value of None counts as a miss and is recomputed.
    """
    cached = cachevar.get(key)
    if cached is not None:
        return cached
    value = func(key)
    cachevar[key] = value
    return value
def count_content_lines(data):
    # type: (bytes) -> int
    """Return the number of lines in *data* (a trailing newline adds no line)."""
    # bytes.splitlines() already returns a list; the original wrapped it in
    # len(list(...)), creating a redundant copy.
    return len(data.splitlines())
def count_lines(filename):
    # type: (str) -> int
    """Return the number of lines in *filename*, reading via the file cache."""
    return count_content_lines(get_cached_file(filename))
def get_cached_file(filename):
    # type: (str) -> bytes
    """Return the file's bytes, memoized in FILECACHE keyed by filename."""
    return get_cache_value(filename, readbinary, FILECACHE)
def get_num_lines(filename):
    # type: (str) -> int
    """Return the file's line count, memoized in NUMLINESCACHE keyed by filename."""
    return get_cache_value(filename, count_lines, NUMLINESCACHE)
def filesha(filename):
    # type: (str) -> bytes
    """Return the hash digest of the (cached) file contents."""
    return shadigest(get_cached_file(filename))
def filemetadata(filename):
    # type: (str) -> Optional[FileMeta]
    """Return identifying metadata for the executable *filename*.

    The name is resolved via which(); returns None when it cannot be found.
    The version string comes from running '<exe> --version', except for the
    current Python interpreter where sys.version is used instead of
    re-invoking ourselves.
    """
    p_filename = which(filename)
    if p_filename is None:
        return None
    filename = p_filename
    s = os.stat(filename)
    if filename != sys.executable:
        result = run_executable(filename, ['--version'])
        versionstring = result.stdout
    else:
        # filename is the Python interpreter itself
        versionstring = bytestr(sys.version)
    return FileMeta(filename, s.st_size, s.st_mtime, filesha(filename), versionstring)
# ----------------------------------------------------------------------
class KeyValueStore(object):
    """Abstract interface of a minimal key-value store."""

    def get(self, key):
        """Return the value stored for *key*, or None if absent."""
        raise NotImplementedError

    def mget(self, keys):
        """Return a list of values (or None) for *keys*, in order."""
        raise NotImplementedError

    def put(self, key, value):
        """Store *value* under *key*, replacing any previous value."""
        raise NotImplementedError

    def delete(self, key):
        """Remove *key* from the store."""
        raise NotImplementedError

    def close(self):
        """Release any underlying resources."""
        raise NotImplementedError
class SqliteKeyValueStore(KeyValueStore):
"""A key-value store based on sqlite.
"""
_sqcreate = """CREATE TABLE IF NOT EXISTS {tablename} (
{keycolumn} TEXT PRIMARY KEY,
{valuecolumn} BLOB%s)"""
_sqtimestamp = """,
{timestampcolumn} DATETIME DEFAULT CURRENT_TIMESTAMP"""
_sqget = 'SELECT {valuecolumn} FROM {tablename} WHERE {keycolumn} = ?'
_sqmget = 'SELECT {keycolumn}, {valuecolumn} FROM {tablename} WHERE {keycolumn} IN (%s)'
_sqput = 'REPLACE INTO {tablename} ({keycolumn}, {valuecolumn})' \
' VALUES (?, ?)'
_sqdelete = 'DELETE FROM {tablename} WHERE {keycolumn} = ?'
defaultprefix = 'kv'
tabledescription = {
'tablename': 'store',
'keycolumn': 'key',
'valuecolumn': 'value',
'timestampcolumn': 'timestamp'
}
def __init__(self, database, tabledesc=None, timestamp=False):
    # type: (str, Optional[Dict[str, str]], bool) -> None
    """Describe a store backed by the sqlite file *database*.

    The SQL templates are formatted once with the (possibly prefixed)
    table/column names; *timestamp* adds an auto-filled timestamp column.
    The actual connection is created lazily by the `conn` property.
    """
    self.database = database
    if tabledesc is None:
        tabledesc = self.prefixdesc()
    self.tabledesc = tabledesc
    timestampsql = self._sqtimestamp if timestamp else ''
    sqcreate = self._sqcreate % timestampsql
    self.kv_create = sqcreate.format(**tabledesc)
    self.kv_get = self._sqget.format(**tabledesc)
    self.kv_mget = self._sqmget.format(**tabledesc)
    self.kv_put = self._sqput.format(**tabledesc)
    self.kv_delete = self._sqdelete.format(**tabledesc)
    self._connection = None  # type: Optional[sqlite3.Connection]
    # Classic sqlite builds allow at most 999 bound parameters per
    # statement; mget batches its IN (...) queries accordingly.
    self.sqlite_limit_variable_number = 999
    self.support_mget = True
@classmethod
def prefixdesc(cls, desc=None, prefix=None):
    # type: (Optional[Dict[str, str]], Optional[str]) -> Dict[str, str]
    """Return a copy of *desc* with every value prefixed by *prefix*.

    Without *desc* the class table description is used and *prefix*
    defaults to the class default prefix.
    """
    if desc is None:
        if prefix is None:
            prefix = cls.defaultprefix
        desc = cls.tabledescription
    effective = '' if prefix is None else prefix
    return {key: effective + value for key, value in desc.items()}
@property
def conn(self):
    # type: () -> sqlite3.Connection
    """Lazily created sqlite connection; creates the table on first use."""
    if self._connection is not None:
        return self._connection
    connection = sqlite3.Connection(self.database)
    with connection:
        # Runs inside a transaction; committed on clean exit.
        connection.execute(self.kv_create)
    self._connection = connection
    return connection
def get(self, key):
    # type: (str) -> Optional[bytes]
    """Return the stored value for *key*, or None when absent."""
    with self.conn as conn:
        row = conn.execute(self.kv_get, (key, )).fetchone()
        if row is not None:
            return row[0]
    return None

# Private alias so internal callers keep the base implementation even if a
# subclass overrides get().
__get = get
def mget(self, keys):
# type: (List[str]) -> List[Optional[bytes]]
rows = []
if self.support_mget:
try:
with self.conn as conn:
for somekeys in grouper(self.sqlite_limit_variable_number, keys):
keylist = list(somekeys)
questionmarks = ','.join(['?'] * len(keylist))
sql = self.kv_mget % questionmarks
for row in conn.execute(sql, keylist):
rows.append(row)
resultdict = dict(rows) # type: Dict[str, bytes]
rget = resultdict.get
return [rget(k) for k in keys]
except sqlite3.OperationalError:
self.support_mget = False
return | |
yt - np.cos(Angles[i]) * dr
i += 1
maskx = np.logical_and(x > np.min(xb)-5, x<np.max(xb)+5)
masky = np.logical_and(y > np.min(yb)-5, y<np.max(yb)+5)
mask = np.logical_and(maskx, masky)
ptop = griddata(np.vstack((x[mask], y[mask])).transpose(),
p[mask], top)
pbot = griddata(np.vstack((x[mask], y[mask])).transpose(),
p[mask], bot)
veltop = griddata(np.vstack((x[mask], y[mask])).transpose(),
np.sqrt(u**2 + v**2)[mask], top)
velbot = griddata(np.vstack((x[mask], y[mask])).transpose(),
np.sqrt(u**2 + v**2)[mask], bot)
Lift = 2 * (np.sum(np.cos(Angles)*(pbot - ptop)* Sb)
+ nu * np.sum(np.sin(Angles)*(veltop+velbot)* Sb)) / chord
Drag = 2 * (-np.sum(np.sin(Angles)*(pbot - ptop)* Sb)
+ nu * np.sum(np.cos(Angles)*(veltop+velbot)* Sb))/ chord
return Lift, Drag
# %% Read VTR
def read_vtr(file):
    """
    Read a VTK rectilinear-grid file (.vtr) and return fields and coordinates.

    Parameters
    ----------
    file : PATH
        Path to file to be read.

    Returns
    -------
    u : NxM Array
        u-velocity at each grid point.
    v : NxM Array
        v-velocity at each grid point.
    Pressure : NxM Array
        Pressure at each grid point.
    x : Mx1 Array
        x-coordinates of gridpoints.
    y : Nx1 Array
        y-coordinates of gridpoints.
    """
    reader = vtk.vtkXMLRectilinearGridReader()
    reader.SetFileName(file)
    reader.Update()
    data = reader.GetOutput()
    pointData = data.GetPointData()
    # GetDimensions() is (nx, ny, nz); reverse so numpy reshapes in (z, y, x) order.
    sh = data.GetDimensions()[::-1]
    ndims = len(sh)
    # Get Vector Field
    v = np.array(pointData.GetVectors("Velocity")).reshape(sh + (ndims, ))
    Velocity = []
    for d in range(ndims):
        a = v[..., d]
        Velocity.append(a)
    # Get scalar field
    Pressure = np.array(pointData.GetScalars('Pressure')).reshape(sh + (1, )).squeeze()
    u = Velocity[0].squeeze()
    v = Velocity[1].squeeze()
    # Obtain Grid
    x = np.array(data.GetXCoordinates())
    y = np.array(data.GetYCoordinates())
    return u, v, Pressure, x, y
# %% Read Data
def Read_Data(AoAs, start=50, timesteps=100, step=1, verbose=False,
              getPressure=False):
    """
    Read simulation snapshots for several angles of attack, time-average
    them, and interpolate onto a regular grid.

    Parameters
    ----------
    AoAs : tuple with N scalar entries
        Angles of attack in degrees for which to read data.
    start : scalar, optional
        First timestep to use. The default is 50.
    timesteps : TYPE, optional
        Total number of timesteps, will use start-timesteps. The default is 100.

    Returns
    -------
    x : MxO Array
        x-coordinates of grid.
    y : MxO Array
        y-coordinates of grid.
    u : NxMxO Array
        u-velocity at each AoA and grid point.
    v : NxMxO Array
        v-velocity at each AoA and grid point.
    vort : NxMxO Array
        vorticity at each AoA and grid point.
    u_std : NxMxO Array
        u standard deviation at each AoA and grid point.
    v_std : NxMxO Array
        v standard deviation at each AoA and grid point.
    Cont : NxMxO Array
        Continuity error at each AoA and grid point.
    Mom : NxMxO Array
        Momentum error at each AoA and grid point.
    """
    n_files = timesteps-start
    j = 0
    for alpha in AoAs:
        print('alpha = {:03d}deg'.format(alpha))
        # Snapshot "start" fixes the array shapes for this angle of attack.
        u0, v0, Press0, xlin, ylin = read_vtr("../Data/arc_{:03d}_Re_150/dat0x0x0/fluid.{:01d}.vtr".format(alpha, start))
        u_files = np.empty((n_files, u0.shape[0], u0.shape[1]))
        u_files[0] = u0
        v_files = np.empty((n_files, v0.shape[0], v0.shape[1]))
        v_files[0] = v0
        Press_files = np.empty((n_files, Press0.shape[0], Press0.shape[1]))
        Press_files[0] = Press0
        for i in range(1, n_files):
            file = "../Data/arc_{:03d}_Re_150/dat0x0x0/fluid.{:01d}.vtr".format(alpha, i+start)
            u_files[i], v_files[i], Press_files[i], xlin, ylin = read_vtr(file)
        x_stretch, y_stretch = np.meshgrid(xlin, ylin)
        # Statistics: allocate the per-AoA std arrays once on the first pass.
        if j == 0:
            u_std = np.zeros((len(AoAs), u0.shape[0], u0.shape[1]))
            v_std = np.zeros((len(AoAs), u0.shape[0], u0.shape[1]))
        # Time averages over the snapshot axis.
        u_stretch = np.mean(u_files, axis=0)
        v_stretch = np.mean(v_files, axis=0)
        Press = np.mean(Press_files, axis=0)
        u_std[j] = np.std(u_files, axis=0)
        v_std[j] = np.std(v_files, axis=0)
        if verbose:
            print('Mean std u: %.8f' % np.mean(u_std[j]))
            print('Mean std v: %.8f' % np.mean(v_std[j]))
            print('Max std u: %.8f' % np.max(u_std[j]))
            print('Max std v: %.8f' % np.max(v_std[j]))
        dx, dy = CellSizes(xlin, ylin)
        vort_stretch = Vorticity(u_stretch, v_stretch, dx, dy)
        # Interpolate to regular grid (grid and outputs allocated on first pass).
        if j == 0:
            xmin = np.ceil(np.min(xlin))
            xmax = np.floor(np.max(xlin))
            ymin = np.ceil(np.min(ylin))
            ymax = np.floor(np.max(ylin))
            x, y = (np.mgrid[xmin+1:xmax:step, ymin:ymax:step] - .5)
            x = x.transpose().astype(float)
            y = y.transpose().astype(float)
            u = np.zeros((len(AoAs), x.shape[0], x.shape[1]))
            v = np.zeros_like(u)
            m = np.zeros_like(u)  # NOTE(review): allocated but never used below
            vort = np.zeros_like(u)
            Cont = np.zeros_like(u)
            Mom = np.zeros_like(u)
            if getPressure:
                p = np.zeros_like(u)
        u[j] = griddata(np.array([x_stretch.flatten(), y_stretch.flatten()]).transpose(),
                        u_stretch.flatten(), (x, y))
        v[j] = griddata(np.array([x_stretch.flatten(), y_stretch.flatten()]).transpose(),
                        v_stretch.flatten(), (x, y))
        vort[j] = griddata(np.array([x_stretch.flatten(), y_stretch.flatten()]).transpose(),
                           vort_stretch.flatten(), (x, y))
        if getPressure:
            p[j] = griddata(np.array([x_stretch.flatten(), y_stretch.flatten()]).transpose(),
                            Press.flatten(), (x, y))
        # Uniform spacing of the regular grid (0.5 in both directions).
        dx, dy = np.ones_like(x[0, :])*.5, np.ones_like(y[:, 0])*.5
        # vort[j] = Vorticity(u[j], v[j], dx, dy)
        Cont[j] = Continuity(u[j], v[j], x, y)
        Mom[j] = Momentum(vort[j], u[j], v[j], dx, dy)
        print('Max Continuity Error: %.8f' % np.max(Cont[j]))
        print('Max Momentum Error: %.8f' % np.max(Mom[j]))
        j += 1
    if getPressure:
        return x, y, u, v, vort, p, u_std, v_std, Cont, Mom
    else:
        return x, y, u, v, vort, u_std, v_std, Cont, Mom
# %% make_square
def make_square(x, y, u, v, vort, square, step=1, p=None, Mom=None, xOffset=0):
    """
    Outputs the square fields of given size. With an option to interpolate
    onto a coarser grid.

    Parameters
    ----------
    x : 2D-Array
        x-Coordinates.
    y : 2D-Array
        y-Coordinates.
    u : 3D-Array
        Velocity x-component, one 2D field per leading index.
    v : 3D-Array
        Velocity y-component, one 2D field per leading index.
    vort : 3D-Array
        Vorticity, one 2D field per leading index.
    square : Int
        Edge length of desired square.
    step : int
        Step size, if > 1 field is block-averaged onto a coarser grid.
    p : 3D-Array, optional
        Pressure field. The default is None.
    Mom : 3D-Array, optional
        Momentum Error on domain. The default is None.
    xOffset : int, optional
        Square offset from origin. The default is 0.

    Returns
    -------
    x_square, y_square : 2D-Arrays
        Coordinates inside the square window.
    u_square, v_square, vort_square : 3D-Arrays
        Fields restricted to the square window.
    p_square : 3D-Array, only when p is given.
    Mom_square : 3D-Array, only when Mom is given AND step != 1 (see NOTE).
    """
    # Select the square window; the selected points must form a size0 x size0
    # block for the reshapes below to be valid.
    mask = np.logical_and(abs(x-xOffset) < (square/2), abs(y) < (square/2))
    size0 = int(np.sqrt(np.sum(mask)))
    vort_square = vort[:, mask].reshape((len(u), size0, size0))
    x_square = x[mask].reshape((size0, size0))
    y_square = y[mask].reshape((size0, size0))
    u_square = u[:, mask].reshape((vort.shape[0], size0, size0))
    v_square = v[:, mask].reshape((vort.shape[0], size0, size0))
    if p is not None:
        p_square = p[:, mask].reshape((vort.shape[0], size0, size0))
    # BUG FIX: the original used "step is not 1" — an identity comparison with
    # an int literal that only works because CPython caches small ints (and
    # emits a SyntaxWarning on modern Python).  Use a value comparison.
    if step != 1:
        # Coarsen by block-averaging step x step cells.
        vort_square = block_reduce(vort_square, block_size=(1, step, step), func=np.mean)
        x_square = block_reduce(x_square, block_size=(step, step), func=np.mean)
        y_square = block_reduce(y_square, block_size=(step, step), func=np.mean)
        u_square = block_reduce(u_square, block_size=(1, step, step), func=np.mean)
        v_square = block_reduce(v_square, block_size=(1, step, step), func=np.mean)
        if p is not None:
            p_square = block_reduce(p_square, block_size=(1, step, step), func=np.mean)
        if Mom is not None:
            # NOTE(review): Mom is only windowed/reduced and returned on this
            # coarse path; with step == 1 the Mom argument is silently
            # ignored.  Preserved for backward compatibility with callers.
            Mom_square = Mom[:, mask].reshape((vort.shape[0], size0, size0))
            Mom_square = block_reduce(Mom_square, block_size=(1, step, step),
                                      func=np.mean)
            return x_square, y_square, u_square, v_square, vort_square, Mom_square
    if p is not None:
        return x_square, y_square, u_square, v_square, vort_square, p_square
    else:
        return x_square, y_square, u_square, v_square, vort_square
# %% u_omega
@njit(parallel=True)
def u_omega(x, y, xi, yi, omega, h):
    """
    calculates the velocities induced by a vorticity field.

    Parameters
    ----------
    x : Vector
        x-location of points to be evaluated.
    y : Vector
        y-location of points to be evaluated.
    xi : Vector
        x-location of point with non-negligible vorticity.
    yi : Vector
        y-location of point with non-negligible vorticity.
    omega : Vector
        Vorticity as above specified points.
    h : Scalar
        Step size.

    Returns
    -------
    u : Vector
        induced velocity in x-direction at evaluated points.
    v : Vector
        induced velocity in y-direction at evaluated points
    """
    # u, v = np.zeros_like(x), np.zeros_like(x)
    # for xp, yp, op in zip(xi, yi, omega):
    #     rx, ry = x-xp, y-yp
    #     a = op/(rx**2 + ry**2 + 0.5*h**2)  # +.5h**2 to avoid division by zero
    #     u += -a*ry
    #     v += a*rx
    # u = u*h**2 / (2*np.pi)
    # v = v*h**2 / (2*np.pi)
    # return u, v
    u, v = np.zeros_like(x), np.zeros_like(x)
    # NOTE(review): the in-place '+=' on whole arrays inside prange relies on
    # numba treating this as a parallel reduction — confirm results match the
    # serial (commented-out / nojit) version for a non-trivial input.
    for i in prange(len(xi)):
        # for xp, yp, op in zip(xi, yi, omega):
        xp = xi[i]
        yp = yi[i]
        op = omega[i]
        rx, ry = x-xp, y-yp
        a = op/(rx**2 + ry**2 + 0.5*h**2)  # +.5h**2 to avoid division by zero
        u += -a*ry
        v += a*rx
    u = u*h**2 / (2*np.pi)
    v = v*h**2 / (2*np.pi)
    # Debug aid: an all-zero u usually means the inputs were empty/zero.
    if np.max(u) == 0:
        print('u is 0')
    return u, v
def u_omega_nojit(x, y, xi, yi, omega, h):
"""
Same as above, no njit used as it can lead to problems in certain cases.
Parameters
----------
x : Vector
x-location of points to be evaluated.
y : Vector
y-location of points to be evaluated.
xi : Vector
x-location of point with non-negligible vorticity.
yi : Vector
y-location of point with non-negligible vorticity.
omega : Vector
Vorticity as above specified points.
h : Scalar
Step | |
# repository: ob/remote
import logging
import re
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from functools import wraps
from pathlib import Path
from typing import List, Optional, Union
import click
from .configuration import WorkspaceConfig
from .configuration.discovery import get_configuration_medium, load_cwd_workspace_config, save_config
from .configuration.shared import HOST_REGEX, PATH_REGEX
from .exceptions import InvalidInputError, RemoteError
from .explain import explain
from .util import CommunicationOptions, ForwardingOptions
from .workspace import SyncedWorkspace
BASE_LOGGING_FORMAT = "%(message)s"
CONNECTION_STRING_FORMAT_REGEX = re.compile(f"^{HOST_REGEX}(:{PATH_REGEX})?$")
DEFAULT_CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
EXECUTION_CONTEXT_SETTINGS = dict(
help_option_names=["-h", "--help"], ignore_unknown_options=True, allow_interspersed_args=False
)
def log_exceptions(f):
    """A decorator that prints the custom exceptions and exits, but propagates internal ones."""

    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            # BUG FIX: return the wrapped function's result so the decorator
            # is transparent for callables that return a value (the original
            # always returned None).
            return f(*args, **kwargs)
        except RemoteError as e:
            # Expected, user-facing errors: print nicely and exit non-zero.
            # (Equivalent to the original's catch-all + isinstance check,
            # but any other exception now propagates untouched.)
            click.secho(str(e), fg="yellow")
            sys.exit(1)

    return wrapper
def validate_connection_string(ctx, param, value):
    """Click callback: reject values not matching 'host[:path]' and return the value."""
    matcher = CONNECTION_STRING_FORMAT_REGEX.match(value)
    if matcher is None:
        raise click.BadParameter(
            "Please fix value to match the specified format for connection string", ctx=ctx, param=param
        )
    return value
def int_or_str_label(label: Optional[str]) -> Optional[Union[int, str]]:
    """Try to convert the label to int and return the result, if it's not successful, return the label"""
    if label is None:
        return None
    try:
        index = int(label)
    except ValueError:
        return label
    # Users enter indexes starting with 1 and internally we use indexes starting with 0
    return index - 1
def check_command(command: List[str]):
    """Exit with usage help when the command starts with an unknown option."""
    if command and command[0].startswith("-"):
        # Our execution entry points use ignore_unknown_options=True and allow_interspersed_args=False
        # to be able to stream the command to the remote machine. However, there is a downside.
        # If user runs this command with an unknown option, this option will become a part of the command.
        # That's why we need to manually check if the command starts with an unknown option and print an
        # error message in this case.
        ctx = click.get_current_context()
        click.echo(ctx.get_usage())
        click.echo(f"Try '{ctx.info_name} -h' for help\n\nError: no such option {command[0]}")
        sys.exit(2)
def _add_remote_host(config: WorkspaceConfig, connection: str):
    """Add a new remote host to the workspace config, check the connection, and save it if connection is ok

    :param config: the workspace config description object
    :param connection: connection string in format of 'host-name[:remote_dir]'
    """
    parts = connection.split(":")
    remote_host = parts[0]
    config_medium = get_configuration_medium(config)
    # Without an explicit remote dir, let the config medium pick one.
    remote_dir = config_medium.generate_remote_directory(config) if len(parts) == 1 else Path(parts[1])
    added, index = config.add_remote_host(remote_host, remote_dir)
    if not added:
        click.echo(f"{connection} already exists in config")
        sys.exit(0)
    # Check if we can connect to the remote host and create a directory there
    workspace = SyncedWorkspace.from_config(config, config.root, index)
    try:
        workspace.create_remote()
    except RemoteError:
        click.secho(f"Failed to create {workspace.remote.directory} on remote host {remote_host}", fg="yellow")
        click.secho("Please check if host is accessible via SSH", fg="yellow")
        sys.exit(1)
    click.echo(f"Created remote directory at {workspace.remote.host}:{workspace.remote.directory}")
    click.echo("Remote is configured and ready to use")
    # No errors when executing the above code means we can save the config
    config_medium.save_config(config)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.argument("connection", metavar="host-name[:remote_dir]", callback=validate_connection_string)
@log_exceptions
def remote_add(connection: str):
    """Add one more host for remote connection to a config file"""
    config = load_cwd_workspace_config()
    _add_remote_host(config, connection)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.argument("connection", metavar="host-name[:remote_dir]", callback=validate_connection_string)
@log_exceptions
def remote_init(connection: str):
    """Initiate workspace for the remote execution in the current working directory"""
    try:
        workspace = load_cwd_workspace_config()
        if workspace.root == Path.cwd():
            click.secho("A configured workspace already exists in the current working directory.", fg="yellow")
        else:
            click.secho(
                f"A configured workspace already initiated in the current working directory's parent {workspace.root}.",
                fg="yellow",
            )
        click.secho("If you want to add a new host to it, please use remote-add.", fg="yellow")
        sys.exit(1)
    except RemoteError:
        # we expect it to fail. It means we don't overwrite an existing workspace
        pass
    config = WorkspaceConfig.empty(Path.cwd())
    _add_remote_host(config, connection)
    # help out with .gitignore if we are in a git repository
    if not (config.root / ".git").exists():
        return
    # make sure we don't keep adding to .gitignore
    gitignore = config.root / ".gitignore"
    if gitignore.exists():
        for line in gitignore.read_text().splitlines():
            if line.startswith(".remote"):
                return
    with gitignore.open("a") as f:
        f.write("\n")
        f.write(".remote*")
        f.write("\n")
    click.echo("Added '.remote*' to .gitignore")
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option(
    "-p", "--push", is_flag=True, help="add IGNORE pattern to push ignore list (mutually exclusive with '--pull')"
)
@click.option(
    "-l", "--pull", is_flag=True, help="add IGNORE pattern to pull ignore list (mutually exclusive with '--push')"
)
@click.argument("ignore", nargs=-1, required=True)
@log_exceptions
def remote_ignore(ignore: List[str], push: bool, pull: bool):
    """Add new IGNORE patterns to the ignores list

    IGNORE pattern should be a string in rsync-friendly format.
    If no options provided these patterns will be ignored on both push and pull
    """
    config = load_cwd_workspace_config()
    if not push and not pull:
        config.ignores.add(ignore)
    elif pull and not push:
        config.ignores.pull.add(ignore)
    elif push and not pull:
        config.ignores.push.add(ignore)
    else:
        raise InvalidInputError("You cannot use both '--pull' and '--push' flags")
    # Drop duplicates/empties before persisting.
    config.ignores.trim()
    save_config(config)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@log_exceptions
def remote_host():
    """Print the default remote host in use and exit"""
    workspace = SyncedWorkspace.from_cwd()
    click.echo(workspace.remote.host)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.argument("index", type=int)
@log_exceptions
def remote_set(index: int):
    """Set a new default remote host for the workspace

    INDEX is an index of host in config file to use by default (starting from 1)
    """
    config = load_cwd_workspace_config()
    if len(config.configurations) < index:
        click.secho(
            f"Index is too big ({index}). Only have {len(config.configurations)} hosts to choose from.", fg="yellow"
        )
        sys.exit(1)
    elif index < 1:
        click.secho("Index should be 1 or higher", fg="yellow")
        sys.exit(1)
    # we use 0-base index internally
    index = index - 1
    config.default_configuration = index
    save_config(config)
    click.echo(f"Remote host is set to {config.configurations[index].host}")
@click.command(context_settings=EXECUTION_CONTEXT_SETTINGS)
@click.option("-n", "--dry-run", is_flag=True, help="do a dry run of the whole cycle")
@click.option("-m", "--mirror", is_flag=True, help="mirror local files on the remote host")
@click.option("-v", "--verbose", is_flag=True, help="increase verbosity")
@click.option("-e", is_flag=True, help="(deprecated) kept for backward compatibility, noop")
@click.option(
    "-t",
    "--tunnel",
    "port_args",
    type=str,
    help="Enable local port forwarding. Pass value as <remote port>:<local port>. \
If local port is not passed, the local port value would be set to <remote port> value by default",
)
@click.option(
    "-s",
    "--stream-changes",
    default=False,
    is_flag=True,
    help="Resync local changes if any while the command is being run remotely",
)
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.option("--multi", is_flag=True, help="sync and run the remote commands on each remote host from config")
@click.option(
    "--log",
    type=click.Path(file_okay=False, resolve_path=True),
    help="Write sync and remote command output to the log file instead of stdout. "
    "Log file will be located inside DIRECTORY/<timestamp>/<host>_output.log",
)
@click.argument("command", nargs=-1, required=True)
@log_exceptions
def remote(
    command: List[str],
    dry_run: bool,
    mirror: bool,
    verbose: bool,
    e: bool,
    port_args: Optional[str],
    label: Optional[str],
    stream_changes: bool,
    log: Optional[str],
    multi: bool,
):
    """Sync local workspace files to remote machine, execute the COMMAND and sync files back regardless of the result"""
    check_command(command)
    if verbose:
        logging.basicConfig(level=logging.INFO, format=BASE_LOGGING_FORMAT)
    ports = ForwardingOptions.from_string(port_args) if port_args else None
    if multi and label:
        raise InvalidInputError("--multi and --label options cannot be used together")
    workspaces = SyncedWorkspace.from_cwd_mass() if multi else [SyncedWorkspace.from_cwd(int_or_str_label(label))]
    # One worker thread per remote host; each runs the full sync/exec cycle.
    with ThreadPoolExecutor(max_workers=len(workspaces)) as executor:
        futures = {}
        descriptors = []
        start_timestamp = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        for workspace in workspaces:
            host = workspace.remote.host
            if multi or log:
                # We save logs into the <log_dir>/<timestamp>/<hostname>_output.log
                log_dir = Path(log) if log else (workspace.local_root / "logs")
                log_dir = log_dir / start_timestamp
                log_dir.mkdir(parents=True, exist_ok=True)
                try:
                    # If the logs are enabled and they are inside the workspace root, we need to exclude them from
                    # syncing
                    relative_path = log_dir.relative_to(workspace.local_root)
                    workspace.ignores.add([f"{relative_path}/*_output.log"])
                except ValueError:
                    # Value error means that logs are placed outside of the workspace root
                    pass
                fd = (log_dir / f"{host}_output.log").open("w")
                descriptors.append(fd)
                workspace.communication = CommunicationOptions(stdin=None, stdout=fd, stderr=fd)
            future = executor.submit(
                workspace.execute_in_synced_env,
                command,
                dry_run=dry_run,
                verbose=verbose,
                mirror=mirror,
                ports=ports,
                stream_changes=stream_changes,
            )
            futures[future] = workspace
        final_exit_code = 0
        for future in as_completed(list(futures.keys())):
            workspace = futures[future]
            try:
                # timeout=0 is safe: as_completed only yields finished futures.
                exit_code = future.result(timeout=0)
                if exit_code != 0:
                    click.secho(f"Remote command on {workspace.remote.host} exited with {exit_code}", fg="yellow")
                    final_exit_code = exit_code
            except Exception as e:  # noqa: F841
                class_name = e.__class__.__name__
                click.secho(f"{class_name}: {e}", fg="yellow")
                final_exit_code = 255
        for fd in descriptors:
            fd.close()
    sys.exit(final_exit_code)
@click.command(context_settings=EXECUTION_CONTEXT_SETTINGS)
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.argument("command", nargs=-1, required=True)
@log_exceptions
def remote_quick(command: List[str], label: Optional[str]):
    """Execute the COMMAND remotely (without syncing files first)"""
    check_command(command)
    workspace = SyncedWorkspace.from_cwd(int_or_str_label(label))
    code = workspace.execute(command, raise_on_error=False)
    sys.exit(code)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option("-n", "--dry-run", is_flag=True, help="do a dry run of a pull")
@click.option("-v", "--verbose", is_flag=True, help="increase verbosity")
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.argument("path", nargs=-1)
@log_exceptions
def remote_pull(dry_run: bool, verbose: bool, path: List[str], label: Optional[str]):
    """Bring in files from the default remote directory to local workspace.

    Optionally bring in PATH instead of the whole workspace.
    PATH is a path of file or directory to bring back relative to the remote workspace root.
    All sync exclude rules will be omitted if PATH is provided.
    """
    if verbose:
        logging.basicConfig(level=logging.INFO, format=BASE_LOGGING_FORMAT)
    workspace = SyncedWorkspace.from_cwd(int_or_str_label(label))
    if not path:
        workspace.pull(info=True, verbose=verbose, dry_run=dry_run)
        return
    for subpath in path:
        workspace.pull(info=True, verbose=verbose, dry_run=dry_run, subpath=Path(subpath))
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option("-n", "--dry-run", is_flag=True, help="do a dry run of a push")
@click.option("-m", "--mirror", | |
import sys
import time as time_lib
import os
from bin import spl_memory as mem
def replace_bool_none(string: str):
    """
    Returns a str with 'None', 'True', and 'False' replaced with 'null', 'true', and 'false'.

    This function also removes quotes generated by spl String object.

    :param string: the str object to be replaced
    :return: a str with 'None', 'True', and 'False' replaced with 'null', 'true', and 'false'
    """
    in_single = False
    in_double = False
    lst = []
    i = 0
    while i < len(string):
        ch = string[i]
        # Quote tracking: the quote characters themselves are consumed
        # (dropped from the output); all other quoted characters fall
        # through to the plain append at the bottom of the loop.
        if in_single:
            if ch == "'":
                in_single = False
                i += 1
                continue
        elif in_double:
            if ch == '"':
                in_double = False
                i += 1
                continue
        else:
            if ch == "'":
                in_single = True
                i += 1
                continue
            elif ch == '"':
                in_double = True
                i += 1
                continue
        # Keyword replacement only applies outside quoted regions.
        if not in_single and not in_double:
            if i <= len(string) - 4:
                if string[i:i + 4] == "True":
                    lst.append("true")
                    i += 4
                    continue
                elif string[i:i + 4] == "None":
                    lst.append("null")
                    i += 4
                    continue
            if i <= len(string) - 5:
                if string[i:i + 5] == "False":
                    lst.append("false")
                    i += 5
                    continue
        lst.append(ch)
        i += 1
    return "".join(lst)
def compile_time_warning(msg: str):
    """Emit *msg* (newline-terminated, flushed) on stderr."""
    print(msg, file=sys.stderr, flush=True)
def concatenate_path(path: str, directory: str) -> str:
    """Return *path* unchanged when absolute, otherwise join it onto *directory*.

    Uses os.path.join instead of manual `directory + os.sep + path`, which
    produced a doubled separator when *directory* already ended with one.
    """
    if os.path.isabs(path):
        return path
    return os.path.join(directory, path)
def get_string_literal(lit) -> str:
    """Return the spl literal text for *lit*: null/true/false, String contents, or str()."""
    if lit is None:
        return "null"
    if isinstance(lit, bool):
        return "true" if lit else "false"
    if isinstance(lit, String):
        return lit.literal
    return str(lit)
def get_string_repr(o) -> str:
    """Return the spl repr of *o*: null/true/false, bare numbers, quoted Strings, repr()."""
    if o is None:
        return "null"
    if isinstance(o, bool):
        # bool must be tested before int: True/False are also ints in Python.
        return "true" if o else "false"
    if isinstance(o, (int, float)):
        return str(o)
    if isinstance(o, String):
        return "'" + o.__repr__() + "'"
    return repr(o)
# Native functions with no dependency
class SplObject:
    """
    The superclass of all spl objects.
    There are two types of SplObjects: NativeType and Class
    ----- Attributes -----
    id: the identifier of this object, is guaranteed to be unique
    """
    # Unique object identifier issued by the interpreter's memory manager.
    id: int

    def __init__(self):
        # Every new object draws a fresh id from the global memory manager,
        # which guarantees uniqueness across all live spl objects.
        self.id = mem.MEMORY.allocate()
# Base class of all built-in (native) spl types.  Intentionally no class
# docstring: doc__() embeds cls.__doc__ at runtime and subclasses supply
# their own text.
class NativeType(SplObject):
    def __init__(self):
        SplObject.__init__(self)

    @classmethod
    def type_name__(cls) -> str:
        # Every native type must report its spl-visible type name.
        raise NotImplementedError

    @classmethod
    def doc__(cls) -> str:
        """
        :return: the doc string of this type
        """
        # Header line: type name followed by the class docstring.
        doc = ["NativeObject ", cls.type_name__(), " ", cls.__doc__, "\n"]
        for x in dir(cls):
            # Public members only: spl's special/internal names end in "__".
            if len(x) < 2 or x[-2:] != "__":
                attr = getattr(cls, x)
                if callable(attr):
                    doc.append(" method ")
                    doc.append(x)
                    doc.append("(")
                    # BUG FIX: co_varnames lists the parameters *followed by
                    # all local variables*; slice to co_argcount so locals
                    # are not documented as parameters.
                    params = attr.__code__.co_varnames[:attr.__code__.co_argcount]
                    for p in params:
                        if p != "self":
                            doc.append(p)
                            doc.append(", ")
                    # Drop the trailing separator when at least one
                    # parameter was emitted.
                    if doc[-1] == ", ":
                        doc.pop()
                    doc.append("):")
                    doc.append("\n")
                    attr_doc = attr.__doc__
                    if attr_doc:
                        doc.append(attr_doc)
                        doc.append("\n")
        return "".join([str(x) for x in doc])
# Mix-in marking native types that can be iterated from spl code.
# Not an abc on purpose -- the interpreter only needs the __iter__ hook.
class Iterable:
    def __init__(self):
        pass

    def __iter__(self):
        # Subclasses must return an iterator over their elements.
        raise NotImplementedError
class String(NativeType, Iterable):
    """
    An object of a string literal.
    """
    def __init__(self, lit):
        NativeType.__init__(self)
        # Normalize the initializer (None, bool, String, or anything else)
        # to plain text via the shared literal renderer.
        self.literal = get_string_literal(lit)

    def __contains__(self, item):
        return item in self.literal

    def __iter__(self):
        # Iterate character by character over the underlying text.
        return (c for c in self.literal)

    def __str__(self):
        return self.literal

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        # Equal only to another String carrying the same text.
        return isinstance(other, String) and self.literal == other.literal

    def __hash__(self):
        # Hash must agree with __eq__, so delegate to the text.
        return hash(self.literal)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __add__(self, other):
        # Concatenation is only defined between two Strings.
        if isinstance(other, String):
            return String(self.literal + other.literal)
        else:
            raise TypeException("Cannot add <String> with <{}>".format(type(other).__name__))

    def __getitem__(self, index):
        return self.literal[index]

    def length(self):
        """
        Returns the length of this string.
        :return: the length of this string
        """
        return len(self.literal)

    def contains(self, char):
        """
        Returns whether the <char> is a substring of this <String>.
        :param char: the character or string to look for
        :return: <true> iff the <char> is a substring of this <String>
        """
        return char.literal in self.literal

    def format(self, *args):
        """
        Formats this string with the specified format.
        Supported flags: %s (String argument), %d (int), %f (float with an
        optional single-digit precision modifier), %r (str of the argument).
        :param args: the formats
        :return: the formatted string
        """
        lst = []
        i = 0
        count = 0  # index of the next argument to consume
        while i < self.length():
            ch = self.literal[i]
            if ch == "%":
                # Collect non-alphabetic modifier chars until the flag letter.
                # NOTE(review): a "%" at the end of the string, or one never
                # followed by a letter, runs past the end -> IndexError.
                j = i + 1
                params = []
                while not self.literal[j].isalpha():
                    params.append(self.literal[j])
                    j += 1
                if count >= len(args):
                    raise IndexOutOfRangeException("Not enough arguments for string format")
                flag = self.literal[j]
                if flag == "s":
                    lit = args[count]
                    try:
                        lst.append(lit.literal)
                    except AttributeError:
                        raise StringFormatException("Cannot resolve type '{}' with symbol '%s'"
                            .format(type(lit).__name__))
                elif flag == "d":
                    lst.append(str(int(args[count])))
                elif flag == "f":
                    # Only the first modifier char is read as the precision.
                    if len(params) > 0:
                        precision = int(params[0])
                        lst.append(str(round(args[count], precision)))
                    else:
                        lst.append(str(args[count]))
                elif flag == "r":
                    lit = args[count]
                    lst.append(str(lit))
                else:
                    # Unknown flag: emit the "%" and re-scan from the flag
                    # character as ordinary text; no argument is consumed.
                    # NOTE(review): any modifier chars between "%" and the
                    # flag are silently dropped in this branch.
                    # print_waring("Warning: Unknown flag: %" + flag)
                    lst.append("%")
                    i = j
                    continue
                i = j + 1
                count += 1
                continue
            lst.append(ch)
            i += 1
        # if count < len(args):
        #     print_waring("Warning: too much arguments for string format")
        return String("".join(lst))

    @classmethod
    def type_name__(cls):
        return "String"

    def substring(self, from_, to=None):
        """
        Returns a new String of the characters in [from_, to).
        If <to> is omitted, the slice extends to the end of the string.
        """
        length = self.length()
        end = length if to is None else to
        if from_ < 0 or end > length:
            raise IndexOutOfRangeException("Substring index out of range")
        return String(self.literal[from_: end])
# Thin native wrapper exposing a readable Python file object to spl.
# Comments (not docstrings) are used here so doc__()'s runtime output
# is unchanged.
class PyInputStream(NativeType):
    def __init__(self, stream):
        NativeType.__init__(self)
        # The underlying Python stream; assumed open and readable.
        self.stream = stream

    @classmethod
    def type_name__(cls) -> str:
        return "PyInputStream"

    def read(self):
        # Read the remaining contents of the stream.
        return self.stream.read()

    def readline(self):
        # Read one line, including the trailing newline when present.
        return self.stream.readline()

    def close(self):
        self.stream.close()
# Thin native wrapper exposing a writable Python file object to spl.
# Comments (not docstrings) are used here so doc__()'s runtime output
# is unchanged.
class PyOutputStream(NativeType):
    def __init__(self, stream):
        NativeType.__init__(self)
        # The underlying Python stream; assumed open and writable.
        self.stream = stream

    @classmethod
    def type_name__(cls) -> str:
        return "PyOutputStream"

    def write(self, obj):
        # Stringify first so any spl value can be written.
        self.stream.write(str(obj))

    def flush(self):
        self.stream.flush()

    def close(self):
        self.stream.close()
class Array(NativeType, Iterable):
    """
    A collector of sequential data with static size and dynamic type.
    """
    def __init__(self, *initial):
        NativeType.__init__(self)
        # Underlying Python list holding the elements in order.
        self.list = [*initial]

    def __iter__(self):
        return (x for x in self.list)

    def __str__(self):
        # Elements are rendered through the spl repr rules.
        return str([String(get_string_repr(v)) for v in self.list])

    def __repr__(self):
        return self.__str__()

    def __getitem__(self, item):
        return self.list[item]

    def __setitem__(self, key, value):
        self.list[key] = value

    @classmethod
    def type_name__(cls):
        return "Array"

    def contains(self, item):
        # True when <item> is an element of this array.
        return item in self.list

    def size(self):
        # Number of elements.
        return len(self.list)

    def sort(self):
        # In-place ascending sort; returns None per Python convention.
        return self.list.sort()

    def sub_array(self, from_, to=None):
        # Returns a new Array of the elements in [from_, to); an omitted
        # <to> extends the slice to the end of the array.
        length = self.size()
        end = length if to is None else to
        if from_ < 0 or end > length:
            raise IndexOutOfRangeException("Sub array index out of range")
        # BUG FIX: __init__ takes *initial, so the slice must be unpacked;
        # previously the whole slice list became one nested element.
        return Array(*self.list[from_: end])

    def reverse(self):
        # In-place reversal; returns None per Python convention.
        return self.list.reverse()
# Key-value mapping native type backed by a Python dict.
# Comments only (no docstrings) so doc__()'s runtime output is unchanged.
class Pair(NativeType, Iterable):
    def __init__(self, initial: dict):
        NativeType.__init__(self)
        # Copy defensively so later mutation of the caller's dict
        # cannot leak into this Pair.
        self.pair = initial.copy()

    def __iter__(self):
        # Iteration yields the keys, mirroring Python dict semantics.
        return (key for key in self.pair)

    def __str__(self):
        rendered = {String(get_string_repr(k)): String(get_string_repr(v))
                    for k, v in self.pair.items()}
        return str(rendered)

    def __repr__(self):
        return self.__str__()

    def __getitem__(self, item):
        return self.pair[item]

    def __setitem__(self, key, value):
        self.pair[key] = value

    def contains(self, item):
        # True when <item> is present as a key.
        return item in self.pair

    def get(self, key):
        return self.pair[key]

    def put(self, key, value):
        self.pair[key] = value

    def size(self):
        return len(self.pair)

    @classmethod
    def type_name__(cls):
        return "Pair"
# Hash-set native type backed by a Python set.
class Set(NativeType, Iterable):
    def __init__(self, *initial):
        NativeType.__init__(self)
        self.set = set(initial)

    def __iter__(self):
        return (v for v in self.set)

    def __str__(self):
        return str(set([String(get_string_repr(v)) for v in self.set]))

    def __repr__(self):
        return self.__str__()

    def get(self, item):
        # Returns the stored element equal to <item>, or None when absent.
        for x in self.set:
            if x == item:
                return x

    def size(self):
        return len(self.set)

    def add(self, item):
        self.set.add(item)

    def pop(self):
        # BUG FIX: the popped element was previously discarded; return it
        # so callers receive the removed value (matches Python's set.pop).
        return self.set.pop()

    def clear(self):
        self.set.clear()

    def union(self, other):
        # BUG FIX: set.union returns a new set without mutating, so the old
        # call `self.set.union(other)` had no effect at all.  Merge the
        # elements in place, consistent with add/update/clear.
        self.set = self.set.union(other)

    def update(self, s):
        self.set.update(s)

    def contains(self, item):
        return item in self.set

    @classmethod
    def type_name__(cls):
        return "Set"
class System(NativeType):
    """
    A class consists of system calls
    ----- Attributes -----
    argv: command line arguments
    cwd: the working directory
    encoding: the encoding mode
    stdout: system standard output stream, ClassInstance extends OutputStream
    stderr: system standard error output stream, ClassInstance extends OutputStream
    stdin: system standard input stream, ClassInstance extends InputStream
    """
    argv: Array
    cwd: String
    encoding: str
    # Raw Python-backed streams, wrapped once at construction time.
    native_in = None
    native_out = None
    native_err = None
    # spl-level stream objects, injected later via set_in/set_out/set_err.
    stdout = None  # ClassInstance <NativeOutputStream>
    stderr = None  # ClassInstance <NativeOutputStream>
    stdin = None  # ClassInstance <NativeInputStream>

    def __init__(self, argv_: Array, directory: String, enc: str, in_out_err):
        NativeType.__init__(self)
        # in_out_err is the (stdin, stdout, stderr) triple of Python files.
        self.native_in = PyInputStream(in_out_err[0])
        self.native_out = PyOutputStream(in_out_err[1])
        self.native_err = PyOutputStream(in_out_err[2])
        self.cwd = directory
        self.argv = argv_
        self.encoding = enc

    def set_in(self, in_):
        self.stdin = in_

    def set_out(self, out):
        self.stdout = out

    def set_err(self, err):
        self.stderr = err

    @staticmethod
    def time():
        """
        Returns the current system time, in millisecond.
        :return: the current system time, in millisecond
        """
        return int(time_lib.time() * 1000)

    @staticmethod
    def sleep(milli):
        """
        Pause the current thread for a period of time, in millisecond.
        :param milli: the time to pause, in millisecond
        """
        time_lib.sleep(milli / 1000)

    @classmethod
    def type_name__(cls):
        return "System"
class Os(NativeType):
"""
A class consists of functions related to operating system
----- Attributes -----
name: the name of the os
separator: the default path separator of the os
"""
name = String(os.name)
separator = String(os.sep)
def __init__(self):
| |
logger.info(self.metar.raw)
self.reset_update_time()
if ignore_updated:
updated = True
if updated and (self.on_main or force_main):
self.draw_main()
elif force_main and not updated:
self.error_no_data()
    def new_station(self):
        """
        Update the current station from ident and display new main screen
        """
        logger.info("Calling new update")
        self.draw_loading_screen()
        # Fetch into a fresh Metar object so a failed update leaves the
        # currently displayed self.metar intact.
        new_metar = avwx.Metar(self.station)
        try:
            if not new_metar.update():
                return self.error_no_data()
        except (TimeoutError, ConnectionError, avwx.exceptions.SourceError):
            self.error_connection()
        except avwx.exceptions.InvalidRequest:
            self.error_station()
        except Exception as exc:
            logger.exception(f"An unknown error has occurred: {exc}")
            self.error_unknown()
        else:
            # Success: swap in the new report, remember the chosen ident,
            # persist the session, and redraw the main screen.
            logger.info(new_metar.raw)
            self.metar = new_metar
            self.old_ident = copy(self.ident)
            self.reset_update_time()
            self.export_session()
            self.draw_main()
def verify_station(self):
"""
Verifies the station value before calling new data
"""
try:
station = avwx.station.Station.from_icao(self.station)
if not station.sends_reports:
return self.error_reporting()
except avwx.exceptions.BadStation:
return self.error_station()
return self.new_station()
    def cancel_station(self):
        """
        Revert ident and redraw main screen
        """
        # Throw away the edited ident and restore the last good one.
        self.ident = self.old_ident
        # Without previously fetched data there is nothing to draw.
        if self.metar.data is None:
            return self.error_no_data()
        self.draw_main()
    def draw_buttons(self):
        """
        Draw all current buttons
        """
        # Each button renders itself onto the window with the color theme.
        for button in self.buttons:
            button.draw(self.win, self.c)
    @draw_func
    def draw_selection_screen(self):
        """
        Load selection screen elements
        """
        self.win.fill(self.c.WHITE)
        # Draw Selection Grid
        # Confirm / cancel buttons, positioned by the layout config.
        yes, no = self.layout["select"]["yes"], self.layout["select"]["no"]
        self.buttons = [
            IconButton(yes, self.verify_station, SpChar.CHECKMARK, "WHITE", "GREEN"),
            CancelButton(no, self.cancel_station, fill="RED"),
        ]
        # Row y-positions: up arrows, ident characters, down arrows.
        upy = self.layout["select"]["row-up"]
        chary = self.layout["select"]["row-char"]
        downy = self.layout["select"]["row-down"]
        # One column per character of the 4-letter station ident.
        for col in range(4):
            x = self.__selection_getx(col)
            self.buttons.append(
                IconButton((x, upy), self.__incr_ident(col, 1), SpChar.UP_TRIANGLE)
            )
            self.buttons.append(
                IconButton((x, downy), self.__incr_ident(col, 0), SpChar.DOWN_TRIANGLE)
            )
            rendered = FONT_L1.render(IDENT_CHARS[self.ident[col]], 1, self.c.BLACK)
            self.win.blit(rendered, centered(rendered, (x, chary)))
def __selection_getx(self, col: int) -> int:
"""
Returns the top left x pixel for a desired column
"""
offset = self.layout["select"]["col-offset"]
spacing = self.layout["select"]["col-spacing"]
return offset + col * spacing
    def __incr_ident(self, pos: int, down: bool) -> Callable:
        """
        Returns a function to update and replace ident char on display
        pos: 0-3 column
        down: increment/decrement counter
        """
        def update_func():
            # Update ident
            # NOTE(review): callers pass 1 for the up arrow and 0 for the
            # down arrow, so a truthy `down` decrements the char index --
            # confirm this matches the intended visual direction.
            if down:
                # Wrap below 0 back to the last character.
                if self.ident[pos] == 0:
                    self.ident[pos] = len(IDENT_CHARS)
                self.ident[pos] -= 1
            else:
                # Wrap past the last character back to 0.
                self.ident[pos] += 1
                if self.ident[pos] == len(IDENT_CHARS):
                    self.ident[pos] = 0
            # Update display
            rendered = FONT_L1.render(IDENT_CHARS[self.ident[pos]], 1, self.c.BLACK)
            x = self.__selection_getx(pos)
            chary = self.layout["select"]["row-char"]
            spacing = self.layout["select"]["col-spacing"]
            # Blank the character cell before blitting the new glyph.
            region = (x - spacing / 2, chary - spacing / 2, spacing, spacing)
            pygame.draw.rect(self.win, self.c.WHITE, region)
            self.win.blit(rendered, centered(rendered, (x, chary)))
            pygame.display.update(region)
        return update_func
    @draw_func
    def draw_loading_screen(self):
        """
        Display load screen
        """
        # Reset on_main because the main screen should always display on success
        self.on_main = True
        self.win.fill(self.c.WHITE)
        # Two-line status message positioned by the error layout.
        point = self.layout["error"]["line1"]
        self.win.blit(FONT_M2.render("Fetching weather", 1, self.c.BLACK), point)
        point = self.layout["error"]["line2"]
        self.win.blit(
            FONT_M2.render("data for " + self.station, 1, self.c.BLACK), point
        )
    def __draw_clock(self):
        """
        Draw the clock components
        """
        now = datetime.utcnow() if cfg.clock_utc else datetime.now(tzlocal())
        # Naive UTC datetimes report no tzname; fall back to the literal.
        label = now.tzname() or "UTC"
        # Some layouts do not define FONT_L2; fall back to FONT_L1.
        clock_font = globals().get("FONT_L2") or FONT_L1
        clock_text = clock_font.render(now.strftime(cfg.clock_format), 1, self.c.BLACK)
        x, y = self.layout["main"]["clock"]
        w, h = clock_text.get_size()
        # Blank the previous clock text before drawing the new one.
        # NOTE(review): pygame.Rect takes ((left, top), (width, height)),
        # but (x + w, (y + h) * 0.9) looks like coordinates rather than a
        # size, so this clears a larger area than the text -- confirm intent.
        pygame.draw.rect(self.win, self.c.WHITE, ((x, y), (x + w, (y + h) * 0.9)))
        self.win.blit(clock_text, (x, y))
        label_font = FONT_M1 if self.is_large else FONT_S3
        point = self.layout["main"]["clock-label"]
        self.win.blit(label_font.render(label, 1, self.c.BLACK), point)
    def __draw_wind_compass(
        self, data: avwx.structs.MetarData, center: List[int], radius: int
    ):
        """
        Draw the wind direction compass
        """
        wdir = data.wind_direction
        speed = data.wind_speed
        var = data.wind_variable_direction
        # Compass ring.
        pygame.draw.circle(self.win, self.c.GRAY, center, radius, 3)
        if not speed.value:
            # No wind: text label only, no needle.
            text = FONT_S3.render("Calm", 1, self.c.BLACK)
        elif wdir and wdir.repr == "VRB":
            # Variable wind with no primary direction.
            text = FONT_S3.render("VRB", 1, self.c.BLACK)
        elif wdir:
            # Primary direction: 3-digit heading plus a red needle.
            text = FONT_M1.render(str(wdir.value).zfill(3), 1, self.c.BLACK)
            rad_point = radius_point(wdir.value, center, radius)
            width = 4 if self.is_large else 2
            pygame.draw.line(self.win, self.c.RED, center, rad_point, width)
            # Blue needles mark the reported variable-direction extremes.
            if var:
                for point in var:
                    rad_point = radius_point(point.value, center, radius)
                    pygame.draw.line(self.win, self.c.BLUE, center, rad_point, width)
        else:
            # Wind reported but direction entirely missing.
            text = FONT_L1.render(SpChar.CANCEL, 1, self.c.RED)
        self.win.blit(text, centered(text, center))
    def __draw_wind(self, data: avwx.structs.MetarData, unit: str):
        """
        Draw the dynamic wind elements
        """
        speed, gust = data.wind_speed, data.wind_gust
        point = self.layout["main"]["wind-compass"]
        radius = self.layout["main"]["wind-compass-radius"]
        self.__draw_wind_compass(data, point, radius)
        # Speed and gust text only make sense when there is wind; the
        # compass itself already displays "Calm" otherwise.
        if speed.value:
            text = FONT_S3.render(f"{speed.value} {unit}", 1, self.c.BLACK)
            point = self.layout["main"]["wind-speed"]
            self.win.blit(text, centered(text, point))
            text = f"G: {gust.value}" if gust else "No Gust"
            text = FONT_S3.render(text, 1, self.c.BLACK)
            self.win.blit(text, centered(text, self.layout["main"]["wind-gust"]))
def __draw_temp_icon(self, temp: int):
"""
Draw the temperature icon
"""
therm_level = 0
if temp:
therm_level = temp // 12 + 2
if therm_level < 0:
therm_level = 0
add_i = "I" if self.inverted else ""
therm_icon = f"Therm{therm_level}{add_i}.png"
point = self.layout["main"]["temp-icon"]
self.win.blit(pygame.image.load(str(ICON_PATH / therm_icon)), point)
def __draw_temp_dew_humidity(self, data: avwx.structs.MetarData):
"""
Draw the dynamic temperature, dewpoint, and humidity elements
"""
temp = data.temperature
dew = data.dewpoint
if self.is_large:
temp_text = "Temp "
diff_text = "Std Dev "
dew_text = "Dewpoint "
hmd_text = "Humidity "
else:
temp_text = "TMP: "
diff_text = "STD: "
dew_text = "DEW: "
hmd_text = "HMD: "
# Dewpoint
dew_text += f"{dew.value}{SpChar.DEGREES}" if dew else "--"
point = self.layout["main"]["dew"]
self.win.blit(FONT_S3.render(dew_text, 1, self.c.BLACK), point)
# Temperature
if temp:
temp_text += f"{temp.value}{SpChar.DEGREES}"
if self.is_large:
temp_text += self.metar.units.temperature
temp_diff = temp.value - 15
diff_sign = "-" if temp_diff < 0 else "+"
diff_text += f"{diff_sign}{abs(temp_diff)}{SpChar.DEGREES}"
else:
temp_text += "--"
diff_text += "--"
point = self.layout["main"]["temp"]
self.win.blit(FONT_S3.render(temp_text, 1, self.c.BLACK), point)
point = self.layout["main"]["temp-stdv"]
self.win.blit(FONT_S3.render(diff_text, 1, self.c.BLACK), point)
if "temp-icon" in self.layout["main"]:
self.__draw_temp_icon(temp.value)
# Humidity
if isinstance(temp.value, int) and isinstance(dew.value, int):
relHum = (
(6.11 * 10.0 ** (7.5 * dew.value / (237.7 + dew.value)))
/ (6.11 * 10.0 ** (7.5 * temp.value / (237.7 + temp.value)))
* 100
)
hmd_text += f"{int(relHum)}%"
else:
hmd_text += "--"
point = self.layout["main"]["humid"]
self.win.blit(FONT_S3.render(hmd_text, 1, self.c.BLACK), point)
    def __draw_cloud_graph(
        self, clouds: List[avwx.structs.Cloud], tl: List[int], br: List[int]
    ):
        """
        Draw cloud layers in chart
        Scales everything based on top left and bottom right points
        """
        tlx, tly = tl
        brx, bry = br
        # Chart header, centered over the plot area.
        header = FONT_S3.render("Clouds AGL", 1, self.c.BLACK)
        header_height = header.get_size()[1]
        header_point = midpoint(tl, (brx, tly + header_height))
        self.win.blit(header, centered(header, header_point))
        tly += header_height
        # Left and bottom axes.
        pygame.draw.lines(
            self.win, self.c.BLACK, False, ((tlx, tly), (tlx, bry), (brx, bry)), 3
        )
        if not clouds:
            text = FONT_M2.render("CLR", 1, self.c.BLUE)
            self.win.blit(text, centered(text, midpoint((tlx, tly), (brx, bry))))
            return
        # Chart ceiling; raised to the highest layer as it is encountered.
        # NOTE(review): layers are drawn highest-first (clouds[::-1]), so the
        # first layer fixes `top` before lower ones are plotted -- this
        # assumes the incoming list is sorted by ascending base; confirm.
        top = 80
        LRBool = 1  # alternates labels between the left and right edges
        # Inset the plot area so labels and lines clear the axes.
        tlx += 5
        brx -= 5
        bry -= 10
        for cloud in clouds[::-1]:
            if cloud.base:
                if cloud.base > top:
                    top = cloud.base
                # y position scaled so `top` maps to the chart's upper edge.
                drawHeight = bry - (bry - tly) * cloud.base / top
                text = FONT_S1.render(cloud.repr, 1, self.c.BLUE)
                width, height = text.get_size()
                liney = drawHeight + height / 2
                if LRBool > 0:
                    # Label on the left, guide line running right.
                    self.win.blit(text, (tlx, drawHeight))
                    pygame.draw.line(
                        self.win, self.c.BLUE, (tlx + width + 2, liney), (brx, liney)
                    )
                else:
                    # Label on the right, guide line running left.
                    self.win.blit(text, (brx - width, drawHeight))
                    pygame.draw.line(
                        self.win, self.c.BLUE, (tlx, liney), (brx - width - 2, liney)
                    )
                LRBool *= -1
def __draw_wx_raw(self):
"""
Draw wx and raw report
"""
x, y = self.layout["wxraw"]["start"]
spacing = self.layout["wxraw"]["line-space"]
raw_key = "large"
wxs = [c.value for c in self.metar.data.wx_codes]
wxs.sort(key=lambda x: len(x))
if wxs:
wx_length = self.layout["wxraw"]["wx-length"]
y = self.__draw_text_lines(wxs, (x, y), wx_length, space=spacing)
raw_key = "small"
raw_font, raw_length, raw_padding = self.layout["wxraw"]["raw"][raw_key]
y += raw_padding
self.__draw_text_lines(
self.metar.data.raw, (x, y), raw_length, space=spacing, fontsize=raw_font
)
    def __main_draw_dynamic(
        self, data: avwx.structs.MetarData, units: avwx.structs.Units
    ) -> bool:
        """
        Load Main dynamic foreground elements

        NOTE(review): annotated -> bool and originally documented as
        returning whether "Other-WX"/"Remarks" had content, but no code
        path returns a value, so callers always receive None -- confirm
        whether the flag was ever implemented.
        """
        # Short labels on small layouts, full words on large ones.
        if self.is_large:
            altm_text = "Altm "
            vis_text = "Visb "
        else:
            altm_text = "ALT: "
            vis_text = "VIS: "
        # Report timestamp, shown in UTC or local time per config.
        tstamp = data.time.dt
        if not cfg.clock_utc:
            tstamp = tstamp.astimezone(tzlocal())
        tstamp = tstamp.strftime(cfg.timestamp_format)
        if "title" in cfg.layout["main"]:
            # Single title line: "<station> <timestamp>".
            time_text = data.station + " " + tstamp
            point = self.layout["main"]["title"]
            self.win.blit(FONT_M1.render(time_text, 1, self.c.BLACK), point)
        else:
            # Separate clock, station, and timestamp elements.
            self.__draw_clock()
            point = self.layout["main"]["station"]
            self.win.blit(FONT_M1.render(data.station, 1, self.c.BLACK), point)
            if self.is_large:
                point = self.layout["main"]["timestamp-label"]
                self.win.blit(FONT_S3.render(f"Updated", 1, self.c.BLACK), point)
            else:
                tstamp = "TS: " + tstamp
            point = self.layout["main"]["timestamp"]
            self.win.blit(FONT_S3.render(tstamp, 1, self.c.BLACK), point)
        # Current Flight Rules
        fr = data.flight_rules or "N/A"
        # Layout supplies the category color and a centering x offset.
        fr_color, fr_x_offset = self.layout["fr-display"][fr]
        point = copy(self.layout["main"]["flight-rules"])
        point[0] += fr_x_offset
        self.win.blit(FONT_M1.render(fr, 1, getattr(self.c, fr_color.upper())), point)
        # Wind
        self.__draw_wind(data, units.wind_speed)
        # Temperature / Dewpoint / Humidity
        self.__draw_temp_dew_humidity(data)
        # Altimeter
        altm = data.altimeter
        altm_text += str(altm.value) if altm else "--"
        point = self.layout["main"]["altim"]
        self.win.blit(FONT_S3.render(altm_text, 1, self.c.BLACK), point)
        # Visibility
        vis = data.visibility
        vis_text += f"{vis.value}{units.visibility}" if vis else "--"
        point = self.layout["main"]["vis"]
        self.win.blit(FONT_S3.render(vis_text, 1, self.c.BLACK), point)
        # Cloud Layers
        points = self.layout["main"]["cloud-graph"]
        self.__draw_cloud_graph(data.clouds, *points)
def __draw_text_lines(
self,
items: List[str],
left_point: Coord,
length: int,
header: str = None,
space: int = None,
right_x: int = None,
fontsize: | |
elif self.model.estimator_type == "clusterer":
self._send_table_description("cluster")
# Debug information is printed to the terminal and logs if the paramater debug = true
if self.model.debug:
self._print_log(4)
return self.response
# If the function was called through a chart expression we return a Series
else:
# Dimensionality reduction is only possible through the load script
if self.model.estimator_type == "decomposer":
err = "Dimensionality reduction is only possible through the load script."
raise Exception(err)
# Debug information is printed to the terminal and logs if the paramater debug = true
if self.model.debug:
self._print_log(4)
return self.response.loc[:,'result']
    def calculate_metrics(self, caller="external", ordered_data=False):
        """
        Return key metrics based on a test dataset.
        Metrics returned for a classifier are: accuracy, precision, recall, fscore, support
        Metrics returned for a regressor are: r2_score, mean_squared_error, mean_absolute_error, median_absolute_error, explained_variance_score

        :param caller: "external" when answering a Qlik request: fetches the
            model and test data, sends the table description, and returns
            the response.  Any other value computes metrics on the already
            prepared self.y_test/self.y_pred and stores them on the model.
        :param ordered_data: True for sequence models whose predictions must
            be generated in order and aligned against lagged targets.
        """
        # If the function call was made externally, process the request
        if caller == "external":
            # Open an existing model and get the training & test dataset and targets based on the request
            self.X_test, self.y_test = self._get_model_and_data(target=True, ordered_data=ordered_data)
            # Keep a copy of the y_test before any transformations
            y_test_copy = self.y_test.copy()
            # Scale the targets and increase stationarity if required
            if self.model.scale_target or self.model.make_stationary:
                # If using differencing, we assume sufficient lag values for inversing the transformation later
                y_orig = self.y_test.values.ravel() if self.model.make_stationary=='difference' else None
                # Apply the transformer to the test targets
                self.y_test = self.model.target_transformer.transform(self.y_test)
                # Drop samples where self.y_test cannot be transformed due to insufficient lags
                self.X_test = self.X_test.iloc[len(self.X_test)-len(self.y_test):]
            # Refresh the keras model to avoid tensorflow errors
            if self.model.using_keras:
                self._keras_refresh()
            # Get predictions based on the samples
            if ordered_data:
                self.y_pred = self.sequence_predict(variant="internal")
                # Handle possible null values where a prediction could not be generated
                self.y_pred = self.y_pred[self.rows_per_pred - self.first_pred_modifier:]
                self.y_test = y_test_copy.iloc[-1*len(self.y_pred):]
                # Inverse transformations predictions if required
                if self.model.scale_target or self.model.make_stationary:
                    # Set up indices for differencing
                    end = self.placeholders
                    start = end - self.diff_lags
                    # Add lags for inversing differencing
                    self.y_pred = self.y_pred if y_orig is None else np.append(y_orig[start : end], self.y_pred)
                    # Apply the transformer to the test targets
                    self.y_pred = self.model.target_transformer.inverse_transform(self.y_pred)
                    # Remove lags used for making the series stationary in case of differencing
                    if self.model.make_stationary == 'difference':
                        self.y_pred = self.y_pred[self.diff_lags:]
            else:
                self.y_pred = self.model.pipe.predict(self.X_test)
                # Inverse transformations on the predictions if required
                if self.model.scale_target or self.model.make_stationary:
                    # Apply the transformer to the predictions
                    self.y_pred = self.model.target_transformer.inverse_transform(self.y_pred)
                # Reset y_test to original values
                self.y_test = y_test_copy
            # Flatten the y_test DataFrame
            self.y_test = self.y_test.values.ravel()
        # Try getting the metric_args from the model
        try:
            metric_args = self.model.metric_args
        except AttributeError:
            # Older models may not carry metric_args; default to none
            metric_args = {}
        if self.model.estimator_type == "classifier":
            labels = self.model.pipe.named_steps['estimator'].classes_
            # Check if the average parameter is specified
            if len(metric_args) > 0 and "average" in metric_args:
                # Metrics are returned as an overall average
                metric_rows = ["overall"]
            else:
                # Get the class labels to be used as rows for the result DataFrame
                metric_rows = labels
            # Get key classifier metrics
            metrics_df = pd.DataFrame([x for x in metrics.precision_recall_fscore_support\
                (self.y_test, self.y_pred, **metric_args)],\
                index=["precision", "recall", "fscore", "support"], columns=metric_rows).transpose()
            # Add accuracy
            self.model.score = metrics.accuracy_score(self.y_test, self.y_pred)
            metrics_df.loc["overall", "accuracy"] = self.model.score
            # Finalize the structure of the result DataFrame
            metrics_df.loc[:,"model_name"] = self.model.name
            metrics_df.loc[:,"class"] = metrics_df.index
            metrics_df = metrics_df.loc[:,["model_name", "class", "accuracy", "precision", "recall", "fscore", "support"]]
            # Prepare the confusion matrix and add it to the model
            self._prep_confusion_matrix(self.y_test, self.y_pred, labels)
        elif self.model.estimator_type == "regressor":
            # Get the r2 score
            self.model.score = metrics.r2_score(self.y_test, self.y_pred, **metric_args)
            metrics_df = pd.DataFrame([[self.model.score]], columns=["r2_score"])
            # Get the mean squared error
            metrics_df.loc[:,"mean_squared_error"] = metrics.mean_squared_error(self.y_test, self.y_pred, **metric_args)
            # Get the mean absolute error
            metrics_df.loc[:,"mean_absolute_error"] = metrics.mean_absolute_error(self.y_test, self.y_pred, **metric_args)
            # Get the median absolute error
            metrics_df.loc[:,"median_absolute_error"] = metrics.median_absolute_error(self.y_test, self.y_pred)
            # Get the explained variance score
            metrics_df.loc[:,"explained_variance_score"] = metrics.explained_variance_score(self.y_test, self.y_pred, **metric_args)
            # If the target was scaled we need to inverse transform certain metrics to the original scale
            # However, if we used the sequence prediction function, the inverse transform has already been performed
            if not ordered_data and (self.model.scale_target or self.model.make_stationary):
                for m in ["mean_squared_error", "mean_absolute_error", "median_absolute_error"]:
                    metrics_df.loc[:, m] = self.model.target_transformer.inverse_transform(metrics_df.loc[:, [m]], array_like=False).values.ravel()
            # Finalize the structure of the result DataFrame
            metrics_df.loc[:,"model_name"] = self.model.name
            metrics_df = metrics_df.loc[:,["model_name", "r2_score", "mean_squared_error", "mean_absolute_error",\
                "median_absolute_error", "explained_variance_score"]]
        if caller == "external":
            if self.model.calc_feature_importances:
                # Calculate model agnostic feature importances
                self._calc_importances(X = self.X_test, y = self.y_test)
            self.response = metrics_df
            # Send the response table description to Qlik
            if self.model.estimator_type == "classifier":
                self._send_table_description("metrics_clf")
            elif self.model.estimator_type == "regressor":
                self._send_table_description("metrics_reg")
            # Debug information is printed to the terminal and logs if the parameter debug = true
            if self.model.debug:
                self._print_log(4)
            # Finally send the response
            return self.response
        else:
            # Save the metrics_df to the model
            self.model.metrics_df = metrics_df
def get_confusion_matrix(self):
"""
Returns a confusion matrix calculated previously using testing data with fit or calculate_metrics
"""
# Get the model from cache or disk based on the model_name in request
self._get_model_by_name()
try:
# Prepare the output
self.response = self.model.confusion_matrix
except AttributeError:
err = "The confusion matrix is only avaialble for classifiers, and when hold-out testing " + \
"or K-fold cross validation has been performed."
raise Exception(err)
# Send the reponse table description to Qlik
if "step" in self.response.columns:
self._send_table_description("confusion_matrix_multi")
else:
self._send_table_description("confusion_matrix")
# Debug information is printed to the terminal and logs if the paramater debug = true
if self.model.debug:
self._print_log(4)
# Finally send the response
return self.response
def predict(self, load_script=False, variant="predict"):
"""
Return a prediction by applying an existing model to the supplied data.
If variant='predict_proba', return the predicted probabilties for each sample. Only applicable for certain classes.
If variant='predict_log_proba', return the log probabilities for each sample. Only applicable for certain classes.
This method can be called from a chart expression or the load script in Qlik.
The load_script flag needs to be set accordingly for the correct response.
"""
# Interpret the request data based on the expected row and column structure
row_template = ['strData', 'strData']
col_headers = ['model_name', 'n_features']
feature_col_num = 1
# An additional key field column is expected if the call is made through the load script
if load_script:
row_template = ['strData', 'strData', 'strData']
col_headers = ['model_name', 'key', 'n_features']
feature_col_num = 2
# Create a Pandas Data Frame for the request data
self.request_df = utils.request_df(self.request, row_template, col_headers)
# Initialize the persistent model
self.model = PersistentModel()
# Get the model name from the request dataframe
self.model.name = self.request_df.loc[0, 'model_name']
# Get the model from cache or disk
self._get_model()
# Debug information is printed to the terminal and logs if the paramater debug = true
if self.model.debug:
self._print_log(3)
if load_script:
# Set the key column as the index
self.request_df.set_index("key", drop=False, inplace=True)
try:
# Split the features provided as a string into individual columns
self.X = pd.DataFrame([x[feature_col_num].split("|") for x in self.request_df.values.tolist()],\
columns=self.model.features_df.loc[:,"name"].tolist(),\
index=self.request_df.index)
except AssertionError as ae:
err = "The number of input columns do not match feature definitions. Ensure you are using the | delimiter and that the target is not included in your input to the prediction function."
raise AssertionError(err) from ae
# Convert the data types based on feature definitions
self.X = utils.convert_types(self.X, self.model.features_df, sort=False)
if variant in ('predict_proba', 'predict_log_proba'):
# If probabilities need to be returned
if variant == 'predict_proba':
# Get the predicted probability for each sample
self.y = self.model.pipe.predict_proba(self.X)
elif variant == 'predict_log_proba':
# Get the log probability for each sample
self.y = self.model.pipe.predict_log_proba(self.X)
# Prepare a list of probability by class for each sample
probabilities = []
for a in self.y:
s = ""
i = 0
for b in a:
s = s + ", {0}: {1:.3f}".format(self.model.pipe.named_steps['estimator'].classes_[i], b)
i = i + 1
probabilities.append(s[2:])
self.y = probabilities
else:
# Predict y for X using the previously fit pipeline
self.y |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.