input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>rohitsupekar/learn-PDEs-from-data
import numpy as np
import logging
import pandas as pd
import time
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from joblib import Parallel, delayed
import pickle as pkl
from tqdm import tqdm
import copy
from sklearn import preprocessing
from pdel.solvers import *
from pdel.funcs import *
from sklearn.model_selection import RepeatedKFold, KFold
logger = logging.getLogger(__name__)
class PDElearn:
def __init__(self, f_desc, ft_desc, features, data_raw, poly_order=0, sparse_algo='stridge', \
        print_flag=False, path='.'):
    """
    Set up the learner and precompute the feature matrix.

    f_desc, ft_desc: string descriptors of the field variable and its time derivative
    features: list with string descriptors of all the features
    data_raw: dictionary of data for each descriptor in features
    poly_order: largest order of f used to construct the feature matrix
    sparse_algo: sparsity-promoting algorithm to use (currently only 'STRidge')
    print_flag: verbosity flag handed to the sparse solver
    path: output text/pdf files are saved under this directory
    """
    self.f_desc = f_desc
    self.ft_desc = ft_desc
    self.features = features
    self.P = poly_order
    self.print_flag = print_flag
    self.path = path
    # Fall back to the default algorithm when an unknown name is requested.
    requested = sparse_algo.lower()
    self.sparse_algo = requested if requested in ('stridge',) else 'stridge'
    # Build the feature matrix and its per-column descriptors up front.
    self.Theta, self.Theta_desc = self.create_feature_matrix(data_raw, P=poly_order)
def create_feature_columns(self, data_raw):
    """
    Flatten every raw field into an (N, 1) column vector.

    Product features (descriptors containing '*') are formed by multiplying
    the two base columns named on either side of the '*'.

    INPUT:
        data_raw: dictionary of the raw non-columnar data
    RETURNS:
        dict mapping each descriptor to its column vector
    """
    def _column(arr):
        # (any shape) -> (N, 1) column vector
        return np.expand_dims(arr.flatten(), axis=1)

    columns = {
        self.f_desc: _column(data_raw[self.f_desc]),
        self.ft_desc: _column(data_raw[self.ft_desc]),
    }
    for desc in self.features:
        if desc in data_raw:
            columns[desc] = _column(data_raw[desc])
        elif '*' in desc:
            parts = desc.split('*')
            left, right = parts[0], parts[1]
            if left in data_raw and right in data_raw:
                columns[desc] = _column(data_raw[left]) * _column(data_raw[right])
            else:
                logger.error('`%s` not in data_raw!' % desc)
        else:
            logger.error('`%s` not in data_raw!' % desc)
    return columns
def create_feature_matrix(self, data_raw, P=3):
    """
    Build the feature matrix Theta by stacking the feature columns, each
    multiplied by powers f^0..f^P of the field.

    Also caches the field (self.f), its time derivative (self.ft) and the
    polynomial order (self.P) on the instance.

    INPUT:
        data_raw: dictionary of the raw non-columnar data (could be columnar too)
        P: largest power of the field to multiply each feature by
    RETURNS:
        (Theta, Theta_desc): feature matrix and per-column string descriptors
    """
    data_cv = self.create_feature_columns(data_raw)
    f_desc, ft_desc = self.f_desc, self.ft_desc
    f, ft = data_cv[f_desc], data_cv[ft_desc]
    self.f, self.ft, self.P = f, ft, P
    Th_list = []
    Theta_desc = []  # description of the columns
    # NOTE: removed an unused local `W_list = []` (weights are built in
    # create_weight_matrix(), not here).
    for key in self.features:
        for p in range(P + 1):
            # don't include the constant term in the features
            if key == '1' and p == 0:
                continue
            Th_list.append(np.multiply(data_cv[key], self.f ** p))
            if p == 0:
                Theta_desc.append(key)
            elif key == '1':
                # pure powers of the field, e.g. 'f^2'
                Theta_desc.append('%s^%i' % (f_desc, p))
            else:
                # feature multiplied by a power of the field, e.g. 'f^2 fx'
                Theta_desc.append('%s^%i %s' % (f_desc, p, key))
    # feature matrix
    Theta = np.hstack(Th_list)
    n, d = Theta.shape
    logger.debug('Created (n_data, n_features) = (%i, %i)' % (n, d))
    return Theta, Theta_desc
def create_weight_matrix(self, weightFac=0, w_list=None):
    """
    Build and cache the diagonal weights matrix self.W.

    weightFac: extra weight added per power of the field (and per product term)
    w_list: optional dict of base weights per feature; defaults to 1 per
            feature when weightFac == 0, else weightFac per feature
    """
    if w_list is None:
        base = 1 if weightFac == 0 else weightFac
        w_list = {key: base for key in self.features}
    weights = []
    for key in self.features:
        for p in range(self.P + 1):
            # the constant term has no column, hence no weight entry
            if key == '1' and p == 0:
                continue
            if key == '1':
                # pure powers of the field (rho^1, rho^2, ...): only p > 0 reaches here
                extra = weightFac * (p - 1)
            elif '*' in key:
                # product features pay one additional weightFac
                extra = weightFac * p + weightFac
            else:
                extra = weightFac * p
            weights.append(w_list[key] + extra)
    self.W = np.diag(weights)
    return self.W
def get_sparse_solver(self):
    """
    Return a wrapper function around the configured sparse solver.

    The wrapper has signature (X, y, lam1, lam2, maxit) and closes over the
    weight matrix W and the optional thresh_nonzero mask derived from
    self.forced_features (features that must never be thresholded away).
    """
    # build the thresh_nonzero vector based on the entries in self.forced_features
    if hasattr(self, 'forced_features'):
        temp_vec = np.array([0. if key in self.forced_features else 1.
                             for key in self.Theta_desc])
        thresh_nonzero = temp_vec[:, np.newaxis]
    else:
        thresh_nonzero = None
    w_list = self.w_list if hasattr(self, 'w_list') else None
    W = self.create_weight_matrix(w_list=w_list)
    if self.sparse_algo == 'stridge':
        # PEP 8: a named function instead of a lambda bound to a name.
        def find_sparse_coeffs(X, y, lam1, lam2, maxit):
            # note STRidge's argument order: (lam2, lam1)
            return STRidge(X, y, lam2, lam1, maxit, W=W,
                           thresh_nonzero=thresh_nonzero, print_flag=self.print_flag)
    else:
        pass  # TODO: implement lasso (currently unreachable; __init__ forces 'stridge')
    # fixed typo: 'Spase' -> 'Sparse'
    logger.info('Sparse solver selected: %s' % (self.sparse_algo))
    return find_sparse_coeffs
def run_cross_validation(self, lam1_arr, lam2_arr, n_cores=1, n_folds=4, \
        n_repeats=1, random_state=None, maxit=1000, plot_folds=False):
    """
    Performs cross_validation over a grid of (lam1, lam2) hyperparameters.

    lam1_arr: array with values for tau for STRidge and lambda for LASSO and IHTd
    lam2_arr: array with values for lambda for STRidge (ignored for LASSO and IHTd)
    n_cores: cores to be used for running the folds in parallel (joblib)
    n_folds, n_repeats: parameters for (repeated) k-fold cross validation
    random_state: seed passed to RepeatedKFold
    maxit: max iterations for the solver
    plot_folds: if True, save a loss-vs-complexity scatter plot per fold

    Returns (coeffs_folds, error_folds), also stored on the instance:
    per fold, a list of coefficient vectors and test errors, one entry per
    (lam2, lam1) grid point in row-major order (lam2 outer, lam1 inner).
    """
    # non-STRidge solvers take a single lam2 value only
    if self.sparse_algo != 'stridge' and len(lam2_arr) != 1:
        logger.error('Since solver is not STRidge, size of lambda2 array must be 1! Exiting..')
        return
    self.lam1_arr = lam1_arr  # store lambda values to use in stability selection plots
    self.lam2_arr = lam2_arr
    k_fold = RepeatedKFold(n_folds, n_repeats=n_repeats, random_state=random_state)
    tot_folds = k_fold.get_n_splits()
    n, d = self.Theta.shape
    train_inds_list, test_inds_list = [None]*tot_folds, [None]*tot_folds
    # do the regular k_fold split if n_folds > 1; otherwise set train_inds and
    # test_inds (both) to the entire dataset
    for k, (train_inds, test_inds) in enumerate(k_fold.split(self.Theta)) \
            if tot_folds != 1 else enumerate([(np.arange(n), np.arange(n))]):
        train_inds_list[k] = train_inds
        test_inds_list[k] = test_inds
    find_sparse_coeffs = self.get_sparse_solver()
    logger.info('Running cross validation: %i folds, %i repeats' % (n_folds, n_repeats))

    # function that does the computation for each fold (executed by joblib)
    def run_fold(train_inds, test_inds):
        # standardize training and testing sets independently
        XTrainStd, yTrainStd = scale_X_y(self.Theta[train_inds], self.ft[train_inds])
        XTestStd, yTestStd = scale_X_y(self.Theta[test_inds], self.ft[test_inds])
        # initialize lists for coeffs and error, one slot per grid point
        CoeffsList = [np.zeros((d, 1)) for i in range(len(lam1_arr)*len(lam2_arr))]
        ErrorList = [0.0 for i in range(len(lam1_arr)*len(lam2_arr))]
        count = 0
        # sweep through hyperparameters and construct a list;
        # lam2 has to be in the outer loop because of the way the stability path
        # is constructed in select_stable_components()
        for lam2 in lam2_arr:
            for lam1 in lam1_arr:
                w_train = find_sparse_coeffs(XTrainStd, yTrainStd, lam1, lam2, maxit)
                test_error = find_error(XTestStd, yTestStd, w_train)
                CoeffsList[count][:] = w_train
                ErrorList[count] = test_error
                count += 1
        return CoeffsList, ErrorList

    # NOTE(review): this prints BEFORE the parallel sweep below actually runs —
    # looks misplaced; confirm whether it belongs after the Parallel(...) call.
    print('*** Done ***')
    output = Parallel(n_jobs=n_cores)\
        (delayed(run_fold)(train_inds_list[i], test_inds_list[i]) \
        for i in tqdm(range(tot_folds)))
    self.coeffs_folds = [i[0] for i in output]
    self.error_folds = [i[1] for i in output]
    # plot the error and complexity of all the PDEs in each fold
    if plot_folds:
        for k, lst in enumerate(self.coeffs_folds):
            fig = plt.figure(figsize=(5, 3))
            for i, arr in enumerate(lst):
                # binary support pattern of the PDE (1 where a coefficient survives)
                tup = tuple(np.where(arr[:, 0] == 0., 0., 1.))
                # complexity = weighted count of active terms (uses self.W)
                complexity = np.sum(self.W @ (np.array(tup)[:, np.newaxis]))
                error = self.error_folds[k][i]
                plt.scatter(np.log10(error), complexity, 10, 'k')
            plt.xlabel(r'$\log(Loss)$')
            plt.ylabel('Complexity')
            plt.title('Fold %i' % (k))
            plt.tight_layout()
            plt.savefig('%s/fold_%i.pdf' % (self.path, k))
            plt.close(fig)
    logger.info('Cross Validation done!')
    return self.coeffs_folds, self.error_folds
def find_intersection_of_folds(self, thresh=0.8, plot_hist=False):
"""
Finds the intersection of the PDEs from multiple folds using Python sets.
Each PDE is represented by a tuple like (0, 1, 1, ...., 0) where
0 indicates a term being present and 1 otherwise.
These tuples are used as keys for the dictionaries for storing the
corresponding actual coefficients, the squared errors and the complexity of the PDE.
Note that each fold has mutliple repeated PDEs in the
hyperparameter-sweeping. Using the sets avoids such repeated PDEs.
INPUT:
thresh: a number out of 1 that indicates the percentage of folds in which
the PDEs are supposed to belong
"""
if not hasattr(self, 'coeffs_folds'):
logger.error('Execute PDElearn.run_cross_validation() first!')
return
logger.info('Finding the intersection set of PDEs from the folds!')
n, d = self.Theta.shape
coeffs_folds = self.coeffs_folds
error_folds = self.error_folds
n_folds = len(coeffs_folds)
tup_sets = [set() for i in range(n_folds)]
coeff_dict = [{} for i in range(n_folds)]
error_dict = [{} for i in range(n_folds)]
for k, lst in enumerate(coeffs_folds):
for i, arr in enumerate(lst):
tup = tuple(np.where(arr[:,0]==0., 0., 1.))
tup_sets[k].add(tup)
coeff_dict[k][tup] = arr
error_dict[k][tup] = error_folds[k][i]
#find q-intersection of the pdes
q = int(np.floor((1-thresh)*n_folds)) #number of sets to leave out for intersection
tup_sets_, score_ = find_relaxed_intersection(*tup_sets, q=q)
#sort the tuples and the scores in terms of number of terms
n_coeffs_list = [sum(tup) for tup in tup_sets_]
tup_sets_all = [x for _, x in sorted(zip(n_coeffs_list, tup_sets_))]
score_all = [x for _, x in sorted(zip(n_coeffs_list, score_))]
if plot_hist:
plt.figure(figsize=(5,3), dpi=200)
plt.bar(np.arange(1,len(score_all)+1), np.sort(score_all)[::-1])
plt.plot([1, len(score_all)+1], [thresh, thresh], 'r--')
plt.xlabel('PDE #'); plt.ylabel('Score');
plt.tight_layout(); plt.savefig(self.path + '/scores.pdf');
logger.info('Score plot saved at %s' %(self.path + '/scores.pdf'))
#get the coeffs and the errors
coeffs_all, error_all, num_terms_all, complexity_all = [], [], [], []
#if set is not empty
if | |
import numpy as np
import cv2
def applyThresh(image, thresh=(0, 255)):
    """
    Binarize `image`: output has 1 wherever thresh[0] < pixel <= thresh[1], else 0.
    The output array keeps the input's dtype.
    """
    lo, hi = thresh
    out = np.zeros_like(image)
    out[(image > lo) & (image <= hi)] = 1
    return out
def S_channel(image):
    """Return the saturation (S) plane of an RGB image via HLS conversion."""
    return cv2.cvtColor(image, cv2.COLOR_RGB2HLS)[:, :, 2]
def sobel_X(image):
    """
    Horizontal Sobel gradient of an RGB image, scaled to uint8 0-255.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    grad = np.abs(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3))
    # normalize the absolute gradient to the full 8-bit range
    return np.uint8(255 * grad / np.max(grad))
def binary_pipeline(image):
    """
    Combine gradient (Sobel-x) and color (S channel) thresholds into a single
    binary lane mask. Input image must be RGB.
    """
    grad_bin = applyThresh(sobel_X(image), thresh=(20, 100))
    sat_bin = applyThresh(S_channel(image), thresh=(90, 255))
    return grad_bin | sat_bin
def find_lane_pixels_in_sliding_window(binary_warped, nwindows=9, margin=100, minpix=50):
    """
    Collect lane pixels with two independent sliding windows (left and right).

    Returns the pixel coordinates contained within the sliding windows as well
    as the sliding windows' midpoints:
        (xleft, yleft), (xright, yright), (left_midpoints, right_midpoints)

    PARAMETERS
    * nwindows : number of times each window slides up
    * margin   : half of window's width (+/- margin from center of window box)
    * minpix   : minimum number of pixels found to recenter window

    Fixes vs. the original:
    * `len(good_left_inds > minpix)` -> `len(good_left_inds) > minpix`
      (the original recentred whenever ANY pixel was found, ignoring minpix)
    * `np.int` (removed in NumPy 1.24) -> builtin `int`
    """
    (height, width) = binary_warped.shape
    # Column histogram of the bottom half; its two peaks seed the window centers.
    histogram = np.sum(binary_warped[int(height / 2):, :], axis=0)
    midpoint = int(width / 2)
    window_leftx_midpoint = np.argmax(histogram[:midpoint])
    window_rightx_midpoint = np.argmax(histogram[midpoint:]) + midpoint
    # Set height of windows
    window_height = int(height / nwindows)
    # x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    left_lane_inds = []          # left lane pixel indices
    right_lane_inds = []         # right lane pixel indices
    xleft_lane_win_midpts = []   # left lane sliding window midpoints (x-coord)
    xright_lane_win_midpts = []  # right lane sliding window midpoints (x-coord)
    # Step through the left and right windows one slide at a time
    for i in range(nwindows):
        win_y_top = height - (i + 1) * window_height
        win_y_bottom = height - i * window_height
        win_xleft_low = max(window_leftx_midpoint - margin, 0)
        win_xleft_high = window_leftx_midpoint + margin
        win_xright_low = window_rightx_midpoint - margin
        win_xright_high = min(window_rightx_midpoint + margin, width)
        # nonzero pixels that fall inside each window
        good_left_inds = ((nonzeroy >= win_y_top) & (nonzeroy < win_y_bottom) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_top) & (nonzeroy < win_y_bottom) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.extend(good_left_inds)
        right_lane_inds.extend(good_right_inds)
        # Record the current midpoints, then recenter the next window only when
        # more than minpix pixels were found (bug fix: parentheses).
        xleft_lane_win_midpts.append(window_leftx_midpoint)
        xright_lane_win_midpts.append(window_rightx_midpoint)
        if len(good_left_inds) > minpix:
            window_leftx_midpoint = np.mean(nonzerox[good_left_inds], dtype=np.int32)
        if len(good_right_inds) > minpix:
            window_rightx_midpoint = np.mean(nonzerox[good_right_inds], dtype=np.int32)
    # Extract left and right line pixel positions
    xleft_lane = nonzerox[left_lane_inds]
    yleft_lane = nonzeroy[left_lane_inds]
    xright_lane = nonzerox[right_lane_inds]
    yright_lane = nonzeroy[right_lane_inds]
    return (xleft_lane, yleft_lane), (xright_lane, yright_lane), (xleft_lane_win_midpts, xright_lane_win_midpts)
def draw_lane_pixels_in_sliding_window(binary_warped, left_lane_pts, right_lane_pts, window_midpts, margin=100):
    """
    Render lane pixels (left red, right blue) and the green sliding windows
    on a 3-channel copy of the binary image.

    PARAMETERS
    * margin : half of window's width (+/- margin from center of window box)
    """
    # 3-channel canvas from the single-channel binary image
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    height, width = binary_warped.shape
    xleft_lane, yleft_lane = left_lane_pts
    xright_lane, yright_lane = right_lane_pts
    left_mids, right_mids = window_midpts
    n_slides = len(left_mids)  # number of times the window slid up
    slide_h = int(height / n_slides)
    # paint the detected lane pixels
    out_img[yleft_lane, xleft_lane] = [255, 0, 0]
    out_img[yright_lane, xright_lane] = [0, 0, 255]
    # draw one pair of rectangles per slide
    for i, (lx, rx) in enumerate(zip(left_mids, right_mids)):
        y_top = height - (i + 1) * slide_h
        y_bottom = height - i * slide_h
        cv2.rectangle(out_img, (max(lx - margin, 0), y_top),
                      (lx + margin, y_bottom), (0, 255, 0), 12)
        cv2.rectangle(out_img, (rx - margin, y_top),
                      (min(rx + margin, width), y_bottom), (0, 255, 0), 12)
    return out_img
def ransac_polyfit(x, y, order=2, n=100, k=10, t=100, d=20, f=0.9):
    """
    RANSAC polynomial fit; returns the best model's coefficients (or None).

    n - minimum number of data points required to fit the model
    k - maximum number of iterations allowed in the algorithm
    t - threshold value to determine when a data point fits a model
    d - number of close data points required to assert that a model fits well to data
    f - fraction of close data points required
    """
    best_err = np.inf
    best_fit = None
    if len(x) > 0:  # nothing to do on empty input
        for _ in range(k):
            # random sample (with replacement) -> candidate model
            sample = np.random.randint(len(x), size=n)
            candidate = np.polyfit(x[sample], y[sample], order)
            inlier_mask = np.abs(np.polyval(candidate, x) - y) < t
            n_inliers = inlier_mask.sum()
            if n_inliers > d and n_inliers > len(x) * f:
                # refit on all inliers and keep the lowest-error model
                refined = np.polyfit(x[inlier_mask], y[inlier_mask], order)
                err = np.sum(np.abs(np.polyval(refined, x[inlier_mask]) - y[inlier_mask]))
                if err < best_err:
                    best_fit = refined
                    best_err = err
    return best_fit
def fit_polynomial(img_height, left_lane_pts, right_lane_pts):
    """
    Fit a second-order polynomial (via RANSAC) to each lane and evaluate it.

    Returns (left_fit, right_fit, left_fitx, right_fitx, ploty). When a fit
    cannot be produced (e.g. empty lane pts), the fits are None and the
    coordinate outputs are 0.
    """
    xleft_lane, yleft_lane = left_lane_pts
    xright_lane, yright_lane = right_lane_pts
    try:
        # x is modeled as a function of y (lanes are near-vertical)
        left_fit = ransac_polyfit(yleft_lane, xleft_lane, order=2)
        right_fit = ransac_polyfit(yright_lane, xright_lane, order=2)
        ploty = np.linspace(0, img_height - 1, img_height)
        left_fitx = np.polyval(left_fit, ploty)
        right_fitx = np.polyval(right_fit, ploty)
    except TypeError:
        # np.polyval raises TypeError when a fit is still None
        print('[WARNING] The function failed to fit a line!')
        ploty = 0
        left_fitx = 0
        right_fitx = 0
        left_fit = None
        right_fit = None
    return left_fit, right_fit, left_fitx, right_fitx, ploty
def find_lane_pixels_around_poly(binary_warped, left_fit, right_fit, margin=100):
    """
    Return the nonzero pixel coordinates lying within +/- margin of the left
    and right polynomial fits (typically the fits from the previous frame).

    PARAMETER
    * margin: width around the polynomial fit
    """
    nonzero = binary_warped.nonzero()
    ys = np.array(nonzero[0])
    xs = np.array(nonzero[1])
    # distance of each activated pixel from the fitted lane center at its row
    left_center = np.polyval(left_fit, ys)
    right_center = np.polyval(right_fit, ys)
    left_mask = np.abs(xs - left_center) <= margin
    right_mask = np.abs(xs - right_center) <= margin
    return (xs[left_mask], ys[left_mask]), (xs[right_mask], ys[right_mask])
def draw_lane_pixels_around_poly(binary_warped, left_lane_pts, right_lane_pts, previous_fit_pts, margin=100):
    """
    Paints lane pixels and poly fit margins. Poly fit margins are based on
    previous frame values.

    PARAMETER
    * margin: width around the polynomial fit
    previous_fit_pts: (left_fitx, right_fitx, ploty) arrays from fit_polynomial()
    Returns a 3-channel image: lane pixels + fit curve + translucent margin band.
    """
    # Create two output images to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    out_img_margins = np.zeros_like(out_img)
    # Unpack and Define variables
    (height, width) = binary_warped.shape
    (xleft_lane, yleft_lane) = left_lane_pts
    (xright_lane, yright_lane) = right_lane_pts
    (left_fitx, right_fitx, ploty) = previous_fit_pts
    # Color left and right lane pixels
    out_img[yleft_lane, xleft_lane] = [255, 0, 0]    # Red
    out_img[yright_lane, xright_lane] = [0, 0, 255]  # Blue
    # Color left and right previous polynomial fit.
    # NOTE: fit values are floats, so they are cast with np.int_ before drawing.
    for cx, cy in zip(np.int_(left_fitx), np.int_(ploty)):
        cv2.circle(out_img, (cx, cy), radius=1, color=[255, 0, 255], thickness=10)
    for cx, cy in zip(np.int_(right_fitx), np.int_(ploty)):
        cv2.circle(out_img, (cx, cy), radius=1, color=[255, 0, 255], thickness=10)
    # Draw polynomial margins:
    # generate a polygon to illustrate the search area.
    # NOTE: the right edge is flipped upside-down so the concatenated points
    # trace a closed contour for cv2.fillPoly.
    left_line_left_margin = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_right_margin = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
    left_line_margin_pts = np.hstack((left_line_left_margin, left_line_right_margin))
    right_line_left_margin = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_right_margin = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
    right_line_margin_pts = np.hstack((right_line_left_margin, right_line_right_margin))
    cv2.fillPoly(out_img_margins, np.int_([left_line_margin_pts]), (0, 255, 0))
    cv2.fillPoly(out_img_margins, np.int_([right_line_margin_pts]), (0, 255, 0))
    # Combine output images: margins are blended in at 30% opacity
    result = cv2.addWeighted(out_img, 1, out_img_margins, 0.3, 0)
    return result
def augment_previous_fit_pts(left_lane_pts, right_lane_pts, previous_fit_pts, density=4, line_width_margin=10):
"""
Add to detected points the pts | |
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a shift scheduling problem and solves it."""
from __future__ import print_function
import time
from ortools.sat.python import cp_model
class ObjectiveSolutionPrinter(cp_model.CpSolverSolutionCallback):
    """Solution callback printing the objective, its bound and elapsed time."""

    def __init__(self):
        cp_model.CpSolverSolutionCallback.__init__(self)
        self.__count = 0            # solutions seen so far
        self.__t0 = time.time()     # search start time

    def OnSolutionCallback(self):
        """Called on each new solution."""
        elapsed = time.time() - self.__t0
        print('Solution %i, time = %f s, objective = [%i, %i]' %
              (self.__count, elapsed,
               self.ObjectiveValue(), self.BestObjectiveBound()))
        self.__count += 1

    def SolutionCount(self):
        return self.__count
def negated_span_with_borders(works, start, length):
    """
    Build a clause over a sub-sequence of `works` of the given length,
    negating the interior literals and keeping the (existing) border
    literals un-negated. The left border is skipped when start == 0 and
    the right border when the span ends at the last literal.
    """
    idx = start
    clause = []
    if idx:  # left border literal (only when not at the sequence start)
        clause.append(works[idx])
        idx += 1
    # negated interior of the span
    clause.extend(works[idx + k].Not() for k in range(length))
    idx += length
    if idx < len(works):  # right border literal, if one exists
        clause.append(works[idx])
    return clause
def add_sequence_constraint(model, works, hard_min, soft_min, min_cost,
                            soft_max, hard_max, max_cost):
    """Sequence constraint with soft and hard bounds.

    This constraint looks at every maximal contiguous sequence of variables
    assigned to true. It forbids sequences of length < hard_min or > hard_max.
    Then it creates penalty terms if the length is < soft_min or > soft_max.

    Returns (cost_literals, cost_coefficients) to be added to the objective.
    """
    cost_literals = []
    cost_coefficients = []
    # Hard ban: no true-run shorter than hard_min.
    # NOTE(review): the upstream OR-Tools sample uses `len(works) - length + 1`
    # as the start range here and below; `- length - 1` skips the last two
    # start positions — verify against the reference before changing.
    for length in range(1, hard_min):
        for start in range(len(works) - length - 1):
            model.AddBoolOr(negated_span_with_borders(works, start, length))
    # Soft minimum: runs shorter than soft_min are allowed but penalized
    # via an escape literal appended to the clause.
    for length in range(hard_min, soft_min):
        for start in range(len(works) - length - 1):
            span = negated_span_with_borders(works, start, length)
            lit = model.NewBoolVar('')
            span.append(lit)
            model.AddBoolOr(span)
            cost_literals.append(lit)
            cost_coefficients.append(min_cost)
    # Soft maximum: runs longer than soft_max are allowed up to hard_max,
    # each penalized through its own escape literal.
    for length in range(soft_max + 1, hard_max + 1):
        for start in range(len(works) - length - 1):
            span = negated_span_with_borders(works, start, length)
            lit = model.NewBoolVar('')
            span.append(lit)
            model.AddBoolOr(span)
            cost_literals.append(lit)
            cost_coefficients.append(max_cost)
    # Just forbid any sequence of true variables with length hard_max + 1
    for start in range(len(works) - hard_max - 1):
        model.AddBoolOr(
            [works[i].Not() for i in range(start, start + hard_max + 1)])
    return cost_literals, cost_coefficients
def add_weekly_sum_constraint(model, works, hard_min, soft_min, min_cost,
                              soft_max, hard_max, max_cost):
    """Sum constraint with soft and hard bounds.

    Counts the variables assigned to true in `works`, forbids a sum
    < hard_min or > hard_max (via the variable's domain), and creates
    penalty terms when the sum is < soft_min or > soft_max.

    Returns (cost_variables, cost_coefficients) for the objective.
    """
    cost_variables = []
    cost_coefficients = []
    # The sum variable's domain enforces the hard bounds.
    week_sum = model.NewIntVar(hard_min, hard_max, '')
    model.Add(week_sum == sum(works))
    # Penalize a shortfall below the soft minimum.
    if soft_min > hard_min and min_cost > 0:
        gap = model.NewIntVar(-7, 7, '')
        model.Add(gap == soft_min - week_sum)
        shortfall = model.NewIntVar(0, 7, '')
        model.AddMaxEquality(shortfall, [gap, 0])
        cost_variables.append(shortfall)
        cost_coefficients.append(min_cost)
    # Penalize an excess above the soft maximum.
    if soft_max < hard_max and max_cost > 0:
        gap = model.NewIntVar(-7, 7, '')
        model.Add(gap == week_sum - soft_max)
        overflow = model.NewIntVar(0, 7, '')
        model.AddMaxEquality(overflow, [gap, 0])
        cost_variables.append(overflow)
        cost_coefficients.append(max_cost)
    return cost_variables, cost_coefficients
def main():
"""Solves the shift scheduling problem."""
# Data
num_employees = 8
num_weeks = 3
shifts = ['O', 'M', 'A', 'N']
# Fixed assignment: (employee, day, shift).
# This fixes the first 2 days of the schedule.
fixed_assignments = [
(0, 0, 0),
(1, 0, 0),
(2, 0, 1),
(3, 0, 1),
(4, 0, 2),
(5, 0, 2),
(6, 0, 2),
(7, 0, 3),
(0, 1, 1),
(1, 1, 1),
(2, 1, 2),
(3, 1, 2),
(4, 1, 2),
(5, 1, 0),
(6, 1, 0),
(7, 1, 3),
]
# Request: (employee, day, shift, weight)
positive_requests = [
# Employee 3 wants the first Saturday off.
(3, 5, 0, 2),
# Employee 5 wants a night shift on the second Thursday.
(5, 10, 3, 2)
]
negative_requests = [
# Employee 2 does not want a night shift on the third Friday.
(2, 4, 3, 4)
]
# Shift constraints on continuous sequence :
# (shift , hard_min, soft_min, min_penalty,
# soft_max, hard_max, max_penalty)
shift_constraints = [
# One or two consecutive days of rest, this is a hard constraint.
(0, 1, 1, 0, 2, 2, 0),
# betweem 2 and 3 consecutive days of night shifts, 1 and 4 are
# possible but penalized.
(3, 1, 2, 20, 3, 4, 5),
]
# Weekly sum constraints on shifts days:
# (shift, hard_min, soft_min, min_penalty,
# soft_max, hard_max, max_penalty)
weekly_sum_constraints = [
# Constraints on rests per week.
(0, 1, 2, 7, 2, 3, 4),
# At least 1 night shift per week (penalized). At most 4 (hard).
(3, 0, 1, 3, 4, 4, 0),
]
# Penalized transitions:
# (previous_shift, next_shift, penalty (0 means forbidden))
penalized_transitions = [
# Afternoon to night has a penalty of 4.
(2, 3, 4),
# Night to morning is forbidden.
(3, 1, 0),
]
# daily demands for work shifts (morning, afternon, night) for each day
# of the week starting on Monday.
weekly_cover_demands = [
(2, 3, 1), # Monday
(2, 3, 1), # Tuesday
(2, 2, 2), # Wednesday
(2, 3, 1), # Thursday
(2, 2, 2), # Friday
(1, 2, 3), # Saturday
(1, 3, 1), # Sunday
]
# Penalty for exceeding the cover constraint per shift type.
excess_cover_penalties = (2, 2, 5)
num_days = num_weeks * 7
num_shifts = len(shifts)
# Create model.
model = cp_model.CpModel()
work = {}
for e in range(num_employees):
for s in range(num_shifts):
for d in range(num_days):
work[e, s, d] = model.NewBoolVar('work%i_%i_%i' % (e, s, d))
# Linear terms of the objective in a minimization context.
obj_vars = []
obj_coeffs = []
# Exactly one shift per day.
for e in range(num_employees):
for d in range(num_days):
model.Add(sum(work[e, s, d] for s in range(num_shifts)) == 1)
# Fixed assignments.
for e, d, s in fixed_assignments:
model.Add(work[e, s, d] == 1)
# Employee requests
for e, d, s, w in positive_requests:
obj_vars.append(work[e, s, d])
obj_coeffs.append(-w) # We gain 'w' is the shift is selected.
for e, d, s, w in negative_requests:
obj_vars.append(work[e, s, d])
obj_coeffs.append(w) # We loose 'w' is the shift is selected.
# Shift constraints
for ct in shift_constraints:
shift, hard_min, soft_min, min_cost, soft_max, hard_max, max_cost = ct
for e in range(num_employees):
works = [work[e, shift, d] for d in range(num_days)]
variables, coeffs = add_sequence_constraint(
model, works, hard_min, soft_min, min_cost, soft_max, hard_max,
max_cost)
obj_vars.extend(variables)
obj_coeffs.extend(coeffs)
# Weekly sum constraints
for ct in weekly_sum_constraints:
shift, hard_min, soft_min, min_cost, soft_max, hard_max, max_cost = ct
for e in range(num_employees):
for w in range(num_weeks):
works = [work[e, shift, d + w * 7] for d in range(7)]
variables, coeffs = add_weekly_sum_constraint(
model, works, hard_min, soft_min, min_cost, soft_max,
hard_max, max_cost)
obj_vars.extend(variables)
obj_coeffs.extend(coeffs)
# Penalized transitions
for previous_shift, next_shift, cost in penalized_transitions:
for e in range(num_employees):
for d in range(num_days - 1):
transition = [
work[e, previous_shift, d].Not(),
work[e, next_shift, d + 1].Not()
]
if cost == 0:
model.AddBoolOr(transition)
else:
trans_var = model.NewBoolVar('')
transition.append(trans_var)
model.AddBoolOr(transition)
obj_vars.append(trans_var)
obj_coeffs.append(cost)
# Cover constraints
for s in range(1, num_shifts):
for w in range(num_weeks):
for d in range(7):
works = [work[e, s, w * 7 + d] for e in range(num_employees)]
min_demand = weekly_cover_demands[d][s - 1]
worked = model.NewIntVar(min_demand, num_employees, '')
model.Add(worked == sum(works))
over_penalty = excess_cover_penalties[s - 1]
if over_penalty > 0:
excess = model.NewIntVar(0, num_employees - min_demand, '')
model.Add(excess == worked - min_demand)
obj_vars.append(excess)
obj_coeffs.append(over_penalty)
# Objective
model.Minimize(sum(obj_vars[i] * obj_coeffs[i] for i in range(len(obj_vars))))
# Solve the model.
solver = cp_model.CpSolver()
solver.parameters.num_search_workers = 8
solution_printer = ObjectiveSolutionPrinter()
status = solver.SolveWithSolutionCallback(model, solution_printer)
# Print solution.
if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
print()
header = ' '
for w in range(num_weeks):
header += 'M T W T F S S '
print(header)
for e in range(num_employees):
schedule = ''
for d in range(num_days):
for s in range(num_shifts):
if solver.BooleanValue(work[e, s, d]):
schedule += shifts[s] + ' '
print('worker %i: %s' % (e, schedule))
print()
print('Statistics')
print(' - status : %s' % solver.StatusName(status))
print(' - conflicts : %i' % solver.NumConflicts())
print(' - branches : %i' % solver.NumBranches())
print(' - wall time : %f ms' % solver.WallTime())
print(' - solutions found | |
def module_extensions():
    # NOTE(review): this looks like an h2o code-generation template; the body of
    # this function is presumably spliced into the generated estimator module
    # (see the `extensions` dict below in the original file) — confirm against
    # the h2o-bindings generator.
    class H2OAutoEncoderEstimator(H2ODeepLearningEstimator):
        """
        :examples:

        >>> import h2o as ml
        >>> from h2o.estimators.deeplearning import H2OAutoEncoderEstimator
        >>> ml.init()
        >>> rows = [[1,2,3,4,0]*50, [2,1,2,4,1]*50, [2,1,4,2,1]*50, [0,1,2,34,1]*50, [2,3,4,1,0]*50]
        >>> fr = ml.H2OFrame(rows)
        >>> fr[4] = fr[4].asfactor()
        >>> model = H2OAutoEncoderEstimator()
        >>> model.train(x=range(4), training_frame=fr)
        """

        def __init__(self, **kwargs):
            super(H2OAutoEncoderEstimator, self).__init__(**kwargs)
            # An autoencoder is a deep-learning estimator with this flag forced on.
            self._parms['autoencoder'] = True
# Code-generation hooks: __module__ points at the function whose body supplies
# extra module-level definitions for the generated estimator module.
extensions = dict(
    __module__=module_extensions
)
# Per-parameter setter overrides. The bodies are templates: {pname} / {sname}
# are substituted with the parameter/schema names when the code is generated.
# Both overrides restrict the accepted value to an H2OFrame (or None).
overrides = dict(
    initial_biases=dict(
        setter="""
assert_is_type({pname}, None, [H2OFrame, None])
self._parms["{sname}"] = {pname}
"""
    ),
    initial_weights=dict(
        setter="""
assert_is_type({pname}, None, [H2OFrame, None])
self._parms["{sname}"] = {pname}
"""
    ),
)
# Class-level docstring text inserted into the generated estimator class.
doc = dict(
    __class__="""
Build a Deep Neural Network model using CPUs
Builds a feed-forward multilayer artificial neural network on an H2OFrame
"""
)
examples = dict(
__class__="""
>>> from h2o.estimators.deeplearning import H2ODeepLearningEstimator
>>> rows = [[1,2,3,4,0], [2,1,2,4,1], [2,1,4,2,1],
... [0,1,2,34,1], [2,3,4,1,0]] * 50
>>> fr = h2o.H2OFrame(rows)
>>> fr[4] = fr[4].asfactor()
>>> model = H2ODeepLearningEstimator()
>>> model.train(x = range(4), y = 4, training_frame = fr)
>>> model.logloss()
""",
activation="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> cars_dl = H2ODeepLearningEstimator(activation = "tanh")
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
adaptive_rate="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> cars_dl = H2ODeepLearningEstimator(adaptive_rate = True)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
autoencoder="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> cars_dl = H2ODeepLearningEstimator(autoencoder = True)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
average_activation="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> cars_dl = H2ODeepLearningEstimator(average_activation = 1.5,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
balance_classes="""
>>> covtype = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/covtype/covtype.20k.data")
>>> covtype[54] = covtype[54].asfactor()
>>> predictors = covtype.columns[0:54]
>>> response = 'C55'
>>> train, valid = covtype.split_frame(ratios = [.8], seed = 1234)
>>> cov_dl = H2ODeepLearningEstimator(balance_classes = True,
... seed = 1234)
>>> cov_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cov_dl.mse()
""",
categorical_encoding="""
>>> airlines= h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip")
>>> airlines["Year"]= airlines["Year"].asfactor()
>>> airlines["Month"]= airlines["Month"].asfactor()
>>> airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor()
>>> airlines["Cancelled"] = airlines["Cancelled"].asfactor()
>>> airlines['FlightNum'] = airlines['FlightNum'].asfactor()
>>> predictors = ["Origin", "Dest", "Year", "UniqueCarrier",
... "DayOfWeek", "Month", "Distance", "FlightNum"]
>>> response = "IsDepDelayed"
>>> train, valid= airlines.split_frame(ratios = [.8], seed = 1234)
>>> encoding = "one_hot_internal"
>>> airlines_dl = H2ODeepLearningEstimator(categorical_encoding = encoding,
... seed = 1234)
>>> airlines_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> airlines_dl.mse()
""",
checkpoint="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(activation="tanh",
... autoencoder=True,
... seed = 1234,
... model_id="cars_dl")
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
>>> cars_cont = H2ODeepLearningEstimator(checkpoint = cars_dl,
... seed = 1234)
>>> cars_cont.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_cont.mse()
""",
class_sampling_factors="""
>>> covtype = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/covtype/covtype.20k.data")
>>> covtype[54] = covtype[54].asfactor()
>>> predictors = covtype.columns[0:54]
>>> response = 'C55'
>>> train, valid = covtype.split_frame(ratios = [.8], seed = 1234)
>>> sample_factors = [1., 0.5, 1., 1., 1., 1., 1.]
>>> cars_dl = H2ODeepLearningEstimator(balance_classes = True,
... class_sampling_factors = sample_factors,
... seed = 1234)
>>> cov_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cov_dl.mse()
""",
classification_stop="""
>>> covtype = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/covtype/covtype.20k.data")
>>> covtype[54] = covtype[54].asfactor()
>>> predictors = covtype.columns[0:54]
>>> response = 'C55'
>>> train, valid = covtype.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(classification_stop = 1.5,
... seed = 1234)
>>> cov_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cov_dl.mse()
""",
diagnostics="""
>>> covtype = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/covtype/covtype.20k.data")
>>> covtype[54] = covtype[54].asfactor()
>>> predictors = covtype.columns[0:54]
>>> response = 'C55'
>>> train, valid = covtype.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(diagnostics = True,
... seed = 1234)
>>> cov_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cov_dl.mse()
""",
distribution="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(distribution = "poisson",
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
elastic_averaging="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(elastic_averaging = True,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
elastic_averaging_moving_rate="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(elastic_averaging_moving_rate = .8,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
elastic_averaging_regularization="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(elastic_averaging_regularization = .008,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
epochs="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(epochs = 15,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
epsilon="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(epsilon = 1e-6,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
export_checkpoints_dir="""
>>> import tempfile
>>> from os import listdir
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> checkpoints_dir = tempfile.mkdtemp()
>>> cars_dl = H2ODeepLearningEstimator(export_checkpoints_dir=checkpoints_dir,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> len(listdir(checkpoints_dir))
""",
export_weights_and_biases="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(export_weights_and_biases = True,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
fast_mode="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(fast_mode = False,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
fold_assignment="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(fold_assignment = "Random",
... nfolds = 5,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
fold_column="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> fold_numbers = cars.kfold_column(n_folds = 5, seed = 1234)
>>> fold_numbers.set_names(["fold_numbers"])
>>> cars = cars.cbind(fold_numbers)
>>> print(cars['fold_numbers'])
>>> cars_dl = H2ODeepLearningEstimator(seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = cars,
... fold_column = "fold_numbers")
>>> cars_dl.mse()
""",
force_load_balance="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(force_load_balance = False,
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
hidden="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "cylinders"
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(hidden = [100,100],
... seed = 1234)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.mse()
""",
hidden_dropout_ratios="""
>>> train = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/mnist/train.csv.gz")
>>> valid = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/mnist/test.csv.gz")
>>> features = list(range(0,784))
>>> target = 784
>>> train[target] = train[target].asfactor()
>>> valid[target] = valid[target].asfactor()
>>> model = H2ODeepLearningEstimator(epochs = 20,
... hidden = [200,200],
... hidden_dropout_ratios = [0.5,0.5],
... seed = 1234,
... activation='tanhwithdropout')
>>> model.train(x = features,
... y = target,
... training_frame = train,
... validation_frame = valid)
>>> model.mse()
""",
huber_alpha="""
>>> insurance = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/insurance.csv")
>>> predictors = insurance.columns[0:4]
>>> response = 'Claims'
>>> insurance['Group'] = insurance['Group'].asfactor()
>>> insurance['Age'] = insurance['Age'].asfactor()
>>> train, valid = insurance.split_frame(ratios = [.8], seed = 1234)
>>> insurance_dl = H2ODeepLearningEstimator(distribution = "huber",
... huber_alpha = 0.9,
... seed = 1234)
>>> insurance_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> insurance_dl.mse()
""",
ignore_const_cols="""
>>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv")
>>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
>>> predictors = ["displacement","power","weight","acceleration","year"]
>>> response = "economy_20mpg"
>>> cars["const_1"] = 6
>>> cars["const_2"] = 7
>>> train, valid = cars.split_frame(ratios = [.8], seed = 1234)
>>> cars_dl = H2ODeepLearningEstimator(seed = 1234,
... ignore_const_cols = True)
>>> cars_dl.train(x = predictors,
... y = response,
... training_frame = train,
... validation_frame = valid)
>>> cars_dl.auc()
""",
initial_biases="""
>>> | |
api calls.
"""
if not is_enum(family, DeviceFamily):
raise ValueError('family Parameter must be of type int, str or DeviceFamily enumeration.')
family = decode_enum(family, DeviceFamily)
family = ctypes.c_int(family)
result = self._lib.NRFJPROG_select_family_inst(self._handle, family)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def is_coprocessor_enabled(self, coprocessor):
"""
Checks if chosen coprocessor is enabled.
@param CoProcessor coprocessor: Target coprocessor for connect_to_device() call.
@return bool: True if held in reset.
"""
if not is_enum(coprocessor, CoProcessor):
raise ValueError('CoProcessor Parameter must be of type int, str or CoProcessor enumeration.')
coprocessor = decode_enum(coprocessor, CoProcessor)
coprocessor = ctypes.c_int(coprocessor)
held = ctypes.c_bool()
result = self._lib.NRFJPROG_is_coprocessor_enabled_inst(self._handle, coprocessor, ctypes.byref(held))
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
return held.value
def enable_coprocessor(self, coprocessor):
"""
Enables chosen coprocessor
@param CoProcessor coprocessor: Target coprocessor for connect_to_device() call.
"""
if not is_enum(coprocessor, CoProcessor):
raise ValueError('CoProcessor Parameter must be of type int, str or CoProcessor enumeration.')
coprocessor = decode_enum(coprocessor, CoProcessor)
coprocessor = ctypes.c_int(coprocessor)
result = self._lib.NRFJPROG_enable_coprocessor_inst(self._handle, coprocessor)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def disable_coprocessor(self, coprocessor):
"""
Disables chosen coprocessor
@param CoProcessor coprocessor: Target coprocessor for connect_to_device() call.
"""
if not is_enum(coprocessor, CoProcessor):
raise ValueError('CoProcessor Parameter must be of type int, str or CoProcessor enumeration.')
coprocessor = decode_enum(coprocessor, CoProcessor)
coprocessor = ctypes.c_int(coprocessor)
result = self._lib.NRFJPROG_disable_coprocessor_inst(self._handle, coprocessor)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def select_coprocessor(self, coprocessor):
"""
Select coprocessor
@param CoProcessor coprocessor: Target coprocessor for connect_to_device() call.
"""
if not is_enum(coprocessor, CoProcessor):
raise ValueError('CoProcessor Parameter must be of type int, str or CoProcessor enumeration.')
coprocessor = decode_enum(coprocessor, CoProcessor)
coprocessor = ctypes.c_int(coprocessor)
result = self._lib.NRFJPROG_select_coprocessor_inst(self._handle, coprocessor)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def recover(self):
"""
Recovers the device.
"""
result = self._lib.NRFJPROG_recover_inst(self._handle)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def is_connected_to_device(self):
"""
Checks if the connected emulator has an established connection with an nRF device.
@return boolean: True if connected.
"""
is_connected_to_device = ctypes.c_bool()
result = self._lib.NRFJPROG_is_connected_to_device_inst(self._handle, ctypes.byref(is_connected_to_device))
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
return is_connected_to_device.value
def connect_to_device(self):
"""
Connects to the nRF device.
"""
result = self._lib.NRFJPROG_connect_to_device_inst(self._handle)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def disconnect_from_device(self):
"""
Disconnects from the device.
"""
result = self._lib.NRFJPROG_disconnect_from_device_inst(self._handle)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def readback_protect(self, desired_protection_level):
"""
Protects the device against read or debug.
@param int, str, or ReadbackProtection(IntEnum) desired_protection_level: Readback protection level for the target.
"""
if not is_enum(desired_protection_level, ReadbackProtection):
raise ValueError(
'Parameter desired_protection_level must be of type int, str or ReadbackProtection enumeration.')
desired_protection_level = decode_enum(desired_protection_level, ReadbackProtection)
if desired_protection_level is None:
raise ValueError(
'Parameter desired_protection_level must be of type int, str or ReadbackProtection enumeration.')
desired_protection_level = ctypes.c_int(desired_protection_level.value)
result = self._lib.NRFJPROG_readback_protect_inst(self._handle, desired_protection_level)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def readback_status(self):
"""
Returns the status of the readback protection.
@return str: Readback protection level of the target.
"""
status = ctypes.c_int()
result = self._lib.NRFJPROG_readback_status_inst(self._handle, ctypes.byref(status))
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
return ReadbackProtection(status.value).name
def enable_eraseprotect(self):
"""
Protects the device against erasing.
"""
result = self._lib.NRFJPROG_enable_eraseprotect_inst(self._handle)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def is_eraseprotect_enabled(self):
"""
Returns the status of the erase protection.
@return bool: True if erase protection is enabled.
"""
status = ctypes.c_bool()
result = self._lib.NRFJPROG_is_eraseprotect_enabled_inst(self._handle, ctypes.byref(status))
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
return status.value
def read_region_0_size_and_source(self):
"""
Returns the region 0 size and source of protection if any for nRF51 devices.
@return (int, str): Region size and configuration source of protection (either UICR of FICR).
"""
size = ctypes.c_uint32()
source = ctypes.c_int()
result = self._lib.NRFJPROG_read_region_0_size_and_source_inst(self._handle, ctypes.byref(size), ctypes.byref(source))
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
return size.value, Region0Source(source.value).name
def debug_reset(self):
"""
Executes a soft reset using the CTRL-AP for nRF52 and onward devices.
"""
result = self._lib.NRFJPROG_debug_reset_inst(self._handle)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def sys_reset(self):
"""
Executes a system reset request.
"""
result = self._lib.NRFJPROG_sys_reset_inst(self._handle)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def pin_reset(self):
"""
Executes a pin reset. If your device has a configurable pin reset, in order for the function execution to have the desired effect the pin reset must be enabled in UICR.PSELRESET[] registers.
"""
result = self._lib.NRFJPROG_pin_reset_inst(self._handle)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def is_bprot_enabled(self, address_start, length):
"""
Detects if memory block protection is enabled.
Any memory covered by the range [address_start : address_start + length] that is not non-volatile code memory is assumed not block protected.
@param int address_start: Query address range start.
@param int length: Query address range length.
@return boolean: True if bprot is enabled for address range.
"""
if not is_u32(address_start):
raise ValueError('The address_start parameter must be an unsigned 32-bit value.')
if not is_u32(length):
raise ValueError('The length parameter must be an unsigned 32-bit value.')
address_start = ctypes.c_uint32(address_start)
length = ctypes.c_uint32(length)
bprot_enabled = ctypes.c_bool(False)
result = self._lib.NRFJPROG_is_bprot_enabled_inst(self._handle, ctypes.byref(bprot_enabled), address_start, length)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
return bprot_enabled.value
def disable_bprot(self):
"""
Disables BPROT, ACL or NVM protection blocks where appropriate depending on device.
"""
result = self._lib.NRFJPROG_disable_bprot_inst(self._handle)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def erase_all(self):
"""
Erases all code and UICR flash.
"""
result = self._lib.NRFJPROG_erase_all_inst(self._handle)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def erase_page(self, addr):
"""
Erases a page of code flash which contains address addr.
@param int addr: Address in the page of code flash to erase.
"""
if not is_u32(addr):
raise ValueError('The addr parameter must be an unsigned 32-bit value.')
addr = ctypes.c_uint32(addr)
result = self._lib.NRFJPROG_erase_page_inst(self._handle, addr)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def erase_uicr(self):
"""
Erases UICR info page.
"""
result = self._lib.NRFJPROG_erase_uicr_inst(self._handle)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def write_u32(self, addr, data, control):
"""
Writes one uint32_t data into the given address.
@param int addr: Address to write.
@param int data: Value to write.
@param boolean control: True for automatic control of NVMC by the function.
"""
if not is_u32(addr):
raise ValueError('The addr parameter must be an unsigned 32-bit value.')
if not is_u32(data):
raise ValueError('The data parameter must be an unsigned 32-bit value.')
if not is_bool(control):
raise ValueError('The control parameter must be a boolean value.')
addr = ctypes.c_uint32(addr)
data = ctypes.c_uint32(data)
control = ctypes.c_bool(control)
result = self._lib.NRFJPROG_write_u32_inst(self._handle, addr, data, control)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def read_u32(self, addr):
"""
Reads one uint32_t from the given address.
@param int addr: Address to read.
@return int: Value read.
"""
if not is_u32(addr):
raise ValueError('The addr parameter must be an unsigned 32-bit value.')
addr = ctypes.c_uint32(addr)
data = ctypes.c_uint32()
result = self._lib.NRFJPROG_read_u32_inst(self._handle, addr, ctypes.byref(data))
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
return data.value
def write(self, addr, data, control):
"""
Writes data from the array into the device starting at the given address.
@param int addr: Start address of the memory block to write.
@param sequence data: Data to write. Any type that implements the sequence API (i.e. string, list, bytearray...) is valid as input.
@param boolean control: True for automatic control of NVMC by the function.
"""
if not is_u32(addr):
raise ValueError('The addr parameter must be an unsigned 32-bit value.')
if not is_valid_buf(data):
raise ValueError('The data parameter must be a sequence type with at least one item.')
if not is_bool(control):
raise ValueError('The control parameter must be a boolean value.')
addr = ctypes.c_uint32(addr)
data_len = ctypes.c_uint32(len(data))
data = (ctypes.c_uint8 * data_len.value)(*data)
control = ctypes.c_bool(control)
result = self._lib.NRFJPROG_write_inst(self._handle, addr, ctypes.byref(data), data_len, control)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def read(self, addr, data_len):
"""
Reads data_len bytes from the device starting at the given address.
@param int addr: Start address of the memory block to read.
@param int data_len: Number of bytes to read.
@return [int]: List of values read.
"""
if not is_u32(addr):
raise ValueError('The addr parameter must be an unsigned 32-bit value.')
if not is_u32(data_len):
raise ValueError('The data_len parameter must be an unsigned 32-bit value.')
addr = ctypes.c_uint32(addr)
data_len = ctypes.c_uint32(data_len)
data = (ctypes.c_uint8 * data_len.value)()
result = self._lib.NRFJPROG_read_inst(self._handle, addr, ctypes.byref(data), data_len)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
return list(data)
def is_halted(self):
"""
Checks if the device CPU is halted.
@return boolean: True if halted.
"""
is_halted | |
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
"""Module for applying calibration solutions to visibility data, both in memory and on disk."""
import numpy as np
import argparse
import copy
import warnings
from . import io
from . import version
from . import utils
from . import redcal
import pyuvdata.utils as uvutils
from pyuvdata import UVData
def _check_polarization_consistency(data, gains):
    '''Raise a KeyError when data and gain keys use disjoint polarization conventions.

    This function raises an error if all the gain keys are cardinal but none of the data
    keys are cardinal (e/n rather than x/y), or vice versa. In the mixed case, or if
    either container is empty, no errors are raised.
    '''
    if (len(data) > 0) and (len(gains) > 0):
        data_keys_cardinal = [utils._is_cardinal(bl[2]) for bl in data.keys()]
        gain_keys_cardinal = [utils._is_cardinal(ant[1]) for ant in gains.keys()]
        # Builtin all()/any() are equivalent to np.all()/np.any() on these plain
        # lists of Python bools and avoid the numpy round-trip.
        if all(data_keys_cardinal) and not any(gain_keys_cardinal):
            raise KeyError("All the data keys are cardinal (e.g. 'nn' or 'ee'), but none of the gain keys are.")
        elif all(gain_keys_cardinal) and not any(data_keys_cardinal):
            raise KeyError("All the gain keys are cardinal (e.g. 'Jnn' or 'Jee'), but none of the data keys are.")
def calibrate_redundant_solution(data, data_flags, new_gains, new_flags, all_reds,
                                 old_gains=None, old_flags=None, gain_convention='divide'):
    '''Update the calibration of a redundant visibility solution (or redundantly averaged visibilities).

    This function averages together all gain ratios (old/new) within a redundant group (which should
    ideally all be the same) to figure out the proper gain to apply/unapply to the visibilities. If all
    gain ratios are flagged for a given time/frequency within a redundant group, the data_flags are
    updated. Typical use is to use absolute/smooth_calibrated gains as new_gains, omnical gains as
    old_gains, and omnical visibility solutions as data.

    NOTE: in addition to modifying data and data_flags in place, this function also mutates the
    new_gains/new_flags/old_gains/old_flags dictionaries in place, inserting unity gains with
    all-True flags for any antenna that appears in all_reds but is missing from those dicts.

    Arguments:
        data: DataContainer containing baseline-pol complex visibility data. This is modified in place.
        data_flags: DataContainer containing data flags. They are updated based on the flags of the
            calibration solutions.
        new_gains: Dictionary of complex calibration gains to apply with keys like (1,'Jnn')
        new_flags: Dictionary with keys like (1,'Jnn') of per-antenna boolean flags to update data_flags
            if either antenna in a visibility is flagged. Must have all keys in new_gains.
        all_reds: list of lists of redundant baseline tuples, e.g. (0,1,'nn'). Must be a superset of
            the reds used for producing cal
        old_gains: Dictionary of complex calibration gains to take out with keys like (1,'Jnn').
            Default of None implies means that the "old" gains are all 1s. Must be either None or
            have all the same keys as new_gains.
        old_flags: Dictionary with keys like (1,'Jnn') of per-antenna boolean flags to update data_flags
            if either antenna in a visibility is flagged. Default of None all old_gains are unflagged.
            Must be either None or have all the same keys as new_flags.
        gain_convention: str, either 'divide' or 'multiply'. 'divide' means V_obs = gi gj* V_true,
            'multiply' means V_true = gi gj* V_obs. Assumed to be the same for new_gains and old_gains.
    '''
    _check_polarization_consistency(data, new_gains)
    _check_polarization_consistency(data_flags, new_flags)
    exponent = {'divide': 1, 'multiply': -1}[gain_convention]
    # default: "old" calibration is all unity gains with nothing flagged
    if old_gains is None:
        old_gains = {ant: np.ones_like(new_gains[ant]) for ant in new_gains}
    if old_flags is None:
        old_flags = {ant: np.zeros_like(new_flags[ant]) for ant in new_flags}
    # assert that all antennas in new_gains are also in new_flags, old_gains, and old_flags
    assert np.all([ant in new_flags for ant in new_gains])
    assert np.all([ant in old_gains for ant in new_gains])
    assert np.all([ant in old_flags for ant in new_gains])
    for red in all_reds:
        # skip if there's nothing to calibrate
        if np.all([bl not in data for bl in red]):
            continue
        # Fill in missing antennas with unity gains that are fully flagged
        for bl in red:
            for ant in utils.split_bl(bl):
                if ant not in new_gains:
                    new_gains[ant] = np.ones_like(list(new_gains.values())[0])
                    new_flags[ant] = np.ones_like(list(new_flags.values())[0])
                if ant not in old_gains:
                    old_gains[ant] = np.ones_like(list(old_gains.values())[0])
                    old_flags[ant] = np.ones_like(list(old_flags.values())[0])
        # Compute all gain ratios (old/new, conjugated for the second antenna) within a redundant group
        gain_ratios = [old_gains[i, utils.split_pol(pol)[0]] * np.conj(old_gains[j, utils.split_pol(pol)[1]])
                       / new_gains[i, utils.split_pol(pol)[0]] / np.conj(new_gains[j, utils.split_pol(pol)[1]])
                       for (i, j, pol) in red]
        # Set flagged values to np.nan for those gain ratios so nanmean ignores them
        for n, bl in enumerate(red):
            ant1, ant2 = utils.split_bl(bl)
            gain_ratios[n][new_flags[ant1] | new_flags[ant2] | old_flags[ant1] | old_flags[ant2]] = np.nan
        # Average gain ratios using np.nanmean; all-NaN pixels become NaN -> flagged below
        avg_gains = np.nanmean(gain_ratios, axis=0)
        avg_flags = ~np.isfinite(avg_gains)
        avg_gains[avg_flags] = 1
        # Apply average gain ratios and update flags
        for bl in red:
            if bl in data:
                data_flags[bl] |= avg_flags
                data[bl] *= avg_gains**exponent
def calibrate_in_place(data, new_gains, data_flags=None, cal_flags=None, old_gains=None,
                       gain_convention='divide', flags_are_wgts=False):
    '''Update data and data_flags in place, taking out old calibration solutions, putting in new calibration
    solutions, and updating flags from those calibration solutions. Previously flagged data is modified, but
    left flagged. Missing antennas from either the new gains, the cal_flags, or (if it's not None) the old
    gains are automatically flagged in the data's visibilities that involves those antennas.

    Arguments:
        data: DataContainer containing baseline-pol complex visibility data. This is modified in place.
        new_gains: Dictionary of complex calibration gains to apply with keys like (1,'Jnn')
        data_flags: DataContainer containing data flags. This is modified in place if its not None.
        cal_flags: Dictionary with keys like (1,'Jnn') of per-antenna boolean flags to update data_flags
            if either antenna in a visibility is flagged. Any missing antennas are assumed to be totally
            flagged, so leaving this as None will result in input data_flags becoming totally flagged.
        old_gains: Dictionary of complex calibration gains to take out with keys like (1,'Jnn').
            Default of None implies that the data is raw (i.e. uncalibrated).
        gain_convention: str, either 'divide' or 'multiply'. 'divide' means V_obs = gi gj* V_true,
            'multiply' means V_true = gi gj* V_obs. Assumed to be the same for new_gains and old_gains.
        flags_are_wgts: bool, if True, treat data_flags as weights where 0s represent flags and
            non-zero weights are unflagged data.
    '''
    _check_polarization_consistency(data, new_gains)
    exponent = {'divide': 1, 'multiply': -1}[gain_convention]
    # loop over baselines in data
    for (i, j, pol) in data.keys():
        ap1, ap2 = utils.split_pol(pol)
        flag_all = False
        # apply new gains for antennas i and j. If either is missing, flag the whole baseline
        try:
            data[(i, j, pol)] /= (new_gains[(i, ap1)])**exponent
        except KeyError:
            flag_all = True
        try:
            data[(i, j, pol)] /= np.conj(new_gains[(j, ap2)])**exponent
        except KeyError:
            flag_all = True
        # unapply old gains for antennas i and j. If either is missing, flag the whole baseline
        if old_gains is not None:
            try:
                data[(i, j, pol)] *= (old_gains[(i, ap1)])**exponent
            except KeyError:
                flag_all = True
            try:
                data[(i, j, pol)] *= np.conj(old_gains[(j, ap2)])**exponent
            except KeyError:
                flag_all = True
        if data_flags is not None:
            # update data_flags in the case where flags are weights, flag all if cal_flags are missing.
            # TypeError is also caught so that cal_flags=None results in total flagging, as documented,
            # rather than an uncaught exception.
            if flags_are_wgts:
                try:
                    # NOTE: np.float/np.bool aliases were removed in NumPy 1.20+/1.24;
                    # use the builtin float/bool instead (same dtypes).
                    data_flags[(i, j, pol)] *= (~cal_flags[(i, ap1)]).astype(float)
                    data_flags[(i, j, pol)] *= (~cal_flags[(j, ap2)]).astype(float)
                except (KeyError, TypeError):
                    flag_all = True
            # update data_flags in the case where flags are booleans, flag all if cal_flags are missing
            else:
                try:
                    data_flags[(i, j, pol)] += cal_flags[(i, ap1)]
                    data_flags[(i, j, pol)] += cal_flags[(j, ap2)]
                except (KeyError, TypeError):
                    flag_all = True
            # if the flag object is given, update it for this baseline to be totally flagged
            if flag_all:
                if flags_are_wgts:
                    data_flags[(i, j, pol)] = np.zeros_like(data[(i, j, pol)], dtype=float)
                else:
                    data_flags[(i, j, pol)] = np.ones_like(data[(i, j, pol)], dtype=bool)
def apply_cal(data_infilename, data_outfilename, new_calibration, old_calibration=None, flag_file=None,
flag_filetype='h5', a_priori_flags_yaml=None, flag_nchan_low=0, flag_nchan_high=0, filetype_in='uvh5', filetype_out='uvh5',
nbl_per_load=None, gain_convention='divide', redundant_solution=False, bl_error_tol=1.0,
add_to_history='', clobber=False, redundant_average=False, redundant_weights=None,
freq_atol=1., redundant_groups=1, dont_red_average_flagged_data=False, spw_range=None,
exclude_from_redundant_mode="data", vis_units=None, **kwargs):
'''Update the calibration solution and flags on the data, writing to a new file. Takes out old calibration
and puts in new calibration solution, including its flags. Also enables appending to history.
Arguments:
data_infilename: filename of the data to be calibrated.
data_outfilename: filename of the resultant data file with the new calibration and flags.
new_calibration: filename of the calfits file (or a list of filenames) for the calibration
to be applied, along with its new flags (if any).
old_calibration: filename of the calfits file (or a list of filenames) for the calibration
to be unapplied. Default None means that the input data is raw (i.e. uncalibrated).
flag_file: optional path to file containing flags to be ORed with flags in input data. Must have
the same shape as the data.
flag_filetype: filetype of flag_file to pass into io.load_flags. Either | |
<reponame>CareerVillage/slack-moderation
# -*- coding: utf-8 -*-
from datetime import datetime
import json
import pprint
import re
import requests
import traceback
from django.http import HttpResponse
from accounts.models import AuthToken
from moderations.models import Moderation, ModerationAction
from moderations.utils import timedelta_to_str
from .tasks import async_get_request
pp = pprint.PrettyPrinter(indent=4)

# TODO: move this ProTip logic into a separate class so it can be overridden.
# Yes/no quality questions shown to moderators for each approved answer
# (rendered as interactive attachments in #approved-advice).
PRO_TIP_QUESTIONS = [
    "ProTip #1: Is this answer direct?",
    "ProTip #2: Is this answer comprehensive?",
    "ProTip #3: Does this answer use facts?",
    "ProTip #4: Does this answer tell a personal story?",
    "ProTip #5: Does this answer recommend next steps?",
    "ProTip #6: Does this answer cite sources?",
    "ProTip #7: Does this answer strike the right tone?",
    "ProTip #8: Does this answer use proper grammar, formatting, and structure?",
    "ProTip #9: Does this answer anticipate the student’s needs?",
    "ProTip #10: Is this answer concise?",
]
# Presumably the minimum number of 'yes' ProTip marks for an answer to count
# as "best of the village" -- TODO confirm against the code that reads it.
BEST_OF_THE_VILLAGE_THRESHOLD = 9
# Title of the summary attachment that tracks the running yes-count.
SUMMARY_TITLE = "ProTip Rating:"
class SlackSdk(object):
@staticmethod
def get_channel_data(channel):
auth_token_object = AuthToken.objects.filter(
service_name='slack', service_entity_auth_name=channel
).first()
if auth_token_object:
channel_id = auth_token_object.service_entity_auth_id
token = auth_token_object.service_auth_token
return token, channel_id
else:
return None, None
@staticmethod
def get_all_messages_of_channel(channel):
token, channel_id = SlackSdk.get_channel_data(channel)
url='https://slack.com/api/conversations.history'
params = {
'token': token,
'channel': channel_id,
}
response = async_get_request(url, params)
return response.json()['messages']
@staticmethod
def post_moderation(text):
attachments = [
{
'fallback': "Moderator actions",
'callback_id': 'mod-inbox',
'attachment_type': 'default',
'actions': [
{
'name': 'approve',
'text': "Approve",
'type': 'button',
'value': 'approve',
'style': 'primary'
},
{
'name': 'reject',
'text': "Reject",
'type': 'button',
'value': 'reject'
}
]
}
]
token, channel_id = SlackSdk.get_channel_data('#mod-inbox')
if channel_id:
response = SlackSdk.create_message(token,
channel_id, text, attachments)
return response.json()
else:
data = {
'success': False,
'message': "{} is not a valid channel or "
"was not previously authorized".format(channel_id)
}
return data
@staticmethod
def post_leaderboard(leaderboard):
"""
leaderboard = [
{'@jared': 12,345},
]
"""
token, channel_id = SlackSdk.get_channel_data('#mod-leaderboard')
def post_leaderboard_on_slack(leaderboard, title, text=''):
if title == 'All Time':
text += (
'```\n'
'ALL TIME LEADERBOARD\n')
else:
text += (
'```\n'
'LAST WEEK LEADERBOARD\n')
text += (
'┌----------------------┬----------------------┐\n'
'│ {0: <20} | {1: <20} │\n'
).format('Mod', title)
sorted_leaderboard = sorted(leaderboard.items(),
key=lambda x: x[1],
reverse=True)
count = 0
for k, v in sorted_leaderboard:
if k and k != 'ModBot':
text += '├----------------------┼----------------------┤\n'
text += '│ {0: <20} │ {1: <20} │\n'.format(k, v)
# Divide the table in multiple messages because it fails if the text/table is too long
count += 1
if count >= 20:
text += '```\n'
SlackSdk.create_message(token, channel_id,
text, [], in_channel=True, async=True)
count = 0
text = '```\n'
text += '└----------------------┴----------------------┘\n'
text += '```\n'
return SlackSdk.create_message(token, channel_id,
text, [], in_channel=True, async=True)
# Post on slack both tables
post_leaderboard_on_slack(leaderboard['all_time'],
'All Time',
"LEADERBOARD as of {date}\n".format(date=datetime.utcnow())
)
post_leaderboard_on_slack(leaderboard['seven_days'],
'Last 7 Days'
)
def avg(a, b):
if b > 0.0:
return a/float(b) * 100.0
else:
return 0
# Post on slack both reports
text = 'MOD TEAM SPEED REPORT AS OF {} UTC\n'.format(datetime.utcnow())
text += '```\n'
text += 'Average time to first mod review (all-time): %s over %i pieces of content\n' \
% (timedelta_to_str(leaderboard['avg']['all_time']['review'][0]),
leaderboard['avg']['all_time']['review'][2])
text += '90th Percentile time to first mod review (all-time): %s over %i pieces of content\n' \
% (timedelta_to_str(leaderboard['avg']['all_time']['review'][1]),
leaderboard['avg']['all_time']['review'][2])
text += 'Average time to first mod review (last 7 days): %s over %i pieces of content\n' \
% (timedelta_to_str(leaderboard['avg']['seven_days']['review'][0]),
leaderboard['avg']['seven_days']['review'][2])
text += '90th Percentile time to first mod review (last 7 days): %s over %i pieces of content\n' \
% (timedelta_to_str(leaderboard['avg']['seven_days']['review'][1]),
leaderboard['avg']['seven_days']['review'][2])
text += 'Average time to first mod resolution (all-time): %s over %i pieces of content\n' \
% (timedelta_to_str(leaderboard['avg']['all_time']['resolution'][0]),
leaderboard['avg']['all_time']['resolution'][1])
text += 'Average time to first mod resolution (last 7 days): %s over %i pieces of content\n' \
% (timedelta_to_str(leaderboard['avg']['seven_days']['resolution'][0]),
leaderboard['avg']['seven_days']['resolution'][1])
text += 'The oldest unmoderated piece of content is from: %s\n' % (leaderboard['last_unmoderated_content_date'])
text += '```\n'
text += 'CONTENT QUALITY REPORT AS OF {} UTC\n'.format(datetime.utcnow())
counts = leaderboard['counts']
text += '```\n'
text += 'Past 7 days content: %i\n' \
% counts['total']
text += 'Past 7 days flagged by mods: %i (%.2f%%)\n' \
% (counts['total_flagged'],
avg(counts['total_flagged'], counts['total']))
text += 'Reason: Off topic: %i (%.2f%% of flags)\n' \
% (counts['off_topic'],
avg(counts['off_topic'], counts['total_flagged']))
text += 'Reason: Inappropriate: %i (%.2f%% of flags)\n' \
% (counts['inappropriate'],
avg(counts['inappropriate'], counts['total_flagged']))
text += 'Reason: Contact info: %i (%.2f%% of flags)\n' \
% (counts['contact_info'],
avg(counts['contact_info'], counts['total_flagged']))
text += 'Reason: Other: %i (%.2f%% of flags)\n' \
% (counts['other'],
avg(counts['other'], counts['total_flagged']))
text += '```\n'
return SlackSdk.create_message(token, channel_id,
text, [], in_channel=True, async=True)
@staticmethod
def create_message(access_token, channel_id,
text='', attachments=[], in_channel=False, async=False):
try:
is_image = False
if 'https://res.cloudinary.com/' in text:
is_image = True
if len(text) >= 3500:
search_text = re.findall(
'^(.* posted the) <(https://.*)\|(.*)>.*:\n',
text
)
if search_text:
new_content_text = search_text[0][0]
link = search_text[0][1]
new_content_type = search_text[0][2]
text = '%s %s. WARNING: this content cannot be displayed, ' \
'please read the complete content <%s|HERE>' \
% (new_content_text, new_content_type, link)
params = {
'token': access_token,
'channel': channel_id,
'text': text,
'attachments': json.dumps(attachments),
'unfurl_links': False,
'unfurl_media': is_image,
}
if in_channel:
params['response_type'] = 'in_channel'
if not async:
return requests.get(url='https://slack.com/api/chat.postMessage', params=params)
else:
return async_get_request(url='https://slack.com/api/chat.postMessage', params=params)
except:
print traceback.format_exe()
@staticmethod
def delete_message(access_token, channel_id, ts):
return async_get_request(url='https://slack.com/api/chat.delete',
params={
'token': access_token,
'ts': ts,
'channel': channel_id,
})
@staticmethod
def update_message(access_token, channel_id, ts,
text='', attachments=[]):
return async_get_request(url='https://slack.com/api/chat.update',
params={
'token': access_token,
'ts': ts,
'channel': channel_id,
'text': text,
'attachments': json.dumps(attachments),
'parse': 'none',
})
def is_answer(text):
    """Return True if *text* announces a posted answer (it must contain all
    three marker phrases used in answer notifications)."""
    markers = ('posted the', 'answer', 'in response to')
    return all(marker in text for marker in markers)
def mod_inbox_approved(data, moderation):
    """Handle an 'approve' button press from the #mod-inbox channel.

    Reposts the approved content into #mod-approved (with an attachment
    recording who approved it and when), records the moderation action,
    deletes the original message from #mod-inbox, and -- if the content is
    an answer -- forwards it into the #approved-advice ProTip review flow.

    data: Slack interactive-message payload (dict).
    moderation: the Moderation record this message belongs to.
    Returns an empty HttpResponse (Slack expects a 200 with no body).
    """
    original_message = data.get('original_message')
    text = original_message.get('text')
    approved_by = data.get('user').get('name')
    # action_ts is a Slack epoch timestamp like '1530000000.000123';
    # keep only the whole seconds before formatting.
    approved_time = float(data.get('action_ts').split('.')[0])
    approved_time = datetime.utcfromtimestamp(approved_time)
    approved_time = approved_time.strftime('%Y-%m-%d %I:%M%p')
    ts = data.get('message_ts')
    attachments = [
        {
            "fallback": "Please moderate this.",
            "text": ":white_check_mark: _Approved by @%s %s UTC_" %
            (approved_by, approved_time),
            "callback_id": "mod-approved",
            "attachment_type": "default",
            "mrkdwn_in": [
                "text"
            ]
        }
    ]
    token, channel_id = SlackSdk.get_channel_data('#mod-approved')
    print 'Channel: ', channel_id
    response = SlackSdk.create_message(token, channel_id, text, attachments)
    print 'Reponse: ', response.status_code
    if response.status_code == 200:
        # NOTE: `data` is rebound here from the Slack payload to the
        # chat.postMessage API response; send_to_approved_advice below
        # reads the reposted message out of this response.
        data = response.json()
        print 'Data: ', data
        if data.get('ok'):
            token, channel_id = SlackSdk.get_channel_data('#mod-inbox')
            print 'save moderation action'
            save_moderation_action(moderation, approved_by, channel_id,
                                   'approve', data.get('ts'))
            print 'Delete message -> '
            response = SlackSdk.delete_message(token, channel_id, ts)
            print 'Response ', response
            if is_answer(text):
                send_to_approved_advice(data, moderation)
    return HttpResponse('')
def send_to_approved_advice(data, moderation):
try:
attachments = []
for question_index, question in enumerate(PRO_TIP_QUESTIONS):
attachment = {
'fallback': "Moderator actions",
'callback_id': 'mod-approved-advice',
'text': question,
'attachment_type': 'default',
'actions': [
{
'name': 'yes',
'text': "Yes",
'type': 'button',
'value': 'pro-tip-yes-{}'.format(question_index),
'style': 'primary'
},
{
'name': 'no',
'text': "No",
'type': 'button',
'value': 'pro-tip-no-{}'.format(question_index)
}
]
}
attachments.append(attachment)
attachment = {
'fallback': "Moderator actions",
'callback_id': 'mod-approved-advice',
'title': SUMMARY_TITLE,
'text': "You have currently positively marked {} out of {} ProTips".format(0, len(PRO_TIP_QUESTIONS)),
'attachment_type': 'default',
}
attachments.append(attachment)
text = data.get('message').get('text')
token, channel_id = SlackSdk.get_channel_data('#approved-advice')
response = SlackSdk.create_message(token, channel_id, text, attachments)
except:
print traceback.format_exc()
def mod_pro_tip(data, moderation, current_question_index, response):
try:
original_message = data.get('original_message')
text = original_message.get('text')
ts = data.get('message_ts')
attachments = []
summary = None
answer_count = 0
yes_count = 1 if response == 'yes' else 0
for question_index, attachment in enumerate(
original_message.get('attachments')):
# count partial results
actions = attachment.get('actions')
if actions:
action = actions[0]
value = action['value']
if 'pro-tip-change' in value:
answer_count += 1
if 'yes' in value:
yes_count += 1
print '--------------------'
pp.pprint(attachment)
if attachment.get('title') == SUMMARY_TITLE:
summary = attachment
if current_question_index == question_index:
if response in ['yes', 'no']:
actions = [
{
'name': 'change',
'text': "Change your response ({})".format(response),
'type': 'button',
'value': 'pro-tip-change-{}-{}'.format(response, question_index),
'style': 'primary'
}
]
else:
actions = [
{
'name': 'yes',
'text': "Yes",
'type': 'button',
'value': 'pro-tip-yes-{}'.format(question_index),
'style': 'primary'
},
{
'name': 'no',
'text': "No",
'type': 'button',
'value': 'pro-tip-no-{}'.format(question_index)
}
]
attachment_data = {
'fallback': "Moderator actions",
'callback_id': 'mod-approved-advice',
'text': PRO_TIP_QUESTIONS[question_index],
'attachment_type': 'default',
'actions': actions,
}
attachments.append(attachment_data)
else:
attachments.append(attachment)
print '-------------------------------'
print 'Attachments: (before lambda)'
pp.pprint(attachments)
attachments = filter(lambda item: item.get('title') != SUMMARY_TITLE,
attachments)
print '-------------------------------'
print 'Attachments: (before summary)'
pp.pprint(attachments)
print '******************** Yes Count:'
print yes_count
print '********* summary (old)'
print summary['text']
summary['text'] = "You have currently positively marked " + str(yes_count) + " out of 10 ProTips"
if answer_count == len(PRO_TIP_QUESTIONS) - 1 and \
response in ['yes', 'no']:
actions = [
{
'name': 'submit',
'text': "Submit your review",
'type': 'button',
'value': 'pro-tip-submit',
'style': 'primary'
}
| |
<reponame>devops786/awx-demo<filename>awx/main/models/rbac.py
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
# Python
import logging
import threading
import contextlib
import re
# Django
from django.db import models, transaction, connection
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.translation import ugettext_lazy as _
# AWX
from awx.api.versioning import reverse
from django.contrib.auth.models import User # noqa
from awx.main.models.base import * # noqa
# Public API of this module.
__all__ = [
    'Role',
    'batch_role_ancestor_rebuilding',
    'get_roles_on_resource',
    'ROLE_SINGLETON_SYSTEM_ADMINISTRATOR',
    'ROLE_SINGLETON_SYSTEM_AUDITOR',
    'role_summary_fields_generator'
]

logger = logging.getLogger('awx.main.models.rbac')

# singleton_name values for the two system-wide roles that are not attached
# to any particular resource.
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR='system_administrator'
ROLE_SINGLETON_SYSTEM_AUDITOR='system_auditor'

# Human-readable display names, keyed by Role.role_field.
role_names = {
    'system_administrator': _('System Administrator'),
    'system_auditor': _('System Auditor'),
    'adhoc_role': _('Ad Hoc'),
    'admin_role': _('Admin'),
    'project_admin_role': _('Project Admin'),
    'inventory_admin_role': _('Inventory Admin'),
    'credential_admin_role': _('Credential Admin'),
    'workflow_admin_role': _('Workflow Admin'),
    'notification_admin_role': _('Notification Admin'),
    'auditor_role': _('Auditor'),
    'execute_role': _('Execute'),
    'member_role': _('Member'),
    'read_role': _('Read'),
    'update_role': _('Update'),
    'use_role': _('Use'),
}

# Description templates, keyed by Role.role_field. A '%s' placeholder is
# filled with the resource's model name (see Role.description); a dict value
# selects a template per model name, with a 'default' fallback.
role_descriptions = {
    'system_administrator': _('Can manage all aspects of the system'),
    'system_auditor': _('Can view all settings on the system'),
    'adhoc_role': _('May run ad hoc commands on an inventory'),
    'admin_role': _('Can manage all aspects of the %s'),
    'project_admin_role': _('Can manage all projects of the %s'),
    'inventory_admin_role': _('Can manage all inventories of the %s'),
    'credential_admin_role': _('Can manage all credentials of the %s'),
    'workflow_admin_role': _('Can manage all workflows of the %s'),
    'notification_admin_role': _('Can manage all notifications of the %s'),
    'auditor_role': _('Can view all settings for the %s'),
    'execute_role': {
        'organization': _('May run any executable resources in the organization'),
        'default': _('May run the %s'),
    },
    'member_role': _('User is a member of the %s'),
    'read_role': _('May view settings for the %s'),
    'update_role': _('May update project or inventory or group using the configured source update system'),
    'use_role': _('Can use the %s in a job template'),
}

tls = threading.local()  # thread local storage
def check_singleton(func):
    '''
    Decorator for `visible_roles`-style methods whose first argument is a
    user. If that user holds either singleton role (System Administrator or
    System Auditor), short-circuit and return the unfiltered role set: the
    second positional argument when one was given, otherwise all Roles.
    Non-singleton users fall through to the wrapped function.
    '''
    def wrapper(*args, **kwargs):
        user = args[0]
        admins = Role.singleton(ROLE_SINGLETON_SYSTEM_ADMINISTRATOR)
        auditors = Role.singleton(ROLE_SINGLETON_SYSTEM_AUDITOR)
        if user not in admins and user not in auditors:
            return func(*args, **kwargs)
        if len(args) == 2:
            return args[1]
        return Role.objects.all()
    return wrapper
@contextlib.contextmanager
def batch_role_ancestor_rebuilding(allow_nesting=False):
    '''
    Batches the role ancestor rebuild work necessary whenever role-role
    relations change. This can result in a big speedup when performing
    any bulk manipulation.

    While the context is active, the requested additions/removals are
    accumulated in thread-local storage; the single combined call to
    Role.rebuild_role_ancestor_list runs (inside one transaction) when the
    outermost context exits.

    NOTE(review): the allow_nesting parameter is accepted but never read
    here -- nesting is effectively always tolerated, because an inner
    context sees batch_role_rebuilding already True and defers the rebuild
    to the outermost context. Confirm whether the parameter can be removed.

    WARNING: Calls to anything related to checking access/permissions
    while within the context of the batch_role_ancestor_rebuilding will
    likely not work.
    '''
    # True if this thread is already inside an outer batching context.
    batch_role_rebuilding = getattr(tls, 'batch_role_rebuilding', False)

    try:
        setattr(tls, 'batch_role_rebuilding', True)
        if not batch_role_rebuilding:
            # Outermost context: start fresh accumulators.
            setattr(tls, 'additions', set())
            setattr(tls, 'removals', set())
        yield

    finally:
        # Restore the previous nesting state even if the body raised.
        setattr(tls, 'batch_role_rebuilding', batch_role_rebuilding)
        if not batch_role_rebuilding:
            # Outermost context: perform the accumulated rebuild atomically.
            additions = getattr(tls, 'additions')
            removals = getattr(tls, 'removals')
            with transaction.atomic():
                Role.rebuild_role_ancestor_list(list(additions), list(removals))
            delattr(tls, 'additions')
            delattr(tls, 'removals')
class Role(models.Model):
    '''
    A single RBAC role: a named permission level (e.g. admin_role,
    read_role) attached to one resource via a generic foreign key, and
    related to users and to other roles through the members / parents /
    ancestors relations below.
    '''

    class Meta:
        app_label = 'main'
        verbose_name_plural = _('roles')
        db_table = 'main_rbac_roles'
        index_together = [
            ("content_type", "object_id")
        ]

    # Which role this is; key into the module-level role_names /
    # role_descriptions tables (e.g. 'admin_role').
    role_field = models.TextField(null=False)
    # Non-null only for the global singleton roles (see
    # ROLE_SINGLETON_SYSTEM_ADMINISTRATOR / _AUDITOR).
    singleton_name = models.TextField(null=True, default=None, db_index=True, unique=True)
    parents = models.ManyToManyField('Role', related_name='children')
    # Presumably a JSON-encoded list (default '[]') of implied parent roles
    # -- TODO confirm against the code that reads it.
    implicit_parents = models.TextField(null=False, default='[]')
    ancestors = models.ManyToManyField(
        'Role',
        through='RoleAncestorEntry',
        through_fields=('descendent', 'ancestor'),
        related_name='descendents'
    )  # auto-generated by `rebuild_role_ancestor_list`
    members = models.ManyToManyField('auth.User', related_name='roles')
    # Generic foreign key to the resource this role governs (null for singletons).
    content_type = models.ForeignKey(ContentType, null=True, default=None)
    object_id = models.PositiveIntegerField(null=True, default=None)
    content_object = GenericForeignKey('content_type', 'object_id')
def __unicode__(self):
if 'role_field' in self.__dict__:
return u'%s-%s' % (self.name, self.pk)
else:
return u'%s-%s' % (self._meta.verbose_name, self.pk)
    def save(self, *args, **kwargs):
        """Save the role, then re-sync the denormalized ancestor list for
        this role (and transitively its descendents)."""
        super(Role, self).save(*args, **kwargs)
        self.rebuild_role_ancestor_list([self.id], [])
    def get_absolute_url(self, request=None):
        """Return the API URL for this role's detail view."""
        return reverse('api:role_detail', kwargs={'pk': self.pk}, request=request)
    def __contains__(self, accessor):
        """Support `accessor in role` membership tests.

        Dispatch depends on the accessor:
        - User: True if the user is a member of this role or any ancestor.
        - Team (matched by class name, presumably to avoid a circular
          import -- TODO confirm): True if the team's member_role is an
          ancestor of this role.
        - Role: True if the other role is an ancestor of this role.
        - Anything else: treated as a resource object; True if any role
          attached to that resource is an ancestor of this role.

        NOTE(review): uses exact `type(...) ==` checks, so subclasses or
        lazy proxies of User/Role fall through to the generic resource
        branch -- confirm that is intended before switching to isinstance().
        """
        if type(accessor) == User:
            return self.ancestors.filter(members=accessor).exists()
        elif accessor.__class__.__name__ == 'Team':
            return self.ancestors.filter(pk=accessor.member_role.id).exists()
        elif type(accessor) == Role:
            return self.ancestors.filter(pk=accessor.pk).exists()
        else:
            accessor_type = ContentType.objects.get_for_model(accessor)
            roles = Role.objects.filter(content_type__pk=accessor_type.id,
                                        object_id=accessor.id)
            return self.ancestors.filter(pk__in=roles).exists()
@property
def name(self):
global role_names
return role_names[self.role_field]
@property
def description(self):
global role_descriptions
description = role_descriptions[self.role_field]
content_type = self.content_type
model_name = None
if content_type:
model = content_type.model_class()
model_name = re.sub(r'([a-z])([A-Z])', r'\1 \2', model.__name__).lower()
value = description
if type(description) == dict:
value = description.get(model_name)
if value is None:
value = description.get('default')
if '%s' in value and content_type:
value = value % model_name
return value
@staticmethod
def rebuild_role_ancestor_list(additions, removals):
'''
Updates our `ancestors` map to accurately reflect all of the ancestors for a role
You should never need to call this. Signal handlers should be calling
this method when the role hierachy changes automatically.
'''
# The ancestry table
# =================================================
#
# The role ancestors table denormalizes the parental relations
# between all roles in the system. If you have role A which is a
# parent of B which is a parent of C, then the ancestors table will
# contain a row noting that B is a descendent of A, and two rows for
# denoting that C is a descendent of both A and B. In addition to
# storing entries for each descendent relationship, we also store an
# entry that states that C is a 'descendent' of itself, C. This makes
# usage of this table simple in our queries as it enables us to do
# straight joins where we would have to do unions otherwise.
#
# The simple version of what this function is doing
# =================================================
#
# When something changes in our role "hierarchy", we need to update
# the `Role.ancestors` mapping to reflect these changes. The basic
# idea, which the code in this method is modeled after, is to do
# this: When a change happens to a role's parents list, we update
# that role's ancestry list, then we recursively update any child
# roles ancestry lists. Because our role relationships are not
# strictly hierarchical, and can even have loops, this process may
# necessarily visit the same nodes more than once. To handle this
# without having to keep track of what should be updated (again) and
# in what order, we simply use the termination condition of stopping
# when our stored ancestry list matches what our list should be, eg,
# when nothing changes. This can be simply implemented:
#
# if actual_ancestors != stored_ancestors:
# for id in actual_ancestors - stored_ancestors:
# self.ancestors.add(id)
# for id in stored_ancestors - actual_ancestors:
# self.ancestors.remove(id)
#
# for child in self.children.all():
# child.rebuild_role_ancestor_list()
#
# However this results in a lot of calls to the database, so the
# optimized implementation below effectively does this same thing,
# but we update all children at once, so effectively we sweep down
# through our hierarchy one layer at a time instead of one node at a
# time. Because of how this method works, we can also start from many
# roots at once and sweep down a large set of roles, which we take
# advantage of when performing bulk operations.
#
#
# SQL Breakdown
# =============
# We operate under the assumption that our parent's ancestor list is
# correct, thus we can always compute what our ancestor list should
# be by taking the union of our parent's ancestor lists and adding
# our self reference entry where ancestor_id = descendent_id
#
# The DELETE query deletes all entries in the ancestor table that
# should no longer be there (as determined by the NOT EXISTS query,
# which checks to see if the ancestor is still an ancestor of one
# or more of our parents)
#
# The INSERT query computes the list of what our ancestor maps should
# be, and inserts any missing entries.
#
# Once complete, we select all of the children for the roles we are
# working with, this list becomes the new role list we are working
# with.
#
# When our delete or insert query return that they have not performed
# any work, then we know that our children will also not need to be
# updated, and so we can terminate our loop.
#
#
if len(additions) == 0 and len(removals) == 0:
return
global tls
batch_role_rebuilding = getattr(tls, 'batch_role_rebuilding', False)
if batch_role_rebuilding:
getattr(tls, 'additions').update(set(additions))
getattr(tls, 'removals').update(set(removals))
return
cursor = connection.cursor()
loop_ct = 0
sql_params = {
'ancestors_table': Role.ancestors.through._meta.db_table,
'parents_table': Role.parents.through._meta.db_table,
'roles_table': Role._meta.db_table,
}
# SQLlite has a 1M sql statement limit.. since the django sqllite
# driver isn't letting us | |
return render_template("accountmgmt/updatename.html", openActions=countActions(), firstname=firstname)
if newName == firstname:
flash('that is not a new name...')
return render_template("accountmgmt/updatename.html", openActions=countActions(), firstname=firstname)
# Update the user's name
db.execute("UPDATE users SET firstname = :newName WHERE uuid = :userUUID;", newName=newName, userUUID=userUUID)
session["firstname"] = newName
# log an event in the history DB table: >>logHistory(historyType, action, seconduuid, toolid, neighborhoodid, comment)<<
logHistory("other", "editusername", "", "", "", "")
# show confirmation to user
flash('Name changed successfully.')
return redirect("/manageaccount")
else:
return apology("Misc Error")
@users_bp.route("/updateemail", methods=["GET", "POST"])
@login_required
def updateemail():
    """Let the logged-in user change the email address on their account."""
    db = app.config["database_object"]
    userUUID = session.get("user_uuid")
    # Current name and email, for display and for the "no change" check.
    record = db.execute("SELECT firstname, email FROM users WHERE uuid = :userUUID;", userUUID=userUUID)[0]
    firstname = record["firstname"]
    oldEmail = record["email"]

    def form_page():
        # Re-render the change-email form (used for GET and for rejections).
        return render_template("accountmgmt/updateemail.html", openActions=countActions(), firstname=firstname, oldEmail=oldEmail)

    if request.method == "GET":
        return form_page()

    formAction = request.form.get("returnedAction")
    if formAction == "returnHome":
        return redirect("/manageaccount")
    if formAction != "changeEmail":
        return apology("Misc Error")

    newEmail = request.form.get("newEmail")
    if newEmail == "":
        flash('Must enter an email address.')
        return form_page()
    if newEmail == oldEmail:
        flash("that's the same email address...")
        return form_page()
    # Persist the new address, then require the user to validate it.
    db.execute("UPDATE users SET email = :newEmail WHERE uuid = :userUUID;", newEmail=newEmail, userUUID=userUUID)
    authcode = generate_new_authcode(newEmail)
    # log an event in the history DB table
    logHistory("other", "editemail", "", "", "", "")
    # Log the user out, keeping only the first name for the next page.
    session.clear()
    session["firstname"] = firstname
    return redirect(f"/validateemail?email={newEmail}")
@users_bp.route("/deleteaccount", methods=["GET", "POST"])
@login_required
def deleteaccount():
    """Ask the user to confirm, then soft-delete their account."""
    db = app.config["database_object"]
    userUUID = session.get("user_uuid")
    firstname = session.get("firstname")
    if request.method == "GET":
        return render_template("/accountmgmt/confirmdelete.html", openActions=countActions(), firstname=firstname)

    formAction = request.form.get("returnedAction")
    if formAction == "returnHome":
        return redirect("/manageaccount")
    if formAction != "deleteAccount":
        return apology("Misc Error")

    # Deletion requires re-entering the password.
    password = request.form.get("password")
    if not password:
        return apology("You must enter your password to delete your account", "403")
    rows = db.execute("SELECT hash FROM users WHERE uuid = :userUUID", userUUID=userUUID)
    if len(rows) != 1 or not check_password_hash(rows[0]["hash"], password):
        return apology("Incorrect password", 403)
    # Soft delete: the row stays, only the flag flips.
    db.execute("UPDATE users SET deleted = 1 WHERE uuid = :userUUID;", userUUID=userUUID)
    # log an event in the history DB table
    logHistory("other", "deleteuser", "", "", "", "")
    session.clear()
    flash('Your account was deleted.')
    return apology("to see you go!", "sorry")
@users_bp.route("/history", methods=["GET", "POST"])
@login_required
def history():
    """Show the user's tool- and neighborhood-related history events.

    GET renders the history page with events newest-first, re-numbered
    from 1; any POST just bounces back to /manageaccount.
    """
    db = app.config["database_object"]
    userUUID = session.get("user_uuid")
    firstname = session.get("firstname")
    if request.method != "GET":
        return redirect("/manageaccount")

    def _username(uuid_):
        # Resolve a user UUID to a username; empty UUIDs map to "".
        if uuid_ == "":
            return ""
        return db.execute("SELECT username FROM users WHERE uuid = :uuid;", uuid=uuid_)[0]['username']

    def _stamps(event):
        # Short date and full timestamp strings for one history row.
        d = datetime.datetime.strptime(event["timestamp"], '%Y-%m-%d %H:%M:%S')
        return d.strftime('%b %d'), d.strftime('%H:%M:%S - %b %d, %Y (UTC)')

    # --- tool history, newest first ---
    toolhistoryDB = db.execute("SELECT * FROM history WHERE type = 'tool' AND (useruuid = :userUUID OR seconduuid = :userUUID);", userUUID=userUUID)
    toolhistory = {}
    for refnumber, event in enumerate(toolhistoryDB[::-1], start=1):
        date, timestamp = _stamps(event)
        action = event["action"]
        toolid = event["toolid"]
        toolname_db = db.execute("SELECT toolname FROM tools WHERE toolid = :toolid;", toolid=toolid)
        if len(toolname_db) != 0:
            toolname = toolname_db[0]['toolname']
        else:
            toolname = "Tool no longer exists: (ID:" + toolid + ")"
        description = getDescriptionTool(action, _username(event["useruuid"]), _username(event["seconduuid"]), event["comment"])
        toolhistory[refnumber] = {"refnumber": refnumber, "date": date, "timestamp": timestamp, "action": action, "toolid": toolid, "toolname": toolname, "comment": description}

    # --- neighborhood history, newest first ---
    nbhhistoryDB = db.execute("SELECT * FROM history WHERE type = 'neighborhood' AND (useruuid = :userUUID OR seconduuid = :userUUID);", userUUID=userUUID)
    nbhhistory = {}
    for refnumber, event in enumerate(nbhhistoryDB[::-1], start=1):
        date, timestamp = _stamps(event)
        action = event["action"]
        neighborhoodid = event["neighborhoodid"]
        neighborhoodnameDB = db.execute("SELECT neighborhood FROM neighborhoods WHERE neighborhoodid = :neighborhoodid;", neighborhoodid=neighborhoodid)
        if len(neighborhoodnameDB) != 1:
            neighborhoodname = "[deleted]"
        else:
            neighborhoodname = neighborhoodnameDB[0]['neighborhood']
        description = getDescriptionNBH(action, _username(event["useruuid"]), _username(event["seconduuid"]), event["comment"])
        nbhhistory[refnumber] = {"refnumber": refnumber, "date": date, "timestamp": timestamp, "action": action, "neighborhoodid": neighborhoodid, "neighborhoodname": neighborhoodname, "comment": description}

    return render_template("accountmgmt/history.html", openActions=countActions(), firstname=firstname, toolhistory=toolhistory, nbhhistory=nbhhistory)
@users_bp.route("/sharelink", methods=["GET", "POST"])
@login_required
def sharelink():
    """Render a QR code for sharing a neighborhood or tool link.

    The link to share arrives as the "link" query parameter.  The user
    must be a non-banned member of the neighborhood, or the owner of the
    tool, otherwise they are redirected home.
    """
    db = app.config["database_object"]
    if session.get("user_uuid") is None:
        return redirect("/")
    userUUID = session.get("user_uuid")
    firstname = session.get("firstname")
    user_details = db.execute("SELECT * FROM users WHERE uuid = :uuid;", uuid=userUUID)[0]
    if request.method == "GET":
        link = request.args.get("link")
        # Fix: a missing/empty "link" parameter previously raised
        # AttributeError on link.find(); reject it explicitly instead.
        if not link:
            return apology("Misc error")
        # requesttype can be "nbh" for neighborhood or "tool" for a tool link
        if link.find("neighborhood_details") != -1:
            requesttype = "nbh"
        elif link.find("tool_details") != -1:
            requesttype = "tool"
        else:
            return apology("Misc error")
        itemID = link[(link.find("id=") + 3):]
        # the ID is the UUID for each item.
        if requesttype == "nbh":
            # ensure the active user is a (non-banned) member of the neighborhood
            accesscheck = db.execute("SELECT * from MEMBERSHIPS where useruuid = :userUUID AND neighborhoodid = :itemID AND banned = 0;", userUUID=userUUID, itemID=itemID)
            nbh_exists = db.execute("SELECT * from NEIGHBORHOODS where neighborhoodid = :itemID AND deleted = 0", itemID=itemID)
            if len(nbh_exists) == 0:
                accesscheck = []  # neighborhood is gone: cancel the action
            if len(accesscheck) != 0:
                name = nbh_exists[0]["neighborhood"]
        elif requesttype == "tool":
            # ensure the active user is the tool owner
            accesscheck = db.execute("SELECT * from TOOLS where owneruuid = :userUUID AND toolid = :itemID AND deleted = 0", userUUID=userUUID, itemID=itemID)
            if len(accesscheck) != 0:
                name = accesscheck[0]["toolname"]
        if len(accesscheck) == 0:
            # no access, or the item doesn't exist
            return redirect("/")
        # generate the QR code based on the incoming link.
        img = qrcode.make(link)
        data = io.BytesIO()
        img.save(data, "PNG")
        encoded_qr_image = base64.b64encode(data.getvalue())
        return render_template("general/sharelink.html", openActions=countActions(), firstname=firstname, qrcode_data=encoded_qr_image.decode('utf-8'), type=requesttype, name=name)
    else:  # post
        return redirect("/tools")
@users_bp.route("/passwordrecovery", methods=["GET", "POST"])
def passwordrecovery():
    """Password recovery: email the requester a one-time reset link.

    GET shows the recovery form.  POST with action "resetPW" stores a
    fresh recovery key on the matching user row and emails a reset link;
    the same confirmation page is rendered whether or not the address is
    registered, so the form does not leak which emails exist.
    """
    db = app.config["database_object"]
    if session.get("user_uuid") is not None:
        flash("Already logged in...")
        return redirect("/manageaccount")
    if request.method == "GET":
        return render_template("accountmgmt/passwordrecovery.html")
    else:
        formAction = request.form.get("returnedAction")
        if formAction == "resetPW":
            # first confirm that the email address is linked to an active user
            email = request.form.get("email")
            userdeetz = db.execute("SELECT * FROM users WHERE email = :email", email=email)
            if len(userdeetz) != 1:
                # Unknown email: pretend success so addresses can't be probed.
                return render_template("accountmgmt/passwordrecoverysent.html")
            # generate new recovery key, and set it to the user
            recoverykey = uuid.uuid4().hex
            db.execute("UPDATE users SET recoverykey = :key WHERE uuid = :userUUID;", key=recoverykey, userUUID=userdeetz[0]["uuid"])
            # send email with the reset link (HTML body below; the link
            # carries both the email and the recovery token)
            recipients = [userdeetz[0]["email"]]
            subject = "ToolShare Password Reset"
            message = f'''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">
<html>
<head>
<title style="font-family: arial;">Tool Share - Password Recovery</title>
</head>
<body style="margin: 0; background-color: #d3d3d3; border: 7px solid #d3d3d3;color: #000000;font-family: arial;">
<div style="background-color:#d3d3d3; width: 100%; height:max-content;">
<div style="width: 100%; height: 78px; background-color: #f8f9fa;font-family: arial;">
<a href="https://sharetools.tk" target="_blank" style="text-decoration: none;line-height: 78px;">
<img src='https://i.imgur.com/J5Rhl45.png' alt="icon" style="height: 58px;margin:10px 10px 10px 15px">
<span style="font-size:2.2em;display: inline;color: #cc5500;vertical-align: text-bottom;">ToolShare</span>
</a>
</div>
<div style="padding: 20px 10px 30px 10px; background-color: #ffffff;">
In order to reset the password to your ToolShare account, please click the link below.<br>
<span style="font-size: 0.8em;">If you did not request this password change, please log back in to confirm your account.</span>
<div style="padding: 25px;">
<span style="padding-left: 12px;">
<a href="https://sharetools.tk/changepassword?email={email}&recoverytoken={recoverykey}">Reset my password</a>
</span>
</div>
</div>
<div style="padding: 8px; width: 100%;">
<div style="font-size: 10px; color: #808080; text-align: center; width: 100%;">
#dontbeafoolborrowatool<br>
Copyright 2021 / ToolShare / All Rights Reserved
</div>
</div>
</div>
</body>
</html>
'''
            # Flatten the HTML onto one line (all newlines become spaces).
            message = message.replace('\n', ' ').replace('\r', ' ')
            send_mail(recipients, subject, message)
            # redirect back to confirmation
            return render_template("accountmgmt/passwordrecoverysent.html")
        elif formAction == "returnHome":
            return redirect("/login")
        else:
            return apology("Misc Error")
@users_bp.route("/validatepwchange", methods=["GET", "POST"])
def validatePWchange():
db = app.config["database_object"]
code_timeout_limit = 2#minutes
if session.get("user_uuid") is not None:
flash("Already logged in...")
return redirect("/tools")
if request.method == "GET":
new_email = request.args.get("email")
error = request.args.get("error")
if new_email == "" or new_email == None:
return apology("not found","404")
new_user = db.execute("SELECT * FROM users WHERE email = :email", email=new_email)
if len(new_user) != 1:
| |
import json, zlib, time, random, sys, traceback
PY3 = sys.version_info >= (3,)
PY2 = sys.version_info < (3,)
if PY3:
from urllib.error import HTTPError, URLError
from urllib.request import urlopen, Request
import urllib.request, urllib.parse, urllib.error
from urllib.parse import quote
else:
from urllib2 import HTTPError, URLError, urlopen, Request
from urllib2 import quote
import numpy as np
#from femtocode.definitons import Dataset, ColumnName, Column, Segment, Schema
#from typesystem import Schema
from striped.common import synchronized, Lockable, parse_data
from striped.common import to_str, to_bytes
from striped.common.exceptions import StripedNotFoundException
from .DataCache import DataCache
MAX_URL_LENGTH = 1000
class RGInfoSegment(object):
    """Descriptor for one file segment inside a row group."""

    def __init__(self, data):
        """Pull the segment fields out of the metadata dict *data*."""
        # Required keys raise KeyError if absent; FileIndex is optional.
        self.BeginEvent = data["BeginEvent"]
        self.NEvents = data["NEvents"]
        self.FileIndex = data.get("FileIndex")
        self.FileName = data["FileName"]
class RGInfo(object):
    """Row-group metadata: id, event counts, segments and profile data."""

    def __init__(self, data):
        """Build from the metadata dict *data* returned by the server.

        Required keys: _Version, NEvents, RGID, Segments.
        Optional: BeginEventID, Profile (normalized to an empty dict).
        """
        self._Version = data["_Version"]
        self.NEvents = data["NEvents"]
        self.RGID = data["RGID"]
        self.BeginEventID = data.get("BeginEventID")
        self.Segments = [RGInfoSegment(s) for s in data["Segments"]]
        # Profile may be missing or explicitly null; normalize to {}.
        self.Profile = data.get("Profile", {}) or {}
        self.Metadata = self.Profile  # alias

    def meta(self, name):
        """Return the profile/metadata value for *name*, or None."""
        return self.Metadata.get(name)

    def __str__(self):
        # Fix: the label used to read "NEvenets" (typo).
        return "RGInfo(RGID:%s, NEvents:%s, BeginEventID:%s, Version:%s, segments:%s)" % (
            self.RGID, self.NEvents, self.BeginEventID, self._Version, len(self.Segments))
class StripedColumnDescriptor(object):
    """Column metadata decoded from the server's JSON description."""

    def __init__(self, desc_dict):
        """Copy the recognized keys out of *desc_dict* (missing -> None)."""
        get = desc_dict.get
        self.Type = get("type")
        self.NPType = get("type_np")
        self.ConvertToNPType = get("convert_to_np")
        self.Depth = get("depth")
        self.SizeColumn = get("size_column")
        self.ParentArray = get("parent_array")
        self.Shape = tuple(get("shape", ()))

    def __str__(self):
        return "StripedColumnDescriptor(depth:%s, type:'%s', parent_array:'%s', size_column:'%s')" % \
            (self.Depth, self.Type, self.ParentArray, self.SizeColumn)

    __repr__ = __str__

    @property
    def fixedShape(self):
        """Trailing fully-fixed part of Shape: everything after the last
        None (variable-length) dimension; () if Shape ends with None."""
        shape = self.Shape
        if not shape:
            return ()
        for back, dim in enumerate(reversed(shape)):
            if dim is None:
                return shape[len(shape) - back:]
        return shape

    def asJSON(self):
        """Serialize back to the server's JSON field names."""
        return json.dumps({
            "type": self.Type,
            "type_np": self.NPType,
            "convert_to_np": self.ConvertToNPType,
            "depth": self.Depth,
            "size_column": self.SizeColumn,
            "shape": self.Shape,
            "parent_array": self.ParentArray})
class SizeColumnDescriptor(object):
    """Fixed descriptor for synthetic ".@size" columns."""

    def __init__(self):
        # Every size column looks the same: scalar little-endian int64.
        for attr, value in (
            ("Type", "int"),
            ("NPType", "<i8"),
            ("ConvertToNPType", "i8"),
            ("Depth", 0),
            ("SizeColumn", None),
            ("ParentArray", None),
            ("Shape", ()),
        ):
            setattr(self, attr, value)

    @property
    def fixedShape(self):
        """Size columns are scalar per event: no fixed trailing shape."""
        return ()

    def __str__(self):
        return "SizeColumnDescriptor()"

    __repr__ = __str__
class StripedColumn(object):
    """Client-side handle for one column of a striped dataset.

    Wraps the column name plus a lazily-fetched descriptor, and knows how
    to reassemble jagged (variable-depth) arrays from a flat data stripe
    and its companion ".@size" stripe.
    """

    def __init__(self, client, dataset, name, descriptor=None):
        # name: fully qualified column name, e.g. "Muon.pt" or "Muon.@size".
        self.Name = name
        self.Dataset = dataset
        self.DatasetName = dataset.Name
        self.UseMetaCache = client.UseMetaCache
        self.UseDataCache = client.UseDataCache
        # Accept either a ready descriptor object or its raw dict form.
        if isinstance(descriptor, dict): descriptor = StripedColumnDescriptor(descriptor)
        self.Descriptor = descriptor        # if preloaded
        # Size columns have a fixed, well-known descriptor.
        if self.issize and self.Descriptor is None:
            self.Descriptor = SizeColumnDescriptor()
        self.Client = client

    def __str__(self):
        return "StripedColumn(%s)" % (self.Name,)

    __repr__ = __str__

    @staticmethod
    def isSizeName(column_name):
        # Size columns are identified purely by their ".@size" name suffix.
        return column_name.endswith(".@size")

    @property
    def issize(self):
        # True if this object represents a synthetic size column.
        return StripedColumn.isSizeName(self.Name)

    isSize = issize     # alias

    @property
    def sizeColumn(self):
        #
        # returns StripedColumn object for the physical size column, e.g.:
        #   Muon.pt -> Muon.@size
        #   Muon.pt.@size -> None - meaningless
        #   EnvInfo.id -> None - None for level 0 columns
        #
        if self.descriptor.SizeColumn:
            return self.Dataset.column(self.descriptor.SizeColumn)
        else:
            return None

    @property
    def descriptor(self):
        """The column's StripedColumnDescriptor, fetched from the server
        on first access (size columns always get a fresh fixed one)."""
        if self.issize: return SizeColumnDescriptor()
        if self.Descriptor is None:
            data = self.Client.requestWithRetries("./column_desc?ds=%s&column=%s" %
                (self.DatasetName, self.Name), bypass_cache=not self.UseMetaCache).read()
            self.Descriptor = StripedColumnDescriptor(json.loads(data))
        return self.Descriptor

    def _decode_list(self, size, data, depth):
        # Recursively split flat `data` into nested lists driven by the
        # `size` sequence; returns (decoded, leftover_size, leftover_data).
        if depth == 0:
            # Leaf level: the data is the value itself.
            return data, size, []
        if depth == 1:
            # Partition data into consecutive runs of the given sizes.
            out = []
            i = 0
            for s in size:
                out.append(data[i:i+s])
                i += s
            return out, [], data[i:]
        else:
            # NOTE(review): `n = size[0]` is computed but never used to
            # bound the sub-list; the loop simply runs until `size` is
            # exhausted — confirm this is correct for depth > 2.
            out = []
            while size:
                n = size[0]
                segment, size, data = self._decode_list(size[1:], data, depth-1)
                out.append(segment)
            return out, size, data

    def assembleList(self, data, depth, size):
        """Rebuild a nested (jagged) list of the given depth from the flat
        data stripe and its size stripe; asserts both are fully consumed."""
        if depth == 0: return data
        out, size, data = self._decode_list(size, data, depth)
        assert len(size) == 0 and len(data) == 0 # make sure there are no leftovers
        return out

    def _____stripe(self, rgid, compress=False, assembled = False):
        # NOTE(review): deliberately disabled (renamed with leading
        # underscores) — presumably superseded elsewhere; kept for reference.
        columns = [self]
        if assembled:
            sc = self.sizeColumn
            if sc is not None:
                columns.append(sc)
        data = self.Client.stripes(self.DatasetName, columns, rgid, compress=compress)
        coldata = data[self.Name]
        if not assembled or sc is None:
            return coldata
        return self.assembleList(coldata, 1, # FIXIT !!
            data[sc.Name])

    def stripeSizeArray(self, rgid, compress=False):
        """Fetch the stripe of this column's size column for row group
        *rgid*, or None for flat columns.

        NOTE(review): calls ``c.stripe(...)``, but no ``stripe`` method is
        visible on this class (only the disabled ``_____stripe``) —
        confirm ``stripe`` is provided elsewhere.
        """
        c = self.sizeColumn
        if c is None: return None
        return c.stripe(rgid, compress=compress)
class StripedDataset(Lockable):
def __init__(self, client, name, preload_columns=None):
    """Create a handle for dataset *name* served by *client*.

    preload_columns: optional list of column names whose descriptors
    (plus their size columns) are fetched eagerly; raises
    StripedNotFoundException if any of them is missing.
    """
    # NOTE(review): Lockable.__init__ is not invoked here — confirm the
    # base class needs no setup.
    self.Client = client
    self.UseMetaCache = client.UseMetaCache
    self.UseDataCache = client.UseDataCache
    self.Name = name
    self.Schema = None        # lazily fetched by schema()
    self.RGIDs = None         # lazily fetched by rgids
    self.ColumnNames = None   # lazily fetched by columnNames
    self.ColumnsCache = client.ColumnsCache  # {column name -> column object} cache
    # Fix: the default used to be a shared mutable list ([]).
    if preload_columns is None:
        preload_columns = []
    try:
        self.columns(preload_columns, include_size_columns=True)  # preload descriptors
    except StripedNotFoundException:
        raise StripedNotFoundException("Not all requested columns found in the dataset")
@property
def exists(self):
    """True if the dataset is known to the server (its schema loads)."""
    try:
        self.schema()
    except StripedNotFoundException:
        return False
    return True
def schema(self, use_cache=None):
    """Return the dataset schema as a dict, fetching it on first use.

    use_cache=None falls back to the client-wide UseMetaCache setting;
    passing False forces a refresh from the server.
    """
    if use_cache is None:
        use_cache = self.UseMetaCache
    if use_cache and self.Schema:
        return self.Schema
    url = "./dataset_schema?ds=%s" % (self.Name, )
    raw = self.Client.requestWithRetries(url, bypass_cache=not use_cache).read()
    self.Schema = json.loads(raw)
    return self.Schema
@property
def rgids(self):
    """List of row-group ids in the dataset (cached after first fetch).

    Note: this getter used to declare a ``use_cache`` parameter, but a
    property getter can never receive arguments, so it was always None;
    the client-wide UseDataCache setting is what actually applies.
    """
    use_cache = self.UseDataCache
    if self.RGIDs is None or not use_cache:
        rgids = self.Client.requestWithRetries("./rgids?ds=%s" % (self.Name,), bypass_cache=not use_cache).read()
        self.RGIDs = json.loads(rgids)
    return self.RGIDs
RGINFO_CHUNK_SIZE = 100   # max ranges per /rginfo request

def makeRanges(self, lst):
    """Compress a list of integers into sorted inclusive (lo, hi) ranges.

    e.g. [5, 1, 2, 3] -> [(1, 3), (5, 5)]; returns [] for empty input.
    """
    if not lst:
        return []
    ordered = sorted(lst)
    ranges = []
    lo = hi = ordered[0]
    for value in ordered[1:]:
        if value > hi + 1:
            # Gap found: close the current range and start a new one.
            ranges.append((lo, hi))
            lo = hi = value
        else:
            hi = value
    ranges.append((lo, hi))
    return ranges
def rginfo(self, rgids, use_cache=None):
    """Fetch raw row-group info dicts for one id or a list of ids.

    Returns a single dict when *rgids* is a scalar, otherwise a list in
    server order.  Ids are compressed into "lo:hi" ranges and requested
    in batches of RGINFO_CHUNK_SIZE ranges per URL.
    """
    if use_cache is None: use_cache = self.UseMetaCache
    def get_chunk(ranges):
        # Encode ranges as "a,b:c,d" — single ids stay bare.
        text = ",".join(["%d" % (r0,) if r1 == r0 else "%d:%d" % (r0, r1) for r0, r1 in ranges])
        url = "./rginfo?ds=%s&rgids=%s" % (self.Name, text)
        data = self.Client.requestWithRetries(url, bypass_cache=not use_cache).read()
        lst = json.loads(data)
        return lst
    # Remember whether the caller passed a collection or a scalar.
    return_list = isinstance(rgids, (list, tuple))
    if not isinstance(rgids, (list, tuple)): rgids = [rgids]
    out = []
    ranges = self.makeRanges(rgids)
    for i in range(0, len(ranges), self.RGINFO_CHUNK_SIZE):
        out += get_chunk(ranges[i:i+self.RGINFO_CHUNK_SIZE])
    if not return_list: out = out[0]
    return out
def rginfos(self, rgids):
    """Like rginfo(), but wrap each raw dict in an RGInfo object.

    Always returns a list, even for a single integer id.
    """
    if isinstance(rgids, int):
        rgids = [rgids]
    return [RGInfo(info) for info in self.rginfo(rgids)]
def nevents(self):
    """Total number of events across all row groups of the dataset."""
    return sum(rg.NEvents for rg in self.rginfos(self.rgids))
def columnsAndSizes(self, column_names):
    """Sorted names of every column needed to reconstruct *column_names*,
    including their companion size columns."""
    needed = self.columns(column_names, include_size_columns=True)
    return sorted(needed)
@property
def columnNames(self):
    """Names of all columns in the dataset (cached after first fetch).

    Note: the former ``use_cache`` parameter was unusable on a property
    getter (always None); the client-wide UseMetaCache setting applies.
    """
    use_cache = self.UseMetaCache
    if self.ColumnNames is None or not use_cache:
        names = self.Client.requestWithRetries("./columns?ds=%s" % (self.Name,), bypass_cache=not use_cache).read()
        self.ColumnNames = json.loads(names)
    return self.ColumnNames
def column(self, name):
    """Return a StripedColumn for *name*, preferring the shared cache.

    Size columns are built directly: their descriptor is implicit.
    """
    if StripedColumn.isSizeName(name):
        return StripedColumn(self.Client, self, name)
    cached = self.ColumnsCache.get(self.Name, name)
    if cached is not None:
        return cached
    return self.columns([name])[name]
def sizeColumnFor(self, c):
    """StripedColumn for *c*'s size column, or None for flat columns."""
    size_name = c.descriptor.SizeColumn
    return StripedColumn(self.Client, self, size_name) if size_name else None
def columns(self, names, use_cache = None, include_size_columns = False):
# names list can be long so that the resulting URL will exceed the URL size limit
# split the list into smaller chunks and send them in separate requests
if use_cache is None: use_cache = self.UseMetaCache
def get_chunk(names):
url = "./column_descs?ds=%s&columns=%s" % (self.Name, ",".join(names))
#sys.stderr.write("url=%s\n" %(url,))
data = self.Client.requestWithRetries(url, bypass_cache=not use_cache).read()
data = json.loads(data)
#print type(data[names[0]]), data[names[0]]
return dict(
[(name, StripedColumn(self.Client, self, name, descriptor=desc)) for name, desc in data.items()] )
names = sorted(names) # help the web cache
data_names = []
out_dict = {} # name -> StripedColumn object
uncached = []
for n in names:
cached = self.ColumnsCache.get(self.Name, n)
if cached is None:
uncached.append(n)
else:
out_dict[n] = cached
#sys.stderr.write("remaining columns: %s\n" % (purged_names,))
names = uncached
for n in names:
if StripedColumn.isSizeName(n): # do not ask for size column descriptors
out_dict[n] = StripedColumn(self.Client, self, n, SizeColumnDescriptor())
else:
data_names.append(n)
chunk = []
nchars = 0
for name in data_names:
if len(chunk)+nchars >= MAX_URL_LENGTH - 100: # expected length of the comma-separated list
out_dict.update(get_chunk(chunk))
nchars = 0
chunk=[]
chunk.append(name)
nchars += len(name)
if chunk:
out_dict.update(get_chunk(chunk))
# check if any columns are missing in the dataset
#print out_dict
missing = [cn for cn in names if not cn in out_dict]
if len(missing):
raise KeyError("The following columns are not found in the dataset: %s" % (",".join(missing),))
if include_size_columns:
for cn, cc in list(out_dict.items()):
sc = cc.sizeColumn
if sc is not None: out_dict[sc.Name] = sc
for cn, | |
allocate_public_address(
self,
) -> typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]]:
'''Property allocatePublicAddress: Whether to allocate a public network address.'''
result = self._values.get("allocate_public_address")
return typing.cast(typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]], result)
@builtins.property
def app_list(
    self,
) -> typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosInstance.AppListProperty"]]]]:
    '''Property appList: App list.

    This value is only valid when WorkMode is Application.
    '''
    # Optional: None when "app_list" was not supplied at construction.
    result = self._values.get("app_list")
    return typing.cast(typing.Optional[typing.Union[ros_cdk_core.IResolvable, typing.List[typing.Union[ros_cdk_core.IResolvable, "RosInstance.AppListProperty"]]]], result)
@builtins.property
def auto_renew(
    self,
) -> typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]]:
    '''Property autoRenew: Whether auto renew.'''
    # Optional: None when "auto_renew" was not supplied at construction.
    result = self._values.get("auto_renew")
    return typing.cast(typing.Optional[typing.Union[builtins.bool, ros_cdk_core.IResolvable]], result)
@builtins.property
def instance_charge_type(
    self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
    '''Property instanceChargeType: Instance charge type: PostPaid (default): Pay-As-You-Go PrePaid: Subscription.'''
    # Optional: None when not supplied at construction.
    result = self._values.get("instance_charge_type")
    return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def internet_charge_type(
    self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
    '''Property internetChargeType: Network charge type: PayByTraffic (default): Flow-per-use billing PayByBandwidth: fixed-bandwidth billing This value is only valid when AllocatePublicAddress is true.'''
    # Optional: None when not supplied at construction.
    result = self._values.get("internet_charge_type")
    return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def internet_max_bandwidth_in(
    self,
) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
    '''Property internetMaxBandwidthIn: Maximum inbound bandwidth of the public network (in Mbps).

    Value range: 1-200
    Default: 200
    This value is only valid when AllocatePublicAddress is true.
    '''
    # Optional: None when not supplied at construction.
    result = self._values.get("internet_max_bandwidth_in")
    return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], result)
@builtins.property
def internet_max_bandwidth_out(
    self,
) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
    '''Property internetMaxBandwidthOut: Maximum outbound bandwidth of the public network (in Mbps).

    Value range: 1-200
    Default: 200
    This value is only valid when AllocatePublicAddress is true.
    '''
    # Optional: None when not supplied at construction.
    result = self._values.get("internet_max_bandwidth_out")
    return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], result)
@builtins.property
def name(
    self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
    '''Property name: Instance name.'''
    # Optional: None when "name" was not supplied at construction.
    result = self._values.get("name")
    return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def period(
    self,
) -> typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]]:
    '''Property period: Period of subscription.

    When PeriodUnit is Week, the value range is 1-4
    When PeriodUnit is Month, the value range is 1-9, 12, 24, 36, 48, 60
    This value is only valid when InstanceChargeType is PrePaid.
    '''
    # Optional: None when not supplied at construction.
    result = self._values.get("period")
    return typing.cast(typing.Optional[typing.Union[jsii.Number, ros_cdk_core.IResolvable]], result)
@builtins.property
def period_unit(
    self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
    '''Property periodUnit: Unit of period.

    Week or Month.
    This value is only valid when InstanceChargeType is PrePaid.
    '''
    # Optional: None when not supplied at construction.
    result = self._values.get("period_unit")
    return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def v_switch_id(
    self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
    '''Property vSwitchId: VSwitch id.'''
    # Optional: None when "v_switch_id" was not supplied at construction.
    result = self._values.get("v_switch_id")
    return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
    # Value equality: same props class and identical stored values.
    return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
    # Logical negation of __eq__ (delegates through rhs == self).
    return not (rhs == self)
def __repr__(self) -> str:
    # e.g. InstanceProps(name='x', period=1)
    return "InstanceProps(%s)" % ", ".join(
        k + "=" + repr(v) for k, v in self._values.items()
    )
class RosCluster(
ros_cdk_core.RosResource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-gws.RosCluster",
):
'''A ROS template type: ``ALIYUN::GWS::Cluster``.'''
def __init__(
    self,
    scope: ros_cdk_core.Construct,
    id: builtins.str,
    props: "RosClusterProps",
    enable_resource_property_constraint: builtins.bool,
) -> None:
    '''Create a new ``ALIYUN::GWS::Cluster``.

    :param scope: - scope in which this resource is defined.
    :param id: - scoped id of the resource.
    :param props: - resource properties.
    :param enable_resource_property_constraint: -
    '''
    # Construction is delegated to the jsii runtime.
    jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
    self,
    props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
    '''Render *props* into the template-level property mapping.

    :param props: -
    '''
    # Forwarded to the jsii-side renderProperties implementation.
    return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
@jsii.python.classproperty # type: ignore[misc]
@jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
    '''The resource type name for this resource class.'''
    # Static value read from the jsii side.
    return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrClusterId")
def attr_cluster_id(self) -> ros_cdk_core.IResolvable:
    '''
    :Attribute: ClusterId: Cluster id
    '''
    # Read from the jsii-side attribute.
    return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrClusterId"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrName")
def attr_name(self) -> ros_cdk_core.IResolvable:
    '''
    :Attribute: Name: Cluster name
    '''
    # Read from the jsii-side attribute.
    return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrName"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="rosProperties")
def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
    # Raw property mapping as held on the jsii side.
    return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="clusterType")
def cluster_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
    '''
    :Property:

    clusterType: Cluster Type:
    gws.s1.standard
    '''
    return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "clusterType"))

@cluster_type.setter
def cluster_type(
    self,
    value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
    # Writes through to the jsii-side property.
    jsii.set(self, "clusterType", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="enableResourcePropertyConstraint")
def enable_resource_property_constraint(self) -> builtins.bool:
    # Flag supplied at construction; stored on the jsii side.
    return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))

@enable_resource_property_constraint.setter
def enable_resource_property_constraint(self, value: builtins.bool) -> None:
    # Writes through to the jsii-side property.
    jsii.set(self, "enableResourcePropertyConstraint", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="vpcId")
def vpc_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
    '''
    :Property: vpcId: VPC id
    '''
    return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "vpcId"))

@vpc_id.setter
def vpc_id(
    self,
    value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
) -> None:
    # Writes through to the jsii-side property.
    jsii.set(self, "vpcId", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="name")
def name(
    self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
    '''
    :Property: name: Cluster name
    '''
    # Optional property: may be None.
    return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "name"))

@name.setter
def name(
    self,
    value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
) -> None:
    # Writes through to the jsii-side property.
    jsii.set(self, "name", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="policy")
def policy(
    self,
) -> typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosCluster.PolicyProperty"]]:
    '''
    :Property: policy: Cluster policy
    '''
    # Optional property: may be None.
    return typing.cast(typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosCluster.PolicyProperty"]], jsii.get(self, "policy"))

@policy.setter
def policy(
    self,
    value: typing.Optional[typing.Union[ros_cdk_core.IResolvable, "RosCluster.PolicyProperty"]],
) -> None:
    # Writes through to the jsii-side property.
    jsii.set(self, "policy", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="vSwitchId")
def v_switch_id(
    self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
    '''
    :Property: vSwitchId: VSwitch id
    '''
    # Optional property: may be None.
    return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "vSwitchId"))

@v_switch_id.setter
def v_switch_id(
    self,
    value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
) -> None:
    # Writes through to the jsii-side property.
    jsii.set(self, "vSwitchId", value)
@jsii.data_type(
    jsii_type="@alicloud/ros-cdk-gws.RosCluster.PolicyProperty",
    jsii_struct_bases=[],
    name_mapping={
        "clipboard": "clipboard",
        "local_drive": "localDrive",
        "usb_redirect": "usbRedirect",
        "watermark": "watermark",
    },
)
class PolicyProperty:
    # jsii-generated value struct describing a cluster policy.  name_mapping
    # above maps the snake_case Python names to the camelCase names used on
    # the jsii/ROS side.  All four fields are required.
    def __init__(
        self,
        *,
        clipboard: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        local_drive: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        usb_redirect: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        watermark: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        '''
        :param clipboard:
        :param local_drive:
        :param usb_redirect:
        :param watermark:
        '''
        # Canonical storage: every accessor below reads from this dict.
        self._values: typing.Dict[str, typing.Any] = {
            "clipboard": clipboard,
            "local_drive": local_drive,
            "usb_redirect": usb_redirect,
            "watermark": watermark,
        }

    @builtins.property
    def clipboard(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: clipboard: Clipboard
        '''
        result = self._values.get("clipboard")
        assert result is not None, "Required property 'clipboard' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def local_drive(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: localDrive: Local drive
        '''
        result = self._values.get("local_drive")
        assert result is not None, "Required property 'local_drive' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def usb_redirect(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: usbRedirect: USB redirect
        '''
        result = self._values.get("usb_redirect")
        assert result is not None, "Required property 'usb_redirect' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def watermark(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: watermark: Watermark
        '''
        result = self._values.get("watermark")
        assert result is not None, "Required property 'watermark' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        # Structs compare by exact type plus field values.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return "PolicyProperty(%s)" % ", ".join(
            k + "=" + repr(v) for k, v in self._values.items()
        )
@jsii.data_type(
    jsii_type="@alicloud/ros-cdk-gws.RosClusterProps",
    jsii_struct_bases=[],
    name_mapping={
        "cluster_type": "clusterType",
        "vpc_id": "vpcId",
        "name": "name",
        "policy": "policy",
        "v_switch_id": "vSwitchId",
    },
)
class RosClusterProps:
    # jsii-generated props struct for ALIYUN::GWS::Cluster.  cluster_type and
    # vpc_id are required; the remaining fields are optional and only stored
    # when explicitly supplied.
    def __init__(
        self,
        *,
        cluster_type: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        vpc_id: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        name: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        policy: typing.Optional[typing.Union[ros_cdk_core.IResolvable, RosCluster.PolicyProperty]] = None,
        v_switch_id: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
    ) -> None:
        '''Properties for defining a ``ALIYUN::GWS::Cluster``.

        :param cluster_type:
        :param vpc_id:
        :param name:
        :param policy:
        :param v_switch_id:
        '''
        self._values: typing.Dict[str, typing.Any] = {
            "cluster_type": cluster_type,
            "vpc_id": vpc_id,
        }
        # Optional members are stored only when provided, so _values-based
        # equality ignores unset fields.
        if name is not None:
            self._values["name"] = name
        if policy is not None:
            self._values["policy"] = policy
        if v_switch_id is not None:
            self._values["v_switch_id"] = v_switch_id

    @builtins.property
    def cluster_type(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property:

        clusterType: Cluster Type:
        gws.s1.standard
        '''
        result = self._values.get("cluster_type")
        assert result is not None, "Required property 'cluster_type' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def vpc_id(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: vpcId: VPC id
        '''
        result = self._values.get("vpc_id")
        assert result is not None, "Required property 'vpc_id' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)

    @builtins.property
    def name(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: name: Cluster name
        '''
        result = self._values.get("name")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)

    @builtins.property
    def policy(
        self,
    ) -> typing.Optional[typing.Union[ros_cdk_core.IResolvable, RosCluster.PolicyProperty]]:
        '''
        :Property: policy: Cluster policy
        '''
        result = self._values.get("policy")
        return typing.cast(typing.Optional[typing.Union[ros_cdk_core.IResolvable, RosCluster.PolicyProperty]], result)

    @builtins.property
    def v_switch_id(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: vSwitchId: VSwitch id
        '''
        result = self._values.get("v_switch_id")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        # Structs compare by exact type plus field values.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return "RosClusterProps(%s)" % ", ".join(
            k + "=" + repr(v) for k, v in self._values.items()
        )
class RosInstance(
ros_cdk_core.RosResource,
metaclass=jsii.JSIIMeta,
jsii_type="@alicloud/ros-cdk-gws.RosInstance",
):
'''A ROS template type: ``ALIYUN::GWS::Instance``.'''
def __init__(
self,
scope: ros_cdk_core.Construct,
id: builtins.str,
props: "RosInstanceProps",
enable_resource_property_constraint: builtins.bool,
) -> None:
'''Create a new ``ALIYUN::GWS::Instance``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param props: - resource properties.
:param enable_resource_property_constraint: -
'''
jsii.create(self.__class__, | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File: rename.py
# Author: ernitron (c) 2017
# Mit License
import os
import sys
import re
Version = "1.4.2"
# To print colored text on term.
# These globals hold the ANSI escape sequences used by the print statements
# throughout the script; they start empty (no color) and are populated by
# color() / reset again by nocolor().
RED = ''
BLUE = ''
CYAN = ''
GREEN = ''
RESET = ''
BOLD = ''
REV = ''
def color():
    """Switch the global palette to ANSI escape codes for colored output."""
    global RED, BLUE, CYAN, GREEN, RESET, BOLD, REV
    RED, BLUE, CYAN, GREEN, RESET, BOLD, REV = (
        '\033[1;31m',
        '\033[1;34m',
        '\033[1;36m',
        '\033[0;32m',
        '\033[0;0m',
        '\033[;1m',
        '\033[;7m',
    )
def nocolor():
    """Reset the global palette to empty strings (plain, uncolored output)."""
    global RED, BLUE, CYAN, GREEN, RESET, BOLD, REV
    RED = BLUE = CYAN = GREEN = RESET = BOLD = REV = ''
def skip_name(filename, skip=''):
    """-k skip: strip chars at beginning of filename.

    *skip* may be an integer-like string (number of characters to drop) or a
    plain string, in which case its length is used.  The result is stripped
    of surrounding whitespace.
    """
    # Narrowed from a bare except: only a failed int() conversion should
    # fall back to using the string's length.
    try:
        count = int(skip)
    except (TypeError, ValueError):
        count = len(skip)
    return filename[count:].strip()
def ztrip_name(filename, skip=0):
    """-z skip: strip chars at end of filename.

    *skip* may be an integer-like value (number of characters to drop) or a
    plain string, in which case its length is used.  Returns filename
    unchanged when the count is zero or not smaller than the name's length.
    """
    try:
        count = int(skip)
    except (TypeError, ValueError):  # narrowed from a bare except
        count = len(skip)
    # BUG FIX: the original returned filename[:-count] whenever
    # count < len(filename), so count == 0 (including the default) produced
    # filename[:-0] == '' and wiped the whole name.  Guard count > 0.
    if 0 < count < len(filename):
        return filename[:-count].strip()
    return filename
def start_name(filename, start, replace):
    """Swap a case-insensitive leading *start* for *replace*.

    When filename does not begin with start (or start is empty), replace is
    still prepended to the whitespace-stripped filename.
    """
    matches = bool(start) and filename.lower().startswith(start.lower())
    drop = len(start) if matches else 0
    return replace + filename[drop:].strip()
def remove_underscore(filename):
    """Turn every underscore and dash in filename into a space."""
    return filename.translate(str.maketrans('_-', '  '))
def replace_blank(filename, fill_char='_'):
    """Replace every space character with *fill_char* (default '_')."""
    return fill_char.join(filename.split(' '))
def strip_name(filename):
    # NOTE(review): the two arguments of this replace render identically here
    # (whitespace appears mangled); presumably it was meant to collapse a
    # double space into a single one — confirm against the original source.
    f = filename.replace(' ', ' ')
    # Trim leading/trailing separators and whitespace.
    return f.strip(' -._\t\n\r')
def space_case(filename):
    """Insert spaces: underscores become spaces, and a space is added at
    every lower-to-upper case boundary (e.g. 'fooBar' -> 'foo Bar')."""
    pieces = []
    last_kept = ''
    for ch in filename:
        if ch == '_':
            pieces.append(' ')
            # note: last_kept deliberately not updated, matching the
            # original's behavior around underscores
            continue
        if last_kept.islower() and ch.isupper():
            pieces.append(' ')
        pieces.append(ch)
        last_kept = ch
    return ''.join(pieces)
def camel_case(filename):
    """Title-case the words of filename, first splitting camelCase runs
    apart (e.g. 'camelCase' -> 'Camel Case'); a lone 'L ' is rejoined as
    "L'" for elided articles."""
    spaced = []
    previous = ''
    for current in filename:
        if previous.islower() and current.isupper():
            spaced.append(' ')
        spaced.append(current)
        previous = current
    words = re.findall(r'[\w]+', ''.join(spaced).lower())
    titled = ' '.join(word.title() for word in words)
    return titled.replace("L ", "L'")
def replace_content(filename, contains, replace):
    """Replace every occurrence of *contains* with *replace*; no-op when
    contains is empty or absent from filename."""
    if not contains or contains not in filename:
        return filename
    return filename.replace(contains, replace)
def delete_string(filename, contains):
    """Remove every occurrence of *contains* from filename."""
    return filename.replace(contains, '') if contains in filename else filename
def lower_case(filename):
    '''Lower-case filename: from NEWNAME returns newname'''
    return filename.lower()
def upper_case(filename):
    '''Upper-case filename: from newname returns NEWNAME'''
    return filename.upper()
def title_case(filename):
    '''Title-case filename: from "new name" returns "New Name"'''
    # (original docstring said "Upper filename" — copy-paste error)
    return filename.title()
def add_number(filename, counter, bottom):
    """Attach a zero-padded two-digit counter to filename: as a '-NN'
    suffix when *bottom* is true, otherwise as an 'NN-' prefix."""
    tag = f'{counter:02d}'
    return f'{filename}-{tag}' if bottom else f'{tag}-{filename}'
def add_string(filename, string, start=True):
    """Join *string* and filename with '-': string comes first when *start*
    is true, last otherwise."""
    ordered = (string, filename) if start else (filename, string)
    return '-'.join(ordered)
def substitute(filename, pattern, replace):
    """Regex substitution on filename.

    *pattern* may be a sed-style '/regex/replacement/' expression, optionally
    suffixed with 'i' for case-insensitive matching, or a plain regex whose
    matches are replaced with *replace*.  Returns filename unchanged when
    pattern is empty.
    """
    if not pattern:
        return filename
    # BUG FIX: the original set flags = None for patterns not ending in 'i';
    # re.sub then raised TypeError, so the plain '/a/b/' sed form silently
    # fell through and never substituted.  re.sub expects an int: use 0.
    flags = re.IGNORECASE if pattern[-1] == 'i' else 0
    try:
        parts = pattern.split('/')
        return re.sub(parts[1], parts[2], filename, flags=flags)
    except (IndexError, re.error):
        # Not a sed-style pattern (or a bad regex piece): fall through.
        pass
    return re.sub(pattern, replace, filename)
def timestamp_name(filename, newname, bottom):
    """Combine *newname* with the file's modification date (YYYY-MM-DD,
    local time): as a '-date' suffix when *bottom* is true, otherwise as a
    'date-' prefix."""
    from time import localtime, strftime
    mtime = os.stat(filename).st_mtime
    stamp = strftime("%Y-%m-%d", localtime(mtime))
    return f'{newname}-{stamp}' if bottom else f'{stamp}-{newname}'
def swap_name(filename, swap):
    """Move the last *swap*-separated word to the front, upper-cased and
    followed by a comma; the remaining words are title-cased.
    'Alfa Beta Gamma' -> 'GAMMA, Alfa Beta'; 'Alfa' -> 'ALFA'."""
    pieces = filename.split(swap)
    head = pieces[-1].upper() + ','
    tail = ' '.join(p.strip(',').title() for p in pieces[:-1])
    combined = head + ' ' + tail if tail or len(pieces) > 1 else head
    return combined.strip(',').strip()
def sanitize_name(filename):
    """Drop shell/markup-unfriendly characters, then tidy the result with
    strip_name."""
    unwanted = """[]()%@"!$^&*,:;></?{}"""
    cleaned = filename.translate(str.maketrans('', '', unwanted))
    return strip_name(cleaned)
def hash_name(filename, hash='sha256'):
    """Return the hex digest of filename's ASCII bytes using *hash*.

    Non-ASCII characters are dropped before hashing.  An unknown algorithm
    name prints the available choices and exits (preserving the original
    CLI behavior).
    """
    import hashlib
    # Narrowed from a bare except: hashlib.new raises ValueError for an
    # unsupported algorithm name.
    try:
        digest = hashlib.new(hash)
    except ValueError:
        print(hashlib.algorithms_available)
        sys.exit(0)
    digest.update(filename.encode('ascii', 'ignore'))
    return digest.hexdigest()
def bulk_rename(a):
    '''The loop on current dir to rename files based on requests.

    *a* is the argparse Namespace; each attribute corresponds to one CLI
    flag.  For every entry that passes the applicability filters, the name
    (without extension) is pushed through the transformation pipeline in a
    fixed order, then do_rename performs (or previews) the actual rename.
    '''
    # NOTE(review): the -f/--files option is commented out in the CLI
    # definition below, so a.files may not exist — confirm it is restored.
    if a.files:
        filelist = a.files
    else:
        filelist = os.listdir('.')
    for filename in filelist:
        # Depth-first recursion into subdirectories when -R is given.
        if a.recursive and os.path.isdir(filename):
            os.chdir(filename)
            bulk_rename(a)
            os.chdir('..')
        # --- applicability filters: skip entries excluded by the flags ---
        if a.directory and not os.path.isdir(filename):
            continue
        if a.regular and not os.path.isfile(filename):
            continue
        if a.match and not re.match(a.match, filename):
            continue
        if a.contains and not a.contains in filename:
            continue
        newname, extension = os.path.splitext(filename)
        if a.extlower:
            extension = extension.lower()
        if a.suffix and not a.suffix in extension:
            continue
        if a.remove:
            # Removal mode: delete instead of renaming and move on.
            remove_filename(filename, a.force, a.yes, a.verbose)
            continue
        # --- transformation pipeline: applied in this fixed order ---
        if a.start:
            newname = start_name(newname, a.start, a.replace)
        if a.skip:
            newname = skip_name(newname, a.skip)
        if a.delete:
            newname = delete_string(newname, a.delete)
        if a.contains:
            newname = replace_content(newname, a.contains, a.replace)
        if a.expression:
            newname = substitute(newname, a.expression, a.replace)
        if a.camel:
            newname = camel_case(newname)
        if a.upper:
            newname = upper_case(newname)
        if a.lower:
            newname = lower_case(newname)
        if a.title:
            newname = title_case(newname)
        if a.blank:
            newname = replace_blank(newname, fill_char=a.blank)
        if a.sanitize:
            newname = sanitize_name(newname)
        if a.timestamp:
            newname = timestamp_name(filename, newname, a.bottom)
        if a.number:
            newname = add_number(newname, a.number, a.bottom)
            # Sequence counter is advanced on the Namespace itself.
            a.number += 1
        if a.string:
            # NOTE(review): a.start (a string option) is reused here as the
            # boolean prefix/suffix switch for add_string — confirm intent.
            newname = add_string(newname, a.string, a.start)
        if a.strip:
            newname = strip_name(newname)
        if a.ztrip:
            newname = ztrip_name(newname, a.ztrip)
        if a.under:
            newname = remove_underscore(newname)
        if a.swap:
            newname = swap_name(newname, a.swap)
        if a.hash:
            newname = hash_name(newname, a.hash)
        if a.extension:
            extension = a.extension
        # Finally do the rename on file or directory
        if not newname:
            # Everything was stripped away: fall back to a dated placeholder.
            newname = timestamp_name(filename, 'ZZZZ-ToBeDefined', True)
        newname = newname + extension
        do_rename(filename, newname, a.force, a.yes, a.verbose)
def remove_filename(filename, force, yes, verbose):
    # Delete *filename*: prompt for confirmation unless *yes* is set, and
    # only actually unlink when both *yes* and *force* are set — otherwise
    # just report what would happen (dry-run).
    if verbose:
        print('File to be removed\t=>', CYAN, filename, RESET)
    if not yes:
        # NOTE(review): '(unknown)' reads like a placeholder — presumably the
        # filename was meant to be interpolated into this prompt; confirm.
        answer = input(f'remove (unknown) " ? [y/n] ')
        yes = answer.lower() == 'y'
        if not yes: return
    cwd = os.getcwd()
    print('THIS FILE \t=>', GREEN, cwd, filename, RESET)
    if yes and force:
        try:
            # Preserve creation and access time is default
            os.unlink(filename)
            print('WAS REMOVED \t=>', GREEN, filename, RESET)
        except:
            print('Cannot remove ', RED, filename, RESET)
    else:
        print('WILL BE REMOVED \t=>', CYAN, filename, RESET)
def do_rename(filename, newname, force, yes, verbose):
    # Rename filename -> newname, preserving atime/mtime.  Prompts for
    # confirmation unless *yes*; the rename only happens when both *yes*
    # and *force* are set — otherwise the action is just previewed.
    if not newname or filename == newname:
        if verbose:
            print('Nothing to do for\t=>', RED, filename, RESET)
        return
    if verbose:
        print('File to be renamed\t=>', CYAN, filename, RESET)
    if not yes:
        # NOTE(review): '(unknown)' looks like a placeholder for the original
        # filename in this prompt — confirm against upstream source.
        answer = input(f'Rename (unknown) to "{newname}" ? [y/n] ')
        yes = answer.lower() == 'y'
        if not yes: return
    print('THIS FILE \t=>', GREEN, filename, RESET)
    if newname and yes and force:
        if os.path.isfile(newname):
            print('FILE EXISTS \t=>', RED, newname, RESET)
            # NOTE(review): unreachable — this branch only runs when force
            # is true, so `not force` can never hold here.
            if not force:
                print('CANT RENAME\t=>', RED, newname, RESET)
                return
        try:
            # Preserve creation and access time is default
            stat = os.stat(filename)
            os.rename(filename, newname)
            os.utime(newname, (stat.st_atime, stat.st_mtime))
            print('HAS BEEN RENAMED TO\t=>', GREEN, newname, RESET)
        except:
            print('Cannot rename ', RED, filename, RESET)
    else:
        print('WILL BE RENAMED TO\t=>', CYAN, newname, RESET)
if __name__ == '__main__':
import argparse
example_text = '''
Examples:
$ rename.py --skip start_of_file --skip 5 --contains This --replace That --number --suffix .mp3 --force
would rename a file like: start_of_file1234_Take_This.mp3
into: 01-Take_That.mp3
$ rename.py -s start_of_file -k 5 -e '/This/That/' -n -x mp3 -F
would do the same
'''
parser = argparse.ArgumentParser(description='rename files', epilog=example_text,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-a', '--string', help='add string')
parser.add_argument('-b', '--blank', help='Replace blank with _', nargs='?', const='_')
parser.add_argument('-c', '--contains', help='check for string in filename; works with -r')
parser.add_argument('-d', '--delete', help='delete string in filename')
parser.add_argument('-e', '--expression', help='pattern with regex')
parser.add_argument('-r', '--replace', help='replace string; works with -c and -p', default='')
parser.add_argument('-s', '--start', help='delete string from beginning of filename')
parser.add_argument('-z', '--ztrip', help='delete n chars from end of filename')
parser.add_argument('-k', '--skip', help='skip n char from start of filename')
parser.add_argument('-n', '--number', type=int, help='Add a 2 digit sequence', nargs='?', const='1')
parser.add_argument('-w', '--swap', help='swap names Alfa Beta->Beta Alfa', nargs='?', const=' ')
parser.add_argument('-ext', '--extension', help='change extension example to .mp3')
parser.add_argument('-exl', '--extlower', action='store_true', help='Transform extension into lower case')
# Applicability
parser.add_argument('--root', help='this will be the root directory', default='./')
parser.add_argument('-m', '--match', help='apply only to file that match pattern')
parser.add_argument('-x', '--suffix', help='apply only to file with suffix like .mp3')
parser.add_argument('-D', '--directory', action='store_true', help='Apply only to directory')
parser.add_argument('-G', '--regular', action='store_true', help='Apply only to regular files')
parser.add_argument('-R', '--recursive', action='store_true', help='Recursive into subdirs')
parser.add_argument('-Y', '--yes', action='store_false', help='Confirm before rename [y/n]')
parser.add_argument('-F', '--force', action='store_true', help='Force to rename (do it!)', default=False)
#parser.add_argument('-f', '--files', help='apply to list of files', nargs='*')
# Other Boolean Flags
parser.add_argument('-_', '--under', action='store_true', help='Remove underscores and minuses', default=False)
parser.add_argument('-B', '--bottom', action='store_true', help='Put number sequence at end')
parser.add_argument('-C', '--camel', action='store_true', help='Transform filename in CamelCase')
parser.add_argument('-L', '--lower', action='store_true', help='Transform filename into lower case')
parser.add_argument('-U', '--upper', action='store_true', help='Transform filename into upper case')
parser.add_argument('-T', '--title', action='store_true', help='Transform into Title case | |
from templates import *
import os
class NedTheoryFrame(EcceFrame):
    """Top-level frame that hosts the NWChem ED theory-details panel."""
    def __init__(self, parent, title, app, helpURL=""):
        EcceFrame.__init__(self, parent, title)
        # The panel attaches itself to this frame on construction; the local
        # binding was unused, so just construct it.
        NedTheoryPanel(self, helpURL)
        self.Finalize()
class NedTheoryPanel(EccePanel):
def __init__(self,parent,helpURL=""):
EccePanel.__init__(self, parent, helpURL)
# GENERAL SETTING
generalSizer = EcceBoxSizer(self, style = 1, cols = 2)
SCFDirectName = "ES.Theory.SCF.Direct"
SCFDirectLabel = "SCF Computed:"
initGuessName = "ES.Theory.SCF.InitialGuess"
initGuessChoice = ["NWChem Atomic Guess", "Core Hamiltonian"]
if ((EcceGlobals.Category == "SCF") or
(EcceGlobals.Category == "MP") or
(EcceGlobals.Category == "CC")):
SCFDirectChoice = ["Direct", "Semi-Direct"]
SCFDirectDefault = 1
elif (EcceGlobals.Category == "DFT"):
SCFDirectChoice = ["In Core", "Direct"]
SCFDirectDefault = 0
# Start Eric Bylaska's planewave module integration
elif (EcceGlobals.Category == "NWPW"):
SCFDirectName = "ES.Theory.NWPW.Spintype"
SCFDirectLabel = "Spin Type:"
SCFDirectChoice = ["Restricted", "Unrestricted"]
SCFDirectDefault = 0
initGuessName = "ES.Theory.NWPW.InitialGuess"
initGuessChoice = ["LCAO", "LCAO_skip"]
# End Eric Bylaska's planewave module integration
else:
SCFDirectChoice = ["None"]
SCFDirectDefault = 0
self.SCFDirect = EcceComboBox(self,
choices = SCFDirectChoice,
name = SCFDirectName,
default = SCFDirectDefault,
label = SCFDirectLabel)
generalSizer.AddWidget(self.SCFDirect)
self.initGuess = EcceComboBox(self,
choices = initGuessChoice,
name = initGuessName,
default = 0,
label = " Orbital Initial Guess:")
generalSizer.AddWidget(self.initGuess)
self.panelSizer.Add(generalSizer)
# Special logic for DirDyVTST task, as determined by overloading
# the meaning of the RunType value
if EcceGlobals.RunType != "DirDyVTST":
geometrySizer = EcceBoxSizer(self, "Geometry", 2)
self.symmetryTog = EcceCheckBox(self,
label = " Use Available Symmetry",
name = "ES.Theory.UseSymmetry",
default = True,
export = 1)
geometrySizer.AddWidget(self.symmetryTog)
self.symmetryTol = EcceFloatInput(self,
name = "ES.Theory.SymmetryTol",
default = 1e-2,
hardRange = "(0..)",
unit = "Angstroms",
label = "Tolerance:")
geometrySizer.AddWidget(self.symmetryTol)
self.useAutoZ = EcceCheckBox(self,
label = " Use Automatic Z-matrix",
name = "ES.Theory.UseAutoZ",
default = True)
geometrySizer.AddWidget(self.useAutoZ)
self.panelSizer.Add(geometrySizer)
# MEMEORY/DISK LIMIT
if EcceGlobals.Category == "SCF":
memTitle = "Memory/Disk Limits"
else:
memTitle = "Memory Limit"
memorySizer = EcceBoxSizer(self, memTitle, 4)
self.memTog = EcceCheckBox(self,
label = " Memory:",
name = "ES.Theory.SCF.Memory",
default = False)
memorySizer.AddWidget(self.memTog)
self.memSize = EcceSpinCtrl(self,
hardRange = "[0..)",
unit = "Megawords",
name = "ES.Theory.SCF.MemorySize",
default = 2,
export = 1)
memorySizer.AddWidget(self.memSize)
if EcceGlobals.Category == "SCF":
self.diskTog = EcceCheckBox(self,
label = " Disk:",
name = "ES.Theory.SCF.Disk",
default = 0)
memorySizer.AddWidget(self.diskTog)
self.diskSize = EcceSpinCtrl(self,
hardRange = "[0..)",
unit = "Megawords",
name = "ES.Theory.SCF.DiskSize",
default = 64,
export = 1)
memorySizer.AddWidget(self.diskSize)
self.panelSizer.Add(memorySizer)
# SCF CONVERGENCE
scfSizer = EcceBoxSizer(self, "SCF Convergence", 2)
scfLeftSizer = EcceVBoxSizer()
scfRightSizer = EcceVBoxSizer()
if (EcceGlobals.Category == "SCF" or
EcceGlobals.Category == "MP" or
EcceGlobals.Category == "CC"):
algorithmChoice = ["Quadratic", "Conjugate Gradient"]
elif (EcceGlobals.Category == "DFT"):
algorithmChoice = ["DIIS", "Iterative SCF"]
# Start Eric Bylaska's planewave module integration
elif (EcceGlobals.Category == "NWPW"):
algorithmChoice = ["CG", "LMBFGS"]
# End Eric Bylaska's planewave module integration
else:
algorithmChoice = ["None"]
self.algorithm = EcceComboBox(self,
choices = algorithmChoice,
name = "ES.Theory.SCF.ConvergenceAlgorithm",
default = 0,
label = "Algorithm:")
scfRightSizer.AddWidget(self.algorithm,
border = EcceGlobals.BorderDefault)
if (EcceGlobals.Category == "SCF" or
EcceGlobals.Category == "DFT"):
if (EcceGlobals.Category == "SCF"):
gradientDefault = 1e-4
elif (EcceGlobals.Category == "DFT"):
gradientDefault = 5e-4
self.gradient = EcceFloatInput(self,
default = gradientDefault,
name = "ES.Theory.SCF.ConvergenceGradient.Value",
softRange = "[1e-10..1e-2]",
hardRange = "(0..)",
unit = "Hartree",
label = "Gradient:")
scfLeftSizer.AddWidget(self.gradient,
border = EcceGlobals.BorderDefault)
if (EcceGlobals.Category == "DFT"):
self.deltaDensity = EcceFloatInput(self,
default = 1e-5,
name = "ES.Theory.SCF.ConvergenceDensity.Value",
softRange = "[1e-10..1e-2]",
hardRange = "(0..)",
label = "Delta Density:")
scfRightSizer.AddWidget(self.deltaDensity,
border = EcceGlobals.BorderDefault)
self.deltaEnergy = EcceFloatInput(self,
default = 1e-6,
name = "ES.Theory.SCF.ConvergenceEnergy.Value",
softRange = "[1e-10..1e-2]",
hardRange = "(0..)",
unit = "Hartree",
label = "Delta Energy:")
scfRightSizer.AddWidget(self.deltaEnergy,
border = EcceGlobals.BorderDefault)
if (EcceGlobals.Category != "DFT"):
maxIterDefault = 20
else:
maxIterDefault = 30
self.maxIter = EcceSpinCtrl(self,
hardRange = "[0..)",
name = "ES.Theory.SCF.ConvergenceIterations",
default = maxIterDefault,
label = "Max. Iterations:")
scfLeftSizer.AddWidget(self.maxIter,
border = EcceGlobals.BorderDefault)
# Start Eric Bylaska's planewave module integration
if (EcceGlobals.Category == "NWPW"):
self.deltaDensity = EcceFloatInput(self,
default = 1e-7,
name = "ES.Theory.NWPW.ConvergenceDensity.Value",
softRange = "[1e-10..1e-2]",
hardRange = "(0..)",
label = "Delta Density:")
scfLeftSizer.AddWidget(self.deltaDensity,
border = EcceGlobals.BorderDefault)
self.deltaEnergy = EcceFloatInput(self,
default = 1e-7,
name = "ES.Theory.NWPW.ConvergenceEnergy.Value",
softRange = "[1e-10..1e-2]",
hardRange = "(0..)",
unit = "Hartree",
label = "Delta Energy:")
scfLeftSizer.AddWidget(self.deltaEnergy,
border = EcceGlobals.BorderDefault)
# End Eric Bylaska's planewave module integration
if (EcceGlobals.Category == "DFT"):
dampSizer = EcceHBoxSizer()
self.dampTog = EcceCheckBox(self,
label = " Damping Percentage:",
name = "ES.Theory.SCF.Damping",
default = False)
dampSizer.AddWidget(self.dampTog)
self.dampSize = EcceSpinCtrl(self,
default = 70,
name = "ES.Theory.SCF.DampingValue",
hardRange = "[0..100]",
export = 1)
dampSizer.AddWidget(self.dampSize)
scfRightSizer.AddWidget(dampSizer,
border = EcceGlobals.BorderDefault)
levelSizer = EcceHBoxSizer()
self.levelShiftTog = EcceCheckBox(self,
label = " Level Shift Size:",
name = "ES.Theory.SCF.LevelShift",
default = False)
levelSizer.AddWidget(self.levelShiftTog)
if (EcceGlobals.Category != "DFT"):
levelShiftSizeDefault = 20
else:
levelShiftSizeDefault = 0.5
self.levelShiftSize = EcceFloatInput(self,
default = levelShiftSizeDefault,
name = "ES.Theory.SCF.LevelShiftSize",
hardRange = "[0..)",
unit = "Hartree",
label = "",
export = 1)
levelSizer.AddWidget(self.levelShiftSize)
scfRightSizer.AddWidget(levelSizer,
border = EcceGlobals.BorderDefault)
if (EcceGlobals.Category == "SCF" or
EcceGlobals.Category == "MP" or
EcceGlobals.Category == "CC"):
newLevelSizer = EcceHBoxSizer()
self.newLevelTog = EcceCheckBox(self,
label = " New Level Shift After:",
name = "ES.Theory.SCF.NewLevelShift",
default = False)
newLevelSizer.AddWidget(self.newLevelTog)
self.newLevelCross = EcceFloatInput(self,
default = 0.5,
name = "ES.Theory.SCF.NewLevelShiftCrossover",
hardRange = "[0..)",
unit = "Hartree",
label = "",
export = 1)
newLevelSizer.AddWidget(self.newLevelCross)
scfRightSizer.AddWidget(newLevelSizer,
border = EcceGlobals.BorderDefault)
self.newLevelSize = EcceFloatInput(self,
default = 0,
name = "ES.Theory.SCF.NewLevelShiftSize",
hardRange = "[0..)",
unit = "Hartree",
label = " Size:")
scfRightSizer.AddWidget(self.newLevelSize,
border = EcceGlobals.BorderDefault)
scfSizer.AddWidget(scfLeftSizer,
flag = wx.ALL)
scfSizer.AddWidget(scfRightSizer,
flag = wx.ALL)
self.panelSizer.Add(scfSizer)
# Start <NAME>laska's planewave module integration
if (EcceGlobals.RunType != "DirDyVTST" and
EcceGlobals.Category != "NWPW"):
# End Eric Bylaska's planewave module integration
sovSizer = EcceBoxSizer(self,
label = "Solvation",
cols = 2)
sovLeftSizer = EcceVBoxSizer()
sovRightSizer = EcceVBoxSizer()
self.useCosmo = EcceCheckBox(self,
label = " Use COSMO Solvation",
name = "ES.Theory.SCF.UseCosmo",
default = False)
sovLeftSizer.AddWidget(self.useCosmo,
border = EcceGlobals.BorderDefault)
self.cosmoRadius = EcceFloatInput(self,
default = 0.5,
name = "ES.Theory.SCF.SolventRadius",
label = "Solvent Radius:",
hardRange = "[0..)",
unit = "Angstroms")
sovLeftSizer.AddWidget(self.cosmoRadius,
border = EcceGlobals.BorderDefault)
solventChoice = ["Water: 78.4 Debye",
"Methanol: 33.0 Debye",
"Cyclohexane: 2.0243 Debye",
"Benzene: 2.2825 Debye",
"Acetonitrile: 36.64 Debye",
"Other"]
self.solvent = EcceComboBox(self,
choices = solventChoice,
name = "ES.Theory.SCF.Solvent",
label = "Solvent:",
default = 0)
sovRightSizer.AddWidget(self.solvent,
border = EcceGlobals.BorderDefault)
self.cosmoDielec = EcceFloatInput(self,
default = 78.4,
name = "ES.Theory.SCF.Dielectric",
label = "Dielectric Constant:",
hardRange = "(0..)",
unit = "Debye")
sovRightSizer.AddWidget(self.cosmoDielec,
border = EcceGlobals.BorderDefault)
sovSizer.AddWidget(sovLeftSizer,
flag = wx.ALL)
sovSizer.AddWidget(sovRightSizer,
flag = wx.ALL)
self.panelSizer.Add(sovSizer)
# THEORY OPTIONS MP
if EcceGlobals.Category == "MP":
mpSizer = EcceBoxSizer(self,
label = "Theory Options - MP",
cols = 1)
self.tightConv = EcceCheckBox(self,
label = " Use Tight Convergence",
name = "ES.Theory.MP.UseTightConvergence",
default = False)
mpSizer.AddWidget(self.tightConv)
self.panelSizer.Add(mpSizer)
# THEORY OPTIONS CC
if EcceGlobals.Category == "CC":
ccSizer = EcceBoxSizer(self,
label = "Theory Options - CC",
cols = 2)
self.deltaEnergyCC = EcceFloatInput(self,
default = 1e-6,
name = "ES.Theory.CC.ConvergenceEnergy.Value",
softRange = "[1e-12..1e-2]",
hardRange = "[0..)",
unit = "Hartree",
label = "Delta Energy:")
ccSizer.AddWidget(self.deltaEnergyCC)
self.maxIterCC = EcceSpinCtrl(self,
default = 20,
name = "ES.Theory.CC.ConvergenceIterations",
hardRange = "[0..)",
label = "Max. Iterations:")
ccSizer.AddWidget(self.maxIterCC)
self.panelSizer.Add(ccSizer)
# Start <NAME>'s planewave module integration
# THEORY OPTIONS NWPW
if EcceGlobals.Category == "NWPW":
nwpwSizer = EcceBoxSizer(self,
label = "Theory Options - NWPW",
cols = 2)
nwpwLeftSizer = EcceVBoxSizer()
nwpwRightSizer = EcceVBoxSizer()
xcFuncChoice = ["lda",
"pbe96",
"revpbe",
"blyp",
"hf",
"pbe0",
"revpbe0",
"lda-sic",
"pbe96-sic",
"revpbe-sic",
"lda-0.5sic",
"pbe96-0.5sic",
"revpbe-0.5sic",
"lda-0.4sic",
"pbe96-0.4sic",
"revpbe-0.4sic"]
xcFuncDefault = 0
self.xcFunc = EcceComboBox(self,
choices = xcFuncChoice,
name = "ES.Theory.NWPW.XCFunctionals",
default = xcFuncDefault,
label = "Exchange-Correlation:")
nwpwLeftSizer.AddWidget(self.xcFunc)
cutoffSizer = EcceHBoxSizer()
self.usecutoff = EcceCheckBox(self,
label = "Cutoff Energy:",
name = "ES.Theory.NWPW.UseCutoff",
default = False)
cutoffSizer.AddWidget(self.usecutoff)
self.cutoff = EcceFloatInput(self,
default = 30.0,
name = "ES.Theory.NWPW.Cutoff",
unit = "Hartree",
hardRange = "[0..)",
label = "",
export = 1)
cutoffSizer.AddWidget(self.cutoff)
nwpwRightSizer.AddWidget(cutoffSizer, border=0)
npDimensionsSizer = EcceHBoxSizer()
self.useNpDimensions = EcceCheckBox(self,
label = "np_dimensions",
name = "ES.Theory.NWPW.UseNpDimensions",
default = False)
npDimensionsSizer.AddWidget(self.useNpDimensions)
self.npDimensionsRows = EcceSpinCtrl(self,
hardRange = "[-1..)",
name = "ES.Theory.NWPW.NpDimensionsRows",
default = -1,
label = "Rows:")
npDimensionsSizer.AddWidget(self.npDimensionsRows)
nwpwLeftSizer.AddWidget(npDimensionsSizer)
self.npDimensionsCols = EcceSpinCtrl(self,
hardRange = "[-1..)",
name = "ES.Theory.NWPW.NpDimensionsCols",
default = -1,
label = "Columns:")
nwpwRightSizer.AddWidget(self.npDimensionsCols)
rcutSizer = EcceHBoxSizer()
self.usercut = EcceCheckBox(self,
label = "Ewald rcut:",
name = "ES.Theory.NWPW.UseRcut",
default = False)
rcutSizer.AddWidget(self.usercut)
self.rcut = EcceFloatInput(self,
default = 0.0,
name = "ES.Theory.NWPW.EwaldRcut",
hardRange = "[0..)",
label = "")
rcutSizer.AddWidget(self.rcut)
nwpwLeftSizer.AddWidget(rcutSizer)
ncutSizer = EcceHBoxSizer()
self.usencut = EcceCheckBox(self,
label = "Ewald ncut:",
name = "ES.Theory.NWPW.UseNcut",
default | |
will be used!
"""
print 'writing an Aurelia User Info File:\n ', fileName
print 'We always use the following format for the User Info Files:'
print ' # 8.17 NH 7 2FMR'
print ' # ppm atomname residuenumber segid'
print ' segid should contain 4 letters or 4 spaces'
auihandle = TextFile.TextFile(fileName, 'w')
for EACH in self.atomlist:
#those with 999.000 don't have an assignment:
if EACH.shift != '999.000':
if EACH.segid == None:
outsegid = ' ' #4 spaces
else:
outsegid = EACH.segid
auihandle.write('# ' + EACH.shift + ' ' +\
EACH.atomname[0] + ' ' +\
EACH.residuenumber +\
outsegid + '\n')
def WriteAureliaUserInfo(self, fileName):
    """
    writes an Aurelia User Info File
    Does not use the chemical shifts with values 999.000
    if there are more than one atomnames in the tuple, only the
    first will be used!
    """
    # NOTE: Python 2 module (print statements are intentional).
    # NOTE(review): this method appears to duplicate the method immediately
    # above it in the file — confirm whether one copy should be removed.
    print 'writing an Aurelia User Info File:\n ', fileName
    print 'We always use the following format for the User Info Files:'
    print ' # 8.17 NH 7 2FMR'
    print ' # ppm atomname residuenumber segid'
    print ' segid should contain 4 letters or 4 spaces'
    auihandle = TextFile.TextFile(fileName, 'w')
    for EACH in self.atomlist:
        #those with 999.000 don't have an assignment:
        if EACH.shift != '999.000':
            if EACH.segid == None:
                outsegid = '    ' #4 spaces
            else:
                outsegid = EACH.segid
            # Only the first atom name of the tuple is written out.
            auihandle.write('# ' + EACH.shift + ' ' +\
                            EACH.atomname[0] + ' ' +\
                            EACH.residuenumber +\
                            outsegid + '\n')
def _Write_BioMagResBank_Polymer_Saveframe( self, fileHandle ):
    """write out the amino acid sequence in the future"""
    ## Number of residues in residue loop per line
    ## Please specify as a real
    number_residues_per_line = 5.0
    number_residues_per_line_1Letter = 20.0
    ## Tuple of three letter codes
    aaSequence = ()
    aaSequence1Letter = ""
    # NOTE(review): oldResidueNumber is assigned below but never read — dead.
    oldResidueNumber = -999
    aaSequenceCount = 0
    tmpStr = """
    ##############################
    # Monomeric polymers #
    ##############################
    save_my_protein
    _Saveframe_category monomeric_polymer
    _Mol_type polymer
    _Mol_polymer_class protein
    """
    fileHandle.write( tmpStr )
    # Accumulate both the three-letter tuple and the one-letter string,
    # wrapping the one-letter sequence every number_residues_per_line_1Letter
    # residues.
    for EACHR in self.residuelist:
        aaSequenceCount = aaSequenceCount + 1
        aaSequence = aaSequence + ( EACHR, )
        aaSequence1Letter = aaSequence1Letter + AminoAcid.AminoAcid( EACHR )[0]
        oldResidueNumber = aaSequenceCount
        if ( math.modf( ( aaSequenceCount )/
                        number_residues_per_line_1Letter )[0] == 0.0 ):
            aaSequence1Letter = aaSequence1Letter + '\n'
    # NOTE(review): aaSequence is a tuple, so comparing it to '' is always
    # False and this error branch can never fire — confirm intended check.
    if ( aaSequence == '' ):
        print 'ERROR: aminoacid sequence not known from input data!'
    else:
        resLoop = """
    ##############################
    # Polymer residue sequence #
    ##############################
    """
        resLoop = resLoop + " _Residue_count %s\n" % aaSequenceCount
        resLoop = resLoop + " _Mol_residue_sequence\n;\n"
        fileHandle.write( resLoop + aaSequence1Letter + "\n;\n\n" )
        resLoop = """
    loop_
    _Residue_seq_code
    _Residue_label
    """
        # Emit "index label" pairs, number_residues_per_line per output line.
        iii = 1
        for eachR in aaSequence:
            resLoop = resLoop + ' %3d %3s ' % ( iii, eachR )
            if math.modf( (iii)/number_residues_per_line )[0] == 0.0:
                resLoop = resLoop + '\n '
            iii = iii + 1
        resLoop = resLoop + '\n\n stop_\n'
        fileHandle.write( resLoop )
    fileHandle.write("\n\nsave_\n\n")
def _WriteBioMagResBank_Molecular_System_Saveframe( self, fileHandle ):
tmpStr = """
##################################
# Molecular system description #
##################################
save_my_system
_Saveframe_category molecular_system
_Mol_system_name 'my system'
loop_
_Mol_system_component_name
_Mol_label
"my protein" $my_protein
stop_
save_
"""
fileHandle.write( tmpStr )
def WriteBioMagResBank(self, fileName):
    """
    writes a file for BioMagResBank deposition
    Does not use the chemical shifts with values 999.000
    if there are more than one atomnames in the tuple, only the
    first will be used!
    """
    # NOTE(review): this module is Python 2 (print statement, string.upper).
    print 'Writing the BioMagResBank STAR formatted file', fileName
    bioHandle = open(fileName, 'w')
    # Header for STAR format
    bioHandle.write( "data_chemical_shift_set\n" )
    # Write a generic molecular system saveframe
    self._WriteBioMagResBank_Molecular_System_Saveframe( bioHandle )
    # Write the sequence in a polymer saveframe
    self._Write_BioMagResBank_Polymer_Saveframe( bioHandle )
    # Saveframe header plus the BMRB ambiguity-code legend; the legend is
    # emitted as STAR comments inside the output file.
    tmpStr = \
"""
###################################
# Assigned chemical shift lists #
###################################
###################################################################
# Chemical Shift Ambiguity Index Value Definitions #
# #
# Index Value Definition #
# #
# 1 Unique #
# 2 Ambiguity of geminal atoms or geminal methyl #
# proton groups #
# 3 Aromatic atoms on opposite sides of the ring #
# (e.g. Tyr HE1 and HE2 protons) #
# 4 Intraresidue ambiguities (e.g. Lys HG and #
# HD protons) #
# 5 Interresidue ambiguities (Lys 12 vs. Lys 27) #
# 9 Ambiguous, specific ambiguity not defined #
# #
###################################################################
save_chemical_shift_set
_Saveframe_category assigned_chemical_shifts
"""
    # Write some comments
    tmpStr = tmpStr + \
"""
_Details
;
Derived from the file: """
    tmpStr = tmpStr + self.fileName + """
;
_Mol_system_component_name "my protein"
"""
    bioHandle.write(tmpStr)
    ## Chemical shift table
    ## SegId left out for now
    commentsForPpm = """
loop_
_Atom_shift_assign_ID
_Residue_seq_code
_Residue_label
_Atom_name
_Atom_type
_Chem_shift_value
_Chem_shift_value_error
_Chem_shift_ambiguity_code
"""
    bioHandle.write(commentsForPpm)
    atomShiftAssignCounter = 1
    for EACH in self.atomlist:
        # Skip unassigned shifts; '999.000' is the "no assignment" sentinel.
        if EACH.shift != '999.000' and EACH.shift != None:
            # segid
            if EACH.segid:
                outSEGID = EACH.segid
            else:
                outSEGID = '.' # . instead of whitespace or None
            # residue type
            if EACH.aminoacid:
                outAA = EACH.aminoacid
            else:
                outAA = '.' # . instead of whitespace or None
            #for the ambiguity: #TODO : ambiguity in STAR files!!!
##            if len(EACH.atomname) > 1:
##                ambiguity = 9
##            else:
##                ambiguity = 1
            ambiguity = "."
            if EACH.shifterror:
                outError = EACH.shifterror
            else:
                outError = '.' # . instead of whitespace or None
            ## All upper case please
            tmpAtomName = EACH.atomname[0][:]
            tmpAtomName = string.upper( tmpAtomName )
            ## Convert CNS pseudo atoms to IUPAC, e.g. LEU HD1# -> MD1
            ## or leave untouched if they are not CNS pseudo atom names
            ## for the standard amino acids and nucleic acids
            tmpAtomName = Nomenclature.Convert_PseudoAtomName_CNS_2_IUPAC(
                EACH.aminoacid, tmpAtomName )
            ## Convert IUPAC atoms of a methyl group it's constituing pseudo atom
            ## E.g. LEU HD11 -> MD1
            ## This does NOT convert anything else than methyl and
            ## amino groups since the atoms in these groups can not be assigned
            ## individually.
            ## The first argument is the type of pseudo atom as defined in the library
            ## file originating from the AQUA software.
            tmpAtomName = Nomenclature.Convert_IUPAC_AtomName_2_PseudoAtomName(
                2, EACH.aminoacid, tmpAtomName )
            ## Convert IUPAC pseudo atoms to BMRB, e.g. LEU MD1 -> HD1
            ## This does NOT convert anything else than methyl and
            ## amino groups. I.e. it doesn't convert methylene and some
            ## other groups. Those need multiple rows in BMRB
            ## chemical shift table.
            tmpAtomName = Nomenclature.Convert_AtomName_IUPAC_2_BMRB_ChemShift(
                EACH.aminoacid, tmpAtomName )
            # Take care of special characters in STAR
            # NOTE(review): match(..., 1) scans from position 1, i.e. the
            # second character of the name -- confirm this offset is intended.
            if ( re.compile("#").match(tmpAtomName, 1) ):
                tmpAtomName = "'%s'" % tmpAtomName
            elif ( re.compile("\"").match(tmpAtomName, 1) ):
                tmpAtomName = "'%s'" % tmpAtomName
            if ( re.compile("'").match(tmpAtomName, 1) ):
                tmpAtomName = "\"%s\"" % tmpAtomName
            ## Note that taking the atom nucleus from the first char is
            ## not garanteed to always work!
            nucleus_type = string.upper( EACH.atomname[0][0] )
            # One fixed-width row of the BMRB chemical-shift table.
            outString = " %6s %4s %3s %-4s %-2s %7s %6s %s\n" %\
                        ( atomShiftAssignCounter,
##                          outSEGID,
                          EACH.residuenumber,
                          outAA,
                          tmpAtomName,
                          nucleus_type,
                          EACH.shift,
                          outError,
                          ambiguity )
            bioHandle.write(outString)
            atomShiftAssignCounter = atomShiftAssignCounter + 1
    lessComments = """
stop_
"""
    ## Just leave it out for now if it's not used any way.
    # NOTE(review): moreComments is intentionally never written (dead text
    # kept as documentation of the ambiguity-set loop format).
    moreComments = """
# The following loop is used to define sets of Atom-shift assignment IDs that
# represent related ambiguous assignments taken from the above list of
# assigned chemical shifts. Each element in the set should be separated by a
# comma, as shown in the example below, and is the assignment ID for a chemical
# shift assignment that has been given as ambiguity code of 4 or 5. Each set
# indicates that the observed chemical shifts are related to the defined
# atoms, but have not been assigned uniquely to a specific atom in the set.
loop_
_Atom_shift_assign_ID_ambiguity
#
# Sets of Atom-shift Assignment Ambiguities
#
# ------------------------------------------
# Example: 5,4,7
#
@
stop_
"""
    bioHandle.write(lessComments)
    # write out a tail for STAR format
    bioHandle.write("\nsave_\n\n")
    bioHandle.close()
def WriteChem(self, fileName):
"""
writes a .chem list which can be read by cns
Does not use the chemical shifts with values 999.000
if there are more than one atomnames in the tuple, only the
first will be used!
"""
print 'writing a .chem file', fileName
chemhandle = TextFile.TextFile(fileName, 'w')
chemhandle.write('! derived from the file:\n')
chemhandle.write('! ' + self.fileName + '\n')
for EACH in self.atomlist:
#those with 999.000 don't have an assignment:
if EACH.shift and EACH.shift | |
<filename>src/Tools/CodeGenerator/Plugins/SharedLibraryPlugin.py
# ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
# ----------------------------------------------------------------------
"""Contains the Plugin object"""
import copy
import itertools
import os
import sys
import textwrap
from collections import OrderedDict
import six
import CommonEnvironment
from CommonEnvironment.CallOnExit import CallOnExit
from CommonEnvironment import FileSystem
from CommonEnvironment import Interface
from CommonEnvironment import StringHelpers
# ----------------------------------------------------------------------
# Absolute path of this plugin file and its containing directory.
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# Temporarily prepend the parent directory so the shared Plugin base classes
# resolve; CallOnExit restores sys.path once the import completes.
sys.path.insert(0, os.path.join(_script_dir, ".."))
with CallOnExit(lambda: sys.path.pop(0)):
    from Plugin import Plugin as PluginBase, TypeVisitor as TypeVisitorBase
# ----------------------------------------------------------------------
@Interface.staticderived
class Plugin(PluginBase):
    """Code-generation plugin that emits the Shared Library C interface layer."""

    # ----------------------------------------------------------------------
    # |  Properties
    Name = Interface.DerivedProperty("SharedLibrary")
    Description = Interface.DerivedProperty(
        "Generates code used during the Shared Library import/export layer interfacing with the shared C++ functionality",
    )

    # ----------------------------------------------------------------------
    # |  Methods
    @staticmethod
    @Interface.override
    def Generate(
        open_file_func,
        global_custom_structs,
        global_custom_enums,
        data,
        output_dir,
        status_stream,
    ):
        """Entry point invoked by the code-generation driver.

        Args:
            open_file_func: callable(path, mode) used to open output files.
            global_custom_structs: custom struct definitions shared by all items.
            global_custom_enums: custom enum definitions shared by all items.
            data: list of featurizer groups; each group is a list of items.
            output_dir: destination directory for the generated sources.
            status_stream: hierarchical status/progress stream.

        Returns:
            0 on success; otherwise the first failing DoneManager result.
        """
        result_code = 0

        status_stream.write("Preprocessing data...")
        with status_stream.DoneManager():
            # Convert the types into the corresponding C types that will be used
            # in the Shared Library interface.
            c_data = []

            for items in data:
                c_data.append([CData(item, global_custom_structs, global_custom_enums) for item in items])

        status_stream.write("Generating Common Files...")
        with status_stream.DoneManager() as this_dm:
            this_dm.result = _GenerateCommonFiles(open_file_func, output_dir, this_dm.stream)
            if this_dm.result != 0:
                return this_dm.result

        for desc, func in [
            ("Generating .h files...", _GenerateHeaderFile),
            ("Generating .cpp files...", _GenerateCppFile),
            # TODO: ("Generating .cpp wrappers...", _GenerateCppFile),
        ]:
            status_stream.write(desc)
            with status_stream.DoneManager(
                suffix="\n",
            ) as dm:
                for index, (items, items_c_data) in enumerate(zip(data, c_data)):
                    dm.stream.write(
                        "Processing '{}' ({} of {})...".format(
                            items[0].name,
                            index + 1,
                            len(data),
                        ),
                    )
                    with dm.stream.DoneManager() as this_dm:
                        this_dm.result = func(
                            open_file_func,
                            output_dir,
                            items,
                            items_c_data,
                            this_dm.stream,
                        )

                # Negative results abort generation immediately; positive
                # (warning) results are retained but do not stop it.
                if dm.result < 0:
                    return dm.result

                result_code = result_code or dm.result

        return result_code
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def _CreateInterfaceSubstitutionDict(item, c_data):
template = getattr(item, "template", None)
if template is None:
suffix = "_"
type_desc = ""
cpp_template_suffix = "" if not item.featurizer_is_a_template else "<>"
else:
templates = []
suffix = "_{}_".format(template)
type_desc = " <{}>".format(template)
cpp_templates = []
if item.is_input_a_template:
cpp_templates.append(c_data.InputTypeInfoFactory.CppType)
if item.is_output_a_template:
cpp_templates.append(c_data.OutputTypeInfoFactory.CppType)
assert cpp_templates
cpp_templates = ", ".join(cpp_templates)
cpp_template_suffix = "<{}>".format(cpp_templates)
# ----------------------------------------------------------------------
def ToParamsString(params, arg_desc):
return ", ".join(
["/*{}*/ {} {}".format(arg_desc, param.Type, param.Name) for param in params],
)
# ----------------------------------------------------------------------
return {
"name": item.name,
"estimator_name": item.estimator_name,
"suffix": suffix,
"type_desc": type_desc,
"cpp_template_suffix": cpp_template_suffix,
}
# ----------------------------------------------------------------------
def _GenerateCommonFiles(open_file_func, output_dir, output_stream):
    """Write the source files shared by every generated featurizer wrapper.

    Generates SharedLibrary_Common.h/.cpp/.hpp and
    SharedLibrary_PointerTable.h/.cpp in ``output_dir``.

    Args:
        open_file_func: callable(path, mode) used to open output files.
        output_dir: destination directory for the generated sources.
        output_stream: status stream (not written to by this function).

    Returns:
        0 (success); errors propagate as exceptions.
    """
    # SharedLibrary_Common.h: export/packing macros and the POD types shared
    # across the C interface (error handle, DateTimeParameter, enums).
    with open_file_func(os.path.join(output_dir, "SharedLibrary_Common.h"), "w") as f:
        f.write(
            textwrap.dedent(
                """\
                /* ---------------------------------------------------------------------- */
                /* Copyright (c) Microsoft Corporation. All rights reserved. */
                /* Licensed under the MIT License */
                /* ---------------------------------------------------------------------- */
                #pragma once
                #include <cstddef>
                #include <stdint.h>
                #if (defined _MSC_VER)
                # if (defined DLL_EXPORT_COMPILE)
                # define FEATURIZER_LIBRARY_API __declspec(dllexport)
                # else
                # define FEATURIZER_LIBRARY_API __declspec(dllimport)
                # endif
                # define FEATURIZER_LIBRARY_API_PACK_PREFIX \\
                __pragma(pack(push)) \\
                __pragma(pack(1))
                # define FEATURIZER_LIBRARY_API_PACK_SUFFIX __pragma(pack(pop))
                # define FEATURIZER_LIBRARY_API_PACK_INLINE
                #elif (defined __GNUC__ || defined __clang__)
                # if (defined DLL_EXPORT_COMPILE)
                # define FEATURIZER_LIBRARY_API __attribute__((visibility("default")))
                # else
                # define FEATURIZER_LIBRARY_API
                # endif
                # define FEATURIZER_LIBRARY_API_PACK_PREFIX
                # define FEATURIZER_LIBRARY_API_PACK_SUFFIX
                # define FEATURIZER_LIBRARY_API_PACK_INLINE __attribute__((packed))
                #else
                # error Unrecognized compiler!
                #endif
                #if (defined __clang__)
                # pragma clang diagnostic push
                # pragma clang diagnostic ignored "-Wextern-c-compat" // empty struct has size 0 in C, size 1 in C++
                #endif
                extern "C" {
                struct ErrorInfoHandle {};
                FEATURIZER_LIBRARY_API bool GetErrorInfoString(/*in*/ ErrorInfoHandle *pHandle, /*out*/ char const **output_ptr, /*out*/ std::size_t *output_items);
                FEATURIZER_LIBRARY_API bool DestroyErrorInfoString(/*in*/ char const *input_ptr, /*in*/ std::size_t input_items);
                FEATURIZER_LIBRARY_API bool DestroyErrorInfo(/*in*/ ErrorInfoHandle *pHandle);
                FEATURIZER_LIBRARY_API bool DestroyTransformerSaveData(/*in*/ unsigned char const *pBuffer, /*in*/ std::size_t cBufferSize, /*out*/ ErrorInfoHandle **ppErrorInfo);
                FEATURIZER_LIBRARY_API_PACK_PREFIX;
                struct DateTimeParameter {
                // ----------------------------------------------------------------------
                // | Public Types
                enum DateTimeTypeValue {
                DateTimeInt64 = 1, // Posix time
                DateTimeString // ISO 8601
                };
                typedef unsigned char DateTimeType;
                struct StringData {
                char const * pBuffer;
                size_t cBufferElements;
                } FEATURIZER_LIBRARY_API_PACK_INLINE;
                union DataType {
                int64_t posix;
                StringData isoStr;
                };
                // ----------------------------------------------------------------------
                // | Public Data
                DateTimeType dataType;
                DataType data;
                } FEATURIZER_LIBRARY_API_PACK_INLINE;
                FEATURIZER_LIBRARY_API_PACK_SUFFIX;
                // These values should match the values in Featurizer.h
                enum TrainingStateValue {
                Pending = 1,
                Training,
                Finished,
                Completed
                };
                typedef unsigned char TrainingState;
                // These values should match the values in Featurizer.h
                enum FitResultValue {
                Complete = 1,
                Continue,
                ResetAndContinue
                };
                typedef unsigned char FitResult;
                // These values should match the values in Traits.h
                enum TypeIdValue {
                StringId = 0x00000001,
                Int8Id,
                Int16Id,
                Int32Id,
                Int64Id,
                UInt8Id,
                UInt16Id,
                UInt32Id,
                UInt64Id,
                Float16Id,
                Float32Id,
                Float64Id,
                Complex64Id,
                Complex128Id,
                BFloat16Id,
                BoolId,
                TimepointId,
                DurationId,
                LastStaticValueId,
                TensorId = 0x1001 | (LastStaticValueId + 1),
                SparseTensorId = 0x1001 | (LastStaticValueId + 2),
                TabularId = 0x1001 | (LastStaticValueId + 3),
                NullableId = 0x1001 | (LastStaticValueId + 4),
                VectorId = 0x1001 | (LastStaticValueId + 5),
                MapId = 0x1002 | (LastStaticValueId + 6)
                };
                typedef uint32_t TypeId;
                } // extern "C"
                """,
            ),
        )

    # SharedLibrary_Common.cpp: error-info management plus DateTime helpers
    # backing the exported C functions declared in the header above.
    with open_file_func(os.path.join(output_dir, "SharedLibrary_Common.cpp"), "w") as f:
        f.write(
            textwrap.dedent(
                """\
                /* ---------------------------------------------------------------------- */
                /* Copyright (c) Microsoft Corporation. All rights reserved. */
                /* Licensed under the MIT License */
                /* ---------------------------------------------------------------------- */
                #define DLL_EXPORT_COMPILE
                #include <memory>
                #include <string>
                #include "SharedLibrary_Common.h"
                #include "SharedLibrary_PointerTable.h"
                #include "Traits.h"
                // Forward declaration for DestroyTransformerSaveData
                ErrorInfoHandle* CreateErrorInfo(std::exception const &ex);
                extern "C" {
                FEATURIZER_LIBRARY_API bool GetErrorInfoString(/*in*/ ErrorInfoHandle *pHandle, /*out*/ char const **output_ptr, /*out*/ std::size_t *output_items) {
                if(pHandle == nullptr || output_ptr == nullptr || output_items == nullptr)
                return false;
                std::string const & str(*g_pointerTable.Get<std::string>(reinterpret_cast<size_t>(pHandle)));
                char * string_buffer(new char[str.size() + 1]);
                std::copy(str.begin(), str.end(), string_buffer);
                string_buffer[str.size()] = 0;
                *output_ptr = string_buffer;
                *output_items = str.size();
                return true;
                }
                FEATURIZER_LIBRARY_API bool DestroyErrorInfoString(/*in*/ char const *input_ptr, /*in*/ std::size_t input_items) {
                if(input_ptr == nullptr || input_items == 0)
                return false;
                delete [] input_ptr;
                return true;
                }
                FEATURIZER_LIBRARY_API bool DestroyErrorInfo(/*in*/ ErrorInfoHandle *pHandle) {
                if(pHandle == nullptr)
                return false;
                size_t index = reinterpret_cast<size_t>(pHandle);
                std::string & str(*g_pointerTable.Get<std::string>(index));
                g_pointerTable.Remove(index);
                delete &str;
                return true;
                }
                FEATURIZER_LIBRARY_API bool DestroyTransformerSaveData(/*in*/ unsigned char const *pBuffer, /*in*/ std::size_t cBufferSize, /*out*/ ErrorInfoHandle **ppErrorInfo) {
                if(ppErrorInfo == nullptr)
                return false;
                try {
                *ppErrorInfo = nullptr;
                if(pBuffer == nullptr) throw std::invalid_argument("'pBuffer' is null");
                if(cBufferSize == 0) throw std::invalid_argument("'cBufferSize' is 0");
                delete [] pBuffer;
                return true;
                }
                catch(std::exception const &ex) {
                *ppErrorInfo = CreateErrorInfo(ex);
                return false;
                }
                }
                } // extern "C"
                // These methods are used internally but not exported
                ErrorInfoHandle * CreateErrorInfo(std::exception const &ex) {
                std::unique_ptr<std::string> result(new std::string(ex.what()));
                size_t index = g_pointerTable.Add(result.release());
                return reinterpret_cast<ErrorInfoHandle *>(index);
                }
                std::chrono::system_clock::time_point CreateDateTime(DateTimeParameter const &param) {
                if(param.dataType == DateTimeParameter::DateTimeTypeValue::DateTimeInt64)
                return std::chrono::system_clock::from_time_t(param.data.posix);
                if(param.dataType == DateTimeParameter::DateTimeTypeValue::DateTimeString)
                return Microsoft::Featurizer::Traits<std::chrono::system_clock::time_point>::FromString(param.data.isoStr.pBuffer);
                throw std::runtime_error("'type' is invalid");
                }
                """,
            ),
        )

    # SharedLibrary_Common.hpp: C++-only convenience constructors for
    # DateTimeParameter used by callers of the C interface.
    with open_file_func(os.path.join(output_dir, "SharedLibrary_Common.hpp"), "w") as f:
        f.write(
            textwrap.dedent(
                """\
                // ----------------------------------------------------------------------
                // Copyright (c) Microsoft Corporation. All rights reserved.
                // Licensed under the MIT License
                // ----------------------------------------------------------------------
                #pragma once
                #include "SharedLibrary_Common.h"
                DateTimeParameter CreateDateTimeParameter(int64_t const &value);
                DateTimeParameter CreateDateTimeParameter(std::string const &value);
                // ----------------------------------------------------------------------
                // ----------------------------------------------------------------------
                // ----------------------------------------------------------------------
                inline DateTimeParameter CreateDateTimeParameter(int64_t const &value) {
                DateTimeParameter result;
                result.dataType = DateTimeParameter::DateTimeTypeValue::DateTimeInt64;
                result.data.posix = value;
                return result;
                }
                inline DateTimeParameter CreateDateTimeParameter(std::string const &value) {
                DateTimeParameter result;
                result.dataType = DateTimeParameter::DateTimeTypeValue::DateTimeString;
                result.data.isoStr.pBuffer = value.c_str();
                result.data.isoStr.cBufferElements = value.size();
                return result;
                }
                """,
            ),
        )

    # SharedLibrary_PointerTable.h: declares the global pointer table used to
    # map opaque handles to heap objects.
    with open_file_func(os.path.join(output_dir, "SharedLibrary_PointerTable.h"), "w") as f:
        f.write(
            textwrap.dedent(
                """\
                /* ---------------------------------------------------------------------- */
                /* Copyright (c) Microsoft Corporation. All rights reserved. */
                /* Licensed under the MIT License */
                /* ---------------------------------------------------------------------- */
                #pragma once
                #include "../PointerTable.h"
                extern Microsoft::Featurizer::PointerTable g_pointerTable;
                """,
            ),
        )

    # SharedLibrary_PointerTable.cpp: defines the global pointer table.
    with open_file_func(os.path.join(output_dir, "SharedLibrary_PointerTable.cpp"), "w") as f:
        f.write(
            textwrap.dedent(
                """\
                /* ---------------------------------------------------------------------- */
                /* Copyright (c) Microsoft Corporation. All rights reserved. */
                /* Licensed under the MIT License */
                /* ---------------------------------------------------------------------- */
                #include "SharedLibrary_PointerTable.h"
                #if (defined __clang__)
                # pragma clang diagnostic push
                # pragma clang diagnostic ignored "-Wexit-time-destructors"
                #endif
                Microsoft::Featurizer::PointerTable g_pointerTable;
                #if (defined __clang__)
                # pragma clang diagnostic pop
                #endif
                """,
            ),
        )

    return 0
# ----------------------------------------------------------------------
def _GenerateHeaderFile(open_file_func, output_dir, items, c_data_items, output_stream):
with open_file_func(
os.path.join(output_dir, "SharedLibrary_{}.h".format(items[0].name)),
"w",
) as f:
f.write(
textwrap.dedent(
"""\
/* ---------------------------------------------------------------------- */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* Licensed under the MIT License */
/* ---------------------------------------------------------------------- */
#pragma once
#include "SharedLibrary_Common.h"
extern "C" {
""",
),
)
wrote_custom_data = False
for item, c_data in zip(items, c_data_items):
template = getattr(item, "template", None)
d = _CreateInterfaceSubstitutionDict(item, c_data)
construct_params = []
if c_data.ConfigurationParamTypeInfoFactories:
for configuration_param, type_info in zip(
item.configuration_params,
c_data.ConfigurationParamTypeInfoFactories,
):
info = type_info.GetInputInfo(
configuration_param.name,
getattr(configuration_param, "is_optional", False),
"",
)
construct_params += info.ParameterDecl
delete_transformed_info = c_data.OutputTypeInfoFactory.GetDestroyOutputInfo()
if delete_transformed_info is not None:
delete_transformed_method = textwrap.dedent(
"""\
FEATURIZER_LIBRARY_API bool {name}{suffix}DestroyTransformedData({parameters}, /*out*/ ErrorInfoHandle **ppErrorInfo);
""",
).format(
parameters=", ".join(delete_transformed_info.ParameterDecl),
**d
)
else:
delete_transformed_method = ""
# Create the custom structs (if any)
| |
"BUNTS",
"BUNTY",
"BUNYA",
"BUOYS",
"BUPPY",
"BURAN",
"BURAS",
"BURBS",
"BURDS",
"BURET",
"BURFI",
"BURGH",
"BURGS",
"BURIN",
"BURKA",
"BURKE",
"BURKS",
"BURLS",
"BURNS",
"BUROO",
"BURPS",
"BURQA",
"BURRO",
"BURRS",
"BURRY",
"BURSA",
"BURSE",
"BUSBY",
"BUSES",
"BUSKS",
"BUSKY",
"BUSSU",
"BUSTI",
"BUSTS",
"BUSTY",
"BUTEO",
"BUTES",
"BUTLE",
"BUTOH",
"BUTTS",
"BUTTY",
"BUTUT",
"BUTYL",
"BUZZY",
"BWANA",
"BWAZI",
"BYDED",
"BYDES",
"BYKED",
"BYKES",
"BYRES",
"BYRLS",
"BYSSI",
"BYTES",
"BYWAY",
"CAAED",
"CABAS",
"CABER",
"CABOB",
"CABOC",
"CABRE",
"CACAS",
"CACKS",
"CACKY",
"CADEE",
"CADES",
"CADGE",
"CADGY",
"CADIE",
"CADIS",
"CADRE",
"CAECA",
"CAESE",
"CAFES",
"CAFFS",
"CAGED",
"CAGER",
"CAGES",
"CAGOT",
"CAHOW",
"CAIDS",
"CAINS",
"CAIRD",
"CAJON",
"CAJUN",
"CAKED",
"CAKES",
"CAKEY",
"CALFS",
"CALID",
"CALIF",
"CALIX",
"CALKS",
"CALLA",
"CALLS",
"CALMS",
"CALMY",
"CALOS",
"CALPA",
"CALPS",
"CALVE",
"CALYX",
"CAMAN",
"CAMAS",
"CAMES",
"CAMIS",
"CAMOS",
"CAMPI",
"CAMPO",
"CAMPS",
"CAMPY",
"CAMUS",
"CANED",
"CANEH",
"CANER",
"CANES",
"CANGS",
"CANID",
"CANNA",
"CANNS",
"CANSO",
"CANST",
"CANTO",
"CANTS",
"CANTY",
"CAPAS",
"CAPED",
"CAPES",
"CAPEX",
"CAPHS",
"CAPIZ",
"CAPLE",
"CAPON",
"CAPOS",
"CAPOT",
"CAPRI",
"CAPUL",
"CARAP",
"CARBO",
"CARBS",
"CARBY",
"CARDI",
"CARDS",
"CARDY",
"CARED",
"CARER",
"CARES",
"CARET",
"CAREX",
"CARKS",
"CARLE",
"CARLS",
"CARNS",
"CARNY",
"CAROB",
"CAROM",
"CARON",
"CARPI",
"CARPS",
"CARRS",
"CARSE",
"CARTA",
"CARTE",
"CARTS",
"CARVY",
"CASAS",
"CASCO",
"CASED",
"CASES",
"CASKS",
"CASKY",
"CASTS",
"CASUS",
"CATES",
"CAUDA",
"CAUKS",
"CAULD",
"CAULS",
"CAUMS",
"CAUPS",
"CAURI",
"CAUSA",
"CAVAS",
"CAVED",
"CAVEL",
"CAVER",
"CAVES",
"CAVIE",
"CAWED",
"CAWKS",
"CAXON",
"CEAZE",
"CEBID",
"CECAL",
"CECUM",
"CEDED",
"CEDER",
"CEDES",
"CEDIS",
"CEIBA",
"CEILI",
"CEILS",
"CELEB",
"CELLA",
"CELLI",
"CELLS",
"CELOM",
"CELTS",
"CENSE",
"CENTO",
"CENTS",
"CENTU",
"CEORL",
"CEPES",
"CERCI",
"CERED",
"CERES",
"CERGE",
"CERIA",
"CERIC",
"CERNE",
"CEROC",
"CEROS",
"CERTS",
"CERTY",
"CESSE",
"CESTA",
"CESTI",
"CETES",
"CETYL",
"CEZVE",
"CHACE",
"CHACK",
"CHACO",
"CHADO",
"CHADS",
"CHAFT",
"CHAIS",
"CHALS",
"CHAMS",
"CHANA",
"CHANG",
"CHANK",
"CHAPE",
"CHAPS",
"CHAPT",
"CHARA",
"CHARE",
"CHARK",
"CHARR",
"CHARS",
"CHARY",
"CHATS",
"CHAVE",
"CHAVS",
"CHAWK",
"CHAWS",
"CHAYA",
"CHAYS",
"CHEEP",
"CHEFS",
"CHEKA",
"CHELA",
"CHELP",
"CHEMO",
"CHEMS",
"CHERE",
"CHERT",
"CHETH",
"CHEVY",
"CHEWS",
"CHEWY",
"CHIAO",
"CHIAS",
"CHIBS",
"CHICA",
"CHICH",
"CHICO",
"CHICS",
"CHIEL",
"CHIKS",
"CHILE",
"CHIMB",
"CHIMO",
"CHIMP",
"CHINE",
"CHING",
"CHINK",
"CHINO",
"CHINS",
"CHIPS",
"CHIRK",
"CHIRL",
"CHIRM",
"CHIRO",
"CHIRR",
"CHIRT",
"CHIRU",
"CHITS",
"CHIVE",
"CHIVS",
"CHIVY",
"CHIZZ",
"CHOCO",
"CHOCS",
"CHODE",
"CHOGS",
"CHOIL",
"CHOKO",
"CHOKY",
"CHOLA",
"CHOLI",
"CHOLO",
"CHOMP",
"CHONS",
"CHOOF",
"CHOOK",
"CHOOM",
"CHOON",
"CHOPS",
"CHOTA",
"CHOTT",
"CHOUT",
"CHOUX",
"CHOWK",
"CHOWS",
"CHUBS",
"CHUFA",
"CHUFF",
"CHUGS",
"CHUMS",
"CHURL",
"CHURR",
"CHUSE",
"CHUTS",
"CHYLE",
"CHYME",
"CHYND",
"CIBOL",
"CIDED",
"CIDES",
"CIELS",
"CIGGY",
"CILIA",
"CILLS",
"CIMAR",
"CIMEX",
"CINCT",
"CINES",
"CINQS",
"CIONS",
"CIPPI",
"CIRCS",
"CIRES",
"CIRLS",
"CIRRI",
"CISCO",
"CISSY",
"CISTS",
"CITAL",
"CITED",
"CITER",
"CITES",
"CIVES",
"CIVET",
"CIVIE",
"CIVVY",
"CLACH",
"CLADE",
"CLADS",
"CLAES",
"CLAGS",
"CLAME",
"CLAMS",
"CLANS",
"CLAPS",
"CLAPT",
"CLARO",
"CLART",
"CLARY",
"CLAST",
"CLATS",
"CLAUT",
"CLAVE",
"CLAVI",
"CLAWS",
"CLAYS",
"CLECK",
"CLEEK",
"CLEEP",
"CLEFS",
"CLEGS",
"CLEIK",
"CLEMS",
"CLEPE",
"CLEPT",
"CLEVE",
"CLEWS",
"CLIED",
"CLIES",
"CLIFT",
"CLIME",
"CLINE",
"CLINT",
"CLIPE",
"CLIPS",
"CLIPT",
"CLITS",
"CLOAM",
"CLODS",
"CLOFF",
"CLOGS",
"CLOKE",
"CLOMB",
"CLOMP",
"CLONK",
"CLONS",
"CLOOP",
"CLOOT",
"CLOPS",
"CLOTE",
"CLOTS",
"CLOUR",
"CLOUS",
"CLOWS",
"CLOYE",
"CLOYS",
"CLOZE",
"CLUBS",
"CLUES",
"CLUEY",
"CLUNK",
"CLYPE",
"CNIDA",
"COACT",
"COADY",
"COALA",
"COALS",
"COALY",
"COAPT",
"COARB",
"COATE",
"COATI",
"COATS",
"COBBS",
"COBBY",
"COBIA",
"COBLE",
"COBZA",
"COCAS",
"COCCI",
"COCCO",
"COCKS",
"COCKY",
"COCOS",
"CODAS",
"CODEC",
"CODED",
"CODEN",
"CODER",
"CODES",
"CODEX",
"CODON",
"COEDS",
"COFFS",
"COGIE",
"COGON",
"COGUE",
"COHAB",
"COHEN",
"COHOE",
"COHOG",
"COHOS",
"COIFS",
"COIGN",
"COILS",
"COINS",
"COIRS",
"COITS",
"COKED",
"COKES",
"COLAS",
"COLBY",
"COLDS",
"COLED",
"COLES",
"COLEY",
"COLIC",
"COLIN",
"COLLS",
"COLLY",
"COLOG",
"COLTS",
"COLZA",
"COMAE",
"COMAL",
"COMAS",
"COMBE",
"COMBI",
"COMBO",
"COMBS",
"COMBY",
"COMER",
"COMES",
"COMIX",
"COMMO",
"COMMS",
"COMMY",
"COMPO",
"COMPS",
"COMPT",
"COMTE",
"COMUS",
"CONED",
"CONES",
"CONEY",
"CONFS",
"CONGA",
"CONGE",
"CONGO",
"CONIA",
"CONIN",
"CONKS",
"CONKY",
"CONNE",
"CONNS",
"CONTE",
"CONTO",
"CONUS",
"CONVO",
"COOCH",
"COOED",
"COOEE",
"COOER",
"COOEY",
"COOFS",
"COOKS",
"COOKY",
"COOLS",
"COOLY",
"COOMB",
"COOMS",
"COOMY",
"COONS",
"COOPS",
"COOPT",
"COOST",
"COOTS",
"COOZE",
"COPAL",
"COPAY",
"COPED",
"COPEN",
"COPER",
"COPES",
"COPPY",
"COPRA",
"COPSY",
"COQUI",
"CORAM",
"CORBE",
"CORBY",
"CORDS",
"CORED",
"CORES",
"COREY",
"CORGI",
"CORIA",
"CORKS",
"CORKY",
"CORMS",
"CORNI",
"CORNO",
"CORNS",
"CORNU",
"CORPS",
"CORSE",
"CORSO",
"COSEC",
"COSED",
"COSES",
"COSET",
"COSEY",
"COSIE",
"COSTA",
"COSTE",
"COSTS",
"COTAN",
"COTED",
"COTES",
"COTHS",
"COTTA",
"COTTS",
"COUDE",
"COUPS",
"COURB",
"COURD",
"COURE",
"COURS",
"COUTA",
"COUTH",
"COVED",
"COVES",
"COVIN",
"COWAL",
"COWAN",
"COWED",
"COWKS",
"COWLS",
"COWPS",
"COWRY",
"COXAE",
"COXAL",
"COXED",
"COXES",
"COXIB",
"COYAU",
"COYED",
"COYER",
"COYPU",
"COZED",
"COZEN",
"COZES",
"COZEY",
"COZIE",
"CRAAL",
"CRABS",
"CRAGS",
"CRAIC",
"CRAIG",
"CRAKE",
"CRAME",
"CRAMS",
"CRANS",
"CRAPE",
"CRAPS",
"CRAPY",
"CRARE",
"CRAWS",
"CRAYS",
"CREDS",
"CREEL",
"CREES",
"CREMS",
"CRENA",
"CREPS",
"CREPY",
"CREWE",
"CREWS",
"CRIAS",
"CRIBS",
"CRIES",
"CRIMS",
"CRINE",
"CRIOS",
"CRIPE",
"CRIPS",
"CRISE",
"CRITH",
"CRITS",
"CROCI",
"CROCS",
"CROFT",
"CROGS",
"CROMB",
"CROME",
"CRONK",
"CRONS",
"CROOL",
"CROON",
"CROPS",
"CRORE",
"CROST",
"CROUT",
"CROWS",
"CROZE",
"CRUCK",
"CRUDO",
"CRUDS",
"CRUDY",
"CRUES",
"CRUET",
"CRUFT",
"CRUNK",
"CRUOR",
"CRURA",
"CRUSE",
"CRUSY",
"CRUVE",
"CRWTH",
"CRYER",
"CTENE",
"CUBBY",
"CUBEB",
"CUBED",
"CUBER",
"CUBES",
"CUBIT",
"CUDDY",
"CUFFO",
"CUFFS",
"CUIFS",
"CUING",
"CUISH",
"CUITS",
"CUKES",
"CULCH",
"CULET",
"CULEX",
"CULLS",
"CULLY",
"CULMS",
"CULPA",
"CULTI",
"CULTS",
"CULTY",
"CUMEC",
"CUNDY",
"CUNEI",
"CUNIT",
"CUNTS",
"CUPEL",
"CUPID",
"CUPPA",
"CUPPY",
"CURAT",
"CURBS",
"CURCH",
"CURDS",
"CURDY",
"CURED",
"CURER",
"CURES",
"CURET",
"CURFS",
"CURIA",
"CURIE",
"CURLI",
"CURLS",
"CURNS",
"CURNY",
"CURRS",
"CURSI",
"CURST",
"CUSEC",
"CUSHY",
"CUSKS",
"CUSPS",
"CUSPY",
"CUSSO",
"CUSUM",
"CUTCH",
"CUTER",
"CUTES",
"CUTEY",
"CUTIN",
"CUTIS",
"CUTTO",
"CUTTY",
"CUTUP",
"CUVEE",
"CUZES",
"CWTCH",
"CYANO",
"CYANS",
"CYCAD",
"CYCAS",
"CYCLO",
"CYDER",
"CYLIX",
"CYMAE",
"CYMAR",
"CYMAS",
"CYMES",
"CYMOL",
"CYSTS",
"CYTES",
"CYTON",
"CZARS",
"DAALS",
"DABBA",
"DACES",
"DACHA",
"DACKS",
"DADAH",
"DADAS",
"DADOS",
"DAFFS",
"DAFFY",
"DAGGA",
"DAGGY",
"DAGOS",
"DAHLS",
"DAIKO",
"DAINE",
"DAINT",
"DAKER",
"DALED",
"DALES",
"DALIS",
"DALLE",
"DALTS",
"DAMAN",
"DAMAR",
"DAMES",
"DAMME",
"DAMNS",
"DAMPS",
"DAMPY",
"DANCY",
"DANGS",
"DANIO",
"DANKS",
"DANNY",
"DANTS",
"DARAF",
"DARBS",
"DARCY",
"DARED",
"DARER",
"DARES",
"DARGA",
"DARGS",
"DARIC",
"DARIS",
"DARKS",
"DARKY",
"DARNS",
"DARRE",
"DARTS",
"DARZI",
"DASHI",
"DASHY",
"DATAL",
"DATED",
"DATER",
"DATES",
"DATOS",
"DATTO",
"DAUBE",
"DAUBS",
"DAUBY",
"DAUDS",
"DAULT",
"DAURS",
"DAUTS",
"DAVEN",
"DAVIT",
"DAWAH",
"DAWDS",
"DAWED",
"DAWEN",
"DAWKS",
"DAWNS",
"DAWTS",
"DAYAN",
"DAYCH",
"DAYNT",
"DAZED",
"DAZER",
"DAZES",
"DEADS",
"DEAIR",
"DEALS",
"DEANS",
"DEARE",
"DEARN",
"DEARS",
"DEARY",
"DEASH",
"DEAVE",
"DEAWS",
"DEAWY",
"DEBAG",
"DEBBY",
"DEBEL",
"DEBES",
"DEBTS",
"DEBUD",
"DEBUR",
"DEBUS",
"DEBYE",
"DECAD",
"DECAF",
"DECAN",
"DECKO",
"DECKS",
"DECOS",
"DEDAL",
"DEEDS",
"DEEDY",
"DEELY",
"DEEMS",
"DEENS",
"DEEPS",
"DEERE",
"DEERS",
"DEETS",
"DEEVE",
"DEEVS",
"DEFAT",
"DEFFO",
"DEFIS",
"DEFOG",
"DEGAS",
"DEGUM",
"DEGUS",
"DEICE",
"DEIDS",
"DEIFY",
"DEILS",
"DEISM",
"DEIST",
"DEKED",
"DEKES",
"DEKKO",
"DELED",
"DELES",
"DELFS",
"DELFT",
"DELIS",
"DELLS",
"DELLY",
"DELOS",
"DELPH",
"DELTS",
"DEMAN",
"DEMES",
"DEMIC",
"DEMIT",
"DEMOB",
"DEMOI",
"DEMOS",
"DEMPT",
"DENAR",
"DENAY",
"DENCH",
"DENES",
"DENET",
"DENIS",
"DENTS",
"DEOXY",
"DERAT",
"DERAY",
"DERED",
"DERES",
"DERIG",
"DERMA",
"DERMS",
"DERNS",
"DERNY",
"DEROS",
"DERRO",
"DERRY",
"DERTH",
"DERVS",
"DESEX",
"DESHI",
"DESIS",
"DESKS",
"DESSE",
"DEVAS",
"DEVEL",
"DEVIS",
"DEVON",
"DEVOS",
"DEVOT",
"DEWAN",
"DEWAR",
"DEWAX",
"DEWED",
"DEXES",
"DEXIE",
"DHABA",
"DHAKS",
"DHALS",
"DHIKR",
"DHOBI",
"DHOLE",
"DHOLL",
"DHOLS",
"DHOTI",
"DHOWS",
"DHUTI",
"DIACT",
"DIALS",
"DIANE",
"DIAZO",
"DIBBS",
"DICED",
"DICER",
"DICES",
"DICHT",
"DICKS",
"DICKY",
"DICOT",
"DICTA",
"DICTS",
"DICTY",
"DIDDY",
"DIDIE",
"DIDOS",
"DIDST",
"DIEBS",
"DIELS",
"DIENE",
"DIETS",
"DIFFS",
"DIGHT",
"DIKAS",
"DIKED",
"DIKER",
"DIKES",
"DIKEY",
"DILDO",
"DILLI",
"DILLS",
"DIMBO",
"DIMER",
"DIMES",
"DIMPS",
"DINAR",
"DINED",
"DINES",
"DINGE",
"DINGS",
"DINIC",
"DINKS",
"DINKY",
"DINNA",
"DINOS",
"DINTS",
"DIOLS",
"DIOTA",
"DIPPY",
"DIPSO",
"DIRAM",
"DIRER",
"DIRKE",
"DIRKS",
"DIRLS",
"DIRTS",
"DISAS",
"DISCI",
"DISCS",
"DISHY",
"DISKS",
"DISME",
"DITAL",
"DITAS",
"DITED",
"DITES",
"DITSY",
"DITTS",
"DITZY",
"DIVAN",
"DIVAS",
"DIVED",
"DIVES",
"DIVIS",
"DIVNA",
"DIVOS",
"DIVOT",
"DIVVY",
"DIWAN",
"DIXIE",
"DIXIT",
"DIYAS",
"DIZEN",
"DJINN",
"DJINS",
"DOABS",
"DOATS",
"DOBBY",
"DOBES",
"DOBIE",
"DOBLA",
"DOBRA",
"DOBRO",
"DOCHT",
"DOCKS",
"DOCOS",
"DOCUS",
"DODDY",
"DODOS",
"DOEKS",
"DOERS",
"DOEST",
"DOETH",
"DOFFS",
"DOGAN",
"DOGES",
"DOGEY",
"DOGGO",
"DOGGY",
"DOGIE",
"DOHYO",
"DOILT",
"DOILY",
"DOITS",
"DOJOS",
"DOLCE",
"DOLCI",
"DOLED",
"DOLES",
"DOLIA",
"DOLLS",
"DOLMA",
"DOLOR",
"DOLOS",
"DOLTS",
"DOMAL",
"DOMED",
"DOMES",
"DOMIC",
"DONAH",
"DONAS",
"DONEE",
"DONER",
"DONGA",
"DONGS",
"DONKO",
"DONNA",
"DONNE",
"DONNY",
"DONSY",
"DOOBS",
"DOOCE",
"DOODY",
"DOOKS",
"DOOLE",
"DOOLS",
"DOOLY",
"DOOMS",
"DOOMY",
"DOONA",
"DOORN",
"DOORS",
"DOOZY",
"DOPAS",
"DOPED",
"DOPER",
"DOPES",
"DORAD",
"DORBA",
"DORBS",
"DOREE",
"DORES",
"DORIC",
"DORIS",
"DORKS",
"DORKY",
"DORMS",
"DORMY",
"DORPS",
"DORRS",
"DORSA",
"DORSE",
"DORTS",
"DORTY",
"DOSAI",
"DOSAS",
"DOSED",
"DOSEH",
"DOSER",
"DOSES",
"DOSHA",
"DOTAL",
"DOTED",
"DOTER",
"DOTES",
"DOTTY",
"DOUAR",
"DOUCE",
"DOUCS",
"DOUKS",
"DOULA",
"DOUMA",
"DOUMS",
"DOUPS",
"DOURA",
"DOUSE",
"DOUTS",
"DOVED",
"DOVEN",
"DOVER",
"DOVES",
"DOVIE",
"DOWAR",
"DOWDS",
"DOWED",
"DOWER",
"DOWIE",
"DOWLE",
"DOWLS",
"DOWLY",
"DOWNA",
"DOWNS",
"DOWPS",
"DOWSE",
"DOWTS",
"DOXED",
"DOXES",
"DOXIE",
"DOYEN",
"DOYLY",
"DOZED",
"DOZER",
"DOZES",
"DRABS",
"DRACK",
"DRACO",
"DRAFF",
"DRAGS",
"DRAIL",
"DRAMS",
"DRANT",
"DRAPS",
"DRATS",
"DRAVE",
"DRAWS",
"DRAYS",
"DREAR",
"DRECK",
"DREED",
"DREER",
"DREES",
"DREGS",
"DREKS",
"DRENT",
"DRERE",
"DREST",
"DREYS",
"DRIBS",
"DRICE",
"DRIES",
"DRILY",
"DRIPS",
"DRIPT",
"DROID",
"DROIL",
"DROKE",
"DROLE",
"DROME",
"DRONY",
"DROOB",
"DROOG",
"DROOK",
"DROPS",
"DROPT",
"DROUK",
"DROWS",
"DRUBS",
"DRUGS",
"DRUMS",
"DRUPE",
"DRUSE",
"DRUSY",
"DRUXY",
"DRYAD",
"DRYAS",
"DSOBO",
"DSOMO",
"DUADS",
"DUALS",
"DUANS",
"DUARS",
"DUBBO",
"DUCAL",
"DUCAT",
"DUCES",
"DUCKS",
"DUCKY",
"DUCTS",
"DUDDY",
"DUDED",
"DUDES",
"DUELS",
"DUETS",
"DUETT",
| |
error_checking.assert_is_numpy_array(
numpy.asarray(field_names_grib1),
exact_dimensions=numpy.array([num_fields]))
# Find grid points for model.
grid_point_x_metres, grid_point_y_metres = (
nwp_model_utils.get_xy_grid_points(
model_name=model_name, grid_name=grid_id)
)
# Project query points to model coords.
query_x_metres, query_y_metres = nwp_model_utils.project_latlng_to_xy(
latitudes_deg=query_point_table[QUERY_LAT_COLUMN].values,
longitudes_deg=query_point_table[QUERY_LNG_COLUMN].values,
model_name=model_name, grid_name=grid_id)
argument_dict = {
QUERY_X_COLUMN: query_x_metres,
QUERY_Y_COLUMN: query_y_metres
}
query_point_table = query_point_table.assign(**argument_dict)
# Create interp_table.
num_query_points = len(query_point_table.index)
nan_array = numpy.full(num_query_points, numpy.nan)
interp_dict = {}
for j in range(num_fields):
interp_dict.update({field_names[j]: nan_array})
interp_table = pandas.DataFrame.from_dict(interp_dict)
# Create the rest of the metadata.
metadata_dict = _get_wind_rotation_metadata(
field_names_grib1=field_names_grib1, model_name=model_name)
metadata_dict.update({
GRID_POINT_X_KEY: grid_point_x_metres,
GRID_POINT_Y_KEY: grid_point_y_metres
})
if numpy.any(metadata_dict[ROTATE_WIND_FLAGS_KEY]):
rotation_cosine_by_query_point, rotation_sine_by_query_point = (
nwp_model_utils.get_wind_rotation_angles(
latitudes_deg=query_point_table[QUERY_LAT_COLUMN].values,
longitudes_deg=query_point_table[QUERY_LNG_COLUMN].values,
model_name=model_name)
)
else:
rotation_cosine_by_query_point = None
rotation_sine_by_query_point = None
metadata_dict.update({
ROTATION_COSINES_KEY: rotation_cosine_by_query_point,
ROTATION_SINES_KEY: rotation_sine_by_query_point
})
query_point_table.drop(
[QUERY_LAT_COLUMN, QUERY_LNG_COLUMN], axis=1, inplace=True)
return query_point_table, interp_table, metadata_dict
def _find_heights_with_temperature(
warm_temperatures_kelvins, cold_temperatures_kelvins,
warm_heights_m_asl, cold_heights_m_asl, target_temperature_kelvins):
"""At each horizontal point, finds height with the target temperature.
P = number of horizontal points
:param warm_temperatures_kelvins: length-P numpy array of temperatures on
warm side.
:param cold_temperatures_kelvins: length-P numpy array of temperatures on
cold side.
:param warm_heights_m_asl: length-P numpy array of heights (metres above sea
level) corresponding to `warm_temperatures_kelvins`.
:param cold_heights_m_asl: length-P numpy array of heights (metres above sea
level) corresponding to `cold_temperatures_kelvins`.
:param target_temperature_kelvins: Target temperature.
:return: target_heights_m_asl: length-P numpy array of heights (metres above
sea level) with the target temperature, estimated by interpolation.
"""
bad_point_flags = numpy.logical_or(
numpy.isnan(warm_temperatures_kelvins),
numpy.isnan(cold_temperatures_kelvins))
num_points = len(warm_temperatures_kelvins)
target_heights_m_asl = numpy.full(num_points, numpy.nan)
if numpy.all(bad_point_flags):
return target_heights_m_asl
good_point_indices = numpy.where(numpy.invert(bad_point_flags))[0]
warm_minus_cold_kelvins = (
warm_temperatures_kelvins[good_point_indices] -
cold_temperatures_kelvins[good_point_indices])
target_minus_cold_kelvins = (
target_temperature_kelvins -
cold_temperatures_kelvins[good_point_indices])
warm_minus_cold_metres = (warm_heights_m_asl[good_point_indices] -
cold_heights_m_asl[good_point_indices])
target_minus_cold_metres = warm_minus_cold_metres * (
target_minus_cold_kelvins / warm_minus_cold_kelvins)
target_heights_m_asl[good_point_indices] = (
cold_heights_m_asl[good_point_indices] + target_minus_cold_metres)
return target_heights_m_asl
def check_temporal_interp_method(interp_method_string):
    """Ensures that temporal-interpolation method is valid.

    :param interp_method_string: Interp method.
    :raises: ValueError: if `interp_method_string` is not in
        `TEMPORAL_INTERP_METHOD_STRINGS`.
    """

    error_checking.assert_is_string(interp_method_string)
    if interp_method_string in TEMPORAL_INTERP_METHOD_STRINGS:
        return

    error_string = (
        '\n\n{0:s}\nValid temporal-interp methods (listed above) do not '
        'include "{1:s}".'
    ).format(str(TEMPORAL_INTERP_METHOD_STRINGS), interp_method_string)

    raise ValueError(error_string)
def check_spatial_interp_method(interp_method_string):
    """Ensures that spatial-interpolation method is valid.

    :param interp_method_string: Interp method.
    :raises: ValueError: if `interp_method_string` is not in
        `SPATIAL_INTERP_METHOD_STRINGS`.
    """

    error_checking.assert_is_string(interp_method_string)
    if interp_method_string in SPATIAL_INTERP_METHOD_STRINGS:
        return

    error_string = (
        '\n\n{0:s}\nValid spatial-interp methods (listed above) do not '
        'include "{1:s}".'
    ).format(str(SPATIAL_INTERP_METHOD_STRINGS), interp_method_string)

    raise ValueError(error_string)
def interp_in_time(
        input_matrix, sorted_input_times_unix_sec, query_times_unix_sec,
        method_string, extrapolate=False):
    """Temporal interpolation.

    D = number of dimensions (for both input_matrix and interp_matrix)
    N = number of input times
    Q = number of query times

    :param input_matrix: D-dimensional numpy array, where time increases along
        the last axis (length N).
    :param sorted_input_times_unix_sec: length-N numpy array of input times,
        sorted in ascending order.
    :param query_times_unix_sec: length-Q numpy array of output times.
    :param method_string: Interp method (must be accepted by
        `check_temporal_interp_method`).
    :param extrapolate: Boolean flag.  If True, will extrapolate to times
        outside the range of `sorted_input_times_unix_sec`; if False, will
        throw an error for such query times.
    :return: interp_matrix: D-dimensional numpy array, where the last axis
        represents time (length Q), ordered like `query_times_unix_sec`.
    """

    check_temporal_interp_method(method_string)
    error_checking.assert_is_boolean(extrapolate)

    # Both time arrays must be integer-valued, NaN-free, and 1-D.
    for these_times_unix_sec in (
            sorted_input_times_unix_sec, query_times_unix_sec):
        error_checking.assert_is_integer_numpy_array(these_times_unix_sec)
        error_checking.assert_is_numpy_array_without_nan(these_times_unix_sec)
        error_checking.assert_is_numpy_array(
            these_times_unix_sec, num_dimensions=1)

    # Nearest-neighbour variants are handled by dedicated helpers.
    if method_string == PREV_NEIGHBOUR_METHOD_STRING:
        return _interp_to_previous_time(
            input_matrix=input_matrix,
            input_times_unix_sec=sorted_input_times_unix_sec,
            query_times_unix_sec=query_times_unix_sec)

    if method_string == NEXT_NEIGHBOUR_METHOD_STRING:
        return _interp_to_next_time(
            input_matrix=input_matrix,
            input_times_unix_sec=sorted_input_times_unix_sec,
            query_times_unix_sec=query_times_unix_sec)

    # All remaining methods go through scipy's 1-D interpolator.
    if extrapolate:
        boundary_kwargs = {'bounds_error': False, 'fill_value': 'extrapolate'}
    else:
        boundary_kwargs = {'bounds_error': True}

    interp_object = scipy.interpolate.interp1d(
        sorted_input_times_unix_sec, input_matrix, kind=method_string,
        assume_sorted=True, **boundary_kwargs)

    return interp_object(query_times_unix_sec)
def interp_from_xy_grid_to_points(
        input_matrix, sorted_grid_point_x_metres, sorted_grid_point_y_metres,
        query_x_coords_metres, query_y_coords_metres,
        method_string=NEAREST_NEIGHBOUR_METHOD_STRING,
        spline_degree=DEFAULT_SPLINE_DEGREE, extrapolate=False):
    """Interpolation from x-y grid to scattered points.

    M = number of rows (unique y-coordinates at grid points)
    N = number of columns (unique x-coordinates at grid points)
    Q = number of query points

    :param input_matrix: M-by-N numpy array of gridded data.
    :param sorted_grid_point_x_metres: length-N numpy array with x-coordinates
        of grid points, sorted in ascending order;
        sorted_grid_point_x_metres[j] matches input_matrix[:, j].
    :param sorted_grid_point_y_metres: length-M numpy array with y-coordinates
        of grid points, sorted in ascending order;
        sorted_grid_point_y_metres[i] matches input_matrix[i, :].
    :param query_x_coords_metres: length-Q numpy array with x-coordinates of
        query points.
    :param query_y_coords_metres: length-Q numpy array with y-coordinates of
        query points.
    :param method_string: Interpolation method (must be accepted by
        `check_spatial_interp_method`).
    :param spline_degree: [used only if method_string = "spline"]
        Polynomial degree (1 = linear, 2 = quadratic, 3 = cubic).
    :param extrapolate: Boolean flag.  If True, will extrapolate to points
        outside the grid domain; if False, will throw an error for such
        query points.
    :return: interp_values: length-Q numpy array of interpolated values.
    """

    # Validate grid coordinates (each must be a NaN-free 1-D array).
    error_checking.assert_is_numpy_array_without_nan(sorted_grid_point_x_metres)
    error_checking.assert_is_numpy_array(
        sorted_grid_point_x_metres, num_dimensions=1)
    num_grid_columns = len(sorted_grid_point_x_metres)

    error_checking.assert_is_numpy_array_without_nan(sorted_grid_point_y_metres)
    error_checking.assert_is_numpy_array(
        sorted_grid_point_y_metres, num_dimensions=1)
    num_grid_rows = len(sorted_grid_point_y_metres)

    # The data matrix must match the grid dimensions.
    error_checking.assert_is_real_numpy_array(input_matrix)
    error_checking.assert_is_numpy_array(
        input_matrix,
        exact_dimensions=numpy.array([num_grid_rows, num_grid_columns]))

    # Validate query coordinates (equal-length, NaN-free, 1-D).
    error_checking.assert_is_numpy_array_without_nan(query_x_coords_metres)
    error_checking.assert_is_numpy_array(
        query_x_coords_metres, num_dimensions=1)
    num_query_points = len(query_x_coords_metres)

    error_checking.assert_is_numpy_array_without_nan(query_y_coords_metres)
    error_checking.assert_is_numpy_array(
        query_y_coords_metres, exact_dimensions=numpy.array([num_query_points]))

    error_checking.assert_is_boolean(extrapolate)
    if not extrapolate:
        # Without extrapolation, every query point must lie inside the grid.
        for these_query_coords, these_grid_coords in (
                (query_x_coords_metres, sorted_grid_point_x_metres),
                (query_y_coords_metres, sorted_grid_point_y_metres)):
            error_checking.assert_is_geq_numpy_array(
                these_query_coords, numpy.min(these_grid_coords))
            error_checking.assert_is_leq_numpy_array(
                these_query_coords, numpy.max(these_grid_coords))

    check_spatial_interp_method(method_string)
    if method_string == NEAREST_NEIGHBOUR_METHOD_STRING:
        return _nn_interp_from_xy_grid_to_points(
            input_matrix=input_matrix,
            sorted_grid_point_x_metres=sorted_grid_point_x_metres,
            sorted_grid_point_y_metres=sorted_grid_point_y_metres,
            query_x_coords_metres=query_x_coords_metres,
            query_y_coords_metres=query_y_coords_metres)

    # Any non-nearest-neighbour method uses a rectangular bivariate spline.
    interp_object = scipy.interpolate.RectBivariateSpline(
        sorted_grid_point_y_metres, sorted_grid_point_x_metres, input_matrix,
        kx=spline_degree, ky=spline_degree,
        s=SMOOTHING_FACTOR_FOR_SPATIAL_INTERP)

    return interp_object(
        query_y_coords_metres, query_x_coords_metres, grid=False)
def interp_nwp_from_xy_grid(
query_point_table, field_names, field_names_grib1, model_name,
top_grib_directory_name, use_all_grids=True, grid_id=None,
temporal_interp_method_string=PREV_NEIGHBOUR_METHOD_STRING,
spatial_interp_method_string=NEAREST_NEIGHBOUR_METHOD_STRING,
spline_degree=DEFAULT_SPLINE_DEGREE,
wgrib_exe_name=grib_io.WGRIB_EXE_NAME_DEFAULT,
wgrib2_exe_name=grib_io.WGRIB2_EXE_NAME_DEFAULT,
raise_error_if_missing=False):
"""Interpolates NWP data from x-y grid in both space and time.
Each query point consists of (latitude, longitude, time). Before
interpolation, query points will be projected to the same x-y space as the
model.
F = number of fields to interpolate
Q = number of query points
:param query_point_table: Q-row pandas DataFrame with the following columns.
query_point_table.unix_time_sec: Time.
query_point_table.latitude_deg: Latitude (deg N).
query_point_table.longitude_deg: Longitude (deg E).
:param field_names: length-F list of field names in GewitterGefahr format.
:param field_names_grib1: length-F list of field names in grib1 format.
:param model_name: Model name (must be accepted by
`nwp_model_utils.check_model_name`).
:param top_grib_directory_name: Name of top-level directory with grib files
containing NWP data.
:param use_all_grids: Boolean flag. If True, this method will interp from
the highest-resolution grid available at each model-initialization time.
If False, will interpolate from only one grid.
:param grid_id: [used only if use_all_grids = False]
Model grid (must be accepted by `nwp_model_utils.check_grid_name`).
:param temporal_interp_method_string: Temporal interp method (must be
accepted by `check_temporal_interp_method`).
:param spatial_interp_method_string: Spatial interp method (must be
accepted by `check_spatial_interp_method`).
:param spline_degree: See doc for `interp_from_xy_grid_to_points`.
:param wgrib_exe_name: Path to wgrib executable.
:param wgrib2_exe_name: Path to wgrib2 executable.
:param raise_error_if_missing: See doc for `_read_nwp_for_interp`.
:return: interp_table: pandas DataFrame, where each column is one field and
each row is one query point. Column names are taken directly from the
input list `field_names`.
"""
error_checking.assert_is_boolean(use_all_grids)
nwp_model_utils.check_model_name(model_name)
if model_name == nwp_model_utils.NARR_MODEL_NAME or use_all_grids:
grid_ids = _get_grids_for_model(model_name)
else:
grid_ids = [grid_id]
num_grids = len(grid_ids)
x_points_by_grid_metres = [numpy.array([])] * num_grids
y_points_by_grid_metres = [numpy.array([])] * num_grids
query_point_table_by_grid = [pandas.DataFrame()] * num_grids
for g in range(num_grids):
(query_point_table_by_grid[g], interp_table, metadata_dict
) = _prep_to_interp_nwp_from_xy_grid(
query_point_table=copy.deepcopy(query_point_table),
model_name=model_name, grid_id=grid_ids[g], field_names=field_names,
field_names_grib1=field_names_grib1)
x_points_by_grid_metres[g] = metadata_dict[GRID_POINT_X_KEY]
y_points_by_grid_metres[g] = metadata_dict[GRID_POINT_Y_KEY]
rotate_wind_flags = metadata_dict[ROTATE_WIND_FLAGS_KEY]
field_names_other_wind_component_grib1 = metadata_dict[
FIELD_NAMES_OTHER_COMPONENT_KEY]
other_wind_component_indices = metadata_dict[
OTHER_WIND_COMPONENT_INDICES_KEY]
rotation_sine_by_query_point = metadata_dict[ROTATION_SINES_KEY]
rotation_cosine_by_query_point = metadata_dict[ROTATION_COSINES_KEY]
_, init_time_step_hours = nwp_model_utils.get_time_steps(model_name)
init_times_unix_sec, query_to_model_times_table = (
nwp_model_utils.get_times_needed_for_interp(
query_times_unix_sec=query_point_table[QUERY_TIME_COLUMN].values,
model_time_step_hours=init_time_step_hours,
method_string=temporal_interp_method_string)
)
num_init_times = len(init_times_unix_sec)
num_query_time_ranges = len(query_to_model_times_table.index)
num_fields = len(field_names)
interp_done_by_field = numpy.full(num_fields, False, dtype=bool)
for j in range(num_fields):
if interp_done_by_field[j]:
continue
list_of_2d_grids = [None] * num_init_times
list_of_2d_grids_other_wind_component = [None] * num_init_times
for i in range(num_query_time_ranges):
if i == num_query_time_ranges - 1:
query_indices_in_this_range = numpy.where(
query_point_table[QUERY_TIME_COLUMN].values >=
query_to_model_times_table[
nwp_model_utils.MIN_QUERY_TIME_COLUMN].values[-1]
)[0]
else:
query_indices_in_this_range = numpy.where(numpy.logical_and(
query_point_table[QUERY_TIME_COLUMN].values >=
query_to_model_times_table[
nwp_model_utils.MIN_QUERY_TIME_COLUMN].values[i],
query_point_table[QUERY_TIME_COLUMN].values <
query_to_model_times_table[
nwp_model_utils.MAX_QUERY_TIME_COLUMN].values[i]
))[0]
(list_of_2d_grids, list_of_2d_grids_other_wind_component,
missing_data
) = _read_nwp_for_interp_any_grid(
init_times_unix_sec=init_times_unix_sec,
query_to_model_times_row=query_to_model_times_table.iloc[[i]],
field_name_grib1=field_names_grib1[j],
field_name_other_wind_component_grib1=
field_names_other_wind_component_grib1[j],
list_of_model_grids=list_of_2d_grids,
list_of_model_grids_other_wind_component=
list_of_2d_grids_other_wind_component, model_name=model_name,
top_grib_directory_name=top_grib_directory_name,
wgrib_exe_name=wgrib_exe_name,
wgrib2_exe_name=wgrib2_exe_name,
raise_error_if_missing=raise_error_if_missing)
if missing_data:
print('labeled_missing_data!!',i)
continue
list_of_spatial_interp_arrays = [numpy.array([])] * num_init_times
list_of_sinterp_arrays_other_wind_component = (
[numpy.array([])] * num_init_times)
init_time_needed_indices = numpy.where(
query_to_model_times_table[
nwp_model_utils.MODEL_TIMES_NEEDED_COLUMN
].values[i])[0]
for t in init_time_needed_indices:
this_grid_id = nwp_model_utils.dimensions_to_grid(
num_rows=list_of_2d_grids[t].shape[0],
num_columns=list_of_2d_grids[t].shape[1]
)
this_grid_index = grid_ids.index(this_grid_id)
list_of_spatial_interp_arrays[
t
] = interp_from_xy_grid_to_points(
input_matrix=list_of_2d_grids[t],
sorted_grid_point_x_metres=x_points_by_grid_metres[
this_grid_index],
sorted_grid_point_y_metres=y_points_by_grid_metres[
this_grid_index],
query_x_coords_metres=query_point_table_by_grid[
this_grid_index][QUERY_X_COLUMN].values[
query_indices_in_this_range],
query_y_coords_metres=query_point_table_by_grid[
this_grid_index][QUERY_Y_COLUMN].values[
query_indices_in_this_range],
method_string=spatial_interp_method_string,
spline_degree=spline_degree, extrapolate=True)
if rotate_wind_flags[j]:
list_of_sinterp_arrays_other_wind_component[
t
] = interp_from_xy_grid_to_points(
input_matrix=list_of_2d_grids_other_wind_component[t],
sorted_grid_point_x_metres=x_points_by_grid_metres[
this_grid_index],
sorted_grid_point_y_metres=y_points_by_grid_metres[
this_grid_index],
query_x_coords_metres=query_point_table_by_grid[
this_grid_index][QUERY_X_COLUMN].values[
query_indices_in_this_range],
query_y_coords_metres=query_point_table_by_grid[
this_grid_index][QUERY_Y_COLUMN].values[
query_indices_in_this_range],
method_string=spatial_interp_method_string,
spline_degree=spline_degree, extrapolate=True)
if grib_io.is_u_wind_field(field_names_grib1[j]):
(list_of_spatial_interp_arrays[t],
list_of_sinterp_arrays_other_wind_component[t]
) = nwp_model_utils.rotate_winds_to_earth_relative(
u_winds_grid_relative_m_s01=
list_of_spatial_interp_arrays[t],
v_winds_grid_relative_m_s01=
list_of_sinterp_arrays_other_wind_component[t],
| |
"""
@author: <NAME> <<EMAIL>>
"""
from array import array
import unittest
from genty import genty, genty_dataset
from auditok.io import (
AudioParameterError,
BufferAudioSource,
RawAudioSource,
WaveAudioSource,
)
from auditok.signal import FORMAT
from test_util import PURE_TONE_DICT, _sample_generator
def audio_source_read_all_gen(audio_source, size=None):
    """Yield successive blocks read from `audio_source` until it is exhausted.

    If `size` is None, each read requests 100 ms worth of samples (based on
    the source's `sr` attribute).
    """
    if size is None:
        size = int(audio_source.sr * 0.1)  # 100ms
    while True:
        block = audio_source.read(size)
        if block is None:
            return
        yield block
@genty
class TestAudioSource(unittest.TestCase):
    """Read tests for the concrete `AudioSource` implementations."""

    # TODO when use_channel is None, return samples from all channels

    @staticmethod
    def _reference_bytes(frequencies, sample_width):
        # Expected byte stream: the pure tones interleaved sample by sample,
        # packed with the array format code for this sample width.
        tones = [PURE_TONE_DICT[freq] for freq in frequencies]
        return array(FORMAT[sample_width], _sample_generator(*tones)).tobytes()

    @genty_dataset(
        mono=("mono_400Hz", (400,)),
        multichannel=("3channel_400-800-1600Hz", (400, 800, 1600)),
    )
    def test_BufferAudioSource_read_all(self, file_suffix, frequencies):
        file_path = "tests/data/test_16KHZ_{}.raw".format(file_suffix)
        with open(file_path, "rb") as fp:
            expected = fp.read()

        audio_source = BufferAudioSource(
            expected, 16000, 2, len(frequencies)
        )
        audio_source.open()
        # Both read(None) and a negative size must return the whole stream.
        self.assertEqual(audio_source.read(None), expected)
        audio_source.rewind()
        self.assertEqual(audio_source.read(-10), expected)
        audio_source.close()

    @genty_dataset(
        mono=("mono_400Hz", (400,)),
        multichannel=("3channel_400-800-1600Hz", (400, 800, 1600)),
    )
    def test_RawAudioSource(self, file_suffix, frequencies):
        file_path = "tests/data/test_16KHZ_{}.raw".format(file_suffix)
        channels = len(frequencies)

        audio_source = RawAudioSource(file_path, 16000, 2, channels)
        audio_source.open()
        data_read_all = b"".join(audio_source_read_all_gen(audio_source))
        audio_source.close()
        expected = self._reference_bytes(
            frequencies, audio_source.sample_width
        )
        self.assertEqual(data_read_all, expected)

        # read(None) must also return all data
        audio_source = RawAudioSource(file_path, 16000, 2, channels)
        audio_source.open()
        data_read_all = audio_source.read(None)
        audio_source.close()
        self.assertEqual(data_read_all, expected)

        # ... as must a negative read size
        audio_source = RawAudioSource(file_path, 16000, 2, channels)
        audio_source.open()
        data_read_all = audio_source.read(-10)
        audio_source.close()
        self.assertEqual(data_read_all, expected)

    @genty_dataset(
        mono=("mono_400Hz", (400,)),
        multichannel=("3channel_400-800-1600Hz", (400, 800, 1600)),
    )
    def test_WaveAudioSource(self, file_suffix, frequencies):
        file_path = "tests/data/test_16KHZ_{}.wav".format(file_suffix)

        audio_source = WaveAudioSource(file_path)
        audio_source.open()
        data_read_all = b"".join(audio_source_read_all_gen(audio_source))
        audio_source.close()
        expected = self._reference_bytes(
            frequencies, audio_source.sample_width
        )
        self.assertEqual(data_read_all, expected)

        # read(None) must also return all data
        audio_source = WaveAudioSource(file_path)
        audio_source.open()
        data_read_all = audio_source.read(None)
        audio_source.close()
        self.assertEqual(data_read_all, expected)

        # ... as must a negative read size
        audio_source = WaveAudioSource(file_path)
        audio_source.open()
        data_read_all = audio_source.read(-10)
        audio_source.close()
        self.assertEqual(data_read_all, expected)
@genty
class TestBufferAudioSource_SR10_SW1_CH1(unittest.TestCase):
    """BufferAudioSource tests at sampling_rate=10, sample_width=1, channels=1."""

    def setUp(self):
        self.data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"
        self.audio_source = BufferAudioSource(
            data=self.data, sampling_rate=10, sample_width=1, channels=1
        )
        self.audio_source.open()

    def tearDown(self):
        self.audio_source.close()

    def _assert_next_block(self, size, expected):
        # Read `size` samples and compare against the expected byte block.
        block = self.audio_source.read(size)
        self.assertEqual(
            block,
            expected,
            msg="wrong block, expected: {}, found: {} ".format(expected, block),
        )

    def _assert_positions(self, expected_sample, expected_second, expected_ms):
        # Check the three views of the stream position: samples, seconds, ms.
        position = self.audio_source.position
        self.assertEqual(
            position,
            expected_sample,
            msg="wrong stream position, expected: {}, found: {}".format(
                expected_sample, position
            ),
        )
        position_s = self.audio_source.position_s
        self.assertEqual(
            position_s,
            expected_second,
            msg="wrong stream position_s, expected: {}, found: {}".format(
                expected_second, position_s
            ),
        )
        position_ms = self.audio_source.position_ms
        self.assertEqual(
            position_ms,
            expected_ms,
            msg="wrong stream position_s, expected: {}, found: {}".format(
                expected_ms, position_ms
            ),
        )

    def test_sr10_sw1_ch1_read_1(self):
        self._assert_next_block(1, b"A")

    def test_sr10_sw1_ch1_read_6(self):
        self._assert_next_block(6, b"ABCDEF")

    def test_sr10_sw1_ch1_read_multiple(self):
        # Consecutive reads advance through the buffer; an oversized final
        # read returns only what remains.
        self._assert_next_block(1, b"A")
        self._assert_next_block(6, b"BCDEFG")
        self._assert_next_block(13, b"HIJKLMNOPQRST")
        self._assert_next_block(9999, b"UVWXYZ012345")

    def test_sr10_sw1_ch1_read_all(self):
        self._assert_next_block(9999, self.data)
        # Reading past the end must yield None.
        self._assert_next_block(1, None)

    def test_sr10_sw1_ch1_sampling_rate(self):
        rate = self.audio_source.sampling_rate
        self.assertEqual(
            rate,
            10,
            msg="wrong sampling rate, expected: 10, found: {0} ".format(rate),
        )

    def test_sr10_sw1_ch1_sample_width(self):
        width = self.audio_source.sample_width
        self.assertEqual(
            width,
            1,
            msg="wrong sample width, expected: 1, found: {0} ".format(width),
        )

    def test_sr10_sw1_ch1_channels(self):
        channels = self.audio_source.channels
        self.assertEqual(
            channels,
            1,
            msg="wrong number of channels, expected: 1, found: {0} ".format(
                channels
            ),
        )

    @genty_dataset(
        empty=([], 0, 0, 0),
        zero=([0], 0, 0, 0),
        five=([5], 5, 0.5, 500),
        multiple=([5, 20], 25, 2.5, 2500),
    )
    def test_position(
        self, block_sizes, expected_sample, expected_second, expected_ms
    ):
        for size in block_sizes:
            self.audio_source.read(size)
        self._assert_positions(expected_sample, expected_second, expected_ms)

    @genty_dataset(
        zero=(0, 0, 0, 0),
        one=(1, 1, 0.1, 100),
        ten=(10, 10, 1, 1000),
        negative_1=(-1, 31, 3.1, 3100),
        negative_2=(-7, 25, 2.5, 2500),
    )
    def test_position_setter(
        self, position, expected_sample, expected_second, expected_ms
    ):
        self.audio_source.position = position
        self._assert_positions(expected_sample, expected_second, expected_ms)

    @genty_dataset(
        zero=(0, 0, 0, 0),
        one=(0.1, 1, 0.1, 100),
        ten=(1, 10, 1, 1000),
        negative_1=(-0.1, 31, 3.1, 3100),
        negative_2=(-0.7, 25, 2.5, 2500),
    )
    def test_position_s_setter(
        self, position_s, expected_sample, expected_second, expected_ms
    ):
        self.audio_source.position_s = position_s
        self._assert_positions(expected_sample, expected_second, expected_ms)

    @genty_dataset(
        zero=(0, 0, 0, 0),
        one=(100, 1, 0.1, 100),
        ten=(1000, 10, 1, 1000),
        negative_1=(-100, 31, 3.1, 3100),
        negative_2=(-700, 25, 2.5, 2500),
    )
    def test_position_ms_setter(
        self, position_ms, expected_sample, expected_second, expected_ms
    ):
        self.audio_source.position_ms = position_ms
        self._assert_positions(expected_sample, expected_second, expected_ms)

    @genty_dataset(positive=((100,)), negative=(-100,))
    def test_position_setter_out_of_range(self, position):
        with self.assertRaises(IndexError):
            self.audio_source.position = position

    @genty_dataset(positive=((100,)), negative=(-100,))
    def test_position_s_setter_out_of_range(self, position_s):
        with self.assertRaises(IndexError):
            self.audio_source.position_s = position_s

    @genty_dataset(positive=((10000,)), negative=(-10000,))
    def test_position_ms_setter_out_of_range(self, position_ms):
        with self.assertRaises(IndexError):
            self.audio_source.position_ms = position_ms

    def test_sr10_sw1_ch1_initial_position_s_0(self):
        time_pos = self.audio_source.position_s
        self.assertEqual(
            time_pos,
            0.0,
            msg="wrong time position, expected: 0.0, found: {0} ".format(
                time_pos
            ),
        )

    def test_sr10_sw1_ch1_position_s_1_after_read(self):
        # Read exactly one second of samples.
        self.audio_source.read(self.audio_source.sampling_rate)
        time_pos = self.audio_source.position_s
        self.assertEqual(
            time_pos,
            1.0,
            msg="wrong time position, expected: 1.0, found: {0} ".format(
                time_pos
            ),
        )

    def test_sr10_sw1_ch1_position_s_2_5(self):
        self.audio_source.read(25)  # 2.5 seconds at 10 Hz
        time_pos = self.audio_source.position_s
        self.assertEqual(
            time_pos,
            2.5,
            msg="wrong time position, expected: 2.5, found: {0} ".format(
                time_pos
            ),
        )

    def test_sr10_sw1_ch1_position_s_0(self):
        self.audio_source.read(10)
        self.audio_source.position_s = 0
        time_pos = self.audio_source.position_s
        self.assertEqual(
            time_pos,
            0.0,
            msg="wrong time position, expected: 0.0, found: {0} ".format(
                time_pos
            ),
        )

    def test_sr10_sw1_ch1_position_s_1(self):
        self.audio_source.position_s = 1
        time_pos = self.audio_source.position_s
        self.assertEqual(
            time_pos,
            1.0,
            msg="wrong time position, expected: 1.0, found: {0} ".format(
                time_pos
            ),
        )

    def test_sr10_sw1_ch1_rewind(self):
        self.audio_source.read(10)
        self.audio_source.rewind()
        pos = self.audio_source.position
        self.assertEqual(
            pos, 0, msg="wrong position, expected: 0.0, found: {0} ".format(pos)
        )

    def test_sr10_sw1_ch1_read_closed(self):
        self.audio_source.close()
        with self.assertRaises(Exception):
            self.audio_source.read(1)
@genty
class TestBufferAudioSource_SR16_SW2_CH1(unittest.TestCase):
def setUp(self):
self.data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"
self.audio_source = BufferAudioSource(
data=self.data, sampling_rate=16, sample_width=2, channels=1
)
self.audio_source.open()
def tearDown(self):
self.audio_source.close()
def test_sr16_sw2_ch1_read_1(self):
block = self.audio_source.read(1)
exp = b"AB"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr16_sw2_ch1_read_6(self):
block = self.audio_source.read(6)
exp = b"ABCDEFGHIJKL"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr16_sw2_ch1_read_multiple(self):
block = self.audio_source.read(1)
exp = b"AB"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(6)
exp = b"CDEFGHIJKLMN"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(5)
exp = b"OPQRSTUVWX"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(9999)
exp = b"YZ012345"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr16_sw2_ch1_read_all(self):
block = self.audio_source.read(9999)
self.assertEqual(
block,
self.data,
msg="wrong block, expected: {0}, found: {1} ".format(
self.data, block
),
)
block = self.audio_source.read(1)
self.assertEqual(
block,
None,
msg="wrong block, expected: {0}, found: {1} ".format(None, block),
)
def test_sr16_sw2_ch1_sampling_rate(self):
srate = self.audio_source.sampling_rate
self.assertEqual(
srate,
16,
msg="wrong sampling rate, expected: 10, found: {0} ".format(srate),
)
def test_sr16_sw2_ch1_sample_width(self):
swidth = self.audio_source.sample_width
self.assertEqual(
swidth,
2,
msg="wrong sample width, expected: 1, found: {0} ".format(swidth),
)
def test_sr16_sw2_ch1_channels(self):
channels = self.audio_source.channels
self.assertEqual(
channels,
1,
msg="wrong number of channels, expected: 1, found: {0} ".format(
channels
),
)
@genty_dataset(
empty=([], 0, 0, 0),
zero=([0], 0, 0, 0),
two=([2], 2, 2 / 16, int(2000 / 16)),
eleven=([11], 11, 11 / 16, int(11 * 1000 / 16)),
multiple=([4, 8], 12, 0.75, 750),
)
def test_position(
self, block_sizes, expected_sample, expected_second, expected_ms
):
for block_size in block_sizes:
self.audio_source.read(block_size)
position = self.audio_source.position
self.assertEqual(
position,
expected_sample,
msg="wrong stream position, expected: {}, found: {}".format(
expected_sample, position
),
)
position_s = self.audio_source.position_s
self.assertEqual(
position_s,
expected_second,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_second, | |
return parse_float_vector(self._raw_data.get('flare05_sizes', ""))
    @property
    def Flare05_color(self):
        # 'flare05_color' run through parse_int_vector ("" when key absent).
        return parse_int_vector(self._raw_data.get('flare05_color', ""))
    @property
    def Flare06_texture(self):
        # Raw string stored under 'flare06_texture' ("" when key absent).
        return self._raw_data.get('flare06_texture', "")
    @property
    def Flare06_params(self):
        # Raw string stored under 'flare06_params' ("" when key absent).
        return self._raw_data.get('flare06_params', "")
    @property
    def Flare06_intensity(self):
        # 'flare06_intensity' run through parse_float_vector.
        return parse_float_vector(self._raw_data.get('flare06_intensity', ""))
    @property
    def Flare06_sizes(self):
        # 'flare06_sizes' run through parse_float_vector.
        return parse_float_vector(self._raw_data.get('flare06_sizes', ""))
    @property
    def Flare06_color(self):
        # 'flare06_color' run through parse_int_vector.
        return parse_int_vector(self._raw_data.get('flare06_color', ""))
    @property
    def Flare07_texture(self):
        # Raw string stored under 'flare07_texture' ("" when key absent).
        return self._raw_data.get('flare07_texture', "")
    @property
    def Flare07_params(self):
        # Raw string stored under 'flare07_params' ("" when key absent).
        return self._raw_data.get('flare07_params', "")
    @property
    def Flare07_intensity(self):
        # 'flare07_intensity' run through parse_float_vector.
        return parse_float_vector(self._raw_data.get('flare07_intensity', ""))
    @property
    def Flare07_sizes(self):
        # 'flare07_sizes' run through parse_float_vector.
        return parse_float_vector(self._raw_data.get('flare07_sizes', ""))
    @property
    def Flare07_color(self):
        # 'flare07_color' run through parse_int_vector.
        return parse_int_vector(self._raw_data.get('flare07_color', ""))
    @property
    def Flare08_texture(self):
        # Raw string stored under 'flare08_texture' ("" when key absent).
        return self._raw_data.get('flare08_texture', "")
    @property
    def Flare08_params(self):
        # Raw string stored under 'flare08_params' ("" when key absent).
        return self._raw_data.get('flare08_params', "")
    @property
    def Flare08_intensity(self):
        # 'flare08_intensity' run through parse_float_vector.
        return parse_float_vector(self._raw_data.get('flare08_intensity', ""))
    @property
    def Flare08_sizes(self):
        # 'flare08_sizes' run through parse_float_vector.
        return parse_float_vector(self._raw_data.get('flare08_sizes', ""))
    @property
    def Flare08_color(self):
        # 'flare08_color' run through parse_int_vector.
        return parse_int_vector(self._raw_data.get('flare08_color', ""))
    @property
    def Flare09_texture(self):
        # Raw string stored under 'flare09_texture' ("" when key absent).
        return self._raw_data.get('flare09_texture', "")
    @property
    def Flare09_params(self):
        # Raw string stored under 'flare09_params' ("" when key absent).
        return self._raw_data.get('flare09_params', "")
    @property
    def Flare09_intensity(self):
        # 'flare09_intensity' run through parse_float_vector.
        return parse_float_vector(self._raw_data.get('flare09_intensity', ""))
    @property
    def Flare09_sizes(self):
        # 'flare09_sizes' run through parse_float_vector.
        return parse_float_vector(self._raw_data.get('flare09_sizes', ""))
    @property
    def Flare09_color(self):
        # 'flare09_color' run through parse_int_vector.
        return parse_int_vector(self._raw_data.get('flare09_color', ""))
@property
def Flare10_texture(self):
return self._raw_data.get('flare10_texture', "")
@property
def Flare10_params(self):
return self._raw_data.get('flare10_params', "")
@property
def Flare10_intensity(self):
return parse_float_vector(self._raw_data.get('flare10_intensity', ""))
@property
def Flare10_sizes(self):
return parse_float_vector(self._raw_data.get('flare10_sizes', ""))
@property
def Flare10_color(self):
return parse_int_vector(self._raw_data.get('flare10_color', ""))
@property
def Flare11_texture(self):
return self._raw_data.get('flare11_texture', "")
@property
def Flare11_params(self):
return self._raw_data.get('flare11_params', "")
@property
def Flare11_intensity(self):
return parse_float_vector(self._raw_data.get('flare11_intensity', ""))
@property
def Flare11_sizes(self):
return parse_float_vector(self._raw_data.get('flare11_sizes', ""))
@property
def Flare11_color(self):
return parse_int_vector(self._raw_data.get('flare11_color', ""))
@property
def Flare12_texture(self):
return self._raw_data.get('flare12_texture', "")
@property
def Flare12_params(self):
return self._raw_data.get('flare12_params', "")
@property
def Flare12_intensity(self):
return parse_float_vector(self._raw_data.get('flare12_intensity', ""))
@property
def Flare12_sizes(self):
return parse_float_vector(self._raw_data.get('flare12_sizes', ""))
@property
def Flare12_color(self):
return parse_int_vector(self._raw_data.get('flare12_color', ""))
@property
def Flare13_texture(self):
return self._raw_data.get('flare13_texture', "")
@property
def Flare13_params(self):
return self._raw_data.get('flare13_params', "")
@property
def Flare13_intensity(self):
return parse_float_vector(self._raw_data.get('flare13_intensity', ""))
@property
def Flare13_sizes(self):
return parse_float_vector(self._raw_data.get('flare13_sizes', ""))
@property
def Flare13_color(self):
return parse_int_vector(self._raw_data.get('flare13_color', ""))
@property
def Flare14_texture(self):
return self._raw_data.get('flare14_texture', "")
@property
def Flare14_params(self):
return self._raw_data.get('flare14_params', "")
@property
def Flare14_intensity(self):
return parse_float_vector(self._raw_data.get('flare14_intensity', ""))
@property
def Flare14_sizes(self):
return parse_float_vector(self._raw_data.get('flare14_sizes', ""))
@property
def Flare14_color(self):
return parse_int_vector(self._raw_data.get('flare14_color', ""))
@property
def Flare15_texture(self):
return self._raw_data.get('flare15_texture', "")
@property
def Flare15_params(self):
return self._raw_data.get('flare15_params', "")
@property
def Flare15_intensity(self):
return parse_float_vector(self._raw_data.get('flare15_intensity', ""))
@property
def Flare15_sizes(self):
return parse_float_vector(self._raw_data.get('flare15_sizes', ""))
@property
def Flare15_color(self):
return parse_int_vector(self._raw_data.get('flare15_color', ""))
@property
def Flare16_texture(self):
return self._raw_data.get('flare16_texture', "")
@property
def Flare16_params(self):
return self._raw_data.get('flare16_params', "")
@property
def Flare16_intensity(self):
return parse_float_vector(self._raw_data.get('flare16_intensity', ""))
@property
def Flare16_sizes(self):
return parse_float_vector(self._raw_data.get('flare16_sizes', ""))
@property
def Flare16_color(self):
return parse_int_vector(self._raw_data.get('flare16_color', ""))
@property
def Flare17_texture(self):
return self._raw_data.get('flare17_texture', "")
@property
def Flare17_params(self):
return self._raw_data.get('flare17_params', "")
@property
def Flare17_intensity(self):
return parse_float_vector(self._raw_data.get('flare17_intensity', ""))
@property
def Flare17_sizes(self):
return parse_float_vector(self._raw_data.get('flare17_sizes', ""))
@property
def Flare17_color(self):
return parse_int_vector(self._raw_data.get('flare17_color', ""))
@property
def Flare18_texture(self):
return self._raw_data.get('flare18_texture', "")
@property
def Flare18_params(self):
return self._raw_data.get('flare18_params', "")
@property
def Flare18_intensity(self):
return parse_float_vector(self._raw_data.get('flare18_intensity', ""))
@property
def Flare18_sizes(self):
return parse_float_vector(self._raw_data.get('flare18_sizes', ""))
@property
def Flare18_color(self):
return parse_int_vector(self._raw_data.get('flare18_color', ""))
@property
def Flare19_texture(self):
return self._raw_data.get('flare19_texture', "")
@property
def Flare19_params(self):
return self._raw_data.get('flare19_params', "")
@property
def Flare19_intensity(self):
return parse_float_vector(self._raw_data.get('flare19_intensity', ""))
@property
def Flare19_sizes(self):
return parse_float_vector(self._raw_data.get('flare19_sizes', ""))
@property
def Flare19_color(self):
return parse_int_vector(self._raw_data.get('flare19_color', ""))
@property
def Flare20_texture(self):
return self._raw_data.get('flare20_texture', "")
@property
def Flare20_params(self):
return self._raw_data.get('flare20_params', "")
@property
def Flare20_intensity(self):
return parse_float_vector(self._raw_data.get('flare20_intensity', ""))
@property
def Flare20_sizes(self):
return parse_float_vector(self._raw_data.get('flare20_sizes', ""))
@property
def Flare20_color(self):
return parse_int_vector(self._raw_data.get('flare20_color', ""))
class env_fumer(EnableDisable, Targetname, Parentname, Angles, BaseFadeProp, Studiomodel):
    """Entity binding for `env_fumer`.

    All values are read lazily from self._raw_data; keys are the lowercase
    form of the property name and defaults apply when a key is absent.
    """

    @property
    def origin(self):
        raw = self._raw_data.get('origin', "0 0 0")
        return parse_int_vector(raw)

    @property
    def DetectionRadius(self):
        raw = self._raw_data.get('detectionradius', 128)
        return parse_source_value(raw)

    @property
    def ExplodeRaius(self):
        # NOTE: the 'Raius' misspelling matches the raw data key and the
        # public property name — preserved deliberately for compatibility.
        raw = self._raw_data.get('exploderaius', 128)
        return parse_source_value(raw)

    @property
    def ExplodeDmg(self):
        raw = self._raw_data.get('explodedmg', 30)
        return parse_source_value(raw)

    @property
    def ExplodeForce(self):
        raw = self._raw_data.get('explodeforce', 1)
        return parse_source_value(raw)

    @property
    def FlameTime(self):
        raw = self._raw_data.get('flametime', 10)
        return parse_source_value(raw)
class trigger_apply_impulse(Trigger):
    """Trigger volume that applies an impulse to entities inside it."""

    @property
    def impulse_dir(self):
        # Direction of the impulse; default is the zero vector.
        raw = self._raw_data.get('impulse_dir', "0 0 0")
        return parse_float_vector(raw)

    @property
    def force(self):
        # Impulse magnitude; defaults to 300.
        raw = self._raw_data.get('force', 300)
        return parse_source_value(raw)
class info_nihilanth_summon(Angles, Targetname, Parentname):
    """Point entity marking a Nihilanth summon location."""

    # Sprite shown for this entity in the map editor.
    icon_sprite = "editor/info_target.vmt"

    @property
    def origin(self):
        raw = self._raw_data.get('origin', "0 0 0")
        return parse_int_vector(raw)
class point_weaponstrip(Targetname):
    """Point entity that strips a weapon from the player."""

    @property
    def origin(self):
        raw = self._raw_data.get('origin', "0 0 0")
        return parse_int_vector(raw)

    @property
    def Weapon(self):
        # Weapon selector kept as its raw string form; default "2".
        return self._raw_data.get('weapon', "2")
class misc_marionettist(Targetname):
    # Generated entity binding: pulls up to 16 named targets toward this
    # point. Values are read lazily from self._raw_data; keys are the
    # lowercase property names and the shown defaults apply when absent.
    @property
    def origin(self):
        # Spawn position as an int vector; default "0 0 0".
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def innerdestinationradius(self):
        return parse_source_value(self._raw_data.get('innerdestinationradius', 32))
    @property
    def innerpullspeed(self):
        return parse_source_value(self._raw_data.get('innerpullspeed', 448))
    @property
    def outerdestinationradius(self):
        return parse_source_value(self._raw_data.get('outerdestinationradius', 128))
    @property
    def outerpullspeed(self):
        return parse_source_value(self._raw_data.get('outerpullspeed', 512))
    @property
    def ignorecollisions(self):
        # Raw flag string; "0"/"1" semantics presumed — confirm against engine docs.
        return self._raw_data.get('ignorecollisions', "0")
    # target01..target16: names of entities to pull; empty string when unused.
    @property
    def target01(self):
        return self._raw_data.get('target01', "")
    @property
    def target02(self):
        return self._raw_data.get('target02', "")
    @property
    def target03(self):
        return self._raw_data.get('target03', "")
    @property
    def target04(self):
        return self._raw_data.get('target04', "")
    @property
    def target05(self):
        return self._raw_data.get('target05', "")
    @property
    def target06(self):
        return self._raw_data.get('target06', "")
    @property
    def target07(self):
        return self._raw_data.get('target07', "")
    @property
    def target08(self):
        return self._raw_data.get('target08', "")
    @property
    def target09(self):
        return self._raw_data.get('target09', "")
    @property
    def target10(self):
        return self._raw_data.get('target10', "")
    @property
    def target11(self):
        return self._raw_data.get('target11', "")
    @property
    def target12(self):
        return self._raw_data.get('target12', "")
    @property
    def target13(self):
        return self._raw_data.get('target13', "")
    @property
    def target14(self):
        return self._raw_data.get('target14', "")
    @property
    def target15(self):
        return self._raw_data.get('target15', "")
    @property
    def target16(self):
        return self._raw_data.get('target16', "")
    # Soundscript names for the start / loop / end phases of the pull.
    @property
    def soundscriptstart(self):
        return self._raw_data.get('soundscriptstart', "")
    @property
    def soundscriptloop(self):
        return self._raw_data.get('soundscriptloop', "")
    @property
    def soundscriptend(self):
        return self._raw_data.get('soundscriptend', "")
class misc_xen_healing_pylon(Angles, Targetname, Studiomodel):
    # Generated entity binding: a damageable pylon that heals a requester via
    # a beam effect. Values are read lazily from self._raw_data; keys are the
    # lowercase property names and the shown defaults apply when absent.
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def max_health(self):
        return parse_source_value(self._raw_data.get('max_health', 200))
    @property
    def can_be_damaged_only_healing(self):
        # Raw flag string; "0"/"1" semantics presumed — confirm against engine docs.
        return self._raw_data.get('can_be_damaged_only_healing', "0")
    @property
    def danger_recovering_duration(self):
        return parse_source_value(self._raw_data.get('danger_recovering_duration', 5.0))
    # Healing-request pacing: total duration, HP per tick, seconds per tick.
    @property
    def healing_request_duration(self):
        return parse_source_value(self._raw_data.get('healing_request_duration', 5.0))
    @property
    def healing_request_hp_per_tick(self):
        return parse_source_value(self._raw_data.get('healing_request_hp_per_tick', 16))
    @property
    def healing_request_tick_delta(self):
        return parse_source_value(self._raw_data.get('healing_request_tick_delta', 0.125))
    # Visuals of the healing beam effect.
    @property
    def healing_beam_attachment_name(self):
        return self._raw_data.get('healing_beam_attachment_name', "")
    @property
    def healing_beam_spread_radius(self):
        return parse_source_value(self._raw_data.get('healing_beam_spread_radius', 16.0))
    @property
    def healing_beam_sprite_model(self):
        return self._raw_data.get('healing_beam_sprite_model', "sprites/rollermine_shock.vmt")
    @property
    def healing_beam_noise_amplitude(self):
        return parse_source_value(self._raw_data.get('healing_beam_noise_amplitude', 4.0))
    @property
    def healing_beam_starting_width(self):
        return parse_source_value(self._raw_data.get('healing_beam_starting_width', 8.0))
    @property
    def healing_beam_ending_width(self):
        return parse_source_value(self._raw_data.get('healing_beam_ending_width', 32.0))
    @property
    def healing_beam_color(self):
        # RGBA int vector.
        return parse_int_vector(self._raw_data.get('healing_beam_color', "255 255 255 255"))
    @property
    def healing_beam_starting_pfx(self):
        return self._raw_data.get('healing_beam_starting_pfx', "gloun_zap")
    @property
    def healing_beam_ending_pfx(self):
        return self._raw_data.get('healing_beam_ending_pfx', "gloun_zap")
    # Model sequence names for the pylon's animation states.
    @property
    def pylon_sequence_opening(self):
        return self._raw_data.get('pylon_sequence_opening', "deploy")
    @property
    def pylon_sequence_opened_idle(self):
        return self._raw_data.get('pylon_sequence_opened_idle', "idle_deploy")
    @property
    def pylon_sequence_closing(self):
        return self._raw_data.get('pylon_sequence_closing', "retract")
    @property
    def pylon_sequence_closed_idle(self):
        return self._raw_data.get('pylon_sequence_closed_idle', "idle_retract")
    @property
    def pylon_sequence_dying(self):
        return self._raw_data.get('pylon_sequence_dying', "explode")
    @property
    def pylon_sequence_died_idle(self):
        return self._raw_data.get('pylon_sequence_died_idle', "idle_explode")
    @property
    def trace_targetname_filter(self):
        return self._raw_data.get('trace_targetname_filter', "")
class misc_xen_shield(Angles, Targetname):
    # Generated entity binding: a shield built from up to 60 panel models,
    # healed by up to 16 pylons, with up to 16 angular-velocity presets and
    # 16 health-gradient colors. Values are read lazily from self._raw_data;
    # keys are the lowercase property names; shown defaults apply when absent.
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def panel_modelname_template(self):
        # printf-style template ('%03d') expanded per panel index.
        return self._raw_data.get('panel_modelname_template', "models/xenians/shield/pentagonal.hexecontahedron/nihilanth/panel.%03d.mdl")
    @property
    def panels_amount(self):
        return parse_source_value(self._raw_data.get('panels_amount', 60))
    @property
    def max_health_for_panel(self):
        return parse_source_value(self._raw_data.get('max_health_for_panel', 75))
    @property
    def max_health(self):
        return parse_source_value(self._raw_data.get('max_health', 3000))
    @property
    def healing_per_tick_for_panel(self):
        return parse_source_value(self._raw_data.get('healing_per_tick_for_panel', 2))
    @property
    def healing_tick_delta_for_panel(self):
        return parse_source_value(self._raw_data.get('healing_tick_delta_for_panel', 0.125))
    @property
    def healing_request_cooldown(self):
        return parse_source_value(self._raw_data.get('healing_request_cooldown', 7.5))
    @property
    def hp_amount_to_request_heal(self):
        # Fraction of max health below which healing is requested.
        return parse_source_value(self._raw_data.get('hp_amount_to_request_heal', 0.85))
    # pylon01..pylon16: targetnames of healing pylons; empty when unused.
    @property
    def pylon01(self):
        return self._raw_data.get('pylon01', "")
    @property
    def pylon02(self):
        return self._raw_data.get('pylon02', "")
    @property
    def pylon03(self):
        return self._raw_data.get('pylon03', "")
    @property
    def pylon04(self):
        return self._raw_data.get('pylon04', "")
    @property
    def pylon05(self):
        return self._raw_data.get('pylon05', "")
    @property
    def pylon06(self):
        return self._raw_data.get('pylon06', "")
    @property
    def pylon07(self):
        return self._raw_data.get('pylon07', "")
    @property
    def pylon08(self):
        return self._raw_data.get('pylon08', "")
    @property
    def pylon09(self):
        return self._raw_data.get('pylon09', "")
    @property
    def pylon10(self):
        return self._raw_data.get('pylon10', "")
    @property
    def pylon11(self):
        return self._raw_data.get('pylon11', "")
    @property
    def pylon12(self):
        return self._raw_data.get('pylon12', "")
    @property
    def pylon13(self):
        return self._raw_data.get('pylon13', "")
    @property
    def pylon14(self):
        return self._raw_data.get('pylon14', "")
    @property
    def pylon15(self):
        return self._raw_data.get('pylon15', "")
    @property
    def pylon16(self):
        return self._raw_data.get('pylon16', "")
    # angular_velocity_value01..16: rotation presets (float vectors); only the
    # first `angular_velocity_values_used` entries have non-empty defaults.
    @property
    def angular_velocity_value01(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value01', "5.0 30.0 15.0"))
    @property
    def angular_velocity_value02(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value02', "-25.0 45.0 -5.0"))
    @property
    def angular_velocity_value03(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value03', "5.0 60.0 15.0"))
    @property
    def angular_velocity_value04(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value04', "25.0 45.0 0.0"))
    @property
    def angular_velocity_value05(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value05', "-5.0 15.0 -15.0"))
    @property
    def angular_velocity_value06(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value06', ""))
    @property
    def angular_velocity_value07(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value07', ""))
    @property
    def angular_velocity_value08(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value08', ""))
    @property
    def angular_velocity_value09(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value09', ""))
    @property
    def angular_velocity_value10(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value10', ""))
    @property
    def angular_velocity_value11(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value11', ""))
    @property
    def angular_velocity_value12(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value12', ""))
    @property
    def angular_velocity_value13(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value13', ""))
    @property
    def angular_velocity_value14(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value14', ""))
    @property
    def angular_velocity_value15(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value15', ""))
    @property
    def angular_velocity_value16(self):
        return parse_float_vector(self._raw_data.get('angular_velocity_value16', ""))
    @property
    def angular_velocity_values_used(self):
        return parse_source_value(self._raw_data.get('angular_velocity_values_used', 5))
    # health_color01..16: RGB float vectors forming a health gradient; only
    # the first `health_colors_used` entries have non-empty defaults.
    @property
    def health_color01(self):
        return parse_float_vector(self._raw_data.get('health_color01', "1.0 0.0 0.0"))
    @property
    def health_color02(self):
        return parse_float_vector(self._raw_data.get('health_color02', "1.0 1.0 0.0"))
    @property
    def health_color03(self):
        return parse_float_vector(self._raw_data.get('health_color03', "0.0 1.0 0.0"))
    @property
    def health_color04(self):
        return parse_float_vector(self._raw_data.get('health_color04', "0.0 1.0 1.0"))
    @property
    def health_color05(self):
        return parse_float_vector(self._raw_data.get('health_color05', "0.0 0.0 1.0"))
    @property
    def health_color06(self):
        return parse_float_vector(self._raw_data.get('health_color06', ""))
    @property
    def health_color07(self):
        return parse_float_vector(self._raw_data.get('health_color07', ""))
    @property
    def health_color08(self):
        return parse_float_vector(self._raw_data.get('health_color08', ""))
    @property
    def health_color09(self):
        return parse_float_vector(self._raw_data.get('health_color09', ""))
    @property
    def health_color10(self):
        return parse_float_vector(self._raw_data.get('health_color10', ""))
    @property
    def health_color11(self):
        return parse_float_vector(self._raw_data.get('health_color11', ""))
    @property
    def health_color12(self):
        return parse_float_vector(self._raw_data.get('health_color12', ""))
    @property
    def health_color13(self):
        return parse_float_vector(self._raw_data.get('health_color13', ""))
    @property
    def health_color14(self):
        return parse_float_vector(self._raw_data.get('health_color14', ""))
    @property
    def health_color15(self):
        return parse_float_vector(self._raw_data.get('health_color15', ""))
    @property
    def health_color16(self):
        return parse_float_vector(self._raw_data.get('health_color16', ""))
    @property
    def health_colors_used(self):
        return parse_source_value(self._raw_data.get('health_colors_used', 5))
    # Per-panel animation timings (seconds).
    @property
    def intro_for_panel_minimum(self):
        return parse_source_value(self._raw_data.get('intro_for_panel_minimum', 2.5))
    @property
    def intro_for_panel_maximum(self):
        return parse_source_value(self._raw_data.get('intro_for_panel_maximum', 5.0))
    @property
    def pause_for_panel_minimum(self):
        return parse_source_value(self._raw_data.get('pause_for_panel_minimum', 0.5))
    @property
    def pause_for_panel_maximum(self):
        return parse_source_value(self._raw_data.get('pause_for_panel_maximum', 2.5))
    @property
    def death_for_panel(self):
        return parse_source_value(self._raw_data.get('death_for_panel', 1.5))
    @property
    def per_panel_color_scheme(self):
        # Raw flag string; "0"/"1" semantics presumed — confirm against engine docs.
        return self._raw_data.get('per_panel_color_scheme', "0")
    @property
    def no_impact_on_alive_pylons(self):
        return self._raw_data.get('no_impact_on_alive_pylons', "1")
class prop_physics_psychokinesis(BasePropPhysics):
    """Physics prop variant usable with psychokinesis; only adds an origin."""

    @property
    def origin(self):
        raw = self._raw_data.get('origin', "0 0 0")
        return parse_int_vector(raw)
class nihiportalsbase(Angles, Targetname, Studiomodel):
| |
<reponame>paaksing/ayolo<filename>ayolo/window.py
import sys
from functools import partial
from pathlib import Path
from typing import List, Tuple
from PyQt5 import QtWidgets, QtCore, QtGui
from darktheme.widget_template import DarkPalette
from .background import Background
from .utilities import PropagableLineEdit
class Annotator(QtWidgets.QWidget):
    '''Widget for annotator section (center).

    Draws the current image, the committed annotation rectangles and the
    in-progress rubber-band rectangle, and turns mouse events into
    (x1, y1, x2, y2, class_id) tuples expressed in original-image pixels.
    '''

    # Committed annotations for the current image, in image coordinates.
    current_annotations: List[Tuple[int, int, int, int, int]]
    current_img_path: Path = None

    def __init__(self, background: Background):
        super().__init__()
        self.background = background
        # Register on the shared Background hub so sibling widgets can reach us.
        self.background.annotator = self
        self.begin = QtCore.QPoint()
        self.end = QtCore.QPoint()
        # (left, top, right, bottom) of the scaled image inside this widget.
        self.img_bounds = (0, 0, 0, 0)
        # (x, y) scale factors: original-image pixels per widget pixel.
        self.ratio = (1, 1)
        self.drawing = False
        self.deleted = False
        self.nulled = False
        self.current_annotations = []
        self.setMouseTracking(True)
        self.setCursor(QtCore.Qt.CursorShape.BlankCursor)
        self.show()

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        if self.current_img_path:
            self.draw_img(painter, self.current_img_path)
        # Committed annotations, each tinted with its class color.
        for x1, y1, x2, y2, clas in self.current_annotations:
            rgb = self.background.get_color(clas, self.background.control_panel.classlength)
            self.modify_painter_brush_and_pen(painter, rgb)
            painter.drawRect(QtCore.QRect(self.get_scaled_coordinate(x1, y1), self.get_scaled_coordinate(x2, y2)))
        rgb = self.background.get_color(self.background.control_panel.get_selected_class_id(), self.background.control_panel.classlength)
        self.modify_painter_brush_and_pen(painter, rgb)
        if self.drawing:
            painter.drawRect(QtCore.QRect(self.begin, self.end))
        self.force_bounded_pos(self.end)
        # Crosshair at the cursor. BUGFIX: QPainter.pen() returns a *copy*,
        # so the original `painter.pen().setWidth(1)` had no effect; mutate
        # the copy and install it back.
        pen = painter.pen()
        pen.setWidth(1)
        painter.setPen(pen)
        painter.drawLine(self.end.x() + 1, self.img_bounds[1], self.end.x() + 1, self.img_bounds[3])
        painter.drawLine(self.img_bounds[0], self.end.y() + 1, self.img_bounds[2], self.end.y() + 1)
        # Keep the coordinate readout inside the image area near the cursor.
        coordinate_text_pos = QtCore.QPoint(self.end.x() + 1 if self.end.x() < self.img_bounds[2] - 75 else self.end.x() - 75, self.end.y() - 1 if self.end.y() > 20 else self.end.y() + 12)
        real_pos = self.get_real_coordinate(self.end)
        painter.drawText(coordinate_text_pos, f'({real_pos[0]}, {real_pos[1]})')

    def mousePressEvent(self, event: QtGui.QMouseEvent):
        """Right click undoes/cancels; left click starts or commits a box;
        forward/back buttons navigate images."""
        if event.button() == QtCore.Qt.MouseButton.RightButton:
            if not self.drawing:
                self.undo_annotation()
            else:
                # Cancel the in-progress rectangle.
                self.begin = QtCore.QPoint()
                self.end = QtCore.QPoint()
                self.end = event.pos()
                self.drawing = not self.drawing
            self.update()
        elif event.button() == QtCore.Qt.MouseButton.LeftButton:
            pos = self.force_bounded_pos(event.pos())
            if not self.drawing:
                self.begin = pos
            else:
                # Commit: normalize corners so (x1, y1) is top-left.
                self.current_annotations.append(
                    (
                        *self.force_top_left_corner(self.get_real_coordinate(self.begin), self.get_real_coordinate(self.end)),
                        self.background.control_panel.get_selected_class_id()
                    )
                )
                self.background.control_panel.update_current_annotations_lw(self.current_annotations)
                # Hand focus back to the class-search field for the next box.
                self.background.control_panel.search.setFocus()
                self.background.control_panel.search.selectAll()
                self.begin = QtCore.QPoint()
                self.end = QtCore.QPoint()
            self.end = pos
            self.drawing = not self.drawing
            self.update()
        elif event.button() == QtCore.Qt.MouseButton.ForwardButton:
            self.background.image_browser.navigate_next()
        elif event.button() == QtCore.Qt.MouseButton.BackButton:
            self.background.image_browser.navigate_prev()

    def mouseMoveEvent(self, event: QtGui.QMouseEvent):
        # Track the cursor (clamped to the image) so the crosshair follows it.
        pos = self.force_bounded_pos(event.pos())
        self.end = pos
        self.update()

    def wheelEvent(self, event: QtGui.QWheelEvent) -> None:
        """Mouse wheel steps the selected class up/down (one notch = 120)."""
        wheelcounter = event.angleDelta()
        current_row = self.background.control_panel.classes_lw.currentRow()
        if wheelcounter.y() / 120 == -1:
            if current_row < self.background.control_panel.classes_lw.count() - 1:
                self.background.control_panel.classes_lw.setCurrentRow(current_row + 1)
        elif wheelcounter.y() / 120 == 1:
            if current_row > 0:
                self.background.control_panel.classes_lw.setCurrentRow(current_row - 1)

    def resizeEvent(self, _):
        # Resizing invalidates widget-space coordinates; drop any in-progress box.
        self.begin = QtCore.QPoint(self.img_bounds[0], self.img_bounds[1])
        self.end = QtCore.QPoint(self.img_bounds[0], self.img_bounds[1])
        self.drawing = False
        self.update()

    def get_real_coordinate(self, pos: QtCore.QPoint):
        """Widget position -> original-image pixel coordinates."""
        return round((pos.x() - self.img_bounds[0]) * self.ratio[0]), round((pos.y() - self.img_bounds[1]) * self.ratio[1])

    def get_scaled_coordinate(self, x: int, y: int):
        """Original-image pixel coordinates -> widget position."""
        return QtCore.QPoint(round(x / self.ratio[0] + self.img_bounds[0]), round(y / self.ratio[1] + self.img_bounds[1]))

    def force_top_left_corner(self, x: Tuple[int, int], y: Tuple[int, int]):
        """Return (x1, y1, x2, y2) with (x1, y1) the top-left corner."""
        return min(x[0], y[0]), min(x[1], y[1]), max(x[0], y[0]), max(x[1], y[1])

    def modify_painter_brush_and_pen(self, painter: QtGui.QPainter, rgb):
        # Translucent fill, solid 2px outline in the class color.
        painter.setBrush(QtGui.QBrush(QtGui.QColor(*rgb, 70)))
        pen = QtGui.QPen(QtGui.QColor(*rgb))
        pen.setWidth(2)
        painter.setPen(pen)

    def draw_img(self, painter: QtGui.QPainter, path: Path):
        """Scale the image at *path* to fit, center it, and record bounds/ratio."""
        size = self.size()
        point = QtCore.QPoint(0, 0)
        pixmap = QtGui.QPixmap(str(path))
        original_size = (pixmap.size().width(), pixmap.size().height())
        scaledPix = pixmap.scaled(size, QtCore.Qt.AspectRatioMode.KeepAspectRatio, transformMode=QtCore.Qt.TransformationMode.SmoothTransformation)
        # BUGFIX: integer division — QPoint setters require ints in PyQt5;
        # the original passed a float, which raises TypeError for odd sizes.
        point.setX((size.width() - scaledPix.width()) // 2)
        point.setY((size.height() - scaledPix.height()) // 2)
        self.img_bounds = (point.x(), point.y(), point.x() + scaledPix.size().width(), point.y() + scaledPix.size().height())
        try:
            self.ratio = (original_size[0] / scaledPix.width(), original_size[1] / scaledPix.height())
        except ZeroDivisionError:
            # Image missing/unreadable. BUGFIX: the original built the message
            # box with a garbled expression (`QtWidgets.QtWidgets.Qtclose = ...`,
            # an AttributeError) and then fell through to drawPixmap on an
            # already-ended painter; report, end the paint, move on, and return.
            msg_box = QtWidgets.QMessageBox()
            msg_box.critical(self.background.image_browser, 'Error loading image', "Image not found or it's dimension could not be resolved.", QtWidgets.QMessageBox.StandardButton.Ok)
            painter.end()
            self.background.image_browser.navigate_next()
            return
        painter.drawPixmap(point, scaledPix)

    def force_bounded_pos(self, pos: QtCore.QPoint):
        """Clamp *pos* (in place) to the displayed image rectangle; return it."""
        if pos.x() < self.img_bounds[0]:
            pos.setX(self.img_bounds[0])
        elif pos.x() > self.img_bounds[2]:
            pos.setX(self.img_bounds[2])
        if pos.y() < self.img_bounds[1]:
            pos.setY(self.img_bounds[1])
        elif pos.y() > self.img_bounds[3]:
            pos.setY(self.img_bounds[3])
        return pos

    def clear_annotations(self):
        """Drop all annotations for the current image and refresh the panel."""
        self.drawing = False
        self.current_annotations = []
        self.background.control_panel.update_current_annotations_lw(self.current_annotations)
        self.update()

    def save_annotations(self, null=False):
        """Persist the current image's annotations.

        With null=True the (possibly empty) set is saved as an explicit
        "no objects" record and navigation advances; otherwise an empty,
        never-nulled image is removed from the store.
        """
        if self.deleted or self.current_img_path is None:
            return
        if null:
            self.background.images.save(self.current_img_path.name, self.current_annotations)
            self.nulled = True
            self.background.image_browser.navigate_next()
        elif len(self.current_annotations):
            self.background.images.save(self.current_img_path.name, self.current_annotations)
        elif not self.nulled:
            self.background.images.pop(self.current_img_path.name)

    def undo_annotation(self):
        """Remove the most recent annotation, if any."""
        try:
            self.current_annotations.pop()
            self.background.control_panel.update_current_annotations_lw(self.current_annotations)
            self.update()
        except IndexError:
            pass

    def open_image(self, path: Path, annotations):
        """Switch to *path* with its stored *annotations* and reset state flags."""
        self.current_img_path = path
        self.current_annotations = annotations
        self.background.control_panel.update_current_annotations_lw(self.current_annotations)
        self.deleted = False
        self.drawing = False
        self.nulled = False
        self.update()

    def update_last_annotation_class(self, clas):
        # Intentionally only repaints: since v0.2.0 the class is selected
        # BEFORE annotating, so the last annotation is never retagged here.
        self.update()

    def push_back_annotation(self, ind):
        """Move annotation *ind* to the end (most-recent) position."""
        self.current_annotations.append(self.current_annotations.pop(ind))
        self.background.control_panel.update_current_annotations_lw(self.current_annotations)
        self.update()
class ControlPanel(QtWidgets.QWidget):
    '''Widget for control panel section (right).

    Hosts the per-image annotation list, the action buttons (save / null /
    undo / delete / clear) and the class search box + class list.
    '''

    def __init__(self, background: Background):
        super().__init__()
        self.background = background
        # Register on the shared Background hub so sibling widgets can reach us.
        self.background.control_panel = self
        self.classnames = self.background.classes
        # Lowercased names for case-insensitive "does this class exist" checks.
        self.classnames_lower_set = set(clas.lower() for clas in self.classnames)
        self.classlength = len(self.classnames)
        # Class ids sorted alphabetically by name (used while a filter is active).
        self.sorted_class_ids_by_name = sorted(list(range(self.classlength)), key=lambda x: self.classnames[x])
        # Class ids currently shown in classes_lw, row-aligned with its items.
        self.search_result_class_ids = list(range(self.classlength))
        layout = QtWidgets.QVBoxLayout()
        self.search = PropagableLineEdit()
        self.classes_lw = QtWidgets.QListWidget()
        self.current_annotations_lb = QtWidgets.QLabel('Current Annotations')
        self.current_annotations_lw = QtWidgets.QListWidget()
        self.current_annotations_lw.currentRowChanged.connect(self.current_annotations_row_changed)
        self.save_btn = QtWidgets.QPushButton('Save (S)')
        self.save_btn.clicked.connect(self.background.annotator.save_annotations)
        self.null_btn = QtWidgets.QPushButton('Null (N)')
        self.null_btn.clicked.connect(partial(self.background.annotator.save_annotations, null=True))
        self.undo_btn = QtWidgets.QPushButton('Undo (Z)')
        self.undo_btn.clicked.connect(self.background.annotator.undo_annotation)
        self.delete_btn = QtWidgets.QPushButton('Delete (D)')
        self.delete_btn.clicked.connect(self.delete_current_image)
        self.clear_btn = QtWidgets.QPushButton('Clear (X)')
        self.clear_btn.clicked.connect(self.background.annotator.clear_annotations)
        layout.addWidget(self.current_annotations_lb)
        layout.addWidget(self.current_annotations_lw)
        layout.addWidget(self.save_btn)
        layout.addWidget(self.null_btn)
        layout.addWidget(self.undo_btn)
        layout.addWidget(self.delete_btn)
        layout.addWidget(self.clear_btn)
        layout.addWidget(QtWidgets.QLabel('Class Search'))
        layout.addWidget(self.search)
        layout.addWidget(self.classes_lw)
        self.search.textChanged.connect(self.update_search_results)
        self.classes_lw.currentRowChanged.connect(self.selected_class_id_changed)
        self.update_search_results("")
        layout.setContentsMargins(20, 0, 20, 0)
        self.setLayout(layout)

    def keyPressEvent(self, event: QtGui.QKeyEvent) -> None:
        """Arrow keys move the class selection; Enter on the synthetic
        "Create class ..." row creates a new class from the search text."""
        current_row = self.classes_lw.currentRow()
        if event.key() == QtCore.Qt.Key.Key_Down:
            if current_row < self.classes_lw.count() - 1:
                self.classes_lw.setCurrentRow(current_row + 1)
        elif event.key() == QtCore.Qt.Key.Key_Up:
            if current_row > 0:
                self.classes_lw.setCurrentRow(current_row - 1)
        elif event.key() in (QtCore.Qt.Key.Key_Return, QtCore.Qt.Key.Key_Enter):
            if current_row == self.classes_lw.count() - 1 and "Create class " in self.classes_lw.currentItem().text():
                self.background.classes.create_class(self.search.text())
                self.update_classes_vars()
                self.update_search_results(self.search.text())

    def update_classes_vars(self):
        """Refresh the derived class lookups after the class list changed."""
        self.classnames_lower_set = set(clas.lower() for clas in self.classnames)
        self.classlength = len(self.classnames)
        self.sorted_class_ids_by_name = sorted(list(range(self.classlength)), key=lambda x: self.classnames[x])
        self.search_result_class_ids = list(range(self.classlength))

    def update_search_results(self, text: str):
        """Rebuild classes_lw with classes whose name starts with *text*
        (case-insensitive), appending a "Create class ..." row when no
        existing class matches the query exactly."""
        # Disconnect while rebuilding so row churn doesn't fire the handler.
        self.classes_lw.currentRowChanged.disconnect()
        self.classes_lw.clear()
        self.search_result_class_ids = []
        for clas in (self.sorted_class_ids_by_name if text else range(self.classlength)):
            if self.classnames[clas].lower().startswith(text.lower()):
                list_item = QtWidgets.QListWidgetItem(f"{self.classnames[clas]} (#{clas})")
                color = self.background.get_color(clas, self.classlength)
                list_item.setForeground(QtGui.QColor(*color))
                self.classes_lw.addItem(list_item)
                self.search_result_class_ids.append(clas)
        # BUGFIX: compare the lowercased query against the lowercase name set.
        # The original compared the raw text, so e.g. "Dog" still offered
        # creating a duplicate of an existing "dog" class.
        if text.lower() not in self.classnames_lower_set and "Create class " not in self.search.text():
            list_item = QtWidgets.QListWidgetItem(f'Create class "{self.search.text()}" (Enter)')
            self.classes_lw.addItem(list_item)
        self.classes_lw.setCurrentRow(0)
        self.classes_lw.currentRowChanged.connect(self.selected_class_id_changed)
        self.selected_class_id_changed()
        self.update()

    def update_current_annotations_lw(self, annotations):
        """Mirror *annotations* into the annotation list widget, color-coded."""
        self.current_annotations_lw.clear()
        for x1, y1, x2, y2, clas in annotations:
            list_item = QtWidgets.QListWidgetItem(f"{self.classnames[clas]} ({x1}, {y1}) * ({x2}, {y2})")
            color = self.background.get_color(clas, self.classlength)
            list_item.setForeground(QtGui.QColor(*color))
            self.current_annotations_lw.addItem(list_item)
        self.update()

    def current_annotations_row_changed(self, ind: int):
        """Selecting an annotation moves it to the end so undo targets it."""
        if ind == self.current_annotations_lw.count() - 1:
            return
        self.background.annotator.push_back_annotation(ind)
        self.current_annotations_lw.setCurrentRow(self.current_annotations_lw.count() - 1)

    def get_selected_class_id(self):
        """Return the class id of the selected row, or -1 when the synthetic
        "Create class ..." row (or nothing) is selected."""
        row = self.classes_lw.currentRow()
        if row < self.classes_lw.count() and "Create class " not in self.classes_lw.currentItem().text():
            return self.search_result_class_ids[self.classes_lw.currentRow()]
        return -1

    def selected_class_id_changed(self):
        self.background.annotator.update_last_annotation_class(self.get_selected_class_id())

    def navigate_prev_image(self):
        self.background.image_browser.navigate_prev()

    def navigate_next_image(self):
        self.background.image_browser.navigate_next()

    def delete_current_image(self):
        """Mark the current image deleted, drop it from the store, advance."""
        self.background.annotator.deleted = True
        self.background.images.remove(self.background.annotator.current_img_path.name)
        self.navigate_next_image()

    def mark_current_image_unannotate(self):
        """Remove the stored record and clear on-screen annotations."""
        self.background.images.remove(self.background.annotator.current_img_path.name)
        self.background.annotator.clear_annotations()
class ImageBrowser(QtWidgets.QWidget):
    '''Widget for image browser section (left)'''
    # Keeps two QListWidgets (annotated / unannotated images) in sync with
    # the shared Background state and drives navigation between images.
    def __init__(self, background: Background):
        """Build the browser UI and select an initial image if any exist."""
        super().__init__()
        self.background = background
        self.background.image_browser = self
        # img_name -> annotation count; None marks an unannotated image.
        self.images_states = self.background.images.image_annotation_counts
        # Displayed item title -> underlying image name (annotated titles
        # carry a " (count)" suffix, so titles cannot be used directly).
        self.image_names = {}
        self.current_image_name = None
        # Re-entrancy guard: currentRowChanged fires while the list widgets
        # are being rebuilt in update_list_widgets().
        self.updating = False
        # List widget currently holding the selection, and its lw_list index.
        self.current_lw = None
        self.current_lw_index = None
        layout = QtWidgets.QVBoxLayout()
        self.annotated_lw = QtWidgets.QListWidget()
        self.unannotated_lw = QtWidgets.QListWidget()
        self.prev_btn = QtWidgets.QPushButton('Prev <<')
        self.prev_btn.clicked.connect(self.navigate_prev)
        self.next_btn = QtWidgets.QPushButton('Next >>')
        self.next_btn.clicked.connect(self.navigate_next)
        # partial() tags each signal with the index of the emitting widget.
        self.annotated_lw.currentRowChanged.connect(partial(self.selected_image_changed, 0))
        self.unannotated_lw.currentRowChanged.connect(partial(self.selected_image_changed, 1))
        self.annotated_lb = QtWidgets.QLabel('Annotated')
        self.unannotated_lb = QtWidgets.QLabel('Unannotated')
        self.lw_list: List[QtWidgets.QListWidget] = [self.annotated_lw, self.unannotated_lw]
        layout.addWidget(self.prev_btn)
        layout.addWidget(self.next_btn)
        layout.addWidget(self.annotated_lb)
        layout.addWidget(self.annotated_lw)
        layout.addWidget(self.unannotated_lb)
        layout.addWidget(self.unannotated_lw)
        layout.setContentsMargins(20, 0, 20, 0)
        self.setLayout(layout)
        self.update_list_widgets()
        # Prefer starting on an unannotated image.
        if self.unannotated_lw.count():
            self.unannotated_lw.setCurrentRow(0)
        elif self.annotated_lw.count():
            self.annotated_lw.setCurrentRow(0)
    def selected_image_changed(self, lw_index: int, row: int):
        """Save the previous image's work and open the newly selected one.

        :param lw_index: index in lw_list of the widget that emitted the signal.
        :param row: newly selected row (-1 when the selection was cleared).
        """
        if row == -1 or self.updating:
            return
        self.background.annotator.save_annotations()
        self.background.annotator.clear_annotations()
        this_lw = self.lw_list[lw_index]
        img_name = self.image_names[this_lw.currentItem().text()] # Switch back (when item name != item path)
        other_ind = (lw_index + 1) % len(self.lw_list)
        other_lw = self.lw_list[other_ind]
        # Only one of the two lists may show a selection at a time.
        if other_lw.currentRow() != -1:
            other_lw.clearSelection()
        self.load_image(img_name)
    def update_list_widgets(self):
        """Rebuild both list widgets from images_states, re-selecting the
        currently opened image in whichever list it now belongs to."""
        self.updating = True
        self.annotated_lw.clear()
        self.unannotated_lw.clear()
        self.image_names = {}
        for img_name, count in self.images_states.items():
            if count is None:
                title = img_name
                self.unannotated_lw.addItem(QtWidgets.QListWidgetItem(title))
                self.image_names[title] = img_name
                if img_name == self.current_image_name:
                    self.unannotated_lw.setCurrentRow(self.unannotated_lw.count() - 1)
                    self.current_lw = self.unannotated_lw
                    self.current_lw_index = 1
            else:
                title = f"{img_name} ({count})"
                self.annotated_lw.addItem(QtWidgets.QListWidgetItem(title))
                self.image_names[title] = img_name
                if img_name == self.current_image_name:
                    self.annotated_lw.setCurrentRow(self.annotated_lw.count() - 1)
                    self.current_lw = self.annotated_lw
                    self.current_lw_index = 0
        self.annotated_lb.setText(f"Annotated ({self.annotated_lw.count()})")
        self.unannotated_lb.setText(f"Unannotated ({self.unannotated_lw.count()})")
        self.update()
        self.updating = False
    def load_image(self, img_name):
        """Open img_name in the annotator with its saved annotations (if
        any) and refresh both lists."""
        self.background.annotator.open_image(self.background.dir_path / img_name, self.background.images.annotations.get(img_name, []))
        self.current_image_name = img_name
        self.update_list_widgets()
    def other_lw(self, index):
        # The companion list widget of the one at `index` (there are two).
        return self.lw_list[(index + 1) % len(self.lw_list)]
    def navigate_prev(self):
        """Select the previous image, jumping to the end of the other list
        when moving past the top of the current one."""
        # NOTE(review): assumes current_lw is set, i.e. at least one image
        # exists — confirm callers guarantee this.
        other_lw = self.other_lw(self.current_lw_index)
        if self.current_lw.currentRow() - 1 < 0 and other_lw.count():
            other_lw.setCurrentRow(other_lw.count() - 1)
        else:
            # Modulo wraps inside this list when the other list is empty.
            self.current_lw.setCurrentRow((self.current_lw.currentRow() - 1) % self.current_lw.count())
    def navigate_next(self):
        """Select the next image, jumping to the start of the other list
        when moving past the end of the current one."""
        other_lw = self.other_lw(self.current_lw_index)
        if self.current_lw.currentRow() + 1 >= self.current_lw.count() and other_lw.count():
            other_lw.setCurrentRow(0)
        else:
            self.current_lw.setCurrentRow((self.current_lw.currentRow() + 1) % self.current_lw.count())
class MainWindow(QtWidgets.QMainWindow):
    """Top-level window: wires the image browser, annotator canvas and
    control panel together and installs the keyboard shortcuts."""
    def __init__(self, dir_path: str, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setWindowTitle("Ayolo - Annotating tool for yolo v4 datasets")
        widget = QtWidgets.QWidget(self)
        self.resize(1800, 900)
        self.setCentralWidget(widget)
        self.center_self()
        layout = QtWidgets.QHBoxLayout()
        # Shared state object; each widget registers itself on it.
        self.background = Background(dir_path)
        self.annotator = Annotator(self.background)
        self.control_panel = ControlPanel(self.background)
        self.image_browser = ImageBrowser(self.background)
        self.save_sc = QtWidgets.QShortcut(QtGui.QKeySequence("Ctrl+S"), self)
        self.save_sc.activated.connect(self.control_panel.save_btn.animateClick)
        self.null_sc = QtWidgets.QShortcut(QtGui.QKeySequence("Ctrl+N"), self)
        self.null_sc.activated.connect(self.control_panel.null_btn.animateClick)
        self.undo_sc = QtWidgets.QShortcut(QtGui.QKeySequence("Ctrl+Z"), self)
        self.undo_sc.activated.connect(self.control_panel.undo_btn.animateClick)
        self.delete_sc = QtWidgets.QShortcut(QtGui.QKeySequence("Ctrl+D"), self)
        self.delete_sc.activated.connect(self.control_panel.delete_btn.animateClick)
        self.clear_sc = QtWidgets.QShortcut(QtGui.QKeySequence("Ctrl+X"), self)
        # TODO(review): Ctrl+X is wired to save_btn, duplicating Ctrl+S, while
        # every other shortcut targets the button it is named after — this
        # looks like a copy/paste slip. Left unchanged because ControlPanel's
        # button set is not visible in this file chunk.
        self.clear_sc.activated.connect(self.control_panel.save_btn.animateClick)
        self.nav_prev_sc = QtWidgets.QShortcut(QtCore.Qt.Key.Key_PageUp, self)
        self.nav_prev_sc.activated.connect(self.image_browser.prev_btn.animateClick)
        self.nav_next_sc = QtWidgets.QShortcut(QtCore.Qt.Key.Key_PageDown, self)
        self.nav_next_sc.activated.connect(self.image_browser.next_btn.animateClick)
        layout.setContentsMargins(0, 20, 0, 20)
        # Browser | canvas | control panel; the canvas takes most space.
        layout.addWidget(self.image_browser, 2)
        layout.addWidget(self.annotator, 8)
        layout.addWidget(self.control_panel, 2)
        widget.setLayout(layout)
        self.background.control_panel.search.setFocus()
        self.activateWindow()
    def closeEvent(self, event):
        """Ask for confirmation; save annotations and close only on 'Yes'."""
        box = QtWidgets.QMessageBox()
        box.setText("Are you sure you want to exit ?")
        box.setWindowTitle("Exit")
        box.setIcon(QtWidgets.QMessageBox.Icon.Question)
        box.setStandardButtons(QtWidgets.QMessageBox.StandardButton.Yes | QtWidgets.QMessageBox.StandardButton.No)
        answer = box.exec()
        # Scoped-enum spelling for consistency with setIcon/setStandardButtons
        # above (and forward-compatibility: the bare QMessageBox.Yes no
        # longer exists in PyQt6).
        if answer == QtWidgets.QMessageBox.StandardButton.Yes:
            self.annotator.save_annotations()
            event.accept()
        else:
            # Bug fix: the original ignored `event`, so the window closed
            # even when the user answered "No"; veto the close instead.
            event.ignore()
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
:Authors: <NAME> & <NAME>
Module encapsulating all the classes required to run a simulation.
"""
import sys
sys.path.append("../solver")
import numpy as np
import math
import random as rand
from weatherTLKT import Weather
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from matplotlib import animation
from math import sin, cos, asin, atan2
from math import radians as rad
from utils import Player
import matplotlib
from matplotlib import animation
matplotlib.rcParams.update({'font.size': 14})
#: Actions that are authorized i.e. headings the boat can follow. Must be sorted.
ACTIONS = tuple(np.arange(0, 360, 45))
#: Maps each authorized heading to its index in ACTIONS.
# Dict comprehension instead of the original loop, which leaked the loop
# variables `i` and `a` into the module namespace.
A_DICT = {action: index for index, action in enumerate(ACTIONS)}
#: Constant to convert days in seconds.
DAY_TO_SEC = 24 * 60 * 60
#: Inverse factor: converts seconds to days.
SEC_TO_DAYS = 1 / DAY_TO_SEC
#: Earth radius in meters.
EARTH_RADIUS = 6371e3
#: Angular margin that characterizes the destination point, in radians.
DESTINATION_ANGLE = rad(0.005)
#: Constant to convert hours to days.
HOURS_TO_DAY = 1 / 24
class Simulator:
"""
Class embedding the boat and weather interactions with also the tools required\
to do projection on earth surface. For now, only used by the MCTS tree search.
:ivar numpy.array times: Vector of the instants used for the simulation \
in days.
:ivar numpy.array lons: Longitudes in degree in [0 , 360].
:ivar numpy.array lats: Latitudes in degree in [-90 : 90].
:ivar list state: Current state [time index, lat, lon] of the boat in \
(int,degree,degree).
:ivar list prevState: Previous state [time index, lat, lon] of the boat in \
(int,degree,degree).
:ivar uWindAvg: Interpolator for the wind velocity blowing toward West. Generated at initialisation with :py:meth:`WeatherTLKT.Weather.Interpolators`.
:vartype uWindAvg: `Interpolator`_
:ivar vWindAvg: Interpolator for the wind velocity blowing toward North. Generated at initialisation with :py:meth:`WeatherTLKT.Weather.Interpolators`.
:vartype vWindAvg: `Interpolator`_
"""
def __init__(self, times, lats, lons, WeatherAvg, stateInit):
    """
    Class constructor.

    :param numpy.array times: Instants of the simulation, in days.
    :param numpy.array lats: Latitudes in degrees.
    :param numpy.array lons: Longitudes in degrees.
    :param WeatherAvg: Averaged weather; its wind interpolators are built here.
    :param list stateInit: Initial state [time index, lat, lon].
    """
    self.times = times
    self.lats = lats
    self.lons = lons
    # list() copies, so later in-place state updates never mutate the
    # caller's stateInit.
    self.state = list(stateInit)
    self.prevState = list(stateInit)
    # Side effect: builds uInterpolator/vInterpolator on the Weather object.
    WeatherAvg.Interpolators()
    self.uWindAvg = WeatherAvg.uInterpolator
    self.vWindAvg = WeatherAvg.vInterpolator
def reset(self, stateInit):
    """
    Reset the simulated boat to a specific state.

    :param list stateInit: State to which the simulator is reinitialized.
    """
    # Two independent copies: in-place updates of self.state must never
    # leak into self.prevState or into the caller's list.
    self.state, self.prevState = list(stateInit), list(stateInit)
def getDistAndBearing(self, position, destination):
    """
    Return the great-circle distance and the initial bearing from
    *position* to *destination* (orthodrome). Haversine formula for the
    distance, forward-azimuth formula for the bearing. `Link to documentation`_

    :param position: Current position of the boat.
    :type position: list(float : lat, float : lon)
    :param destination: Point toward which the distance and initial bearing
        are computed.
    :type destination: list(float : lat, float : lon)
    :return: Shortest distance between the two points in meters, and
        initial bearing of the orthodrome trajectory in degrees in [0, 360).
    :rtype: float: distance, float: bearing
    """
    phi1, lam1 = rad(position[0]), rad(position[1])
    phi2, lam2 = rad(destination[0]), rad(destination[1])
    dLam = lam2 - lam1
    # Haversine term: squared half chord length between the two points.
    h = (sin((phi2 - phi1) / 2)) ** 2 + cos(phi1) * cos(phi2) * (sin(dLam / 2)) ** 2
    distance = 2 * EARTH_RADIUS * atan2(h ** 0.5, (1 - h) ** 0.5)
    # Forward azimuth of the orthodrome at the departure point.
    east = cos(phi2) * sin(dLam)
    north = cos(phi1) * sin(phi2) - sin(phi1) * cos(phi2) * cos(dLam)
    bearing = (atan2(east, north) * 180 / math.pi + 360) % 360
    return distance, bearing
def getDestination(self, distance, bearing, departure):
    """
    Return the destination point following an orthodrome trajectory for a
    given bearing and distance. `Link to
    documentation <http://www.movable-type.co.uk/scripts/latlong.html>`_.

    :param float distance: Distance in meters to the destination.
    :param float bearing: Initial bearing of the orthodrome trajectory
        starting at departure and ending at destination. In degrees.
    :param departure: Departure point of the trajectory.
    :type departure: list(float : lat, float : lon)
    :return: Destination reached following the orthodrome trajectory.
    :rtype: [float : lat, float : lon]
    """
    phi1 = rad(departure[0])
    lam1 = rad(departure[1])
    theta = rad(bearing)
    # Angular distance travelled on the sphere, in radians.
    delta = distance / EARTH_RADIUS
    phi2 = asin(sin(phi1) * cos(delta) + cos(phi1) * sin(delta) * cos(theta))
    lam2 = lam1 + atan2(sin(theta) * sin(delta) * cos(phi1),
                        cos(delta) - sin(phi1) * sin(phi2))
    # Convert back from radians to degrees.
    return [phi2 * 180 / math.pi, lam2 * 180 / math.pi]
def getWind(self):
    """
    Return the average wind interpolated at the current simulator state.

    :return: Wind toward the East in m/s and wind toward the North in m/s.
    :rtype: float : uAvg, float : vAvg
    """
    # Interpolators are queried at (time in days, lat, lon).
    point = [self.times[self.state[0]], self.state[1], self.state[2]]
    return self.uWindAvg(point), self.vWindAvg(point)
def doStep(self, action):
    """
    Does one iteration of the simulation (one time step) following a provided action.\
    Updates and returns the new boat state. Supposes constant wind during a time step. Also\
    constant boat speed. Assumes that the boat moves following orthodomes trajectories. This \
    is correct if the distances covered during the time step is relatively small (and we\
    are not close to the poles): the orthodrome's headings do not vary much.

    :param float action: Boat's heading in degree.
    :return: Reference toward the updated simulator's state.
    :rtype: self.state
    """
    # we copy the current state into the previous one
    self.prevState = list(self.state)
    # we get the wind at current state
    uWind, vWind = self.getWind()
    windMag, windAng = Weather.returnPolarVel(uWind, vWind)
    # We get speed from wind on sail
    # Point of sail: angle between the heading and the direction the wind
    # blows from. NOTE(review): `Boat` is not among this module's visible
    # imports — presumably defined elsewhere in this file; confirm.
    pOfSail = abs((windAng + 180) % 360 - action)
    boatSpeedDet = Boat.getDeterDyn(pOfSail, windMag, Boat.FIT_VELOCITY)
    # Stochastic speed: deterministic polar speed plus noise.
    boatSpeed = Boat.addUncertainty(boatSpeedDet)
    # We integrate it
    # Time step length in seconds (self.times is expressed in days).
    Dt = (self.times[self.state[0] + 1] - self.times[self.state[0]]) * DAY_TO_SEC
    DL = boatSpeed * Dt  # distance travelled
    # new position, correct if the boat follows an orthodrome
    newLat, newLon = self.getDestination(DL, action, [self.state[1], self.state[2]])
    # Advance the time index and move the boat; prevState keeps the old state.
    self.state[0], self.state[1], self.state[2] = self.state[0] + 1, newLat, newLon
    return self.state
@staticmethod
def fromGeoToCartesian(coordinates):
    """
    Transforms geographic coordinates to cartesian coordinates. The cartesian frame \
    has its origin at the center of the earth. Its orientation and so on is not explicitely given\
    since the function is only used to create a plane.

    :param coordinates: Coordinates in geographical frame.
    :type coordinates: [float : lat, float : lon]
    :return: x,y,z coordinates.
    :rtype: [float, float, float]
    """
    lat, lon = coordinates[:]
    lat, lon = rad(lat), rad(lon)
    # NOTE(review): this maps latitude as if it were a colatitude
    # (x = sin(lat)·cos(lon) instead of the usual cos(lat)·cos(lon)), so
    # the equator maps next to the z-axis pole. The docstring claims the
    # frame orientation is arbitrary (plane construction only) — confirm
    # the convention is intentional before reusing this elsewhere.
    x = sin(lat) * cos(lon)
    y = sin(lat) * sin(lon)
    z = cos(lat)
    return [x, y, z]
def prepareBaseMap(self, proj='mill', res='i', Dline=5, dl=1.5, dh=1, centerOfMap=None):
    """
    Prepares the figure to plot a trajectory. Based on mpl_toolkits.basemap.Basemap.

    :param str proj: Name of the projection: 'mill' (Miller, default) or
        'aeqd' (azimuthal equidistant; requires *centerOfMap*).
    :param str res: Resolution (see `Basemap <https://matplotlib.org/basemap/api/basemap_api.html#module-mpl_toolkits.basemap>`_ doc)
    :param int Dline: sampling size for the lats and lons arrays (reduce dimensions)
    :param float dl: height scaling factor ('aeqd' only).
    :param float dh: width scaling factor ('aeqd' only).
    :param centerOfMap: [lat, lon] center of the map ('aeqd' only).
    :return: The initialized basemap.
    :rtype: `Basemap <https://matplotlib.org/basemap/api/basemap_api.html#module-mpl_toolkits.basemap>`_
    :raises ValueError: If *proj* is not a supported projection (the
        original code crashed with UnboundLocalError in that case).
    """
    def _decorate(basemap):
        # Decoration shared by both projections: coastlines, continents,
        # boundary, and a subsampled lat/lon graticule labelled left/bottom.
        basemap.drawcoastlines()
        basemap.fillcontinents()
        basemap.drawmapboundary()
        basemap.drawparallels(self.lats[0::Dline], labels=[1, 0, 0, 0])
        basemap.drawmeridians(self.lons[0::2 * Dline], labels=[0, 0, 0, 1])

    if proj == 'mill':
        plt.figure()
        # Corner-bounded cylindrical-style map covering the full grid.
        basemap = Basemap(projection=proj, llcrnrlon=self.lons.min(),
                          urcrnrlon=self.lons.max(), llcrnrlat=self.lats.min(), urcrnrlat=self.lats.max(),
                          resolution=res)
    elif proj == 'aeqd':
        plt.figure()
        # Map extent in meters, derived from the angular span of the grid.
        wdth = (self.lons[-1] - self.lons[0]) * dh * math.pi / 180 * EARTH_RADIUS
        hght = (self.lats[-1] - self.lats[0]) * dl * math.pi / 180 * EARTH_RADIUS
        basemap = Basemap(width=wdth, height=hght, projection='aeqd', lat_0=centerOfMap[0], lon_0=centerOfMap[1],
                          resolution=res)
    else:
        raise ValueError("Unsupported projection: {!r} (use 'mill' or 'aeqd')".format(proj))
    _decorate(basemap)
    return basemap
def plotTraj(self, states, basemap, quiv=False, scatter=False, color='black', label=''):
"""
Draw the states on the map either as a trajectory and/or scatter of points. Can also plot mean wind
for each state and return it.
:param states: List of all the state (state is an array)
:type states: array or list
:param basemap: Basemap object on which the trajectory will be drawn
:type basemap: `Basemap <https://matplotlib.org/basemap/api/basemap_api.html#module-mpl_toolkits.basemap>`_
:param boolean quiv: If True, shows the wind at each time step and returns it.
:param boolean scatter: If True, plots the positions of the boat at\
each time step as scatter only
:param str color: Color of the trajectory points
:return: u,v if quiver is true
:rtype: float,float
"""
states = np.array(states)
posLat = states[:, 1]
posLon = states[:, 2]
times = self.times[states[:, 0].astype(int)]
x, y = basemap(posLon, posLat)
if scatter:
basemap.scatter(x, y, zorder=0, c=color, s=100, label=label)
else:
basemap.plot(x, y, markersize=4, zorder=0, | |
<reponame>BramKaashoek/commercetools-python-sdk
# DO NOT EDIT! This file is automatically generated
import datetime
import typing
from commercetools.types._abstract import _BaseType
if typing.TYPE_CHECKING:
from ._channel import ChannelReference
from ._common import LocalizedString, Price, PriceDraft, Reference, ReferenceTypeId
from ._customer_group import CustomerGroupReference
from ._product import Attribute
__all__ = [
"AccessDeniedError",
"ConcurrentModificationError",
"DiscountCodeNonApplicableError",
"DuplicateAttributeValueError",
"DuplicateAttributeValuesError",
"DuplicateFieldError",
"DuplicateFieldWithConflictingResourceError",
"DuplicatePriceScopeError",
"DuplicateVariantValuesError",
"EnumValueIsUsedError",
"ErrorByExtension",
"ErrorObject",
"ErrorResponse",
"ExtensionBadResponseError",
"ExtensionNoResponseError",
"ExtensionUpdateActionsFailedError",
"InsufficientScopeError",
"InvalidCredentialsError",
"InvalidCurrentPasswordError",
"InvalidFieldError",
"InvalidInputError",
"InvalidItemShippingDetailsError",
"InvalidJsonInputError",
"InvalidOperationError",
"InvalidSubjectError",
"InvalidTokenError",
"MatchingPriceNotFoundError",
"MissingTaxRateForCountryError",
"NoMatchingProductDiscountFoundError",
"OutOfStockError",
"PriceChangedError",
"ReferenceExistsError",
"RequiredFieldError",
"ResourceNotFoundError",
"ShippingMethodDoesNotMatchCartError",
"VariantValues",
]
class ErrorByExtension(_BaseType):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.ErrorByExtensionSchema`."
    # Identifies the API extension that produced an error (by id, optionally key).
    #: :class:`str`
    id: str
    #: Optional :class:`str`
    key: typing.Optional[str]
    def __init__(self, *, id: str = None, key: typing.Optional[str] = None) -> None:
        self.id = id
        self.key = key
        super().__init__()
    def __repr__(self) -> str:
        return "ErrorByExtension(id=%r, key=%r)" % (self.id, self.key)
class ErrorObject(_BaseType):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.ErrorObjectSchema`."
    # Base class for every concrete error type below. Each subclass pins
    # `code` to a fixed string in its __init__; the `code` parameter the
    # subclasses accept is ignored (a quirk of the code generator).
    #: :class:`str`
    code: str
    #: :class:`str`
    message: str
    def __init__(self, *, code: str = None, message: str = None) -> None:
        self.code = code
        self.message = message
        super().__init__()
    def __repr__(self) -> str:
        return "ErrorObject(code=%r, message=%r)" % (self.code, self.message)
class ErrorResponse(_BaseType):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.ErrorResponseSchema`."
    # Top-level error payload of an API response: HTTP status code, a
    # summary message and the list of individual error objects.
    #: :class:`int` `(Named` ``statusCode`` `in Commercetools)`
    status_code: int
    #: :class:`str`
    message: str
    #: Optional :class:`str`
    error: typing.Optional[str]
    #: Optional :class:`str`
    error_description: typing.Optional[str]
    #: Optional :class:`list`
    errors: typing.Optional[list]
    def __init__(
        self,
        *,
        status_code: int = None,
        message: str = None,
        error: typing.Optional[str] = None,
        error_description: typing.Optional[str] = None,
        errors: typing.Optional[list] = None
    ) -> None:
        self.status_code = status_code
        self.message = message
        self.error = error
        self.error_description = error_description
        self.errors = errors
        super().__init__()
    def __repr__(self) -> str:
        return (
            "ErrorResponse(status_code=%r, message=%r, error=%r, error_description=%r, errors=%r)"
            % (
                self.status_code,
                self.message,
                self.error,
                self.error_description,
                self.errors,
            )
        )
class VariantValues(_BaseType):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.VariantValuesSchema`."
    # Value object describing a product variant (sku, prices, attributes);
    # carried by DuplicateVariantValuesError below.
    #: Optional :class:`str`
    sku: typing.Optional[str]
    #: List of :class:`commercetools.types.PriceDraft`
    prices: typing.List["PriceDraft"]
    #: List of :class:`commercetools.types.Attribute`
    attributes: typing.List["Attribute"]
    def __init__(
        self,
        *,
        sku: typing.Optional[str] = None,
        prices: typing.List["PriceDraft"] = None,
        attributes: typing.List["Attribute"] = None
    ) -> None:
        self.sku = sku
        self.prices = prices
        self.attributes = attributes
        super().__init__()
    def __repr__(self) -> str:
        return "VariantValues(sku=%r, prices=%r, attributes=%r)" % (
            self.sku,
            self.prices,
            self.attributes,
        )
class AccessDeniedError(ErrorObject):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.AccessDeniedErrorSchema`."
    # `code` is fixed to "access_denied"; the parameter is accepted only
    # for signature uniformity across the generated error classes.
    def __init__(self, *, code: str = None, message: str = None) -> None:
        super().__init__(code="access_denied", message=message)
    def __repr__(self) -> str:
        return "AccessDeniedError(code=%r, message=%r)" % (self.code, self.message)
class ConcurrentModificationError(ErrorObject):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.ConcurrentModificationErrorSchema`."
    # Version conflict on update; carries the resource's current version so
    # clients can retry with it.
    #: Optional :class:`int` `(Named` ``currentVersion`` `in Commercetools)`
    current_version: typing.Optional[int]
    def __init__(
        self,
        *,
        code: str = None,
        message: str = None,
        current_version: typing.Optional[int] = None
    ) -> None:
        self.current_version = current_version
        super().__init__(code="ConcurrentModification", message=message)
    def __repr__(self) -> str:
        return (
            "ConcurrentModificationError(code=%r, message=%r, current_version=%r)"
            % (self.code, self.message, self.current_version)
        )
class DiscountCodeNonApplicableError(ErrorObject):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.DiscountCodeNonApplicableErrorSchema`."
    # NOTE: `dicount_code_id` (sic) mirrors the misspelled ``dicountCodeId``
    # field of the Commercetools API; do not "fix" it in this generated file.
    #: Optional :class:`str` `(Named` ``discountCode`` `in Commercetools)`
    discount_code: typing.Optional[str]
    #: Optional :class:`str`
    reason: typing.Optional[str]
    #: Optional :class:`str` `(Named` ``dicountCodeId`` `in Commercetools)`
    dicount_code_id: typing.Optional[str]
    #: Optional :class:`datetime.datetime` `(Named` ``validFrom`` `in Commercetools)`
    valid_from: typing.Optional[datetime.datetime]
    #: Optional :class:`datetime.datetime` `(Named` ``validUntil`` `in Commercetools)`
    valid_until: typing.Optional[datetime.datetime]
    #: Optional :class:`datetime.datetime` `(Named` ``validityCheckTime`` `in Commercetools)`
    validity_check_time: typing.Optional[datetime.datetime]
    def __init__(
        self,
        *,
        code: str = None,
        message: str = None,
        discount_code: typing.Optional[str] = None,
        reason: typing.Optional[str] = None,
        dicount_code_id: typing.Optional[str] = None,
        valid_from: typing.Optional[datetime.datetime] = None,
        valid_until: typing.Optional[datetime.datetime] = None,
        validity_check_time: typing.Optional[datetime.datetime] = None
    ) -> None:
        self.discount_code = discount_code
        self.reason = reason
        self.dicount_code_id = dicount_code_id
        self.valid_from = valid_from
        self.valid_until = valid_until
        self.validity_check_time = validity_check_time
        super().__init__(code="DiscountCodeNonApplicable", message=message)
    def __repr__(self) -> str:
        return (
            "DiscountCodeNonApplicableError(code=%r, message=%r, discount_code=%r, reason=%r, dicount_code_id=%r, valid_from=%r, valid_until=%r, validity_check_time=%r)"
            % (
                self.code,
                self.message,
                self.discount_code,
                self.reason,
                self.dicount_code_id,
                self.valid_from,
                self.valid_until,
                self.validity_check_time,
            )
        )
class DuplicateAttributeValueError(ErrorObject):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.DuplicateAttributeValueErrorSchema`."
    # A unique attribute value is already used; carries the offending attribute.
    #: :class:`commercetools.types.Attribute`
    attribute: "Attribute"
    def __init__(
        self, *, code: str = None, message: str = None, attribute: "Attribute" = None
    ) -> None:
        self.attribute = attribute
        super().__init__(code="DuplicateAttributeValue", message=message)
    def __repr__(self) -> str:
        return "DuplicateAttributeValueError(code=%r, message=%r, attribute=%r)" % (
            self.code,
            self.message,
            self.attribute,
        )
class DuplicateAttributeValuesError(ErrorObject):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.DuplicateAttributeValuesErrorSchema`."
    # Combination of unique attribute values is already used; carries them all.
    #: List of :class:`commercetools.types.Attribute`
    attributes: typing.List["Attribute"]
    def __init__(
        self,
        *,
        code: str = None,
        message: str = None,
        attributes: typing.List["Attribute"] = None
    ) -> None:
        self.attributes = attributes
        super().__init__(code="DuplicateAttributeValues", message=message)
    def __repr__(self) -> str:
        return "DuplicateAttributeValuesError(code=%r, message=%r, attributes=%r)" % (
            self.code,
            self.message,
            self.attributes,
        )
class DuplicateFieldError(ErrorObject):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.DuplicateFieldErrorSchema`."
    # A unique field value already exists; all payload fields are optional
    # here (contrast DuplicateFieldWithConflictingResourceError below).
    #: Optional :class:`str`
    field: typing.Optional[str]
    #: Optional :class:`typing.Any` `(Named` ``duplicateValue`` `in Commercetools)`
    duplicate_value: typing.Optional[typing.Any]
    #: Optional :class:`commercetools.types.Reference` `(Named` ``conflictingResource`` `in Commercetools)`
    conflicting_resource: typing.Optional["Reference"]
    def __init__(
        self,
        *,
        code: str = None,
        message: str = None,
        field: typing.Optional[str] = None,
        duplicate_value: typing.Optional[typing.Any] = None,
        conflicting_resource: typing.Optional["Reference"] = None
    ) -> None:
        self.field = field
        self.duplicate_value = duplicate_value
        self.conflicting_resource = conflicting_resource
        super().__init__(code="DuplicateField", message=message)
    def __repr__(self) -> str:
        return (
            "DuplicateFieldError(code=%r, message=%r, field=%r, duplicate_value=%r, conflicting_resource=%r)"
            % (
                self.code,
                self.message,
                self.field,
                self.duplicate_value,
                self.conflicting_resource,
            )
        )
class DuplicateFieldWithConflictingResourceError(ErrorObject):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.DuplicateFieldWithConflictingResourceErrorSchema`."
    # Like DuplicateFieldError, but the conflicting resource reference and
    # the duplicated value are guaranteed (non-optional) by the API.
    #: :class:`str`
    field: str
    #: :class:`typing.Any` `(Named` ``duplicateValue`` `in Commercetools)`
    duplicate_value: typing.Any
    #: :class:`commercetools.types.Reference` `(Named` ``conflictingResource`` `in Commercetools)`
    conflicting_resource: "Reference"
    def __init__(
        self,
        *,
        code: str = None,
        message: str = None,
        field: str = None,
        duplicate_value: typing.Any = None,
        conflicting_resource: "Reference" = None
    ) -> None:
        self.field = field
        self.duplicate_value = duplicate_value
        self.conflicting_resource = conflicting_resource
        super().__init__(code="DuplicateFieldWithConflictingResource", message=message)
    def __repr__(self) -> str:
        return (
            "DuplicateFieldWithConflictingResourceError(code=%r, message=%r, field=%r, duplicate_value=%r, conflicting_resource=%r)"
            % (
                self.code,
                self.message,
                self.field,
                self.duplicate_value,
                self.conflicting_resource,
            )
        )
class DuplicatePriceScopeError(ErrorObject):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.DuplicatePriceScopeErrorSchema`."
    # Two prices share the same scope; carries the conflicting Price objects.
    #: List of :class:`commercetools.types.Price` `(Named` ``conflictingPrices`` `in Commercetools)`
    conflicting_prices: typing.List["Price"]
    def __init__(
        self,
        *,
        code: str = None,
        message: str = None,
        conflicting_prices: typing.List["Price"] = None
    ) -> None:
        self.conflicting_prices = conflicting_prices
        super().__init__(code="DuplicatePriceScope", message=message)
    def __repr__(self) -> str:
        return (
            "DuplicatePriceScopeError(code=%r, message=%r, conflicting_prices=%r)"
            % (self.code, self.message, self.conflicting_prices)
        )
class DuplicateVariantValuesError(ErrorObject):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.DuplicateVariantValuesErrorSchema`."
    # A variant with the same sku/prices/attributes already exists; carries
    # the duplicated VariantValues (defined above).
    #: :class:`commercetools.types.VariantValues` `(Named` ``variantValues`` `in Commercetools)`
    variant_values: "VariantValues"
    def __init__(
        self,
        *,
        code: str = None,
        message: str = None,
        variant_values: "VariantValues" = None
    ) -> None:
        self.variant_values = variant_values
        super().__init__(code="DuplicateVariantValues", message=message)
    def __repr__(self) -> str:
        return "DuplicateVariantValuesError(code=%r, message=%r, variant_values=%r)" % (
            self.code,
            self.message,
            self.variant_values,
        )
class EnumValueIsUsedError(ErrorObject):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.EnumValueIsUsedErrorSchema`."
    # `code` is fixed to "EnumValueIsUsed"; the parameter is ignored.
    def __init__(self, *, code: str = None, message: str = None) -> None:
        super().__init__(code="EnumValueIsUsed", message=message)
    def __repr__(self) -> str:
        return "EnumValueIsUsedError(code=%r, message=%r)" % (self.code, self.message)
class ExtensionBadResponseError(ErrorObject):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.ExtensionBadResponseErrorSchema`."
    # An API extension returned a malformed response; payload identical to
    # the other Extension* errors, only the fixed code differs.
    #: Optional :class:`commercetools.types.LocalizedString` `(Named` ``localizedMessage`` `in Commercetools)`
    localized_message: typing.Optional["LocalizedString"]
    #: Optional :class:`object` `(Named` ``extensionExtraInfo`` `in Commercetools)`
    extension_extra_info: typing.Optional[object]
    #: :class:`commercetools.types.ErrorByExtension` `(Named` ``errorByExtension`` `in Commercetools)`
    error_by_extension: "ErrorByExtension"
    def __init__(
        self,
        *,
        code: str = None,
        message: str = None,
        localized_message: typing.Optional["LocalizedString"] = None,
        extension_extra_info: typing.Optional[object] = None,
        error_by_extension: "ErrorByExtension" = None
    ) -> None:
        self.localized_message = localized_message
        self.extension_extra_info = extension_extra_info
        self.error_by_extension = error_by_extension
        super().__init__(code="ExtensionBadResponse", message=message)
    def __repr__(self) -> str:
        return (
            "ExtensionBadResponseError(code=%r, message=%r, localized_message=%r, extension_extra_info=%r, error_by_extension=%r)"
            % (
                self.code,
                self.message,
                self.localized_message,
                self.extension_extra_info,
                self.error_by_extension,
            )
        )
class ExtensionNoResponseError(ErrorObject):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.ExtensionNoResponseErrorSchema`."
    # An API extension did not respond; payload identical to
    # ExtensionBadResponseError, only the fixed code differs.
    #: Optional :class:`commercetools.types.LocalizedString` `(Named` ``localizedMessage`` `in Commercetools)`
    localized_message: typing.Optional["LocalizedString"]
    #: Optional :class:`object` `(Named` ``extensionExtraInfo`` `in Commercetools)`
    extension_extra_info: typing.Optional[object]
    #: :class:`commercetools.types.ErrorByExtension` `(Named` ``errorByExtension`` `in Commercetools)`
    error_by_extension: "ErrorByExtension"
    def __init__(
        self,
        *,
        code: str = None,
        message: str = None,
        localized_message: typing.Optional["LocalizedString"] = None,
        extension_extra_info: typing.Optional[object] = None,
        error_by_extension: "ErrorByExtension" = None
    ) -> None:
        self.localized_message = localized_message
        self.extension_extra_info = extension_extra_info
        self.error_by_extension = error_by_extension
        super().__init__(code="ExtensionNoResponse", message=message)
    def __repr__(self) -> str:
        return (
            "ExtensionNoResponseError(code=%r, message=%r, localized_message=%r, extension_extra_info=%r, error_by_extension=%r)"
            % (
                self.code,
                self.message,
                self.localized_message,
                self.extension_extra_info,
                self.error_by_extension,
            )
        )
class ExtensionUpdateActionsFailedError(ErrorObject):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.ExtensionUpdateActionsFailedErrorSchema`."
    # Update actions returned by an API extension could not be applied;
    # payload identical to the other Extension* errors.
    #: Optional :class:`commercetools.types.LocalizedString` `(Named` ``localizedMessage`` `in Commercetools)`
    localized_message: typing.Optional["LocalizedString"]
    #: Optional :class:`object` `(Named` ``extensionExtraInfo`` `in Commercetools)`
    extension_extra_info: typing.Optional[object]
    #: :class:`commercetools.types.ErrorByExtension` `(Named` ``errorByExtension`` `in Commercetools)`
    error_by_extension: "ErrorByExtension"
    def __init__(
        self,
        *,
        code: str = None,
        message: str = None,
        localized_message: typing.Optional["LocalizedString"] = None,
        extension_extra_info: typing.Optional[object] = None,
        error_by_extension: "ErrorByExtension" = None
    ) -> None:
        self.localized_message = localized_message
        self.extension_extra_info = extension_extra_info
        self.error_by_extension = error_by_extension
        super().__init__(code="ExtensionUpdateActionsFailed", message=message)
    def __repr__(self) -> str:
        return (
            "ExtensionUpdateActionsFailedError(code=%r, message=%r, localized_message=%r, extension_extra_info=%r, error_by_extension=%r)"
            % (
                self.code,
                self.message,
                self.localized_message,
                self.extension_extra_info,
                self.error_by_extension,
            )
        )
class InsufficientScopeError(ErrorObject):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.InsufficientScopeErrorSchema`."
    # OAuth scope error; `code` is fixed to "insufficient_scope".
    def __init__(self, *, code: str = None, message: str = None) -> None:
        super().__init__(code="insufficient_scope", message=message)
    def __repr__(self) -> str:
        return "InsufficientScopeError(code=%r, message=%r)" % (self.code, self.message)
class InvalidCredentialsError(ErrorObject):
"Corresponding marshmallow schema is :class:`commercetools.schemas.InvalidCredentialsErrorSchema`."
def __init__(self, *, code: str = None, message: str = None) -> None:
super().__init__(code="InvalidCredentials", message=message)
def __repr__(self) -> | |
data.text not in ["condition-true", "condition-false"]:
logging.warning(
"Invalid value for key 'control': {}".format(data.text),
data.sourceline,
)
elif key == witness.STARTLINE:
self.check_linenumber(data.text, data.sourceline)
elif key == witness.ENDLINE:
self.check_linenumber(data.text, data.sourceline)
elif key == witness.STARTOFFSET:
self.check_character_offset(data.text, data.sourceline)
elif key == witness.ENDOFFSET:
self.check_character_offset(data.text, data.sourceline)
elif key == witness.ENTERLOOPHEAD:
if data.text == "false":
logging.info(
"Specifying value 'false' for key 'enterLoopHead' is unnecessary",
data.sourceline,
)
elif not data.text == "true":
logging.warning(
"Invalid value for key 'enterLoopHead': {}".format(data.text),
data.sourceline,
)
elif key == witness.ENTERFUNCTION:
for child in parent:
child.text = child.text.strip()
if (
child.tag.rpartition("}")[2] == witness.DATA
and child.attrib.get(witness.KEY) == witness.THREADID
and child.text in self.witness.threads
and self.witness.threads[child.text] is None
):
self.witness.threads[child.text] = data.text
break
self.check_functionname(data.text, data.sourceline)
elif key in ["returnFrom", witness.RETURNFROMFUNCTION]:
for child in parent:
child.text = child.text.strip()
if (
child.tag.rpartition("}")[2] == witness.DATA
and child.attrib.get(witness.KEY) == witness.THREADID
and child.text in self.witness.threads
and self.witness.threads[child.text] == data.text
):
del self.witness.threads[child.text]
break
self.check_functionname(data.text, data.sourceline)
elif key == witness.THREADID:
# Check disabled for SV-COMP'21 as questions about the specification
# need to be resolved first, see
# https://gitlab.com/sosy-lab/sv-comp/archives-2021/-/issues/30
# if data.text not in self.witness.threads:
# logging.warning(
# "Thread with id {} doesn't exist".format(data.text),
# data.sourceline,
# )
pass
elif key == witness.CREATETHREAD:
if data.text in self.witness.threads:
# logging.warning(
# "Thread with id {} has already been created".format(data.text),
# data.sourceline,
# )
pass
else:
self.witness.threads[data.text] = None
elif self.witness.defined_keys.get(key) == witness.EDGE:
# Other, tool-specific keys are allowed as long as they have been defined
pass
else:
logging.warning(
"Unknown key for edge data element: {}".format(key), data.sourceline
)
def handle_graph_data(self, data, key):
    """
    Performs checks for data elements that are direct children of a graph element.

    Dispatches on the data element's key, records the value on
    ``self.witness`` the first time it is seen, and reports invalid values
    and repeated definitions as warnings.

    NOTE(review): logging.warning() is called with the source line as an
    extra positional argument throughout; this presumes a logging setup that
    consumes it -- confirm against the project's logging configuration.
    """
    # Normalize surrounding whitespace before validating the value.
    data.text = data.text.strip()
    if key == witness.WITNESS_TYPE:
        # Must be one of the two known witness categories; recorded once.
        if data.text not in ["correctness_witness", "violation_witness"]:
            logging.warning(
                "Invalid value for key 'witness-type': {}".format(data.text),
                data.sourceline,
            )
        elif self.witness.witness_type is None:
            self.witness.witness_type = data.text
        else:
            logging.warning(
                "Found multiple definitions of witness-type", data.sourceline
            )
    elif key == witness.SOURCECODELANG:
        # Only C and Java witnesses are supported; recorded once.
        if data.text not in ["C", "Java"]:
            logging.warning(
                "Invalid value for key 'sourcecodelang': {}".format(data.text),
                data.sourceline,
            )
        elif self.witness.sourcecodelang is None:
            self.witness.sourcecodelang = data.text
        else:
            logging.warning(
                "Found multiple definitions of sourcecodelang", data.sourceline
            )
    elif key == witness.PRODUCER:
        # Any producer string is accepted, but only a single definition.
        if self.witness.producer is None:
            self.witness.producer = data.text
        else:
            logging.warning(
                "Found multiple definitions of producer", data.sourceline
            )
    elif key == witness.SPECIFICATION:
        # Multiple specification entries are allowed; collect them all.
        self.witness.specifications.add(data.text)
        if self.options.svcomp and data.text not in SV_COMP_SPECIFICATIONS:
            logging.warning("Invalid specification for SV-COMP", data.sourceline)
    elif key == witness.PROGRAMFILE:
        if self.witness.programfile is None:
            self.witness.programfile = data.text
            try:
                # Probe whether the referenced program is readable and, if
                # so, collect its info once.
                source = open(self.witness.programfile)
                source.close()
                if self.program_info is None:
                    self.collect_program_info(self.witness.programfile)
            except FileNotFoundError:
                # Only informational: the witness may be checked without
                # access to the original program file.
                logging.info(
                    "Programfile specified in witness could not be accessed",
                    data.sourceline,
                )
        else:
            logging.warning(
                "Found multiple definitions of programfile", data.sourceline
            )
    elif key == witness.PROGRAMHASH:
        # Compare (case-insensitively) against the actual program hash when
        # it is available and the configured check level requires it.
        # NOTE(review): gating on options.excludeRecentChecks > 1 -- confirm
        # the intended semantics of this option.
        if (
            self.program_info is not None
            and self.options.excludeRecentChecks > 1
            and data.text.lower() != self.program_info.get("sha256_hash")
        ):
            logging.warning(
                "Programhash does not match the hash specified in the witness",
                data.sourceline,
            )
        if self.witness.programhash is None:
            self.witness.programhash = data.text
        else:
            logging.warning(
                "Found multiple definitions of programhash", data.sourceline
            )
    elif key == witness.ARCHITECTURE:
        if self.witness.architecture is not None:
            logging.warning(
                "Found multiple definitions of architecture", data.sourceline
            )
        elif data.text in ["32bit", "64bit"]:
            self.witness.architecture = data.text
        else:
            logging.warning("Invalid architecture identifier", data.sourceline)
    elif key == witness.CREATIONTIME:
        if self.witness.creationtime is not None:
            logging.warning(
                "Found multiple definitions of creationtime", data.sourceline
            )
        else:
            self.witness.creationtime = data.text
            # Format check only for a newly recorded creationtime.
            if self.options.excludeRecentChecks > 1 and not re.match(
                CREATIONTIME_PATTERN, data.text
            ):
                logging.warning("Invalid format for creationtime", data.sourceline)
    elif self.witness.defined_keys.get(key) == witness.GRAPH:
        # Other, tool-specific keys are allowed as long as they have been defined
        pass
    else:
        logging.warning(
            "Unknown key for graph data element: {}".format(key), data.sourceline
        )
def handle_key(self, key):
    """
    Checks a key definition for validity.
    Should the key definition contain the mandatory 'id' and 'for'
    attributes the defined key may be used in the appropriate
    data elements of any following graph definitions, even if
    the key definition is faulty for other reasons.
    Appropriate are all data elements that are direct children
    of an element of type key_domain, which is the value of the 'for' attribute.
    Key definitions in a witness may have a child element of type 'default'
    specifying the default value for this key, but are currently expected
    to have no other children.
    """
    key_id = key.attrib.get("id")
    key_domain = key.attrib.get("for")
    if key_id and key_domain:
        if key_id in self.witness.defined_keys:
            logging.warning(
                "Found multiple key definitions with id '{}'".format(key_id),
                key.sourceline,
            )
        else:
            # Warn when a well-known key is declared for the wrong domain,
            # but register it regardless so later data elements validate.
            if witness.COMMON_KEYS.get(key_id, key_domain) != key_domain:
                logging.warning(
                    "Key '{0}' should be used for '{1}' elements but "
                    "was defined for '{2}' elements".format(
                        key_id, witness.COMMON_KEYS[key_id], key_domain
                    ),
                    key.sourceline,
                )
            self.witness.defined_keys[key_id] = key_domain
    else:
        # Report each missing mandatory attribute individually.
        if key_id is None:
            logging.warning("Key is missing attribute 'id'", key.sourceline)
        if key_domain is None:
            logging.warning("Key is missing attribute 'for'", key.sourceline)
    # At most one child (the optional 'default' element) is expected.
    if len(key) > 1:
        logging.warning(
            "Expected key to have at most one child but has {}".format(len(key)),
            key.sourceline,
        )
    for child in key:
        child.text = child.text.strip()
        # Strip the XML namespace prefix before comparing the tag name.
        if child.tag.rpartition("}")[2] == witness.DEFAULT:
            if len(child.attrib) != 0:
                logging.warning(
                    "Expected no attributes for 'default'"
                    "element but found {0} ({1})".format(
                        len(child.attrib), list(child.attrib)
                    ),
                    key.sourceline,
                )
            # Boolean-flag keys must default to 'false'.
            if key_id in [
                witness.ENTRY,
                witness.SINK,
                witness.VIOLATION,
                witness.ENTERLOOPHEAD,
            ]:
                if not child.text == "false":
                    logging.warning(
                        "Default value for {} should be 'false'".format(key_id),
                        key.sourceline,
                    )
            # Remember the default so omitted data elements can be resolved.
            self.key_defaults[key_id] = child.text
        else:
            logging.warning(
                "Invalid child for key element: {}".format(child.tag),
                child.sourceline,
            )
def handle_node(self, node):
    """
    Checks a node element for validity.

    Nodes must have an unique id but should not have any other attributes.
    Nodes in a witness are currently not supposed to have any non-data children.
    """
    attribute_count = len(node.attrib)
    if attribute_count > 1:
        logging.warning(
            "Expected node element to have exactly one attribute "
            "but has {}".format(attribute_count),
            node.sourceline,
        )
    identifier = node.attrib.get("id")
    if identifier is None:
        logging.warning(
            "Expected node element to have attribute 'id'", node.sourceline
        )
    elif identifier in self.witness.node_ids:
        logging.warning(
            "Found multiple nodes with id '{}'".format(identifier), node.sourceline
        )
    else:
        # First occurrence of this id: remember it for edge validation.
        self.witness.node_ids.add(identifier)
    for child in node:
        # Strip the XML namespace prefix before comparing the tag name.
        tag_name = child.tag.rpartition("}")[2]
        if tag_name == witness.DATA:
            self.handle_data(child, node)
        else:
            logging.warning(
                "Node has unexpected child element of type '{}'".format(child.tag),
                child.sourceline,
            )
def handle_edge(self, edge):
    """
    Checks an edge element for validity.
    Edges must have attributes 'source' and 'target', each referencing
    a different existing node by its id.
    Other attributes are allowed but no checks are currently performed for them.
    Edges in a witness are currently not supposed to have any non-data children.
    """
    source = edge.attrib.get("source")
    if source is None:
        logging.warning("Edge is missing attribute 'source'", edge.sourceline)
    else:
        if source in self.witness.sink_nodes:
            logging.warning(
                "Sink node should have no leaving edges", edge.sourceline
            )
        if not self.options.strictChecking:
            # Otherwise this information is stored in self.witness.transitions
            self.witness.transition_sources.add(source)
        # The node may legitimately be declared later in the document;
        # postpone the existence check.
        if source not in self.witness.node_ids:
            self.check_existence_later.add(source)
    target = edge.attrib.get("target")
    if target is None:
        logging.warning("Edge is missing attribute 'target'", edge.sourceline)
    else:
        if source == target and not self.options.ignoreSelfLoops:
            logging.warning(
                "Node '{}' has self-loop".format(source), edge.sourceline
            )
        if target not in self.witness.node_ids:
            self.check_existence_later.add(target)
    if self.options.strictChecking:
        # Strict mode additionally records the full transition, including
        # function enter/return annotations found on this edge's data.
        enter, return_from = (None, None)
        for child in edge:
            child.text = child.text.strip()
            if child.tag.rpartition("}")[2] == witness.DATA:
                self.handle_data(child, edge)
                key = child.attrib.get(witness.KEY)
                if key == witness.ENTERFUNCTION:
                    enter = child.text
                elif key in ["returnFrom", witness.RETURNFROMFUNCTION]:
                    return_from = child.text
            else:
                logging.warning(
                    "Edge has unexpected child element of type '{}'".format(
                        child.tag
                    ),
                    child.sourceline,
                )
        if source and target:
            if source in self.witness.transitions:
                self.witness.transitions[source].append(
                    (target, enter, return_from)
                )
            else:
                self.witness.transitions[source] = [(target, enter, return_from)]
    else:
        # Non-strict mode only validates the data children.
        for child in edge:
            if child.tag.rpartition("}")[2] == witness.DATA:
                self.handle_data(child, edge)
            else:
                logging.warning(
                    "Edge has unexpected child element of type '{}'".format(
                        child.tag
                    ),
                    child.sourceline,
                )
def handle_graph(self, graph):
"""
Checks a graph element for validity.
A graph may have an 'edgedefault' attribute specifying whether edges
are directed or undirected by default. As edges of witnesses
should always be directed the value of the 'edgedefault' attribute
is checked to be 'directed'.
Other attributes are allowed but no checks are currently performed for them.
Currently a witness graph is not supposed to have any children of types
other than 'node', 'edge' or 'data'.
"""
edge_default = graph.attrib.get("edgedefault")
if edge_default is None:
logging.warning(
"Graph definition is missing attribute 'edgedefault'", graph.sourceline
)
elif edge_default != "directed":
logging.warning("Edgedefault should be 'directed'", graph.sourceline)
for child in graph:
child_tag = child.tag.rpartition("}")[2]
if child_tag == witness.DATA:
self.handle_data(child, graph)
elif child_tag not in [witness.NODE, witness.EDGE]:
logging.warning(
"Graph element has | |
"""
The trained 1900-dimensional mLSTM babbler.
From the UniRep Paper [Alley2019]
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_addons as tfa
import tensorflow_probability as tfp
import numpy as np
import pandas as pd
import sys
sys.path.append('../')
from utils import aa_seq_to_int, int_to_aa
from utils.unirep_utils import bucketbatchpad
import os
# Helpers
def tf_get_shape(tensor):
    """Return the tensor's shape as a list, preferring static dimensions.

    Each entry is the statically known size when available; otherwise the
    corresponding dynamic (graph-time) size tensor is used.
    """
    static_shape = tensor.shape.as_list()
    dynamic_shape = tf.unstack(tf.shape(tensor))
    dims = []
    for static_dim, dynamic_dim in zip(static_shape, dynamic_shape):
        dims.append(dynamic_dim if static_dim is None else static_dim)
    return dims
def sample_with_temp(logits, t):
    """
    Takes temperature between 0 and 1 -> zero most conservative, 1 most liberal. Samples.
    """
    # Temperature scaling broadcasts across the batch.
    scaled_logits = logits / t
    probabilities = tf.nn.softmax(scaled_logits)
    # Sample from the resulting categorical distribution.
    distribution = tfp.distributions.Categorical(probs=probabilities)
    return distribution.sample()
def initialize_uninitialized(sess):
    """
    from https://stackoverflow.com/questions/35164529/in-tensorflow-is-there-any-way-to-just-initialize-uninitialised-variables
    """
    global_vars = tf.global_variables()
    # One session run yields an initialized-flag per global variable.
    init_flags = sess.run([tf.is_variable_initialized(var) for var in global_vars])
    uninitialized = [
        var for var, initialized in zip(global_vars, init_flags) if not initialized
    ]
    if uninitialized:
        sess.run(tf.variables_initializer(uninitialized))
# Setup to initialize from the correctly named model files.
class mLSTMCell1900(tf.nn.rnn_cell.RNNCell):
    """Multiplicative LSTM (mLSTM) cell initialized from saved UniRep weights.

    All parameters are initialized from ``.npy`` files in ``model_path`` whose
    names mirror the variable names of the originally trained model.

    Improvements over the previous version: the unused local ``nin`` was
    removed, the gain vectors are only loaded when weight normalization is
    enabled, and the two ``if self._wn`` blocks were merged.
    """

    def __init__(self,
                 num_units,
                 model_path="./",
                 wn=True,
                 scope='mlstm',
                 var_device='cpu:0',
                 ):
        super(mLSTMCell1900, self).__init__()
        self._num_units = num_units
        self._model_path = model_path
        # Whether to apply weight normalization using the stored gain vectors.
        self._wn = wn
        self._scope = scope
        self._var_device = var_device

    @property
    def state_size(self):
        # The state is a tuple of c and h.
        return (self._num_units, self._num_units)

    @property
    def output_size(self):
        # The output is h.
        return (self._num_units)

    def zero_state(self, batch_size, dtype):
        """Return an all-zero (c, h) state tuple for the given batch size."""
        c = tf.zeros([batch_size, self._num_units], dtype=dtype)
        h = tf.zeros([batch_size, self._num_units], dtype=dtype)
        return (c, h)

    def call(self, inputs, state):
        """Run one mLSTM step.

        Args:
            inputs: ``[batch_size, input_dim]`` tensor for the current step.
            state: tuple ``(c_prev, h_prev)``, each ``[batch_size, num_units]``.

        Returns:
            ``(h, (c, h))``: the step output and the new state tuple.
        """
        c_prev, h_prev = state
        with tf.variable_scope(self._scope):
            # Pretrained parameters; file names mirror the original
            # TensorFlow variable names.
            wx_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_wx:0.npy"))
            wh_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_wh:0.npy"))
            wmx_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_wmx:0.npy"))
            wmh_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_wmh:0.npy"))
            b_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_b:0.npy"))
            wx = tf.get_variable("wx", initializer=wx_init)
            wh = tf.get_variable("wh", initializer=wh_init)
            wmx = tf.get_variable("wmx", initializer=wmx_init)
            wmh = tf.get_variable("wmh", initializer=wmh_init)
            b = tf.get_variable("b", initializer=b_init)
            if self._wn:
                # Weight normalization: direction from the l2-normalized
                # columns, magnitude from the stored gain vectors.
                gx_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_gx:0.npy"))
                gh_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_gh:0.npy"))
                gmx_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_gmx:0.npy"))
                gmh_init = np.load(os.path.join(self._model_path, "rnn_mlstm_mlstm_gmh:0.npy"))
                gx = tf.get_variable("gx", initializer=gx_init)
                gh = tf.get_variable("gh", initializer=gh_init)
                gmx = tf.get_variable("gmx", initializer=gmx_init)
                gmh = tf.get_variable("gmh", initializer=gmh_init)
                wx = tf.nn.l2_normalize(wx, axis=0) * gx
                wh = tf.nn.l2_normalize(wh, axis=0) * gh
                wmx = tf.nn.l2_normalize(wmx, axis=0) * gmx
                wmh = tf.nn.l2_normalize(wmh, axis=0) * gmh
            # Multiplicative path: the input modulates the recurrent
            # contribution before the usual LSTM gating.
            m = tf.matmul(inputs, wmx) * tf.matmul(h_prev, wmh)
            z = tf.matmul(inputs, wx) + tf.matmul(m, wh) + b
            i, f, o, u = tf.split(z, 4, 1)
            i = tf.nn.sigmoid(i)
            f = tf.nn.sigmoid(f)
            o = tf.nn.sigmoid(o)
            u = tf.tanh(u)
            c = f * c_prev + i * u  # new cell state
            h = o * tf.tanh(c)      # new hidden state
        return h, (c, h)
class mLSTMCell(tf.nn.rnn_cell.RNNCell):
    """Multiplicative LSTM (mLSTM) cell with caller-supplied initializers.

    Unlike ``mLSTMCell1900``, parameters come from the given initializers
    (numpy arrays or tf initializers), which lets a stack load one layer's
    weights per cell.

    Improvements over the previous version: the deprecated ``dim=`` keyword
    of ``tf.nn.l2_normalize`` was replaced by ``axis=`` (numerically
    identical, and consistent with ``mLSTMCell1900``), the unused local
    ``nin`` was removed, and the two ``if self._wn`` blocks were merged.

    NOTE(review): the default ``b_init=tf.orthogonal_initializer()`` is
    unusual for a 1-D bias (orthogonal init requires a >=2-D shape); in
    practice callers pass concrete arrays -- confirm before relying on the
    defaults. They are kept unchanged for interface compatibility.
    """

    def __init__(self,
                 num_units,
                 wx_init=tf.orthogonal_initializer(),
                 wh_init=tf.orthogonal_initializer(),
                 wmx_init=tf.orthogonal_initializer(),
                 wmh_init=tf.orthogonal_initializer(),
                 b_init=tf.orthogonal_initializer(),
                 gx_init=tf.ones_initializer(),
                 gh_init=tf.ones_initializer(),
                 gmx_init=tf.ones_initializer(),
                 gmh_init=tf.ones_initializer(),
                 wn=True,
                 scope='mlstm',
                 var_device='cpu:0',
                 ):
        super(mLSTMCell, self).__init__()
        self._num_units = num_units
        # Whether to apply weight normalization using the gain vectors.
        self._wn = wn
        self._scope = scope
        self._var_device = var_device
        self._wx_init = wx_init
        self._wh_init = wh_init
        self._wmx_init = wmx_init
        self._wmh_init = wmh_init
        self._b_init = b_init
        self._gx_init = gx_init
        self._gh_init = gh_init
        self._gmx_init = gmx_init
        self._gmh_init = gmh_init

    @property
    def state_size(self):
        # The state is a tuple of c and h.
        return (self._num_units, self._num_units)

    @property
    def output_size(self):
        # The output is h.
        return (self._num_units)

    def zero_state(self, batch_size, dtype):
        """Return an all-zero (c, h) state tuple for the given batch size."""
        c = tf.zeros([batch_size, self._num_units], dtype=dtype)
        h = tf.zeros([batch_size, self._num_units], dtype=dtype)
        return (c, h)

    def call(self, inputs, state):
        """Run one mLSTM step (same equations as ``mLSTMCell1900.call``).

        Args:
            inputs: ``[batch_size, input_dim]`` tensor for the current step.
            state: tuple ``(c_prev, h_prev)``, each ``[batch_size, num_units]``.

        Returns:
            ``(h, (c, h))``: the step output and the new state tuple.
        """
        c_prev, h_prev = state
        with tf.variable_scope(self._scope):
            wx = tf.get_variable("wx", initializer=self._wx_init)
            wh = tf.get_variable("wh", initializer=self._wh_init)
            wmx = tf.get_variable("wmx", initializer=self._wmx_init)
            wmh = tf.get_variable("wmh", initializer=self._wmh_init)
            b = tf.get_variable("b", initializer=self._b_init)
            if self._wn:
                # Weight normalization: direction from the l2-normalized
                # columns, magnitude from the gain vectors.
                gx = tf.get_variable("gx", initializer=self._gx_init)
                gh = tf.get_variable("gh", initializer=self._gh_init)
                gmx = tf.get_variable("gmx", initializer=self._gmx_init)
                gmh = tf.get_variable("gmh", initializer=self._gmh_init)
                # 'axis' replaces the deprecated 'dim' argument.
                wx = tf.nn.l2_normalize(wx, axis=0) * gx
                wh = tf.nn.l2_normalize(wh, axis=0) * gh
                wmx = tf.nn.l2_normalize(wmx, axis=0) * gmx
                wmh = tf.nn.l2_normalize(wmh, axis=0) * gmh
            # Multiplicative path: the input modulates the recurrent
            # contribution before the usual LSTM gating.
            m = tf.matmul(inputs, wmx) * tf.matmul(h_prev, wmh)
            z = tf.matmul(inputs, wx) + tf.matmul(m, wh) + b
            i, f, o, u = tf.split(z, 4, 1)
            i = tf.nn.sigmoid(i)
            f = tf.nn.sigmoid(f)
            o = tf.nn.sigmoid(o)
            u = tf.tanh(u)
            c = f * c_prev + i * u  # new cell state
            h = o * tf.tanh(c)      # new hidden state
        return h, (c, h)
class mLSTMCellStackNPY(tf.nn.rnn_cell.RNNCell):
    """Stack of mLSTM cells whose per-layer weights are loaded from .npy files.

    Each layer is an ``mLSTMCell`` initialized from weight files named after
    the originally trained stacked model; optional dropout is applied between
    layers and an optional averaged residual connection over all layer
    outputs can be enabled.
    """

    def __init__(self,
                 num_units=256,
                 num_layers=4,
                 dropout=None,
                 res_connect=False,
                 wn=True,
                 scope='mlstm_stack',
                 var_device='cpu:0',
                 model_path="./"
                 ):
        # Really not sure if I should reuse here
        super(mLSTMCellStackNPY, self).__init__()
        self._model_path=model_path
        self._num_units = num_units
        self._num_layers = num_layers
        # dropout: keep-probability complement; None disables dropout.
        self._dropout = dropout
        self._res_connect = res_connect
        self._wn = wn
        self._scope = scope
        self._var_device = var_device
        bs = "rnn_mlstm_stack_mlstm_stack" # base scope see weight file names
        join = lambda x: os.path.join(self._model_path, x)
        # One mLSTMCell per layer, each initialized from that layer's
        # weight files (layer index appears twice in the file names).
        layers = [mLSTMCell(
            num_units=self._num_units,
            wn=self._wn,
            scope=self._scope + str(i),
            var_device=self._var_device,
            wx_init=np.load(join(bs + "{0}_mlstm_stack{1}_wx:0.npy".format(i,i))),
            wh_init=np.load(join(bs + "{0}_mlstm_stack{1}_wh:0.npy".format(i,i))),
            wmx_init=np.load(join(bs + "{0}_mlstm_stack{1}_wmx:0.npy".format(i,i))),
            wmh_init=np.load(join(bs + "{0}_mlstm_stack{1}_wmh:0.npy".format(i,i))),
            b_init=np.load(join(bs + "{0}_mlstm_stack{1}_b:0.npy".format(i,i))),
            gx_init=np.load(join(bs + "{0}_mlstm_stack{1}_gx:0.npy".format(i,i))),
            gh_init=np.load(join(bs + "{0}_mlstm_stack{1}_gh:0.npy".format(i,i))),
            gmx_init=np.load(join(bs + "{0}_mlstm_stack{1}_gmx:0.npy".format(i,i))),
            gmh_init=np.load(join(bs + "{0}_mlstm_stack{1}_gmh:0.npy".format(i,i)))
        ) for i in range(self._num_layers)]
        if self._dropout:
            # Wrap every layer except the last with output dropout.
            layers = [
                tf.nn.rnn_cell.DropoutWrapper(
                    layer, output_keep_prob=1-self._dropout) for layer in layers[:-1]] + layers[-1:]
        self._layers = layers

    @property
    def state_size(self):
        # The state is a tuple of c and h
        return (
            tuple(self._num_units for _ in range(self._num_layers)),
            tuple(self._num_units for _ in range(self._num_layers))
        )

    @property
    def output_size(self):
        # The output is h
        return (self._num_units)

    def zero_state(self, batch_size, dtype):
        """Return all-zero per-layer (c, h) state tuples for the batch."""
        c_stack = tuple(tf.zeros([batch_size, self._num_units], dtype=dtype) for _ in range(self._num_layers))
        h_stack = tuple(tf.zeros([batch_size, self._num_units], dtype=dtype) for _ in range(self._num_layers))
        return (c_stack, h_stack)

    def call(self, inputs, state):
        """Run one step through the whole stack, threading per-layer state."""
        # Inputs will be a [batch_size, input_dim] tensor.
        # Eg, input_dim for a 10-D embedding is 10
        # Unpack the state tuple
        c_prev, h_prev = state
        new_outputs = []
        new_cs = []
        new_hs = []
        for i, layer in enumerate(self._layers):
            # Layer 0 consumes the external input; each later layer consumes
            # the previous layer's output from this same timestep.
            if i == 0:
                h, (c,h_state) = layer(inputs, (c_prev[i],h_prev[i]))
            else:
                h, (c,h_state) = layer(new_outputs[-1], (c_prev[i],h_prev[i]))
            new_outputs.append(h)
            new_cs.append(c)
            new_hs.append(h_state)
        if self._res_connect:
            # Make sure number of layers does not affect the scale of the output
            scale_factor = tf.constant(1 / float(self._num_layers))
            final_output = tf.scalar_mul(scale_factor,tf.add_n(new_outputs))
        else:
            final_output = new_outputs[-1]
        return final_output, (tuple(new_cs), tuple(new_hs))
class babbler1900():
def __init__(self,
model_path="./pbab_weights",
batch_size=256
):
self._rnn_size = 1900
self._vocab_size = 26
self._embed_dim = 10
self._wn = True
self._shuffle_buffer = 10000
self._model_path = model_path
self._batch_size = batch_size
self._batch_size_placeholder = tf.placeholder(tf.int32, shape=[], name="batch_size")
self._minibatch_x_placeholder = tf.placeholder(
tf.int32, shape=[None, None], name="minibatch_x")
self._initial_state_placeholder = (
tf.placeholder(tf.float32, shape=[None, self._rnn_size]),
tf.placeholder(tf.float32, shape=[None, self._rnn_size])
)
self._minibatch_y_placeholder = tf.placeholder(
tf.int32, shape=[None, None], name="minibatch_y")
# Batch size dimensional placeholder which gives the
# Lengths of the input sequence batch. Used to index into
# The final_hidden output and select the stop codon -1
# final hidden for the graph operation.
self._seq_length_placeholder = tf.placeholder(
tf.int32, shape=[None], name="seq_len")
self._temp_placeholder = tf.placeholder(tf.float32, shape=[], name="temp")
rnn = mLSTMCell1900(self._rnn_size, model_path=model_path, wn=self._wn)
zero_state = rnn.zero_state(self._batch_size, tf.float32)
single_zero = rnn.zero_state(1, tf.float32)
mask = tf.sign(self._minibatch_y_placeholder) # 1 for nonpad, zero for pad
inverse_mask = 1 - mask # 0 for nonpad, 1 for pad
total_padded = tf.reduce_sum(inverse_mask)
pad_adjusted_targets = (self._minibatch_y_placeholder - 1) + inverse_mask
embed_matrix = tf.get_variable(
"embed_matrix", dtype=tf.float32,
initializer=np.load(os.path.join(self._model_path, "embed_matrix:0.npy"))
)
embed_cell = tf.nn.embedding_lookup(embed_matrix, self._minibatch_x_placeholder)
self._output, self._final_state = tf.nn.dynamic_rnn(
rnn,
embed_cell,
initial_state=self._initial_state_placeholder,
swap_memory=True,
parallel_iterations=1
)
# If we are training a model on top of the rep model, we need to access
# the final_hidden rep from output. Recall we are padding these sequences
# to max length, so the -1 position will not necessarily be the right rep.
# to get the right rep, I will use the provided sequence length to index.
# Subtract one for the last place
indices = self._seq_length_placeholder - 1
self._top_final_hidden = tf.gather_nd(self._output,
tf.stack([tf.range(tf_get_shape(self._output)[0],
dtype=tf.int32), indices], axis=1))
fmask = tf.cast(mask, tf.float32)[:, :, None]
self._avg_hidden = tf.reduce_sum(fmask * self._output,
axis=1) / tf.reduce_sum(fmask, axis=1)
# LEFTOFF self._output is a batch size, seq_len, num_hidden.
# I want to average along num_hidden, but I'll have to figure out how to mask out
# the dimensions along sequence_length which are longer than the given sequence.
flat = tf.reshape(self._output, [-1, self._rnn_size])
if os.path.exists(os.path.join(self._model_path, "fully_connected_weights:0.npy")):
weights_name="fully_connected_weights"
bias_name="fully_connected_biases"
else:
| |
in enumerate(layers):
h = misc.CaptureLayerOutput(post_process=None)
_ = self.module_layer._modules[l].register_forward_hook(h)
self.activation_hooks.append(h)
r'''
This object will be used to combine all the saliency maps together after we compute them.
'''
self.combine_maps = CombineSaliencyMaps(output_size=output_size, map_num=len(weights), weights=weights,
resize_mode=resize_mode, do_relu=do_relu)
r'''
Are we also computing the CAM map?
'''
if isinstance(model,resnet.ResNet_FastCAM) or expl_do_fast_cam:
self.do_fast_cam = True
self.do_nonclass_map = do_nonclass_map
self.cam_method = cam_method
self.cam_each_map = cam_each_map
else:
self.do_fast_cam = False
self.do_nonclass_map = None
self.cam_method = None
self.cam_each_map = None
def __call__(self, input, grad_enabled=False):
    """
    Args:
        input: input image with shape of (B, 3, H, W)
        grad_enabled: Set this to true if you need to compute grads when running the network. For instance, while training.
    Return:
        combined_map: [Batch x output height x output width] set of 2D saliency maps combined from each layer
            we compute from and combined with a CAM if we computed one.
        saliency_maps: A list [number of layers size] containing each saliency map [Batch x output height x output width].
            These will have been resized from their original layer size.
        logit: The output neural network logits.
    """
    r'''
    Don't compute grads if we do not need them. Cuts network compute time way down.
    '''
    with torch.set_grad_enabled(grad_enabled):
        r'''
        Get the size, but we support lists here for certain special cases.
        '''
        b, c, h, w = input[0].size() if isinstance(input,list) else input.size()
        self.model.eval()
        # The forward pass also fills self.activation_hooks as a side effect.
        if self.do_fast_cam:
            logit,cam_map = self.model(input,method=self.cam_method)
        else:
            logit = self.model(input)
        saliency_maps = []
        r'''
        Get the activation for each layer in our list. Then compute saliency and normalize.
        '''
        for i,l in enumerate(self.layers):
            activations = self.activation_hooks[i].data
            # NOTE(review): size() is unpacked as (b, k, u, v) here but as
            # (b, k, v, u) in SaliencyVector -- confirm axis naming intent.
            b, k, u, v = activations.size()
            activations = F.relu(activations)
            # Per-layer saliency, normalized, then flattened to 2D per image.
            saliency_map = self.get_norm(self.get_smap(activations)).view(b, u, v)
            saliency_maps.append(saliency_map)
        r'''
        Combine each saliency map together into a single 2D saliency map.
        '''
        combined_map, saliency_maps = self.combine_maps(saliency_maps)
        r'''
        If we computed a CAM, combine it with the forward only saliency map.
        '''
        if self.do_fast_cam:
            if self.do_nonclass_map:
                # Suppress the class-discriminative region instead of
                # highlighting it.
                combined_map = combined_map*(1.0 - cam_map)
                if self.cam_each_map:
                    saliency_maps = saliency_maps.squeeze(0)
                    saliency_maps = saliency_maps*(1.0 - cam_map)
                    saliency_maps = saliency_maps.unsqueeze(0)
            else:
                combined_map = combined_map * cam_map
                if self.cam_each_map:
                    saliency_maps = saliency_maps.squeeze(0)
                    saliency_maps = saliency_maps*cam_map
                    saliency_maps = saliency_maps.unsqueeze(0)
        return combined_map, saliency_maps, logit
# *******************************************************************************************************************
# *******************************************************************************************************************
class SaliencyVector(SaliencyMap):
    r'''
    Given an input model and parameters, run the neural network and compute saliency maps for given images,
    then additionally extract the feature vector at the most salient location of each image.

    Parameters: identical to SaliencyMap (see its docstring).

    Will Return:
        combined_map: [Batch x output height x output width] set of 2D saliency maps combined from each layer
            we compute from and combined with a CAM if we computed one.
        saliency_maps: A list [number of layers size] containing each saliency map [Batch x output height x output width].
            These will have been resized from their original layer size.
        logit: The output neural network logits.
        sal_location: A tuple of x,y locations which are the most salient in each image.
        feature_vecs: List of salient feature vectors. Each list item is associated with each layer in the layers argument.

    Bug fixes relative to the previous version (both were only correct for
    square saliency maps):
      * row/column extraction from the flattened index now divides/mods by
        the map WIDTH (sz[2]) instead of the height (sz[1]);
      * the x/y rescale ratios into each layer's grid were swapped.
    '''

    def __init__(self, model, layers, **kwargs):
        super(SaliencyVector, self).__init__(model, layers, **kwargs)

    def __call__(self, input, grad_enabled=False):
        """
        Args:
            input: input image with shape of (B, 3, H, W)
            grad_enabled: Set this to true if you need to compute grads when running the network. For instance, while training.
        Return:
            combined_map, saliency_maps, logit: as returned by SaliencyMap.
            sal_location: A tuple of x,y locations which are the most salient in each image.
            feature_vecs: List of salient feature vectors, one per requested layer.
        """
        # Base class runs the network and computes the saliency maps.
        combined_map, saliency_maps, logit = super(SaliencyVector, self).__call__(input, grad_enabled)
        sz = combined_map.size()  # (batch, height, width)
        combined_map = combined_map.reshape(sz[0], sz[1] * sz[2])
        # Index of the most salient pixel in the flattened (height*width) map.
        sal_loc = torch.argmax(combined_map, dim=1)
        # Row-major unflattening: index = y * width + x, so divide/mod by the
        # map width sz[2].
        sal_y = sal_loc // sz[2]
        sal_x = sal_loc % sz[2]
        feature_vecs = []
        for i, l in enumerate(self.layers):
            activations = self.activation_hooks[i].data
            b, k, v, u = activations.size()  # v -> height (y), u -> width (x)
            # Rescale the salient location into this layer's grid: x scales by
            # the width ratio, y by the height ratio.
            # NOTE(review): float() on sal_x/sal_y only works for batch size
            # 1 -- confirm callers never pass larger batches.
            loc_x = math.floor((u / sz[2]) * float(sal_x))
            loc_y = math.floor((v / sz[1]) * float(sal_y))
            loc = loc_y * u + loc_x
            # Grab the k-dimensional feature vector at that location from all
            # batch items.
            feature_vecs.append(activations.permute(0, 2, 3, 1).reshape(b, v * u, k)[:, loc, :])
        combined_map = combined_map.reshape(sz[0], sz[1], sz[2])
        sal_location = (sal_x, sal_y)
        return combined_map, saliency_maps, logit, sal_location, feature_vecs
# *******************************************************************************************************************
# *******************************************************************************************************************
class SaliencyModel(nn.Module):
r'''
Given an input model and parameters, run the neural network and compute saliency maps for given images.
This version will run as a regular batch on a mutli-GPU machine. It will eventually replace SaliencyMap.
input: input image with shape of (batch size, 3, H, W)
Parameters:
model: This should be a valid Torch neural network such as a ResNet.
layers: A list of layers you wish to process given by name. If none, we can auto compute a selection.
maps_method: How do we compute saliency for each activation map? Default: SMOEScaleMap
norm_method: How do we post process normalize each saliency map? Default: norm.GaussNorm2D
This can also be norm.GammaNorm2D or norm.RangeNorm2D.
output_size: This is the standard 2D size for the saliency maps. Torch nn.functional.interpolate
will be used to make each saliency map this size. Default [224,224]
resize_mode: Is given to Torch nn.functional.interpolate. Whatever it supports will work here.
do_relu: Should we do a final clamp on values to set all negative values to 0?
cam_method: A string with the method for running CAM. Can be:
gradcam - Default, Standard GradCAM from Selvaraju 2017
gradcampp - GradCAM++ from from Chattopadhyay 2018
xgradcam - XGradCAM from Fu 2020
Will Return:
combined_map: [Batch x output height x output width] set of 2D saliency maps combined from each layer
we compute from and combined with a CAM if we computed one.
saliency_maps: A list [number of layers size] containing each saliency map [Batch x output height x output width].
These will have been resized from their original layer size.
logit: The output neural network logits.
'''
def __init__(self, model, layers=None, maps_method=SMOEScaleMap, norm_method=norm.GammaNorm2D,
output_size=[224,224], weights=None, auto_layer=nn.ReLU, resize_mode='bilinear',
do_relu=False, cam_method='gradcam', module_layer=None, expl_do_fast_cam=False,
do_nonclass_map=False, cam_each_map=False):
assert isinstance(model, nn.Module), "model must be a valid PyTorch module"
assert isinstance(layers, list) or layers is None, "Layers must be | |
<reponame>siduojiang/BERTVision
###########################################################################################################
## IMPORTS
###########################################################################################################
import os
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import pickle
import tensorflow as tf
from tensorflow.keras.layers import LeakyReLU, ELU, ReLU
from tensorflow.keras.models import Sequential, Model, model_from_json
from tensorflow.keras.layers import Activation, Convolution2D, Conv2D, LocallyConnected2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, SeparableConv2D
from tensorflow.keras.layers import BatchNormalization, Flatten, Dense, Dropout, Input, concatenate, add, Add, ZeroPadding2D, GlobalMaxPooling2D, DepthwiseConv2D
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from tensorflow.keras import metrics, losses, initializers, backend
from tensorflow.keras.losses import SparseCategoricalCrossentropy, BinaryCrossentropy, CategoricalCrossentropy
from tensorflow.keras.utils import multi_gpu_model
from tensorflow.keras.initializers import glorot_uniform, Constant, lecun_uniform
from tensorflow.keras import backend as K
from tensorflow.keras.utils import plot_model
from sklearn.model_selection import train_test_split
from tensorflow.python.keras import backend, initializers, models, regularizers
# Silence TensorFlow's C++ logging (level 3 = errors only); must be set before TF does GPU work.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Short alias used by the layer/model definitions below.
layers = tf.keras.layers
# Fix RNG seeds for reproducible initialization and shuffling.
np.random.seed(42)
tf.random.set_seed(42)
tf.get_logger().setLevel('ERROR')
# Enable memory growth on every visible GPU so TF allocates VRAM on demand
# instead of reserving the entire device up front.
physical_devices = tf.config.list_physical_devices('GPU')
for pd_dev in range(len(physical_devices)):
    tf.config.experimental.set_memory_growth(physical_devices[pd_dev], True)
###########################################################################################################
## HELPER FUNCTIONS
###########################################################################################################
def gelu(x):
    """Gaussian Error Linear Unit (GELU) activation.

    A smooth alternative to ReLU, computed with the tanh-based approximation
    from the original paper: https://arxiv.org/abs/1606.08415

    Args:
        x: float Tensor.

    Returns:
        Tensor of the same shape as `x` with GELU applied elementwise.
    """
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
    cdf = 0.5 * (1.0 + tf.tanh(inner))
    return x * cdf
###########################################################################################################
## CUSTOM TENSORFLOW LAYERS
###########################################################################################################
class BertConcat(layers.Layer):
    """Learnable softmax-weighted combination over the input's last axis.

    Learns one scalar per slice of the final axis, softmaxes them into mixing
    weights, contracts the final axis away, and scales the result by a single
    learned parameter.
    """

    def __init__(self, units = 1):
        super().__init__()
        # Only units = 1 is supported at the moment; the argument is ignored.
        self.units = 1

    def build(self, input_shape):
        # One mixing weight per slice along the final input axis.
        self.w = self.add_weight(shape = (input_shape[-1],), trainable = True, initializer = 'random_normal', name = 'weights')
        # Single global scale applied after the weighted combination.
        self.t = self.add_weight(shape = (1), trainable = True, initializer = 'ones', name = 'probes')

    def call(self, inputs):
        mix = tf.nn.softmax(self.w)
        combined = tf.tensordot(mix, inputs, axes = (0, -1))
        return combined * self.t
class AdapterPooler(tf.keras.layers.Layer):
    """Projects per-encoder-layer embeddings down to `adapter_dim` features.

    Adapter idea inspired by Houlsby et al. 2019 (https://arxiv.org/abs/1902.00751).
    With `shared_weights=True` a single Dense projection is applied across
    positions via TimeDistributed; otherwise each position gets its own kernel
    via LocallyConnected1D.
    """
    def __init__(self, adapter_dim, init_scale = 1e-3, shared_weights = True):
        super().__init__()
        self.adapter_dim = adapter_dim
        # Small-stddev init keeps the adapter's initial output close to zero.
        self.initializer = tf.keras.initializers.TruncatedNormal(stddev=init_scale)
        if shared_weights:
            self.pooler_layer = tf.keras.layers.TimeDistributed(
                tf.keras.layers.Dense(self.adapter_dim, kernel_initializer=self.initializer))
        else:
            self.pooler_layer = tf.keras.layers.LocallyConnected1D(self.adapter_dim, 1, 1, kernel_initializer=self.initializer)
    def call(self, inputs):
        '''Input shape expected to be (batch_size, 386, 1024, 24)
        Call reshapes tensor into (batch_size * 386, 24, 1024),
        applies pooler_layer with gelu activation, then restores the
        (batch_size, 386, adapter_dim, 24) layout.
        '''
        sequence_dim = inputs.shape[1]
        embedding_dim = inputs.shape[2]
        encoder_dim = inputs.shape[3]
        #Combine batch and sequence length dimension
        X = tf.reshape(inputs, [-1, embedding_dim, encoder_dim])
        #Move encoder_dim to axis = 1
        X = tf.transpose(X, (0, 2, 1))
        X = self.pooler_layer(X)
        # Non-linearity matching BERT's own GELU activation.
        X = gelu(X)
        #Regenerate shape
        X = tf.transpose(X, (0, 2, 1))
        X = tf.reshape(X, [-1, sequence_dim, self.adapter_dim, encoder_dim])
        return X
class MeanConcat(layers.Layer):
    """Parameter-free layer that averages the input over its final axis."""

    def __init__(self, units = 1):
        super().__init__()
        # Only units = 1 is supported at the moment; the argument is ignored.
        self.units = 1

    def build(self, input_shape):
        # Record which axis is last for this input rank.
        self.last_axis = len(input_shape) - 1

    def call(self, inputs):
        axis = self.last_axis
        return tf.reduce_mean(inputs, axis)
###########################################################################################################
## CLASS CONTAINING BINARY CLASSIFICATION MODELS
###########################################################################################################
class BinaryClassificationModels(object):
    def __init__(self, **kwargs):
        """Initialize the model factory; extra keyword arguments are ignored."""
        # Number of visible GPUs; the model builders fall back to CPU when 0.
        self.__GPU_count = len(tf.config.list_physical_devices('GPU'))
######################################################
### Private Methods
######################################################
# validate required input parameter values aren't set to None
@staticmethod
def __require_params(**kwargs):
needed_args = [key for key,value in kwargs.items() if value is None]
if len(needed_args) > 0:
raise ValueError("If running in training, must specify following outputs: %s" %(', '.join(needed_args)))
return
def __verbose_print(self, model, model_name, input_shape, opt, loss, metrics):
print("".join(["\n", "*" * 100, "\nModel Details\n", "*" * 100, "\n"]))
print(f"Model Name: {model_name}")
print(f"Optimizer Details: name = {opt.get_config()['name']}, learning rate = {opt.get_config()['learning_rate']}")
print(f"Loss Details: name = {loss.get_config()['name']}, from_logits = {loss.get_config()['from_logits']}")
print(f"Input Shape: {tuple(input_shape)}")
print(f"Metrics: {"".join(metrics)}")
print("*" * 100)
print(model.summary())
print("*" * 100, "\n")
return
######################################################
### Public Methods
######################################################
#/////////////////////////////////////////////////////
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
### "Tiny" Tenney
#/////////////////////////////////////////////////////
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
def get_tiny_tenney(self, input_shape = (1, 1024, 26), gpu_device = "/gpu:0", verbose = True):
r"""Returns the TensorFlow 2.2 implementation of 'Tiny' Tenny linear model.
Inspired by Tenney et al. 2019 : https://arxiv.org/abs/1905.05950
Args:
input_shape (tuple, optional): Shape of the input tensor. Defaults to (1, 1024, 26).
gpu_device (str, optional): If GPU devices are available, defines which one to utilize. Defaults to "/gpu:0".
verbose (bool, optional): Log details to console. Defaults to True.
Returns:
tf.keras.Model: returns a TensorFlow 2.20 model (compiled, untrained)
"""
# Input validation
self.__require_params(input_shape = input_shape)
if (not gpu_device) or self.__GPU_count == 0:
gpu_device = "/cpu:0"
# Model hyperparameters and metadata
model_name = 'Binary Classification Tenney (Tiny)'
opt = Adam(lr = 1e-3, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
loss = CategoricalCrossentropy(from_logits = True)
metrics = ['accuracy']
# Construct model & compile
with tf.device(gpu_device):
inp = layers.Input(input_shape, name = 'input_layer')
X = BertConcat() (inp)
X = tf.squeeze(X, axis = 1)
X = layers.Dense(2) (X)
model = Model(inputs = inp, outputs = X, name = model_name)
model.compile(loss = loss, optimizer = opt, metrics = metrics)
# Print verbose output to console
if verbose:
self.__verbose_print(model, model_name, input_shape, opt, loss, metrics)
return model
#/////////////////////////////////////////////////////
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
### Adapter Pooler Tenney
#/////////////////////////////////////////////////////
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
    def get_adapter_pooler_tenney(self, input_shape = (1, 1024, 26), gpu_device = "/gpu:0", verbose = True):
        r"""Returns the TensorFlow 2.2 implementation of Adapter Pooler Tenney linear model.
        tensor contraction inspired by Tenney et al. 2019 : https://arxiv.org/abs/1905.05950
        adapter pooler layer inspired by Houlsby et al. 2019 : https://arxiv.org/abs/1902.00751v2
        Args:
            input_shape (tuple, optional): Shape of the input tensor. Defaults to (1, 1024, 26).
            gpu_device (str, optional): If GPU devices are available, defines which one to utilize. Defaults to "/gpu:0".
            verbose (bool, optional): Log details to console. Defaults to True.
        Returns:
            tf.keras.Model: returns a TensorFlow 2.20 model (compiled, untrained)
        """
        # Input validation
        self.__require_params(input_shape = input_shape)
        if (not gpu_device) or self.__GPU_count == 0:
            gpu_device = "/cpu:0"
        # Model hyperparameters and metadata
        model_name = 'Binary Classification Adapter Pooler Tenney'
        opt = Adam(lr = 1e-3, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
        loss = CategoricalCrossentropy(from_logits = True)
        metrics = ['accuracy']
        # Construct model & compile
        with tf.device(gpu_device):
            inp = layers.Input(input_shape, name = 'input_layer')
            # Keep the last slice along the final axis so it can be
            # concatenated back onto the pooled features below.
            inp_seq = inp[:,:,:,-1]
            # Learned softmax mixture across the final (encoder-layer) axis.
            X = BertConcat() (inp)
            X = tf.expand_dims(X, axis = -1, name ='expand_dims')
            # Shared adapter projection down to 386 features.
            X = AdapterPooler(386, shared_weights = True) (X)
            # Flatten the (adapter_dim, encoder) axes into one feature axis.
            X = tf.reshape(X, (-1, X.shape[1], X.shape[2] * X.shape[3]))
            X = tf.concat([X, inp_seq], axis = 2)
            X = tf.squeeze(X, axis = 1)
            X = layers.Dense(2) (X)
            model = Model(inputs = inp, outputs = X, name = model_name)
            model.compile(loss = loss, optimizer = opt, metrics = metrics)
        # Print verbose output to console
        if verbose:
            self.__verbose_print(model, model_name, input_shape, opt, loss, metrics)
        return model
#/////////////////////////////////////////////////////
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
### Xception (Abbreviated)
#/////////////////////////////////////////////////////
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
    def get_xception_abbreviated(self, input_shape = (1, 1024, 26), gpu_device = "/gpu:0", verbose = True):
        r"""Returns the TensorFlow 2.2 implementation of Xception (Abbreviated).
        Inspired by Chollet 2017 : http://arxiv.org/abs/1610.02357
        Args:
            input_shape (tuple, optional): Shape of the input tensor. Defaults to (1, 1024, 26).
            gpu_device (str, optional): If GPU devices are available, defines which one to utilize. Defaults to "/gpu:0".
            verbose (bool, optional): Log details to console. Defaults to True.
        Returns:
            tf.keras.Model: returns a TensorFlow 2.20 model (compiled, untrained)
        """
        # Input validation
        self.__require_params(input_shape = input_shape)
        if (not gpu_device) or self.__GPU_count == 0:
            gpu_device = "/cpu:0"
        # Model hyperparameters and metadata
        model_name = 'Binary Classification Xception (Abbreviated)'
        opt = Adam(lr = 1e-3, beta_1 = 0.9, beta_2 = 0.999, epsilon = 1e-8)
        loss = CategoricalCrossentropy(from_logits = True)
        metrics = ['accuracy']
        # Construct model & compile
        with tf.device(gpu_device):
            # input image size
            input_img = layers.Input(shape = input_shape, dtype = tf.float32)
            # Block 1: entry conv stack. Kernels are (1, 3) because the first
            # spatial dimension of the input has size 1.
            x = Conv2D(64, (1, 3), strides=(1, 3), use_bias=False) (input_img)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(128, (1, 3), use_bias=False)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            # 1x1 strided projection so the skip path matches Block 2's output.
            residual = Conv2D(512, (1, 1), strides=(1, 2), padding='same', use_bias=False)(x)
            residual = BatchNormalization()(residual)
            # Block 2: depthwise-separable convolutions (Xception-style).
            x = SeparableConv2D(256, (1, 3), padding='same', use_bias=False)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = SeparableConv2D(512, (1, 3), padding='same', use_bias=False)(x)
            x = BatchNormalization()(x)
            # Block 2 Pool, then merge with the residual (skip) branch.
            x = AveragePooling2D((1, 3), strides=(1, 2), padding='same')(x)
            x = layers.add([x, residual])
            # Fully Connected Layer
            x = GlobalAveragePooling2D()(x)
            x = layers.Dense(2, dtype = tf.float32, name = 'dense_2_final') (x)
            model = models.Model(input_img, x, name = model_name)
            model.compile(loss = loss, optimizer = opt, metrics = metrics)
        # Print verbose output to console
        if verbose:
            self.__verbose_print(model, model_name, input_shape, opt, loss, metrics)
        return model
#/////////////////////////////////////////////////////
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
### Xception
#/////////////////////////////////////////////////////
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
def get_xception(self, input_shape = (1, 1024, 26), gpu_device = "/gpu:0", verbose = True):
r"""Returns the TensorFlow 2.2 implementation of Xception for SQuAD v2 Binary Classification.
Inspired by Chollet 2017 : http://arxiv.org/abs/1610.02357
Args:
input_shape (tuple, optional): Shape of the input tensor. Defaults to | |
""" Module for calculating pseudoinverses. """
from sympy import *
class Calculator:
@staticmethod
def load_matrix(m, n):
"""
Returns the matrix which user gives as input or None if the input is incorrect.
"""
matrix = []
for i in range(m):
matrix.append(input().split())
if len(matrix[i]) != n:
return None
return matrix
@staticmethod
def find_row(A, i, j):
""" Greedy. """
m, n = A.shape
for row in range(i + 1, m):
if A[row, j] != 0:
return row
return None
@staticmethod
def find_column(A, i, j):
""" Greedy. """
m, n = A.shape
for column in range(j + 1, n):
if A[i, column] != 0:
return column
return None
    def gauss_jordan_row(self, A):
        """
        Row-reduce *A* with Gauss-Jordan elimination using elementary row ops.

        Every operation applied to A is mirrored on P (started as eye(m)),
        so P accumulates the product of the elementary row-operation matrices.

        Returns:
            (A, P): the reduced matrix and the accumulated row-operation matrix.
        """
        m, n = A.shape
        P = eye(m)
        i, j = 0, 0
        while i < m and j < n:
            # 1. step: If A[i, j] == 0 swap the ith row with some other row below to guarantee that A[i, j] != 0.
            # If all entries in the column are zero, increase j by 1.
            if A[i, j] == 0:
                row_to_swap_with = self.find_row(A, i, j)
                if row_to_swap_with is None:
                    j += 1
                    continue
                A = A.elementary_row_op(op='n<->m', row1=i, row2=row_to_swap_with)
                P = P.elementary_row_op(op='n<->m', row1=i, row2=row_to_swap_with)
            if A[i, j] != 0:
                # 2. step: Divide the ith row by A[i, j] to make the pivot entry = 1.
                # k = Rational(1, A[i, j])
                k = 1 / A[i, j]
                if k != 1:
                    A = A.elementary_row_op(op='n->kn', row=i, k=k)
                    P = P.elementary_row_op(op='n->kn', row=i, k=k)
                # 3. step: Eliminate all other entries in the jth column by subtracting suitable multiples of the
                # ith row from the other rows.
                for row in range(m):
                    if row != i and A[row, j] != 0:
                        k = -A[row, j]
                        A = A.elementary_row_op(op='n->n+km', row=row, k=k, row2=i)
                        P = P.elementary_row_op(op='n->n+km', row=row, k=k, row2=i)
            # 4. step: Increase i by 1 and j by 1 to choose the new pivot element. Return to step 1.
            i += 1
            j += 1
        return A, P
    def gauss_jordan_column(self, A):
        """
        Column-reduce *A* with Gauss-Jordan elimination using elementary column ops.

        Every operation applied to A is mirrored on Q (started as eye(n)),
        so Q accumulates the product of the elementary column-operation matrices.

        Returns:
            (A, Q): the reduced matrix and the accumulated column-operation matrix.
        """
        m, n = A.shape
        Q = eye(n)
        i, j = 0, 0
        while i < m and j < n:
            # 1. step: If A[i, j] == 0 swap the jth column with some other column to the right to guarantee that A[i, j] != 0.
            # If all entries in the row are zero, increase i by 1.
            if A[i, j] == 0:
                column_to_swap_with = self.find_column(A, i, j)
                if column_to_swap_with is None:
                    i += 1
                    continue
                A = A.elementary_col_op(op='n<->m', col1=j, col2=column_to_swap_with)
                Q = Q.elementary_col_op(op='n<->m', col1=j, col2=column_to_swap_with)
            if A[i, j] != 0:
                # 2. step: Divide the jth column by A[i, j] to make the pivot entry = 1.
                # k = Rational(1, A[i, j])
                k = 1 / A[i, j]
                if k != 1:
                    A = A.elementary_col_op(op='n->kn', col=j, k=k)
                    Q = Q.elementary_col_op(op='n->kn', col=j, k=k)
                # 3. step: Eliminate all other entries in the ith row by subtracting suitable multiples of the
                # jth column from the other columns.
                for column in range(n):
                    if column != j and A[i, column] != 0:
                        k = -A[i, column]
                        A = A.elementary_col_op(op='n->n+km', col=column, k=k, col2=j)
                        Q = Q.elementary_col_op(op='n->n+km', col=column, k=k, col2=j)
            # 4. step: Increase i by 1 and j by 1 to choose the new pivot element. Return to step 1.
            i += 1
            j += 1
        return A, Q
def calculate_P_Q(self, matrix):
if matrix is None:
return None, None
A = matrix.copy()
A, P = self.gauss_jordan_row(A)
A, Q = self.gauss_jordan_column(A)
return P, Q
@staticmethod
def calculate_general_1_inverse(matrix):
m, n = matrix.shape
r = matrix.rank()
hasX1 = m - r > 0
hasX2 = n - r > 0
hasX3 = hasX1 and hasX2
X0 = MatrixSymbol('X0', r, r)
X1 = MatrixSymbol('X1', r, m - r)
X2 = MatrixSymbol('X2', n - r, r)
X3 = MatrixSymbol('X3', n - r, m - r)
if hasX3:
R = BlockMatrix(2, 2, [X0, X1, X2, X3])
elif hasX1:
R = BlockMatrix(1, 2, [X0, X1])
elif hasX2:
R = BlockMatrix(2, 1, [X0, X2])
else:
R = BlockMatrix(1, 1, [X0])
R = R.subs(X0, eye(r))
return R
@staticmethod
def calculate_general_12_inverse(matrix):
m, n = matrix.shape
r = matrix.rank()
hasX1 = m - r > 0
hasX2 = n - r > 0
hasX3 = hasX1 and hasX2
X0 = MatrixSymbol('X0', r, r)
X1 = MatrixSymbol('X1', r, m - r)
X2 = MatrixSymbol('X2', n - r, r)
X3 = MatrixSymbol('X3', n - r, m - r)
if hasX3:
R = BlockMatrix(2, 2, [X0, X1, X2, X3])
elif hasX1:
R = BlockMatrix(1, 2, [X0, X1])
elif hasX2:
R = BlockMatrix(2, 1, [X0, X2])
else:
R = BlockMatrix(1, 1, [X0])
R = R.subs(X0, eye(r))
if hasX3:
R = R.subs(X3, X2 * X1)
return R
    @staticmethod
    def calculate_general_13_inverse(matrix, P):
        """Return the symbolic block form of the {1,3}-inverses of *matrix*.

        With S = P * P^T (P from the row reduction), the top-right block is
        fixed to X1 = -S2 * S4**-1, while X2 and X3 remain free symbols.
        """
        m, n = matrix.shape
        r = matrix.rank()
        hasX1 = m - r > 0
        hasX2 = n - r > 0
        hasX3 = hasX1 and hasX2
        X0 = MatrixSymbol('X0', r, r)
        X1 = MatrixSymbol('X1', r, m - r)
        X2 = MatrixSymbol('X2', n - r, r)
        X3 = MatrixSymbol('X3', n - r, m - r)
        # Shapes
        # S -> (m, m)
        # S1 -> (r, r)
        # S2 -> (r, m - r)
        # S3 -> (m - r, r)
        # S4 -> (m - r, m - r)
        S = P * P.transpose()
        # X1 = -S2 * S4 ** -1, it is guaranteed that S4 has inverse
        S2 = S[:r, r:]
        S4 = S[r:, r:]
        if hasX3:
            R = BlockMatrix(2, 2, [X0, X1, X2, X3])
        elif hasX1:
            R = BlockMatrix(1, 2, [X0, X1])
        elif hasX2:
            R = BlockMatrix(2, 1, [X0, X2])
        else:
            R = BlockMatrix(1, 1, [X0])
        R = R.subs(X0, eye(r))
        if hasX3 or hasX1:
            R = R.subs(X1, -S2 * S4 ** -1)
        return R
    @staticmethod
    def calculate_general_14_inverse(matrix, Q):
        """Return the symbolic block form of the {1,4}-inverses of *matrix*.

        With T = Q^T * Q (Q from the column reduction), the bottom-left block
        is fixed to X2 = -T4**-1 * T3, while X1 and X3 remain free symbols.
        """
        m, n = matrix.shape
        r = matrix.rank()
        hasX1 = m - r > 0
        hasX2 = n - r > 0
        hasX3 = hasX1 and hasX2
        X0 = MatrixSymbol('X0', r, r)
        X1 = MatrixSymbol('X1', r, m - r)
        X2 = MatrixSymbol('X2', n - r, r)
        X3 = MatrixSymbol('X3', n - r, m - r)
        # Shapes
        # T -> (n, n)
        # T1 -> (r, r)
        # T2 -> (r, n - r)
        # T3 -> (n - r, r)
        # T4 -> (n - r, n - r)
        T = Q.transpose() * Q
        # X2 = -T4 ** -1 * T3, it is guaranteed that T4 has inverse
        T3 = T[r:, :r]
        T4 = T[r:, r:]
        if hasX3:
            R = BlockMatrix(2, 2, [X0, X1, X2, X3])
        elif hasX1:
            R = BlockMatrix(1, 2, [X0, X1])
        elif hasX2:
            R = BlockMatrix(2, 1, [X0, X2])
        else:
            R = BlockMatrix(1, 1, [X0])
        R = R.subs(X0, eye(r))
        if hasX3 or hasX2:
            R = R.subs(X2, -T4 ** -1 * T3)
        return R
@staticmethod
def calculate_moore_penrose_inverse(matrix, P, Q):
m, n = matrix.shape
r = matrix.rank()
hasX1 = m - r > 0
hasX2 = n - r > 0
hasX3 = hasX1 and hasX2
X0 = MatrixSymbol('X0', r, r)
X1 = MatrixSymbol('X1', r, m - r)
X2 = MatrixSymbol('X2', n - r, r)
X3 = MatrixSymbol('X3', n - r, m - r)
# Shapes
#
# S -> (m, m)
# S1 -> (r, r)
# S2 -> (r, m - r)
# S3 -> (m - r, r)
# S4 -> (m - r, m - r)
#
# T -> (n, n)
# T1 -> (r, r)
# T2 -> (r, n - r)
# T3 -> (n - r, r)
# T4 -> (n - r, n - r)
S = P * P.transpose()
T = Q.transpose() * Q
S2 = S[:r, r:]
S4 = S[r:, r:]
T3 = T[r:, :r]
T4 = T[r:, r:]
if hasX3:
R = BlockMatrix(2, 2, [X0, X1, X2, X3])
elif hasX1:
R = BlockMatrix(1, 2, [X0, X1])
| |
if weight is None or weight.node().mustBeNone():
assert len(input_sizes) > 1
weight_value = torch.tensor([1.] * input_sizes[1]).type(
'torch.' + input.type().scalarType() + 'Tensor')
weight = g.op("Constant", value_t=weight_value)
if bias is None or bias.node().mustBeNone():
assert len(input_sizes) > 1
bias_value = torch.tensor([0.] * input_sizes[1]).type(
'torch.' + input.type().scalarType() + 'Tensor')
bias = g.op("Constant", value_t=bias_value)
return g.op("InstanceNormalization", input, weight, bias, epsilon_f=eps)
@parse_args('v', 'i', 'i', 'i')
def unfold(g, input, dimension, size, step):
    # No native ONNX equivalent: export aten::unfold as an ATen fallback node.
    return g.op("ATen", input, operator_s="unfold", dimension_i=dimension, size_i=size, step_i=step)
@parse_args('v', 'v', 'i')
def _weight_norm(graph, v, g, dim):
    # NOTE: here `graph` is the export graph and `g` is the norm tensor input,
    # unlike the other symbolics in this file where `g` names the graph.
    return graph.op("ATen", v, g, dim_i=dim, operator_s="_weight_norm")
@parse_args('v', 't', 't', 't')
def elu(g, input, alpha, scale, input_scale):
    """Export aten::elu -> ONNX Elu; only the default scales (1.0) are supported."""
    for arg_name, arg_val in (("scale", scale), ("input_scale", input_scale)):
        if arg_val and arg_val != 1.:
            return _unimplemented(arg_name, "does not support %s in Elu" % arg_name)
    # See Note [Export inplace]
    return g.op("Elu", input, alpha_f=_scalar(alpha))
def selu(g, input):
    # aten::selu maps 1:1 onto the ONNX Selu operator (default alpha/gamma).
    return g.op("Selu", input)
@parse_args('v', 'i', 'v')
def index_select(g, self, dim, index):
    # aten::index_select corresponds to ONNX Gather along the given axis.
    return g.op("Gather", self, index, axis_i=dim)
def index_put(g, self, indices_list_value, values, accumulate):
    """Export aten::index_put as an ATen fallback (no native ONNX equivalent)."""
    indices = _unpack_list(indices_list_value)
    return g.op("ATen", self, *indices, values, accumulate, operator_s='index_put')
def type_as(g, self, other):
    """Export aten::type_as: cast `self` to `other`'s scalar type when known."""
    if (self.isCompleteTensor() and other.isCompleteTensor()
            and self.type().scalarType() == other.type().scalarType()):
        # Types already match: a cast would be a no-op, so skip it entirely.
        return self
    if not other.isCompleteTensor():
        # Target dtype unknown at export time: defer to an ATen fallback op.
        return g.op("ATen", self, other, operator_s="type_as")
    target_type = other.type().scalarType()
    return g.op("Cast", self, to_i=cast_pytorch_to_onnx[target_type])
@parse_args('v', 'is', 'v', 'v', 'f', 'i')
def layer_norm(g, self, normalized_shape, weight, bias, eps, cudnn_enable):
    # No native ONNX op at this opset: emit an ATen fallback node.
    return g.op("ATen", self, weight, bias, normalized_shape_i=normalized_shape,
                eps_f=eps, cudnn_enable_i=cudnn_enable, operator_s="layer_norm")
# ignore clone operators that are inserted by PyTorch autograd
def clone(g, input):
    return input
def abs(g, self):
    # aten::abs -> ONNX Abs (elementwise).
    return g.op("Abs", self)
def log(g, self):
    # aten::log -> ONNX Log (elementwise natural logarithm).
    return g.op("Log", self)
def pow(g, self, exponent):
    # Promote a scalar exponent to self's type before emitting Pow.
    exponent = _maybe_get_scalar(exponent)
    return g.op("Pow", self, _if_scalar_type_as(g, exponent, self))
def clamp(g, self, min, max):
    """Export aten::clamp.

    ONNX Clip has no notion of None, so a one-sided clamp is dispatched to
    clamp_min / clamp_max instead.
    """
    if min.node().mustBeNone():
        return clamp_max(g, self, max)
    if max.node().mustBeNone():
        return clamp_min(g, self, min)
    lo = _parse_arg(min, 'f')
    hi = _parse_arg(max, 'f')
    return g.op("Clip", self, min_f=lo, max_f=hi)
@parse_args('v', 'f')
def clamp_min(g, self, min):
    # One-sided clamp: only the lower bound is set on ONNX Clip.
    return g.op("Clip", self, min_f=min)
@parse_args('v', 'f')
def clamp_max(g, self, max):
    # One-sided clamp: only the upper bound is set on ONNX Clip.
    return g.op("Clip", self, max_f=max)
# torch.max (same for torch.min) actually has two interfaces smashed together:
# torch.max(x, dim, keepdim) and torch.max(x, y)
def max(g, self, dim_or_y=None, keepdim=None):
    """Export the overloaded aten::max.

    - max(x): full reduction -> ReduceMax
    - max(x, y): elementwise maximum -> Max
    - max(x, dim, keepdim): reduction with indices -> ATen fallback
    """
    if dim_or_y is None and keepdim is None:
        return g.op("ReduceMax", self, keepdims_i=0)
    if keepdim is None:
        return g.op("Max", self, dim_or_y)
    dim = _get_const(dim_or_y, 'i', 'dim')
    keepdim = _get_const(keepdim, 'i', 'keepdim')
    # TODO: export it as ReduceMax
    return g.op("ATen", self, operator_s="max",
                dim_i=dim, keepdim_i=keepdim, outputs=2)
def min(g, self, dim_or_y=None, keepdim=None):
    """Export the overloaded aten::min.

    - min(x): full reduction -> ReduceMin
    - min(x, y): elementwise minimum -> Min
    - min(x, dim, keepdim): reduction with indices -> ATen fallback
    """
    if dim_or_y is None and keepdim is None:
        return g.op("ReduceMin", self, keepdims_i=0)
    if keepdim is None:
        return g.op("Min", self, dim_or_y)
    else:
        dim = _get_const(dim_or_y, 'i', 'dim')
        keepdim = _get_const(keepdim, 'i', 'keepdim')
        # TODO: export it as ReduceMin (the original comment said ReduceMax,
        # a copy-paste from the max symbolic above).
        return g.op("ATen",
                    self,
                    operator_s="min",
                    dim_i=dim,
                    keepdim_i=keepdim,
                    outputs=2)
def exp(g, self):
    # aten::exp -> ONNX Exp (elementwise).
    return g.op("Exp", self)
@parse_args('v', 'f', 'i')
def dropout(g, input, p, train):
    # Dropout is the identity at inference time, so only export the op when training.
    if not train:  # in eval mode, dropout is non-op
        return input
    # ONNX Dropout has two outputs (result, mask); only the result is used.
    r, _ = g.op("Dropout", input, ratio_f=p, outputs=2)
    return r
def _unsupported_dropout(name):
    """Build a symbolic for a dropout variant exportable only in eval mode."""
    @parse_args('v', 'f', 'i')
    def feature_dropout(g, input, p, train):
        # NB: In inference mode, FeatureDropout is exported as an identity op.
        from torch.onnx.symbolic import _unimplemented
        if train:
            return _unimplemented(name, "training mode")
        return input
    return feature_dropout
# These dropout variants all share the eval-only identity behavior above.
feature_dropout = _unsupported_dropout("feature_dropout")
alpha_dropout = _unsupported_dropout("alpha_dropout")
feature_alpha_dropout = _unsupported_dropout("feature_alpha_dropout")
# See Note [Export inplace]
dropout_ = dropout
feature_dropout_ = feature_dropout
alpha_dropout_ = alpha_dropout
feature_alpha_dropout_ = feature_alpha_dropout
@parse_args('v', 't', 'i', 'i')
def norm(g, self, p, dim, keepdim):
    """Export aten::norm for p = 1 or p = 2 via ReduceL1 / ReduceL2.

    Note: `p` is parsed as a tensor ('t'); the `==` comparisons below rely on
    tensor-to-scalar equality.
    """
    if p == 1:
        f = _reduce_op_symbolic("ReduceL1")
    elif p == 2:
        f = _reduce_op_symbolic("ReduceL2")
    else:
        raise RuntimeError("ONNX export only p-norms with p of 1 or 2")
    return f(g, self, dim=dim, keepdim=keepdim)
@parse_args('v', 'v', 'v', 'i')
def conv_tbc(g, input, weight, bias, pad):
    # Time-batch-channel convolution has no ONNX equivalent: ATen fallback.
    return g.op("ATen", input, weight, bias, operator_s="conv_tbc", pad_i=pad)
@parse_args('v', 'i', 'i')
def _unique(g, input, sorted, return_inverse):
    # ATen fallback; two outputs: (unique values, inverse indices).
    return g.op("ATen", input, operator_s="_unique", sorted_i=sorted,
                return_inverse_i=return_inverse, outputs=2)
@parse_args('v', 'i', 'i', 'i')
def _unique2(g, input, sorted, return_inverse, return_counts):
    # ATen fallback; three outputs: (unique values, inverse indices, counts).
    return g.op("ATen", input, operator_s="_unique2", sorted_i=sorted,
                return_inverse_i=return_inverse, return_counts_i=return_counts,
                outputs=3)
# Metaprogram symbolics for each ATen native specialized cast operator.
# For e.g. we specify a function named `_cast_uint8_t` that instantiates an
# ONNX cast node with `to` attribute 'UINT8'
#
# TODO: remove these once we support Type's in the JIT IR and we can once again
# use the unified toType operator
cast_pytorch_to_onnx = {
    'Byte': torch.onnx.TensorProtoDataType.UINT8,
    'Char': torch.onnx.TensorProtoDataType.INT8,
    'Double': torch.onnx.TensorProtoDataType.DOUBLE,
    'Float': torch.onnx.TensorProtoDataType.FLOAT,
    'Half': torch.onnx.TensorProtoDataType.FLOAT16,
    'Int': torch.onnx.TensorProtoDataType.INT32,
    'Long': torch.onnx.TensorProtoDataType.INT64,
    'Short': torch.onnx.TensorProtoDataType.INT16,
}
# Maps JIT scalar type names to the PyTorch dtype names used as keys above.
scalar_name_to_pytorch = {
    'uint8_t': 'Byte',
    'int8_t': 'Char',
    'double': 'Double',
    'float': 'Float',
    'half': 'Half',
    'int': 'Int',
    'int64_t': 'Long',
    'int16_t': 'Short',
}
# This indicates each scalar type's corresponding
# torch type. Related source:
# https://github.com/pytorch/pytorch/blob/da7468853ae322252270bbb58032668bd21b7457/c10/core/ScalarType.h
scalar_type_to_pytorch_type = [
    torch.uint8,    # 0
    torch.int8,     # 1
    torch.short,    # 2
    torch.int,      # 3
    torch.int64,    # 4
    torch.half,     # 5
    torch.float,    # 6
    torch.double,   # 7
]
# Shared body for the generated `_cast_*` symbolics: a plain ONNX Cast.
def _cast_func_template(to_i, g, input, non_blocking):
    return g.op("Cast", input, to_i=to_i)
# Generate one `_cast_<Type>` symbolic per supported dtype.
for k, v in cast_pytorch_to_onnx.items():
    name = '_cast_{}'.format(k)
    globals()[name] = parse_args('v', 'i')(partial(_cast_func_template, v))
# Index order must match scalar_type_to_pytorch_type above.
scalar_type_to_onnx = [
    cast_pytorch_to_onnx["Byte"],
    cast_pytorch_to_onnx["Char"],
    cast_pytorch_to_onnx["Short"],
    cast_pytorch_to_onnx["Int"],
    cast_pytorch_to_onnx["Long"],
    cast_pytorch_to_onnx["Half"],
    cast_pytorch_to_onnx["Float"],
    cast_pytorch_to_onnx["Double"],
]
@parse_args('v', 'i', 'v', 'v', 'b')
def zeros(g, sizes, dtype, layout, device, pin_memory=False):
    """Export aten::zeros as ConstantOfShape with a zero fill value."""
    if pin_memory:
        raise RuntimeError("onnx pin_memory support is not implemented")
    # NOTE: no way to set device and layout in ONNX, so we ignore it
    return g.op("ConstantOfShape", sizes,
                value_t=torch.tensor([0], dtype=scalar_type_to_pytorch_type[dtype]))
@parse_args('v', 'i', 'v', 'v', 'b')
def zeros_like(g, input, dtype, layout, device, pin_memory=False):
    """Export aten::zeros_like: zero fill over the runtime shape of `input`."""
    if pin_memory:
        raise RuntimeError("onnx pin_memory support is not implemented")
    shape = g.op("Shape", input)
    return g.op("ConstantOfShape", shape,
                value_t=torch.tensor([0], dtype=scalar_type_to_pytorch_type[dtype]))
@parse_args('v', 'i', 'v', 'v', 'b')
def ones(g, sizes, dtype, layout, device, pin_memory=False):
    """Export aten::ones as ConstantOfShape with a one fill value."""
    if pin_memory:
        raise RuntimeError("onnx pin_memory support is not implemented")
    # NOTE: layout and device cannot be expressed in ONNX and are ignored.
    return g.op("ConstantOfShape", sizes,
                value_t=torch.tensor([1], dtype=scalar_type_to_pytorch_type[dtype]))
@parse_args('v', 'i', 'v', 'v', 'b')
def ones_like(g, input, dtype, layout, device, pin_memory=False):
    """Export aten::ones_like: one fill over the runtime shape of `input`."""
    if pin_memory:
        raise RuntimeError("onnx pin_memory support is not implemented")
    shape = g.op("Shape", input)
    return g.op("ConstantOfShape", shape,
                value_t=torch.tensor([1], dtype=scalar_type_to_pytorch_type[dtype]))
def full(g, sizes, value, dtype, layout, device, pin_memory=False):
    """Export aten::full.

    A constant fill value becomes a single ConstantOfShape node; a dynamic
    (graph-valued) fill value is lowered to zeros-of-shape plus an Add.
    """
    if pin_memory and _parse_arg(pin_memory, 'b'):
        raise RuntimeError("onnx pin_memory support is not implemented")
    const_value = _maybe_get_const(value, 't')
    if _is_value(const_value):
        # Dynamic fill value: build zeros of the requested shape, then add the
        # runtime value to every element.
        # BUG FIX: `zeros` and `add` are symbolic functions whose first
        # parameter is the export graph `g`; it was previously omitted, which
        # raised at export time for dynamic fill values.
        tmp = zeros(g, sizes, dtype, layout, device)
        return add(g, tmp, value, g.op("Constant", value_t=torch.tensor(1)))
    else:
        dtype = _get_const(dtype, 'i', 'dtype')
        return g.op("ConstantOfShape", sizes,
                    value_t=torch.tensor([const_value], dtype=scalar_type_to_pytorch_type[dtype]))
@parse_args('v', 'f', 'i', 'v', 'v', 'b')
def full_like(g, input, fill_value, dtype, layout, device, pin_memory=False):
    """Export aten::full_like: constant fill over the runtime shape of `input`."""
    if pin_memory:
        raise RuntimeError("onnx pin_memory support is not implemented")
    shape = g.op("Shape", input)
    return g.op("ConstantOfShape", shape,
                value_t=torch.tensor([fill_value], dtype=scalar_type_to_pytorch_type[dtype]))
@parse_args('v', 'v', 'v', 'v', 'i')
def slice(g, self, dim, start, end, step):
    """Export aten::slice (step must be 1).

    Constant start/end/dim use the static Slice op; otherwise the bounds are
    unsqueezed to 1-element tensors and a DynamicSlice node is emitted.
    """
    if step != 1:
        _unimplemented("slice", "step!=1 is currently not supported")
    if start.node().kind() != 'onnx::Constant' or \
            end.node().kind() != 'onnx::Constant' or dim.node().kind() != 'onnx::Constant':
        # At least one bound is only known at runtime.
        start_unsqueezed = g.op("Unsqueeze", start, axes_i=[0])
        end_unsqueezed = g.op("Unsqueeze", end, axes_i=[0])
        dim_unsqueezed = g.op("Unsqueeze", dim, axes_i=[0])
        return g.op("DynamicSlice", self, start_unsqueezed, end_unsqueezed, dim_unsqueezed)
    else:
        start = _parse_arg(start, 'i')
        end = _parse_arg(end, 'i')
        dim = _parse_arg(dim, 'i')
        return _slice_op(g, self, axes=[dim], starts=[start], ends=[end])
@parse_args('v', 'f', 'f')
def hardtanh(g, self, min_val, max_val):
    # aten::hardtanh is exactly a two-sided clamp -> ONNX Clip.
    return g.op("Clip", self, min_f=min_val, max_f=max_val)
def alias(g, self):
    # Aliasing has no graph-level effect; export as identity.
    return self
@parse_args('v', 'i')
def unsqueeze(g, self, dim):
    """Export aten::unsqueeze.

    ONNX Unsqueeze only accepts non-negative axes, so a negative dim is
    converted using the rank known at export time (with a warning, since the
    conversion bakes in that rank).
    """
    # Handle negative dim
    if dim < 0:
        if self.type().kind() == "CompleteTensorType" or self.type().kind() == "DimensionedTensorType":
            warnings.warn("ONNX export unsqueeze with negative axis " + str(dim) +
                          " might cause the onnx model to be incorrect. " +
                          "Negative axis is not supported in ONNX. " +
                          "Axis is converted to " + str(dim + self.type().dim() + 1) +
                          " based on input shape at export time. " +
                          "Passing an tensor of different rank in execution will be incorrect.")
            dim = dim + self.type().dim() + 1
        else:
            return _unimplemented('unsqueeze', 'negative axis with unknown input rank')
    return g.op("Unsqueeze", self, axes_i=[dim])
@parse_args('v', 'i', 'i', 'i', 'i')
def topk(g, self, k, dim, largest, sorted, out=None):
    """Export aten::topk -> ONNX TopK (two outputs: values, indices).

    Only descending order is representable; `out=` and ascending mode are
    unsupported.
    """
    if out is not None:
        _unimplemented("TopK", "Out parameter is not supported for topk")
    if not largest:
        _unimplemented("TopK", "Ascending TopK is not supported")
    return g.op("TopK", self, k_i=k, axis_i=dim, outputs=2)
def to(g, self, *args):
# ONNX doesn't have a concept of a device, so we ignore device casts
if len(args) == 3:
if args[0].type().isSubtypeOf(ListType.ofInts()):
# aten::to(Tensor, Device, bool, bool)
return self
else:
# aten::to(Tensor, ScalarType, bool, bool)
dtype = _get_const(args[0], 'i', 'dtype')
return g.op("Cast", self, to_i=scalar_type_to_onnx[dtype])
elif len(args) == 4:
# aten::to(Tensor, Device, ScalarType, bool, bool)
dtype = _get_const(args[1], 'i', 'dtype')
return g.op("Cast", self, to_i=scalar_type_to_onnx[dtype])
elif len(args) == 5:
# aten::to(Tensor, ScalarType, Layout, Device, bool, bool) -> Tensor
dtype = _get_const(args[0], 'i', 'dtype')
# Layout and device are ignored
return g.op("Cast", self, to_i=scalar_type_to_onnx[dtype])
elif len(args) == 6:
| |
identifier for the identity.
- name: --microsoft-graph-identity-application
short-summary: "identity"
long-summary: |
Usage: --microsoft-graph-identity-application display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --microsoft-graph-identity-device
short-summary: "identity"
long-summary: |
Usage: --microsoft-graph-identity-device display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --microsoft-graph-identity-user
short-summary: "identity"
long-summary: |
Usage: --microsoft-graph-identity-user display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
"""
helps['people user-profile update-interest'] = """
type: command
short-summary: "Update the navigation property interests in users."
parameters:
- name: --inference
short-summary: "inferenceData"
long-summary: |
Usage: --inference confidence-score=XX user-has-verified-accuracy=XX
- name: --source
short-summary: "personDataSources"
long-summary: |
Usage: --source type=XX
- name: --application
short-summary: "identity"
long-summary: |
Usage: --application display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --device
short-summary: "identity"
long-summary: |
Usage: --device display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --user
short-summary: "identity"
long-summary: |
Usage: --user display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --microsoft-graph-identity-application
short-summary: "identity"
long-summary: |
Usage: --microsoft-graph-identity-application display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --microsoft-graph-identity-device
short-summary: "identity"
long-summary: |
Usage: --microsoft-graph-identity-device display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --microsoft-graph-identity-user
short-summary: "identity"
long-summary: |
Usage: --microsoft-graph-identity-user display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
"""
helps['people user-profile update-language'] = """
type: command
short-summary: "Update the navigation property languages in users."
parameters:
- name: --inference
short-summary: "inferenceData"
long-summary: |
Usage: --inference confidence-score=XX user-has-verified-accuracy=XX
- name: --source
short-summary: "personDataSources"
long-summary: |
Usage: --source type=XX
- name: --application
short-summary: "identity"
long-summary: |
Usage: --application display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --device
short-summary: "identity"
long-summary: |
Usage: --device display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --user
short-summary: "identity"
long-summary: |
Usage: --user display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --microsoft-graph-identity-application
short-summary: "identity"
long-summary: |
Usage: --microsoft-graph-identity-application display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --microsoft-graph-identity-device
short-summary: "identity"
long-summary: |
Usage: --microsoft-graph-identity-device display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --microsoft-graph-identity-user
short-summary: "identity"
long-summary: |
Usage: --microsoft-graph-identity-user display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
"""
helps['people user-profile update-name'] = """
type: command
short-summary: "Update the navigation property names in users."
parameters:
- name: --inference
short-summary: "inferenceData"
long-summary: |
Usage: --inference confidence-score=XX user-has-verified-accuracy=XX
- name: --source
short-summary: "personDataSources"
long-summary: |
Usage: --source type=XX
- name: --application
short-summary: "identity"
long-summary: |
Usage: --application display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --device
short-summary: "identity"
long-summary: |
Usage: --device display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, | |
<gh_stars>0
#### Created as part of the Metro21 Fire Risk Analysis project
#### In partnership with the City of Pittsburgh's Department of Innovation and Performance, and the Pittsburgh Bureau of Fire
# Authors:
# <NAME>
# <NAME>
# <NAME>
# <NAME>
# <NAME>
# <NAME>
# <NAME>
#importing relevant libraries
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import pandas as pd
import numpy as np
import sqlalchemy as sa
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
import pandas as pd
#from sklearn import datasets, linear_model, cross_validation, grid_search
from sklearn import datasets, linear_model
from sklearn.model_selection import cross_validate
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
#from sklearn.grid_search import GridSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn_pandas import DataFrameMapper
from sklearn.preprocessing import OneHotEncoder
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from xgboost import XGBClassifier
import xgboost as xgb
from sklearn.ensemble import ExtraTreesClassifier
import datetime
from dateutil.relativedelta import relativedelta
import os
from functools import reduce
# Turn off pandas chained assignment warning
pd.options.mode.chained_assignment = None # default='warn'
# =============================#1: CLEAN PLI & PITT DATA========================
# create directory paths for opening files
# `root` is the project base directory; the empty string means "relative to CWD".
#root = os.path.dirname(os.path.realpath(__file__))
#root = "/home/linadmin/FirePred/"
root = ""
dataset_path = "{0}datasets/".format(root)  # input CSVs
log_path = "{0}log/".format(root)           # run logs
png_path = "{0}images/".format(root)        # plot output
# Reading plidata (PLI code-violation inspections). Street fields are forced
# to str so alphanumeric house numbers survive the parse.
plidata = pd.read_csv(os.path.join(dataset_path, "pli.csv"),encoding = 'utf-8',dtype={'STREET_NUM':'str','STREET_NAME':'str'}, low_memory=False)
#Reading city of Pittsburgh dataset
#pittdata = pd.read_csv(os.path.join(dataset_path, "pittdata.csv"),dtype={'PROPERTYADDRESS':'str','PROPERTYHOUSENUM':'str','CLASSDESC':'str'}, low_memory=False)
# cp1252: the assessor export contains Windows-1252 encoded characters.
pittdata = pd.read_csv(os.path.join(dataset_path, "pittdata.csv"),encoding = 'cp1252',dtype={'PROPERTYADDRESS':'str','PROPERTYHOUSENUM':'str','CLASSDESC':'str'}, low_memory=False)
#removing all properties outside Pittsburgh, Wilkinsburg, and Ingram
pittdata = pittdata[(pittdata.PROPERTYCITY == 'PITTSBURGH')]
pittdata = pittdata[pittdata['MUNIDESC'].str.contains("Ward|Ingram|Wilkinsburg")]
#removing extra whitespaces
plidata['STREET_NAME'] = plidata['STREET_NAME'].str.strip()
plidata['STREET_NUM'] = plidata['STREET_NUM'].str.strip()
#removing residential data; also drop rows with a missing/placeholder address
pittdata = pittdata[pittdata.CLASSDESC!='RESIDENTIAL']
pittdata = pittdata[pittdata.PROPERTYHOUSENUM!= '0']
pittdata = pittdata[pittdata.PROPERTYADDRESS!= '']
#dropping columns with less than 15% data
# (thresh=4000 keeps a column only if it has at least 4000 non-NA values)
pittdata = pittdata.dropna(thresh=4000, axis=1)
# First column of the export is the parcel id; give it a stable name.
pittdata = pittdata.rename(columns={pittdata.columns[0]:'PARID'})
pittdata = pittdata.drop_duplicates()
#merging pli with city of pitt
#plipca = pd.merge(pittdata, plidata[['PARCEL','INSPECTION_DATE','INSPECTION_RESULT','VIOLATION']], how = 'left', left_on =['PARID'], right_on = ['PARCEL'] )
#don't drop x and y
plipca = pd.merge(pittdata, plidata[['PARCEL','INSPECTION_DATE','INSPECTION_RESULT','VIOLATION','X',"Y","STREET_NAME"]],
                  how = 'left', left_on =['PARID'], right_on = ['PARCEL'] )
plipca = plipca.drop_duplicates()
#dropping nas: keep only rows that actually have an inspection/violation record
newpli = plipca.dropna(subset =['PARCEL','INSPECTION_DATE','INSPECTION_RESULT','VIOLATION'] )
newpli = newpli.reset_index()
# Drop id/assessment columns not needed for the violation-level table.
newpli = newpli.drop(['index','PARID','index',
                      u'PROPERTYCITY', u'PROPERTYSTATE', u'PROPERTYUNIT', u'PROPERTYZIP',
                      u'MUNICODE', u'MUNIDESC', u'SCHOOLCODE', u'SCHOOLDESC', u'LEGAL1',
                      u'LEGAL2', u'LEGAL3', u'NEIGHCODE',
                      u'TAXCODE', u'TAXDESC',
                      u'OWNERCODE', u'OWNERDESC', u'CLASS',
                      u'CLASSDESC', u'USECODE', u'USEDESC', u'LOTAREA', u'SALEDATE',
                      u'SALEPRICE', u'SALECODE', u'SALEDESC', u'DEEDBOOK', u'DEEDPAGE',
                      u'CHANGENOTICEADDRESS1', u'CHANGENOTICEADDRESS2',
                      u'CHANGENOTICEADDRESS3', u'CHANGENOTICEADDRESS4', u'COUNTYBUILDING',
                      u'COUNTYLAND', u'COUNTYTOTAL', u'COUNTYEXEMPTBLDG', u'LOCALBUILDING',
                      u'LOCALLAND', u'LOCALTOTAL', u'FAIRMARKETBUILDING', u'FAIRMARKETLAND',
                      u'FAIRMARKETTOTAL', u'PARCEL'], axis=1)
newpli = newpli.drop_duplicates()
#converting to datetime and extracting the inspection year
newpli.INSPECTION_DATE = pd.to_datetime(newpli.INSPECTION_DATE)
newpli['violation_year'] = newpli['INSPECTION_DATE'].map(lambda x: x.year)
plipca.SALEPRICE = plipca.SALEPRICE.fillna(0)
#Groups by address and replaces LOTAREA','SALEPRICE','FAIRMARKETLAND','FAIRMARKETBUILDING' by mean
numerical = plipca.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] , as_index=False)[['LOTAREA','SALEPRICE',
                                                                                        'FAIRMARKETLAND',
                                                                                        'FAIRMARKETBUILDING']].mean()
# Following block groups by address and gets the category with the maximum
# count for each given categorical column.
def _mode_by_address(df, col):
    """Return one row per (house num, address) holding the most frequent
    value of `col`; ties are broken by keeping the last value_counts entry
    (same keep='last' rule the original per-column stanzas used)."""
    temp = pd.DataFrame({'count' : df.groupby( [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] )[col].value_counts()}).reset_index()
    idx = temp.groupby([ "PROPERTYHOUSENUM", "PROPERTYADDRESS"])['count'].transform(max) == temp['count']
    mode = temp[idx].drop_duplicates(subset=[ "PROPERTYHOUSENUM", "PROPERTYADDRESS"], keep = 'last')
    del mode['count']
    return mode

# One mode table per categorical column. The original script computed the
# CLASSDESC table twice back-to-back; computing it once yields the identical
# result1. result5 (INSPECTION_RESULT) was computed but never merged; it is
# kept for namespace parity with the original.
result1 = _mode_by_address(plipca, 'CLASSDESC')
result2 = _mode_by_address(plipca, 'SCHOOLDESC')
result3 = _mode_by_address(plipca, 'OWNERDESC')
result4 = _mode_by_address(plipca, 'MUNIDESC')
result5 = _mode_by_address(plipca, 'INSPECTION_RESULT')  # not merged below
result6 = _mode_by_address(plipca, 'NEIGHCODE')
result7 = _mode_by_address(plipca, 'TAXDESC')
result8 = _mode_by_address(plipca, 'USEDESC')

# Merge all per-address mode tables with the per-address numerical means.
dfs = [result1,result2,result3,result4,result6,result7,result8,numerical]
pcafinal = reduce(lambda left,right: pd.merge(left,right,on= [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] ), dfs)
plipca1 = pd.merge(pcafinal, newpli, how = 'left', left_on =[ "PROPERTYHOUSENUM", "PROPERTYADDRESS"], right_on = [ "PROPERTYHOUSENUM", "PROPERTYADDRESS"] )
plipca1.to_csv("datasets/citydata_totableauprep.csv")
# ============#1 DONE, ^this is the cleaned dataframe of pli + pitt ============
# Free the large intermediates; only the merged frames are needed below.
del newpli, numerical, plipca, pittdata
del result1, result2, result3, result4, result5, result6, result7, result8, dfs
del _mode_by_address
# =====================#2 CLEAN FIRE INCIDENT DATA====================
#loading fire incidents csvs , so fire date is missing because it is private
fire_pre14 = pd.read_csv(os.path.join(dataset_path, "Fire_Incidents_Pre14.csv"),encoding = 'latin-1',dtype={'street':'str','number':'str','incident_type':'str'}, low_memory=False)
# fire_new = pd.read_csv(os.path.join(dataset_path, "Fire_Incidents_New.csv"),encoding = 'utf-8',dtype={'street':'str','number':'str'}, low_memory=False)
# NOTE: fire_new is bound to the SAME DataFrame object as fire_pre14 (no copy);
# the post-2014 file load above is disabled.
fire_new = fire_pre14
# Cleaning columns of fire_new
# Situation found coded is missing so we have to use Incident Type
# Normally use the Situation found or full code
#fire_pre14['incident_type'] = fire_pre14['incident_type'].str.replace(' -',' -')
#fire_pre14['st_type'] = fire_pre14['st_type'].str.strip()
#fire_pre14['street'] = fire_pre14['street'].str.strip()
#fire_pre14['number'] = fire_pre14['number'].str.strip()
#fire_pre14['st_type'] = fire_pre14['st_type'].str.replace('AV','AVE')
#fire_pre14['street'] = fire_pre14['street'].str.strip() +' ' +fire_pre14['st_type'].str.strip()
#= ("string")
# drop irrelevant columns
#pre14_drop = ['PRIMARY_UNIT','MAP_PAGE','alm_dttm','arv_dttm','XCOORD','YCOORD',
#              'inci_id','inci_type','alarms','st_prefix','st_suffix','st_type','CALL_NO']
#for col in pre14_drop:
#    del fire_pre14[col]
#pre14_drop = [0, 4]
#fire_pre14.drop(fire_pre14.columns[pre14_drop], axis=1, inplace=True)
#post14_drop = ['alm_dttm','arv_dttm','XCOORD','YCOORD','alarms','inci_type','CALL_NO']
#post14_drop = ["alarms","call_no"]
#post14_drop = ['latitude','longtidue','alarms','inci_type','CALL_NO']
#for col in post14_drop: #********
#    del fire_new[col]
#joining both the fire incidents file together
# fire_new = fire_new.append(fire_pre14, ignore_index=True)
# removing events that are not fire related
fire_new['type_description'] = fire_new['type_description'].str.strip()
remove_descript = ['System malfunction, Other',
                   # 'Smoke detector activation, no fire - unintentional']
                   # 'Alarm system activation, no fire - unintentional']
                   'Detector activation, no fire - unintentional', 'Smoke detector activation due to malfunction',
                   'Dispatched & cancelled en route', 'Dispatched & cancelled on arrival',
                   'EMS call, excluding vehicle accident with injury', 'Medical assist, assist EMS crew',
                   'Emergency medical service, other', 'Good intent call, Other', 'Rescue, EMS incident, other',
                   'Medical Alarm Activation (No Medical Service Req)', 'Motor Vehicle Accident with no injuries',
                   'No Incident found on arrival at dispatch address', 'Unintentional transmission of alarm, Other',
                   'Motor vehicle accident with injuries', 'Vehicle accident, general cleanup', 'Power line down',
                   'Person in distress, Other', 'Cable/Telco Wires Down', 'Service Call, other',
                   'Vehicle Accident canceled en route', 'Lock-out', 'False alarm or false call, Other',
                   'Assist police or other governmental agency', 'Special type of incident, Other',
                   'Alarm system sounded due to malfunction', 'Motor vehicle/pedestrian accident (MV Ped)',
                   'Assist invalid ', 'Malicious, mischievous false call, Other', 'Accident, potential accident, Other',
                   'Assist invalid', 'EMS call, party transported by non-fire agency', 'Rescue or EMS standby',
                   'Public service assistance, Other', 'Police matter', 'Lock-in (if lock out , use 511 )',
                   'Sprinkler activation, no fire - unintentional', 'Wrong location',
                   'Local alarm system, malicious false alarm', 'Authorized controlled burning',
                   'Water problem, Other',
                   # 'Smoke or odor removal']
                   'Passenger vehicle fire', 'CO detector activation due to malfunction',
                   'Authorized controlled burning', 'Steam, vapor, fog or dust thought to be smoke', 'Overheated motor',
                   'Local alarm system, malicious false alarm', 'Central station, malicious false alarm',
                   'Public service',
                   # 'Building or structure weakened or collapsed'
                   'Heat detector activation due to malfunction', 'Citizen complaint',
                   'Municipal alarm system, malicious false alarm', 'Sprinkler activation due to malfunction',
                   'Severe weather or natural disaster, Other', 'Water evacuation', 'Breakdown of light ballast',
                   'Extrication of victim(s) from vehicle', 'Flood assessment', 'Telephone, malicious false alarm',
                   'Cover assignment, standby, moveup', 'Road freight or transport vehicle fire']
# Currently just removes 'Passenger vehicle fire', 'Road freight or transport vehicle fire',
# Drop every row whose description is in the removal list in one vectorized
# pass instead of one full-frame filter per list entry. Rows with a NaN
# description are kept, matching the per-item `!=` filters this replaces.
fire_new = fire_new[~fire_new['type_description'].isin(remove_descript)]
#fire_new = fire_new[fire_new['incident_type'].str.strip() != '540 - Animal problem, Other']
#fire_new = fire_new[fire_new['incident_type'].str.strip() != '5532 - Public Education (Station Visit)']
#fire_new = fire_new[fire_new['incident_type'].str.strip() != '353 - Removal of victim(s) from stalled elevator']
# Animal problem (540), public education (5532) and stalled-elevator rescue
# (353) incidents are likewise not fires; one strip + isin replaces the three
# identical per-code filters.
fire_new = fire_new[~fire_new['incident_type'].str.strip().isin(['540', '5532', '353'])]
#Fix missing codes numbers
#Note sure how to write this as a map or a pipe function like tidyverse
def casewhen(row):
    """Map a known fire type_description to its NFIRS incident-type code.

    Returns the code string for the eight recognized fire categories;
    for any other description the row's existing incident_type is
    returned unchanged.
    """
    code_by_description = {
        "Brush or brush-and-grass mixture fire": "142",
        "Building fire": "111",
        "Cooking fire, confined to container": "113",
        "Camper or recreational vehicle (RV) fire": "137",
        "Outside rubbish fire, Other": "150",
        "Outside rubbish, trash or waste fire": "151",
        "Outside storage fire": "131",
        "Trash or rubbish fire, contained": "118",
    }
    description = row['type_description']
    if description in code_by_description:
        return code_by_description[description]
    return row['incident_type']
# Fill missing incident_type codes from the type_description via casewhen;
# rows that already have a code keep it. (`replace` shadows nothing here but
# is a confusing name for a Series of fallback codes.)
replace = fire_new.apply(casewhen,axis=1)
fire_new['incident_type']=np.where(fire_new['incident_type'].isnull(),replace,fire_new['incident_type'])
#correcting problems with the street column: strip city suffixes, commas,
#unit numbers ("#..."), and surrounding whitespace from the address
fire_new['address'] = fire_new['address'].replace(to_replace=', PGH', value='', regex=True)
fire_new['address'] = fire_new['address'].replace(to_replace=', P', value='', regex=True)
fire_new['address'] = fire_new['address'].replace(to_replace=',', value='', regex=True)
fire_new['address'] = fire_new['address'].replace(to_replace='#.*', value='', regex=True)
fire_new['address'] = fire_new['address'].str.strip()
# fire_new['number'] = fire_new['number'].str.strip()
#converting to date time and extracting year
# NOTE(review): unpacking `.str.split('T', 1).str` into two variables relies
# on older pandas behavior — verify against the pinned pandas version.
fireDate, fireTime = fire_new['alarm_time'].astype(str).str.split('T', 1).str
fire_new['alarm_date']= fireDate
fire_new['alarm_date'] = pd.to_datetime(fire_new['alarm_date'])
fire_new['fire_year'] = fire_new['alarm_date'].map(lambda x: x.year)
for | |
QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_row_4_4.setGeometry(QtCore.QRect(210, 60, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_sell_volume_row_4_4.setFont(font)
self.gb_pb_sell_volume_row_4_4.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(1, 208, 25); } \n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_sell_volume_row_4_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_row_4_4.setFlat(True)
self.gb_pb_sell_volume_row_4_4.setObjectName(_fromUtf8("gb_pb_sell_volume_row_4_4"))
self.gb_pb_sell_volume_row_6 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_row_6.setGeometry(QtCore.QRect(210, 80, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_sell_volume_row_6.setFont(font)
self.gb_pb_sell_volume_row_6.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(1, 208, 25); } \n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_sell_volume_row_6.setText(_fromUtf8(""))
self.gb_pb_sell_volume_row_6.setFlat(True)
self.gb_pb_sell_volume_row_6.setObjectName(_fromUtf8("gb_pb_sell_volume_row_6"))
self.gb_pb_sell_volume_row_2_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_row_2_4.setGeometry(QtCore.QRect(210, 100, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_sell_volume_row_2_4.setFont(font)
self.gb_pb_sell_volume_row_2_4.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(1, 208, 25); } \n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_sell_volume_row_2_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_row_2_4.setFlat(True)
self.gb_pb_sell_volume_row_2_4.setObjectName(_fromUtf8("gb_pb_sell_volume_row_2_4"))
self.gb_pb_sell_volume_row_1_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_row_1_4.setGeometry(QtCore.QRect(210, 120, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_sell_volume_row_1_4.setFont(font)
self.gb_pb_sell_volume_row_1_4.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(1, 208, 25); } \n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_sell_volume_row_1_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_row_1_4.setFlat(True)
self.gb_pb_sell_volume_row_1_4.setObjectName(_fromUtf8("gb_pb_sell_volume_row_1_4"))
self.gb_pb_sell_volume_5_1_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_5_1_4.setGeometry(QtCore.QRect(270, 40, 41, 23))
self.gb_pb_sell_volume_5_1_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_5_1_4.setObjectName(_fromUtf8("gb_pb_sell_volume_5_1_4"))
self.gb_pb_sell_volume_5_2_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_5_2_4.setGeometry(QtCore.QRect(310, 40, 41, 23))
self.gb_pb_sell_volume_5_2_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_5_2_4.setObjectName(_fromUtf8("gb_pb_sell_volume_5_2_4"))
self.gb_pb_sell_volume_5_3_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_5_3_4.setGeometry(QtCore.QRect(350, 40, 41, 23))
self.gb_pb_sell_volume_5_3_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_5_3_4.setObjectName(_fromUtf8("gb_pb_sell_volume_5_3_4"))
self.gb_pb_sell_volume_4_3_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_4_3_4.setGeometry(QtCore.QRect(350, 60, 41, 23))
self.gb_pb_sell_volume_4_3_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_4_3_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_4_3_4.setObjectName(_fromUtf8("gb_pb_sell_volume_4_3_4"))
self.gb_pb_sell_volume_4_2_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_4_2_4.setGeometry(QtCore.QRect(310, 60, 41, 23))
self.gb_pb_sell_volume_4_2_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_4_2_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_4_2_4.setObjectName(_fromUtf8("gb_pb_sell_volume_4_2_4"))
self.gb_pb_sell_volume_4_1_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_4_1_4.setGeometry(QtCore.QRect(270, 60, 41, 23))
self.gb_pb_sell_volume_4_1_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_4_1_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_4_1_4.setObjectName(_fromUtf8("gb_pb_sell_volume_4_1_4"))
self.gb_pb_sell_volume_3_1_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_3_1_4.setGeometry(QtCore.QRect(270, 80, 41, 23))
self.gb_pb_sell_volume_3_1_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_3_1_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_3_1_4.setObjectName(_fromUtf8("gb_pb_sell_volume_3_1_4"))
self.gb_pb_sell_volume_3_2_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_3_2_4.setGeometry(QtCore.QRect(310, 80, 41, 23))
self.gb_pb_sell_volume_3_2_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_3_2_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_3_2_4.setObjectName(_fromUtf8("gb_pb_sell_volume_3_2_4"))
self.gb_pb_sell_volume_3_3_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_3_3_4.setGeometry(QtCore.QRect(350, 80, 41, 23))
self.gb_pb_sell_volume_3_3_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_3_3_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_3_3_4.setObjectName(_fromUtf8("gb_pb_sell_volume_3_3_4"))
self.gb_pb_sell_volume_2_3_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_2_3_4.setGeometry(QtCore.QRect(350, 100, 41, 23))
self.gb_pb_sell_volume_2_3_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_2_3_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_2_3_4.setObjectName(_fromUtf8("gb_pb_sell_volume_2_3_4"))
self.gb_pb_sell_volume_2_2_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_2_2_4.setGeometry(QtCore.QRect(310, 100, 41, 23))
self.gb_pb_sell_volume_2_2_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_2_2_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_2_2_4.setObjectName(_fromUtf8("gb_pb_sell_volume_2_2_4"))
self.gb_pb_sell_volume_2_1_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_2_1_4.setGeometry(QtCore.QRect(270, 100, 41, 23))
self.gb_pb_sell_volume_2_1_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_2_1_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_2_1_4.setObjectName(_fromUtf8("gb_pb_sell_volume_2_1_4"))
self.gb_pb_sell_volume_1_1_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_1_1_4.setGeometry(QtCore.QRect(270, 120, 41, 23))
self.gb_pb_sell_volume_1_1_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_1_1_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_1_1_4.setObjectName(_fromUtf8("gb_pb_sell_volume_1_1_4"))
self.gb_pb_sell_volume_1_3_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_1_3_4.setGeometry(QtCore.QRect(350, 120, 41, 23))
self.gb_pb_sell_volume_1_3_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_1_3_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_1_3_4.setObjectName(_fromUtf8("gb_pb_sell_volume_1_3_4"))
self.gb_pb_sell_volume_1_2_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_sell_volume_1_2_4.setGeometry(QtCore.QRect(310, 120, 41, 23))
self.gb_pb_sell_volume_1_2_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF; background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(1, 150, 25); } QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_sell_volume_1_2_4.setText(_fromUtf8(""))
self.gb_pb_sell_volume_1_2_4.setObjectName(_fromUtf8("gb_pb_sell_volume_1_2_4"))
self.label_4 = QtGui.QLabel(self.gb_ETFOrder_4)
self.label_4.setGeometry(QtCore.QRect(10, 140, 381, 20))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gb_pb_buy_volume_2_2_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_2_2_4.setGeometry(QtCore.QRect(310, 180, 41, 23))
self.gb_pb_buy_volume_2_2_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_2_2_4.setText(_fromUtf8(""))
self.gb_pb_buy_volume_2_2_4.setObjectName(_fromUtf8("gb_pb_buy_volume_2_2_4"))
self.gb_pb_buy_volume_row_minus_2_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_row_minus_2_4.setGeometry(QtCore.QRect(180, 180, 31, 23))
self.gb_pb_buy_volume_row_minus_2_4.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_2_4"))
self.gb_pb_b5_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_b5_4.setGeometry(QtCore.QRect(10, 240, 51, 20))
self.gb_pb_b5_4.setFlat(True)
self.gb_pb_b5_4.setObjectName(_fromUtf8("gb_pb_b5_4"))
self.gb_pb_b5_price_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_b5_price_4.setGeometry(QtCore.QRect(60, 240, 51, 20))
self.gb_pb_b5_price_4.setStyleSheet(_fromUtf8(""))
self.gb_pb_b5_price_4.setFlat(True)
self.gb_pb_b5_price_4.setObjectName(_fromUtf8("gb_pb_b5_price_4"))
self.gb_pb_b4_1_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_b4_1_4.setGeometry(QtCore.QRect(10, 220, 51, 20))
self.gb_pb_b4_1_4.setFlat(True)
self.gb_pb_b4_1_4.setObjectName(_fromUtf8("gb_pb_b4_1_4"))
self.gb_pb_buy_volume_row_minus_1_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_row_minus_1_4.setGeometry(QtCore.QRect(180, 160, 31, 23))
self.gb_pb_buy_volume_row_minus_1_4.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_1_4"))
self.gb_pb_b3_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_b3_4.setGeometry(QtCore.QRect(10, 200, 51, 20))
self.gb_pb_b3_4.setFlat(True)
self.gb_pb_b3_4.setObjectName(_fromUtf8("gb_pb_b3_4"))
self.gb_pb_b2_volume_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_b2_volume_4.setGeometry(QtCore.QRect(110, 180, 71, 20))
self.gb_pb_b2_volume_4.setFlat(True)
self.gb_pb_b2_volume_4.setObjectName(_fromUtf8("gb_pb_b2_volume_4"))
self.gb_pb_buy_volume_1_3_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_1_3_4.setGeometry(QtCore.QRect(350, 160, 41, 23))
self.gb_pb_buy_volume_1_3_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_1_3_4.setText(_fromUtf8(""))
self.gb_pb_buy_volume_1_3_4.setObjectName(_fromUtf8("gb_pb_buy_volume_1_3_4"))
self.gb_pb_buy_volume_row_3_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_row_3_4.setGeometry(QtCore.QRect(210, 200, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_3_4.setFont(font)
self.gb_pb_buy_volume_row_3_4.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_3_4.setFlat(True)
self.gb_pb_buy_volume_row_3_4.setObjectName(_fromUtf8("gb_pb_buy_volume_row_3_4"))
self.gb_pb_buy_volume_5_1_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_5_1_4.setGeometry(QtCore.QRect(270, 240, 41, 23))
self.gb_pb_buy_volume_5_1_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_5_1_4.setText(_fromUtf8(""))
self.gb_pb_buy_volume_5_1_4.setObjectName(_fromUtf8("gb_pb_buy_volume_5_1_4"))
self.gb_pb_buy_volume_4_1_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_4_1_4.setGeometry(QtCore.QRect(270, 220, 41, 23))
self.gb_pb_buy_volume_4_1_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_4_1_4.setText(_fromUtf8(""))
self.gb_pb_buy_volume_4_1_4.setObjectName(_fromUtf8("gb_pb_buy_volume_4_1_4"))
self.gb_pb_buy_volume_row_4_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_row_4_4.setGeometry(QtCore.QRect(210, 220, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_4_4.setFont(font)
self.gb_pb_buy_volume_row_4_4.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_4_4.setText(_fromUtf8(""))
self.gb_pb_buy_volume_row_4_4.setFlat(True)
self.gb_pb_buy_volume_row_4_4.setObjectName(_fromUtf8("gb_pb_buy_volume_row_4_4"))
self.gb_pb_buy_volume_3_1_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_3_1_4.setGeometry(QtCore.QRect(270, 200, 41, 23))
self.gb_pb_buy_volume_3_1_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_3_1_4.setObjectName(_fromUtf8("gb_pb_buy_volume_3_1_4"))
self.gb_pb_b1_price_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_b1_price_4.setGeometry(QtCore.QRect(60, 160, 51, 20))
self.gb_pb_b1_price_4.setFlat(True)
self.gb_pb_b1_price_4.setObjectName(_fromUtf8("gb_pb_b1_price_4"))
self.gb_pb_buy_volume_3_2_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_3_2_4.setGeometry(QtCore.QRect(310, 200, 41, 23))
self.gb_pb_buy_volume_3_2_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_3_2_4.setObjectName(_fromUtf8("gb_pb_buy_volume_3_2_4"))
self.gb_pb_b3_volume_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_b3_volume_4.setGeometry(QtCore.QRect(110, 200, 71, 20))
self.gb_pb_b3_volume_4.setFlat(True)
self.gb_pb_b3_volume_4.setObjectName(_fromUtf8("gb_pb_b3_volume_4"))
self.gb_pb_buy_volume_row_2_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_row_2_4.setGeometry(QtCore.QRect(210, 180, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_2_4.setFont(font)
self.gb_pb_buy_volume_row_2_4.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_2_4.setText(_fromUtf8(""))
self.gb_pb_buy_volume_row_2_4.setFlat(True)
self.gb_pb_buy_volume_row_2_4.setObjectName(_fromUtf8("gb_pb_buy_volume_row_2_4"))
self.gb_pb_b2_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_b2_4.setGeometry(QtCore.QRect(10, 180, 51, 20))
self.gb_pb_b2_4.setFlat(True)
self.gb_pb_b2_4.setObjectName(_fromUtf8("gb_pb_b2_4"))
self.gb_pb_buy_volume_3_3_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_3_3_4.setGeometry(QtCore.QRect(350, 200, 41, 23))
self.gb_pb_buy_volume_3_3_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_3_3_4.setObjectName(_fromUtf8("gb_pb_buy_volume_3_3_4"))
self.gb_pb_b2_price_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_b2_price_4.setGeometry(QtCore.QRect(60, 180, 51, 20))
self.gb_pb_b2_price_4.setFlat(True)
self.gb_pb_b2_price_4.setObjectName(_fromUtf8("gb_pb_b2_price_4"))
self.gb_pb_buy_volume_row_minus_3_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_row_minus_3_4.setGeometry(QtCore.QRect(180, 200, 31, 23))
self.gb_pb_buy_volume_row_minus_3_4.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_3_4"))
self.gb_pb_b3_price_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_b3_price_4.setGeometry(QtCore.QRect(60, 200, 51, 20))
self.gb_pb_b3_price_4.setFlat(True)
self.gb_pb_b3_price_4.setObjectName(_fromUtf8("gb_pb_b3_price_4"))
self.gb_pb_b4_volume_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_b4_volume_4.setGeometry(QtCore.QRect(110, 220, 71, 20))
self.gb_pb_b4_volume_4.setFlat(True)
self.gb_pb_b4_volume_4.setObjectName(_fromUtf8("gb_pb_b4_volume_4"))
self.gb_pb_buy_volume_1_1_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_1_1_4.setGeometry(QtCore.QRect(270, 160, 41, 23))
self.gb_pb_buy_volume_1_1_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_1_1_4.setText(_fromUtf8(""))
self.gb_pb_buy_volume_1_1_4.setObjectName(_fromUtf8("gb_pb_buy_volume_1_1_4"))
self.gb_pb_buy_volume_row_minus_5_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_row_minus_5_4.setGeometry(QtCore.QRect(180, 240, 31, 23))
self.gb_pb_buy_volume_row_minus_5_4.setObjectName(_fromUtf8("gb_pb_buy_volume_row_minus_5_4"))
self.gb_pb_buy_volume_5_3_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_5_3_4.setGeometry(QtCore.QRect(350, 240, 41, 23))
self.gb_pb_buy_volume_5_3_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_5_3_4.setText(_fromUtf8(""))
self.gb_pb_buy_volume_5_3_4.setObjectName(_fromUtf8("gb_pb_buy_volume_5_3_4"))
self.gb_pb_buy_volume_2_1_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_2_1_4.setGeometry(QtCore.QRect(270, 180, 41, 23))
self.gb_pb_buy_volume_2_1_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_2_1_4.setText(_fromUtf8(""))
self.gb_pb_buy_volume_2_1_4.setObjectName(_fromUtf8("gb_pb_buy_volume_2_1_4"))
self.gb_pb_buy_volume_1_2_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_1_2_4.setGeometry(QtCore.QRect(310, 160, 41, 23))
self.gb_pb_buy_volume_1_2_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_1_2_4.setText(_fromUtf8(""))
self.gb_pb_buy_volume_1_2_4.setObjectName(_fromUtf8("gb_pb_buy_volume_1_2_4"))
self.gb_pb_buy_volume_row_5_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_row_5_4.setGeometry(QtCore.QRect(210, 240, 31, 23))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Book Antiqua"))
font.setPointSize(8)
self.gb_pb_buy_volume_row_5_4.setFont(font)
self.gb_pb_buy_volume_row_5_4.setStyleSheet(_fromUtf8("QPushButton { border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
" "))
self.gb_pb_buy_volume_row_5_4.setText(_fromUtf8(""))
self.gb_pb_buy_volume_row_5_4.setFlat(True)
self.gb_pb_buy_volume_row_5_4.setObjectName(_fromUtf8("gb_pb_buy_volume_row_5_4"))
self.gb_pb_buy_volume_4_3_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_4_3_4.setGeometry(QtCore.QRect(350, 220, 41, 23))
self.gb_pb_buy_volume_4_3_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_4_3_4.setText(_fromUtf8(""))
self.gb_pb_buy_volume_4_3_4.setObjectName(_fromUtf8("gb_pb_buy_volume_4_3_4"))
self.gb_pb_b4_price_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_b4_price_4.setGeometry(QtCore.QRect(60, 220, 51, 20))
self.gb_pb_b4_price_4.setStyleSheet(_fromUtf8("\n"
"QPushButton:hover { background-color: rgb(89, 89, 89); }\n"
""))
self.gb_pb_b4_price_4.setFlat(True)
self.gb_pb_b4_price_4.setObjectName(_fromUtf8("gb_pb_b4_price_4"))
self.gb_pb_b5_volume_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_b5_volume_4.setGeometry(QtCore.QRect(110, 240, 71, 20))
self.gb_pb_b5_volume_4.setFlat(True)
self.gb_pb_b5_volume_4.setObjectName(_fromUtf8("gb_pb_b5_volume_4"))
self.gb_pb_buy_volume_4_2_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_4_2_4.setGeometry(QtCore.QRect(310, 220, 41, 23))
self.gb_pb_buy_volume_4_2_4.setStyleSheet(_fromUtf8("QPushButton {border: 1px solid #FFFFFF;background-color: rgb(189, 189, 189);border-radius: 3px; color: rgb(255, 0, 0); }\n"
"QPushButton:hover { background-color: rgb(255, 255, 255); }"))
self.gb_pb_buy_volume_4_2_4.setText(_fromUtf8(""))
self.gb_pb_buy_volume_4_2_4.setObjectName(_fromUtf8("gb_pb_buy_volume_4_2_4"))
self.gb_pb_b1_volume_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_b1_volume_4.setGeometry(QtCore.QRect(110, 160, 71, 20))
self.gb_pb_b1_volume_4.setFlat(True)
self.gb_pb_b1_volume_4.setObjectName(_fromUtf8("gb_pb_b1_volume_4"))
self.gb_pb_buy_volume_2_3_4 = QtGui.QPushButton(self.gb_ETFOrder_4)
self.gb_pb_buy_volume_2_3_4.setGeometry(QtCore.QRect(350, 180, 41, 23))
self.gb_pb_buy_volume_2_3_4.setStyleSheet(_fromUtf8("QPushButton {border: | |
ideograph
0x275E3B: (0x9508, 0), # East Asian ideograph
0x21574E: (0x521D, 0), # East Asian ideograph
0x6F5859: (0xCAC0, 0), # Korean hangul
0x233651: (0x8CBA, 0), # East Asian ideograph
0x233652: (0x8CB5, 0), # East Asian ideograph
0x213653: (0x553E, 0), # East Asian ideograph
0x213654: (0x5563, 0), # East Asian ideograph
0x6F4F51: (0xB968, 0), # Korean hangul
0x275E3C: (0x956D, 0), # East Asian ideograph
0x234177: (0x91F1, 0), # East Asian ideograph
0x6F7656: (0xE8C8, 0), # Korean hangul
0x213657: (0x552E, 0), # East Asian ideograph
0x6F4A3A: (0xAE38, 0), # Korean hangul
0x34682A: (0x7C7C, 0), # East Asian ideograph
0x275E3D: (0x94C1, 0), # East Asian ideograph
0x214621: (0x6B61, 0), # East Asian ideograph
0x234622: (0x938C, 0), # East Asian ideograph
0x214623: (0x6B63, 0), # East Asian ideograph
0x214624: (0x6B64, 0), # East Asian ideograph
0x214625: (0x6B65, 0), # East Asian ideograph
0x214627: (0x6B66, 0), # East Asian ideograph
0x214628: (0x6B6A, 0), # East Asian ideograph
0x214629: (0x6B72, 0), # East Asian ideograph
0x22462A: (0x6BF6, 0), # East Asian ideograph
0x21462B: (0x6B78, 0), # East Asian ideograph
0x21462C: (0x6B79, 0), # East Asian ideograph
0x21462D: (0x6B7B, 0), # East Asian ideograph
0x21462E: (0x6B7F, 0), # East Asian ideograph
0x21462F: (0x6B83, 0), # East Asian ideograph
0x214630: (0x6B86, 0), # East Asian ideograph
0x214631: (0x6B8A, 0), # East Asian ideograph
0x214632: (0x6B89, 0), # East Asian ideograph
0x214633: (0x6B98, 0), # East Asian ideograph
0x214634: (0x6B96, 0), # East Asian ideograph
0x214635: (0x6BA4, 0), # East Asian ideograph
0x214636: (0x6BAE, 0), # East Asian ideograph
0x214637: (0x6BAF, 0), # East Asian ideograph
0x214638: (0x6BB2, 0), # East Asian ideograph
0x214639: (0x6BB5, 0), # East Asian ideograph
0x21463A: (0x6BB7, 0), # East Asian ideograph
0x21463B: (0x6BBA, 0), # East Asian ideograph
0x21463C: (0x6BBC, 0), # East Asian ideograph
0x21463D: (0x6BC0, 0), # East Asian ideograph
0x21463E: (0x6BBF, 0), # East Asian ideograph
0x21463F: (0x6BC5, 0), # East Asian ideograph
0x214640: (0x6BC6, 0), # East Asian ideograph
0x214641: (0x6BCB, 0), # East Asian ideograph
0x214642: (0x6BCD, 0), # East Asian ideograph
0x214643: (0x6BCF, 0), # East Asian ideograph
0x214644: (0x6BD2, 0), # East Asian ideograph
0x214646: (0x6BD4, 0), # East Asian ideograph
0x214647: (0x6BD7, 0), # East Asian ideograph
0x214648: (0x6BDB, 0), # East Asian ideograph
0x214649: (0x6BEB, 0), # East Asian ideograph
0x21464A: (0x6BEF, 0), # East Asian ideograph
0x21464B: (0x6BFD, 0), # East Asian ideograph
0x21464C: (0x6C0F, 0), # East Asian ideograph
0x21464D: (0x6C11, 0), # East Asian ideograph
0x21464E: (0x6C10, 0), # East Asian ideograph
0x21464F: (0x6C13, 0), # East Asian ideograph
0x214650: (0x6C16, 0), # East Asian ideograph
0x214651: (0x6C1B, 0), # East Asian ideograph
0x214652: (0x6C1F, 0), # East Asian ideograph
0x214653: (0x6C27, 0), # East Asian ideograph
0x214654: (0x6C26, 0), # East Asian ideograph
0x214655: (0x6C23, 0), # East Asian ideograph
0x214656: (0x6C28, 0), # East Asian ideograph
0x214657: (0x6C24, 0), # East Asian ideograph
0x214658: (0x6C2B, 0), # East Asian ideograph
0x214659: (0x6C2E, 0), # East Asian ideograph
0x21465A: (0x6C33, 0), # East Asian ideograph
0x21465B: (
0x6C2F,
0,
), # East Asian ideograph (variant of 45465B which maps to 6C2F)
0x21465C: (0x6C34, 0), # East Asian ideograph
0x21465D: (0x6C38, 0), # East Asian ideograph
0x21465E: (0x6C41, 0), # East Asian ideograph
0x23465F: (0x93E5, 0), # East Asian ideograph
0x214660: (0x6C40, 0), # East Asian ideograph
0x214661: (0x6C42, 0), # East Asian ideograph
0x214662: (0x6C5E, 0), # East Asian ideograph
0x214663: (0x6C57, 0), # East Asian ideograph
0x214664: (0x6C5F, 0), # East Asian ideograph
0x214665: (0x6C59, 0), # East Asian ideograph
0x214666: (0x6C60, 0), # East Asian ideograph
0x214667: (0x6C55, 0), # East Asian ideograph
0x214668: (0x6C50, 0), # East Asian ideograph
0x214669: (0x6C5D, 0), # East Asian ideograph
0x21466A: (0x6C9B, 0), # East Asian ideograph
0x21466B: (0x6C81, 0), # East Asian ideograph
0x21466D: (0x6C7A, 0), # East Asian ideograph
0x21466E: (0x6C6A, 0), # East Asian ideograph
0x21466F: (0x6C8C, 0), # East Asian ideograph
0x214670: (0x6C90, 0), # East Asian ideograph
0x214671: (0x6C72, 0), # East Asian ideograph
0x214672: (0x6C70, 0), # East Asian ideograph
0x214673: (0x6C68, 0), # East Asian ideograph
0x214674: (0x6C96, 0), # East Asian ideograph
0x234675: (0x93DB, 0), # East Asian ideograph
0x214676: (
0x6C89,
0,
), # East Asian ideograph (variant of 4B4676 which maps to 6C89)
0x214677: (0x6C99, 0), # East Asian ideograph
0x214678: (0x6C7E, 0), # East Asian ideograph
0x214679: (0x6C7D, 0), # East Asian ideograph
0x21467A: (0x6C92, 0), # East Asian ideograph
0x21467B: (0x6C83, 0), # East Asian ideograph
0x21467C: (0x6CB1, 0), # East Asian ideograph
0x23366A: (0x8CE1, 0), # East Asian ideograph
0x21467E: (0x6CF3, 0), # East Asian ideograph
0x21366B: (0x559D, 0), # East Asian ideograph
0x2D5421: (0x9AD7, 0), # East Asian ideograph
0x6F4A56: (0xAE7D, 0), # Korean hangul
0x4C4359: (0x6B05, 0), # East Asian ideograph
0x27366D: (0x5524, 0), # East Asian ideograph
0x21366E: (0x557E, 0), # East Asian ideograph
0x294869: (0x9567, 0), # East Asian ideograph
0x284027: (0x6864, 0), # East Asian ideograph
0x21366F: (0x55AC, 0), # East Asian ideograph
0x213670: (0x5589, 0), # East Asian ideograph
0x223671: (0x6595, 0), # East Asian ideograph
0x213672: (0x55BB, 0), # East Asian ideograph
0x27406C: (0x631F, 0), # East Asian ideograph
0x4C3B60: (0x6764, 0), # East Asian ideograph
0x294228: (0x94AC, 0), # East Asian ideograph
0x213674: (0x55DF, 0), # East Asian ideograph
0x213675: (0x55D1, 0), # East Asian ideograph
0x213869: (0x58D3, 0), # East Asian ideograph
0x28734E: (0x7F32, 0), # East Asian ideograph
0x233676: (0x8CEE, 0), # East Asian ideograph
0x216121: (0x993F, 0), # East Asian ideograph
0x213677: (0x55E6, 0), # East Asian ideograph
0x4B6122: (0x994B, 0), # East Asian ideograph
0x6F4F58: (0xB985, 0), # Korean hangul
0x273678: (0x556C, 0), # East Asian ideograph
0x275E43: (0x94F8, 0), # East Asian ideograph
0x216123: (0x9945, 0), # East Asian ideograph
0x6F5263: (0xC0B0, 0), # Korean hangul
0x21353D: (0x53F2, 0), # East Asian ideograph
0x276124: (0x9976, 0), # East Asian ideograph
0x27367A: (0x5417, 0), # East Asian ideograph
0x2D5424: (0x5367, 0), # East Asian ideograph
0x23367B: (0x8CF1, 0), # East Asian ideograph
0x216126: (0x995C, 0), # East Asian ideograph
0x6F5851: (0xCA54, 0), # Korean hangul
0x21367C: (0x55EF, 0), # East Asian ideograph
0x2D475B: (0x51C9, 0), # East Asian ideograph
0x276127: (0x998B, 0), # East Asian ideograph
0x6F4F59: (0xB987, 0), # Korean hangul
0x21767D: (0x5844, 0), # East Asian ideograph
0x275E44: (0x9573, 0), # East Asian ideograph
0x21367E: (0x55C5, 0), # East Asian ideograph
0x396C6B: (0x60A4, 0), # East Asian ideograph
0x6F5B37: (0xD168, 0), # Korean hangul
0x213E61: (0x60E1, 0), # East Asian ideograph
0x224A4A: (0x6DE6, 0), # East Asian ideograph
0x4B5D34: (0x91B8, 0), # East Asian ideograph
0x27612C: (0x9A6C, 0), # East Asian ideograph
0x217971: (0x59A0, 0), # East Asian ideograph
0x21353F: (0x540B, 0), # East Asian ideograph
0x27612E: (0x9A6D, 0), # East Asian ideograph
0x2D6260: (0x5E85, 0), # East Asian ideograph
0x27612F: (0x9A70, 0), # East Asian ideograph
0x287351: (0x7F33, 0), # East Asian ideograph
0x214721: (0x6CE3, 0), # East Asian ideograph
0x214722: (0x6CF0, 0), # East Asian ideograph
0x214723: (0x6CB8, 0), # East Asian ideograph
0x214724: (0x6CD3, 0), # East Asian ideograph
0x214725: (0x6CAB, 0), # East Asian ideograph
0x214726: (0x6CE5, 0), # East Asian ideograph
0x214727: (0x6CBD, 0), # East Asian ideograph
0x214728: (0x6CB3, 0), # East Asian ideograph
0x214729: (0x6CC4, 0), # East Asian ideograph
0x21472A: (0x6CD5, 0), # East Asian ideograph
0x21472B: (0x6CE2, 0), # East Asian ideograph
0x21472C: (0x6CBC, 0), # East Asian ideograph
0x21472D: (0x6CAE, 0), # East Asian ideograph
0x21472E: (0x6CB9, 0), # East Asian ideograph
0x21472F: (0x6CF1, 0), # East Asian ideograph
0x214730: (0x6CC1, 0), # East Asian ideograph
0x214731: (0x6CBE, 0), # East Asian ideograph
0x214732: (0x6CC5, 0), # East Asian ideograph
0x214733: (0x6CD7, 0), # East Asian ideograph
0x234734: (0x9413, 0), # East Asian ideograph
0x214735: (0x6CDB, 0), # East Asian ideograph
0x214736: (0x6CE1, 0), # | |
self.input)
raise eee
cnt17 += 1
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
class orOp_return(ParserRuleReturnScope):
    """ANTLR-generated return scope for the `orOp` rule; carries the rule's AST."""
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
        # AST subtree built by the rule; set after a successful (or recovered) parse.
        self.tree = None
def orOp(self, ):
    """ANTLR-generated parser rule: orOp : WS+ OR WS+ ;

    Matches an OR keyword surrounded by at least one whitespace token on each
    side and returns an orOp_return whose .tree holds the flat AST.
    Generated code -- do not hand-edit; regenerate from the grammar instead.
    """
    retval = self.orOp_return()
    retval.start = self.input.LT(1)

    root_0 = None

    WS40 = None
    OR41 = None
    WS42 = None

    WS40_tree = None
    OR41_tree = None
    WS42_tree = None

    try:
        try:
            pass
            root_0 = self._adaptor.nil()

            # (WS)+ -- leading whitespace, required at least once (loop 18).
            cnt18 = 0
            while True:
                alt18 = 2
                LA18_0 = self.input.LA(1)

                if (LA18_0 == WS) :
                    alt18 = 1

                if alt18 == 1:
                    pass
                    WS40=self.match(self.input, WS, self.FOLLOW_WS_in_orOp532)
                    WS40_tree = self._adaptor.createWithPayload(WS40)
                    self._adaptor.addChild(root_0, WS40_tree)
                else:
                    if cnt18 >= 1:
                        break
                    # Zero WS tokens matched: the (WS)+ subrule fails.
                    eee = EarlyExitException(18, self.input)
                    raise eee

                cnt18 += 1

            OR41=self.match(self.input, OR, self.FOLLOW_OR_in_orOp535)
            OR41_tree = self._adaptor.createWithPayload(OR41)
            self._adaptor.addChild(root_0, OR41_tree)

            # (WS)+ -- trailing whitespace, required at least once (loop 19).
            cnt19 = 0
            while True:
                alt19 = 2
                LA19_0 = self.input.LA(1)

                if (LA19_0 == WS) :
                    alt19 = 1

                if alt19 == 1:
                    pass
                    WS42=self.match(self.input, WS, self.FOLLOW_WS_in_orOp537)
                    WS42_tree = self._adaptor.createWithPayload(WS42)
                    self._adaptor.addChild(root_0, WS42_tree)
                else:
                    if cnt19 >= 1:
                        break
                    eee = EarlyExitException(19, self.input)
                    raise eee

                cnt19 += 1

            retval.stop = self.input.LT(-1)

            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

        # NOTE: generated code binds the exception to the name `re`,
        # shadowing any `re` module import in this scope (Python 2 syntax).
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass

    return retval
class notOp_return(ParserRuleReturnScope):
    """ANTLR-generated return scope for the `notOp` rule; carries the rule's AST."""
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
        # AST subtree built by the rule.
        self.tree = None
def notOp(self, ):
    """ANTLR-generated parser rule: notOp : <token 43> | NOT WS+ ;

    Alt 1 matches a single literal token (type 43 -- presumably a negation
    punctuation character such as '-'; confirm against the grammar/token file).
    Alt 2 matches the NOT keyword followed by at least one WS token.
    Generated code -- do not hand-edit.
    """
    retval = self.notOp_return()
    retval.start = self.input.LT(1)

    root_0 = None

    char_literal43 = None
    NOT44 = None
    WS45 = None

    char_literal43_tree = None
    NOT44_tree = None
    WS45_tree = None

    try:
        try:
            # Decision 21: pick the alternative from one token of lookahead.
            alt21 = 2
            LA21_0 = self.input.LA(1)

            if (LA21_0 == 43) :
                alt21 = 1
            elif (LA21_0 == NOT) :
                alt21 = 2
            else:
                nvae = NoViableAltException("", 21, 0, self.input)
                raise nvae

            if alt21 == 1:
                # Alt 1: the single literal token (type 43).
                pass
                root_0 = self._adaptor.nil()
                char_literal43=self.match(self.input, 43, self.FOLLOW_43_in_notOp552)
                char_literal43_tree = self._adaptor.createWithPayload(char_literal43)
                self._adaptor.addChild(root_0, char_literal43_tree)

            elif alt21 == 2:
                # Alt 2: NOT keyword followed by (WS)+ (loop 20).
                pass
                root_0 = self._adaptor.nil()
                NOT44=self.match(self.input, NOT, self.FOLLOW_NOT_in_notOp558)
                NOT44_tree = self._adaptor.createWithPayload(NOT44)
                self._adaptor.addChild(root_0, NOT44_tree)

                cnt20 = 0
                while True:
                    alt20 = 2
                    LA20_0 = self.input.LA(1)

                    if (LA20_0 == WS) :
                        alt20 = 1

                    if alt20 == 1:
                        pass
                        WS45=self.match(self.input, WS, self.FOLLOW_WS_in_notOp560)
                        WS45_tree = self._adaptor.createWithPayload(WS45)
                        self._adaptor.addChild(root_0, WS45_tree)
                    else:
                        if cnt20 >= 1:
                            break
                        eee = EarlyExitException(20, self.input)
                        raise eee

                    cnt20 += 1

            retval.stop = self.input.LT(-1)

            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass

    return retval
class sep_return(ParserRuleReturnScope):
    """ANTLR-generated return scope for the `sep` rule; carries the rule's AST."""
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
        # AST subtree built by the rule.
        self.tree = None
def sep(self, ):
    """ANTLR-generated parser rule: sep : WS* <token 44> WS* ;

    Matches a separator token (type 44 -- presumably ','; confirm against the
    grammar/token file) with optional whitespace on either side.
    Generated code -- do not hand-edit.
    """
    retval = self.sep_return()
    retval.start = self.input.LT(1)

    root_0 = None

    WS46 = None
    char_literal47 = None
    WS48 = None

    WS46_tree = None
    char_literal47_tree = None
    WS48_tree = None

    try:
        try:
            pass
            root_0 = self._adaptor.nil()

            # (WS)* -- optional leading whitespace (loop 22).
            while True:
                alt22 = 2
                LA22_0 = self.input.LA(1)

                if (LA22_0 == WS) :
                    alt22 = 1

                if alt22 == 1:
                    pass
                    WS46=self.match(self.input, WS, self.FOLLOW_WS_in_sep575)
                    WS46_tree = self._adaptor.createWithPayload(WS46)
                    self._adaptor.addChild(root_0, WS46_tree)
                else:
                    break

            char_literal47=self.match(self.input, 44, self.FOLLOW_44_in_sep578)
            char_literal47_tree = self._adaptor.createWithPayload(char_literal47)
            self._adaptor.addChild(root_0, char_literal47_tree)

            # (WS)* -- optional trailing whitespace (loop 23).
            while True:
                alt23 = 2
                LA23_0 = self.input.LA(1)

                if (LA23_0 == WS) :
                    alt23 = 1

                if alt23 == 1:
                    pass
                    WS48=self.match(self.input, WS, self.FOLLOW_WS_in_sep580)
                    WS48_tree = self._adaptor.createWithPayload(WS48)
                    self._adaptor.addChild(root_0, WS48_tree)
                else:
                    break

            retval.stop = self.input.LT(-1)

            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass

    return retval
class fnname_return(ParserRuleReturnScope):
    """ANTLR-generated return scope for the `fnname` rule; carries the rule's AST."""
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
        # AST subtree built by the rule.
        self.tree = None
def fnname(self, ):
    """ANTLR-generated parser rule: fnname matches one function-name token.

    Accepts any single token whose type lies in the contiguous range
    DISTANCE_FN .. GEO_POINT_FN and adds it to the AST as-is.
    Generated code -- do not hand-edit.
    """
    retval = self.fnname_return()
    retval.start = self.input.LT(1)

    root_0 = None

    set49 = None

    set49_tree = None

    try:
        try:
            pass
            root_0 = self._adaptor.nil()

            set49 = self.input.LT(1)
            # Set match: consume the token if its type is in the allowed range.
            if (DISTANCE_FN <= self.input.LA(1) <= GEO_POINT_FN):
                self.input.consume()
                self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set49))
                self._state.errorRecovery = False
            else:
                mse = MismatchedSetException(None, self.input)
                raise mse

            retval.stop = self.input.LT(-1)

            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass

    return retval
class composite_return(ParserRuleReturnScope):
    """ANTLR-generated return scope for the `composite` rule; carries the rule's AST."""
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
        # AST subtree built by the rule.
        self.tree = None
def composite(self, ):
    """ANTLR-generated parser rule: composite : LPAREN WS* expression WS* RPAREN -> expression ;

    Matches a parenthesised expression; the AST rewrite discards the
    parentheses and whitespace, keeping only the inner expression subtree.
    Generated code -- do not hand-edit.
    """
    retval = self.composite_return()
    retval.start = self.input.LT(1)

    root_0 = None

    LPAREN50 = None
    WS51 = None
    WS53 = None
    RPAREN54 = None
    expression52 = None

    LPAREN50_tree = None
    WS51_tree = None
    WS53_tree = None
    RPAREN54_tree = None
    # Rewrite streams collect matched tokens/subtrees for the -> rewrite below.
    stream_RPAREN = RewriteRuleTokenStream(self._adaptor, "token RPAREN")
    stream_WS = RewriteRuleTokenStream(self._adaptor, "token WS")
    stream_LPAREN = RewriteRuleTokenStream(self._adaptor, "token LPAREN")
    stream_expression = RewriteRuleSubtreeStream(self._adaptor, "rule expression")

    try:
        try:
            pass
            LPAREN54 = None  # NOTE(review): placeholder comment removed -- see below
        except RecognitionException, re:
            raise
    finally:
        pass
class item_return(ParserRuleReturnScope):
    """ANTLR-generated return scope for the `item` rule; carries the rule's AST."""
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
        # AST subtree built by the rule.
        self.tree = None
def item(self, ):
    """ANTLR-generated parser rule:
    item : FIX value -> ^(LITERAL value)
         | REWRITE value -> ^(FUZZY value)
         | value ;

    A FIX-prefixed value becomes a LITERAL-rooted subtree, a REWRITE-prefixed
    value becomes a FUZZY-rooted subtree, and a bare value passes through.
    Generated code -- do not hand-edit.
    """
    retval = self.item_return()
    retval.start = self.input.LT(1)

    root_0 = None

    FIX55 = None
    REWRITE57 = None
    value56 = None
    value58 = None
    value59 = None

    FIX55_tree = None
    REWRITE57_tree = None
    stream_FIX = RewriteRuleTokenStream(self._adaptor, "token FIX")
    stream_REWRITE = RewriteRuleTokenStream(self._adaptor, "token REWRITE")
    stream_value = RewriteRuleSubtreeStream(self._adaptor, "rule value")

    try:
        try:
            # Decision 26: choose the alternative from one token of lookahead.
            alt26 = 3
            LA26 = self.input.LA(1)
            if LA26 == FIX:
                alt26 = 1
            elif LA26 == REWRITE:
                alt26 = 2
            elif LA26 == DISTANCE_FN or LA26 == GEO_POINT_FN or LA26 == TEXT or LA26 == QUOTE:
                alt26 = 3
            else:
                nvae = NoViableAltException("", 26, 0, self.input)
                raise nvae

            if alt26 == 1:
                # Alt 1: FIX value -> ^(LITERAL value)
                pass
                FIX55=self.match(self.input, FIX, self.FOLLOW_FIX_in_item646)
                stream_FIX.add(FIX55)
                self._state.following.append(self.FOLLOW_value_in_item648)
                value56 = self.value()

                self._state.following.pop()
                stream_value.add(value56.tree)

                retval.tree = root_0

                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

                root_0 = self._adaptor.nil()
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(LITERAL, "LITERAL"), root_1)
                self._adaptor.addChild(root_1, stream_value.nextTree())
                self._adaptor.addChild(root_0, root_1)

                retval.tree = root_0

            elif alt26 == 2:
                # Alt 2: REWRITE value -> ^(FUZZY value)
                pass
                REWRITE57=self.match(self.input, REWRITE, self.FOLLOW_REWRITE_in_item662)
                stream_REWRITE.add(REWRITE57)
                self._state.following.append(self.FOLLOW_value_in_item664)
                value58 = self.value()

                self._state.following.pop()
                stream_value.add(value58.tree)

                retval.tree = root_0

                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

                root_0 = self._adaptor.nil()
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(FUZZY, "FUZZY"), root_1)
                self._adaptor.addChild(root_1, stream_value.nextTree())
                self._adaptor.addChild(root_0, root_1)

                retval.tree = root_0

            elif alt26 == 3:
                # Alt 3: bare value passes through unchanged.
                pass
                self._state.following.append(self.FOLLOW_value_in_item678)
                value59 = self.value()

                self._state.following.pop()
                stream_value.add(value59.tree)

                retval.tree = root_0

                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

                root_0 = self._adaptor.nil()
                self._adaptor.addChild(root_0, stream_value.nextTree())

                retval.tree = root_0

            retval.stop = self.input.LT(-1)

            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass

    return retval
class value_return(ParserRuleReturnScope):
    """ANTLR-generated return scope for the `value` rule; carries the rule's AST."""
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
        # AST subtree built by the rule.
        self.tree = None
def value(self, ):
    """ANTLR-generated parser rule:
    value : text   -> ^(VALUE TEXT text)
          | phrase -> ^(VALUE STRING phrase) ;

    Wraps either a bare text token run or a quoted phrase in a VALUE-rooted
    subtree, tagging it with a TEXT or STRING marker node respectively.
    Generated code -- do not hand-edit.
    """
    retval = self.value_return()
    retval.start = self.input.LT(1)

    root_0 = None

    text60 = None
    phrase61 = None

    stream_text = RewriteRuleSubtreeStream(self._adaptor, "rule text")
    stream_phrase = RewriteRuleSubtreeStream(self._adaptor, "rule phrase")

    try:
        try:
            # Decision 27: bare text (incl. function-name tokens) vs quoted phrase.
            alt27 = 2
            LA27_0 = self.input.LA(1)

            if ((DISTANCE_FN <= LA27_0 <= GEO_POINT_FN) or LA27_0 == TEXT) :
                alt27 = 1
            elif (LA27_0 == QUOTE) :
                alt27 = 2
            else:
                nvae = NoViableAltException("", 27, 0, self.input)
                raise nvae

            if alt27 == 1:
                # Alt 1: text -> ^(VALUE TEXT text)
                pass
                self._state.following.append(self.FOLLOW_text_in_value696)
                text60 = self.text()

                self._state.following.pop()
                stream_text.add(text60.tree)

                retval.tree = root_0

                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

                root_0 = self._adaptor.nil()
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(VALUE, "VALUE"), root_1)
                self._adaptor.addChild(root_1, self._adaptor.createFromType(TEXT, "TEXT"))
                self._adaptor.addChild(root_1, stream_text.nextTree())
                self._adaptor.addChild(root_0, root_1)

                retval.tree = root_0

            elif alt27 == 2:
                # Alt 2: phrase -> ^(VALUE STRING phrase)
                pass
                self._state.following.append(self.FOLLOW_phrase_in_value712)
                phrase61 = self.phrase()

                self._state.following.pop()
                stream_phrase.add(phrase61.tree)

                retval.tree = root_0

                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

                root_0 = self._adaptor.nil()
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(self._adaptor.createFromType(VALUE, "VALUE"), root_1)
                self._adaptor.addChild(root_1, self._adaptor.createFromType(STRING, "STRING"))
                self._adaptor.addChild(root_1, stream_phrase.nextTree())
                self._adaptor.addChild(root_0, root_1)

                retval.tree = root_0

            retval.stop = self.input.LT(-1)

            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass

    return retval
class text_return(ParserRuleReturnScope):
    """ANTLR-generated return scope for the `text` rule; carries the rule's AST."""
    def __init__(self):
        ParserRuleReturnScope.__init__(self)
        # AST subtree built by the rule.
        self.tree = None
| |
C++ signature :
void _set_input_arrays(caffe::Net<float>*,boost::python::api::object,boost::python::api::object)
"""
pass
# IDE stub: body is `pass`; the real implementation is provided by the compiled
# Boost.Python extension (signature "unreliably restored from __doc__").
def _top_ids(self, Net, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
    """
    _top_ids( (Net)arg1, (int)arg2) -> IntVec :

        C++ signature :
            std::vector<int, std::allocator<int> > _top_ids(caffe::Net<float> {lvalue},int)
    """
    pass
# IDE stub constructor: two C++ overloads (prototxt+phase, or prototxt+weights
# file+phase). Body is `pass`; the compiled extension does the real work.
def __init__(self, p_object, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
    """
    __init__( (object)arg1, (str)network_file, (int)phase [, (int)level=0 [, (object)stages=None [, (object)weights=None]]]) -> object :

        C++ signature :
            void* __init__(boost::python::api::object,std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >,int [,int=0 [,boost::python::api::object=None [,boost::python::api::object=None]]])

    __init__( (object)arg1, (str)arg2, (str)arg3, (int)arg4) -> object :

        C++ signature :
            void* __init__(boost::python::api::object,std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >,std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >,int)
    """
    pass
# IDE stub for the pickling hook; no real signature could be recovered.
def __reduce__(self, *args, **kwargs): # real signature unknown
    pass
# --- Stub property placeholders -------------------------------------------
# In the stub, every property's getter returns a generic object() and the
# setter/deleter are no-ops; the compiled extension supplies real accessors.
# Underscore-prefixed names mirror the raw C++ bindings backing the public
# properties above them.
blobs = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blobs indexed by name
"""

blob_loss_weights = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blob loss weights indexed by name
"""

bottom_names = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

inputs = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

layers = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

layer_dict = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
"""
An OrderedDict (bottom to top, i.e., input to output) of network
layers indexed by name
"""

outputs = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

params = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
"""
An OrderedDict (bottom to top, i.e., input to output) of network
parameters indexed by name; each is a list of multiple blobs (e.g.,
weights and biases)
"""

top_names = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

_blobs = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

_blob_loss_weights = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

_blob_names = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

_inputs = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

_layer_names = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

_outputs = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
class NetVec(__Boost_Python.instance):
    """IDE stub for the Boost.Python-exported vector of shared Net pointers
    (std::vector<boost::shared_ptr<caffe::Net<float>>>).

    All method bodies are `pass` placeholders; the real behavior lives in the
    compiled extension. Docstrings were restored from the C++ bindings.
    """
    def append(self, NetVec, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        append( (NetVec)arg1, (object)arg2) -> None :

            C++ signature :
                void append(std::vector<boost::shared_ptr<caffe::Net<float> >, std::allocator<boost::shared_ptr<caffe::Net<float> > > > {lvalue},boost::python::api::object)
        """
        pass

    def extend(self, NetVec, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        extend( (NetVec)arg1, (object)arg2) -> None :

            C++ signature :
                void extend(std::vector<boost::shared_ptr<caffe::Net<float> >, std::allocator<boost::shared_ptr<caffe::Net<float> > > > {lvalue},boost::python::api::object)
        """
        pass

    def __contains__(self, NetVec, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __contains__( (NetVec)arg1, (object)arg2) -> bool :

            C++ signature :
                bool __contains__(std::vector<boost::shared_ptr<caffe::Net<float> >, std::allocator<boost::shared_ptr<caffe::Net<float> > > > {lvalue},_object*)
        """
        pass

    def __delitem__(self, NetVec, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __delitem__( (NetVec)arg1, (object)arg2) -> None :

            C++ signature :
                void __delitem__(std::vector<boost::shared_ptr<caffe::Net<float> >, std::allocator<boost::shared_ptr<caffe::Net<float> > > > {lvalue},_object*)
        """
        pass

    def __getitem__(self, p_object, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __getitem__( (object)arg1, (object)arg2) -> object :

            C++ signature :
                boost::python::api::object __getitem__(boost::python::back_reference<std::vector<boost::shared_ptr<caffe::Net<float> >, std::allocator<boost::shared_ptr<caffe::Net<float> > > >&>,_object*)
        """
        pass

    def __init__(self, p_object, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __init__( (object)arg1) -> None :

            C++ signature :
                void __init__(_object*)
        """
        pass

    def __iter__(self, p_object, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __iter__( (object)arg1) -> object :

            C++ signature :
                boost::python::objects::iterator_range<boost::python::return_value_policy<boost::python::return_by_value, boost::python::default_call_policies>, __gnu_cxx::__normal_iterator<boost::shared_ptr<caffe::Net<float> >*, std::vector<boost::shared_ptr<caffe::Net<float> >, std::allocator<boost::shared_ptr<caffe::Net<float> > > > > > __iter__(boost::python::back_reference<std::vector<boost::shared_ptr<caffe::Net<float> >, std::allocator<boost::shared_ptr<caffe::Net<float> > > >&>)
        """
        pass

    def __len__(self, NetVec, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __len__( (NetVec)arg1) -> int :

            C++ signature :
                unsigned long __len__(std::vector<boost::shared_ptr<caffe::Net<float> >, std::allocator<boost::shared_ptr<caffe::Net<float> > > > {lvalue})
        """
        pass

    def __reduce__(self, *args, **kwargs): # real signature unknown
        pass

    def __setitem__(self, NetVec, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __setitem__( (NetVec)arg1, (object)arg2, (object)arg3) -> None :

            C++ signature :
                void __setitem__(std::vector<boost::shared_ptr<caffe::Net<float> >, std::allocator<boost::shared_ptr<caffe::Net<float> > > > {lvalue},_object*,_object*)
        """
        pass

    # Size in bytes of the C++ instance, as reported by Boost.Python.
    __instance_size__ = 40
class RawBlobVec(__Boost_Python.instance):
    """IDE stub for the Boost.Python-exported vector of raw blob pointers
    (std::vector<caffe::Blob<float>*>).

    All method bodies are `pass` placeholders; the real behavior lives in the
    compiled extension. Docstrings were restored from the C++ bindings.
    """
    def append(self, RawBlobVec, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        append( (RawBlobVec)arg1, (object)arg2) -> None :

            C++ signature :
                void append(std::vector<caffe::Blob<float>*, std::allocator<caffe::Blob<float>*> > {lvalue},boost::python::api::object)
        """
        pass

    def extend(self, RawBlobVec, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        extend( (RawBlobVec)arg1, (object)arg2) -> None :

            C++ signature :
                void extend(std::vector<caffe::Blob<float>*, std::allocator<caffe::Blob<float>*> > {lvalue},boost::python::api::object)
        """
        pass

    def __contains__(self, RawBlobVec, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __contains__( (RawBlobVec)arg1, (object)arg2) -> bool :

            C++ signature :
                bool __contains__(std::vector<caffe::Blob<float>*, std::allocator<caffe::Blob<float>*> > {lvalue},_object*)
        """
        pass

    def __delitem__(self, RawBlobVec, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __delitem__( (RawBlobVec)arg1, (object)arg2) -> None :

            C++ signature :
                void __delitem__(std::vector<caffe::Blob<float>*, std::allocator<caffe::Blob<float>*> > {lvalue},_object*)
        """
        pass

    def __getitem__(self, p_object, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __getitem__( (object)arg1, (object)arg2) -> object :

            C++ signature :
                boost::python::api::object __getitem__(boost::python::back_reference<std::vector<caffe::Blob<float>*, std::allocator<caffe::Blob<float>*> >&>,_object*)
        """
        pass

    def __init__(self, p_object, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __init__( (object)arg1) -> None :

            C++ signature :
                void __init__(_object*)
        """
        pass

    def __iter__(self, p_object, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __iter__( (object)arg1) -> object :

            C++ signature :
                boost::python::objects::iterator_range<boost::python::return_value_policy<boost::python::return_by_value, boost::python::default_call_policies>, __gnu_cxx::__normal_iterator<caffe::Blob<float>**, std::vector<caffe::Blob<float>*, std::allocator<caffe::Blob<float>*> > > > __iter__(boost::python::back_reference<std::vector<caffe::Blob<float>*, std::allocator<caffe::Blob<float>*> >&>)
        """
        pass

    def __len__(self, RawBlobVec, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __len__( (RawBlobVec)arg1) -> int :

            C++ signature :
                unsigned long __len__(std::vector<caffe::Blob<float>*, std::allocator<caffe::Blob<float>*> > {lvalue})
        """
        pass

    def __reduce__(self, *args, **kwargs): # real signature unknown
        pass

    def __setitem__(self, RawBlobVec, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __setitem__( (RawBlobVec)arg1, (object)arg2, (object)arg3) -> None :

            C++ signature :
                void __setitem__(std::vector<caffe::Blob<float>*, std::allocator<caffe::Blob<float>*> > {lvalue},_object*,_object*)
        """
        pass

    # Size in bytes of the C++ instance, as reported by Boost.Python.
    __instance_size__ = 40
class RMSPropSolver(Solver):
    """IDE stub for the RMSProp solver binding; constructed from a solver
    prototxt path (str). Real behavior lives in the compiled extension.
    """
    def __init__(self, p_object, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __init__( (object)arg1, (str)arg2) -> None :

            C++ signature :
                void __init__(_object*,std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >)
        """
        pass

    def __reduce__(self, *args, **kwargs): # real signature unknown
        pass

    # Size in bytes of the C++ instance, as reported by Boost.Python.
    __instance_size__ = 32
class SGDSolver(Solver):
    """IDE stub for the SGD solver binding; constructed from a solver
    prototxt path (str). Real behavior lives in the compiled extension.
    """
    def __init__(self, p_object, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """
        __init__( (object)arg1, (str)arg2) -> None :

            C++ signature :
                void __init__(_object*,std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >)
        """
        pass

    def __reduce__(self, *args, **kwargs): # real signature unknown
        pass

    # Size in bytes of the C++ instance, as reported by Boost.Python.
    __instance_size__ = 32
class SolverParameter(__Boost_Python.instance):
    """IDE stub for the solver-parameter binding; not constructible from
    Python (the constructor raises). Exposes read-only-looking property
    placeholders for display, layer_wise_reduce and max_iter.
    """
    def __init__(self, *args, **kwargs): # real signature unknown
        """
        Raises an exception
        This class cannot be instantiated from Python
        """
        pass

    def __reduce__(self, *args, **kwargs): # real signature unknown
        pass

    # Stub property placeholders; real accessors come from the extension.
    display = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    layer_wise_reduce = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    max_iter = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
class StringVec(__Boost_Python.instance):
# no doc
def append(self, StringVec, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
"""
append( (StringVec)arg1, (object)arg2) -> None :
C++ signature :
void append(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > {lvalue},boost::python::api::object)
"""
pass
def | |
== 0 and K_cstr_ss == True):
print 'Initial guess is not feasible because sum of K_{m}<=0.'
elif GoodGuess == True:
print 'Initial guess is feasible.'
# Compute steady state
print 'BEGIN STEADY STATE COMPUTATION'
# Bundle the scalar parameters that ssf.SS consumes as a tuple.
ss_params = (S, alpha, beta, sigma, ss_tol)
(r_ss, w_ss, p_c_ss, p_tilde_ss, b_ss, c_tilde_ss, c_ss, eul_ss, C_ss, X_ss,
    K_ss, L_ss, MCK_err_ss, MCL_err_ss, ss_time) = \
    ssf.SS(ss_params, rwbar_init, b_guess, c_bar, A,
    gamma, epsilon, delta, xi, pi, I, M, S, n, ss_graphs)
# Print diagnostics
print 'The maximum absolute steady-state Euler error is: ', \
    np.absolute(eul_ss).max()
print 'The capital and labor market clearing errors are: ', \
    (MCK_err_ss, MCL_err_ss)
print 'The steady-state distribution of capital is:'
print b_ss
print 'The steady-state distribution of composite consumption is:'
print c_tilde_ss
print 'The steady-state distribution of goods consumption is:'
print c_ss
print 'The steady-state interest rate and wage:'
print np.array([r_ss, w_ss])
print 'Steady-state consumption good prices and composite price are:'
print p_c_ss, p_tilde_ss
print 'Aggregate output, capital stock and consumption for each industry/consumption good are:'
print np.array([[X_ss], [K_ss], [C_ss]])
# Resource constraint check: output minus consumption demand minus
# replacement investment should be ~0 in each industry.
RCdiff_ss = X_ss - (np.dot(np.reshape(C_ss,(1,I)),pi)) - (np.dot(delta*K_ss,xi))
print 'The difference in the resource constraints are: ', RCdiff_ss
# Print SS computation time, broken into days/hours/minutes/seconds.
if ss_time < 60: # seconds
    secs = round(ss_time, 3)
    print 'SS computation time: ', secs, ' sec'
elif ss_time >= 60 and ss_time < 3600: # minutes
    mins = int(ss_time / 60)
    secs = round(((ss_time / 60) - mins) * 60, 1)
    print 'SS computation time: ', mins, ' min, ', secs, ' sec'
elif ss_time >= 3600 and ss_time < 86400: # hours
    hrs = int(ss_time / 3600)
    mins = int(((ss_time / 3600) - hrs) * 60)
    # NOTE(review): the seconds remainder looks wrong for this branch --
    # (ss_time/60 - mins)*60 mixes total minutes with the sub-hour minute
    # count; the remainder should presumably be ss_time % 60. Confirm.
    secs = round(((ss_time / 60) - mins) * 60, 1)
    print 'SS computation time: ', hrs, ' hrs, ', mins, ' min, ', secs, ' sec'
elif ss_time >= 86400: # days
    days = int(ss_time / 86400)
    hrs = int(((ss_time / 86400) - days) * 24)
    mins = int(((ss_time / 3600) - hrs) * 60)
    # NOTE(review): same suspect minutes/seconds remainder math as above.
    secs = round(((ss_time / 60) - mins) * 60, 1)
    print 'SS computation time: ', days, ' days,', hrs, ' hrs, ', mins, ' min, ', secs, ' sec'
'''
--------------------------------------------------------------------
Compute the equilibrium time path by TPI
--------------------------------------------------------------------
Gamma1 = [S-1,] vector, initial period savings distribution
rpath_init = [T+S-1,] vector, initial guess for the time path of
the interest rate
r1 = scalar > 0, guess for period 1 value of r
cc_r = scalar, parabola coefficient for rpath_init
bb_r = scalar, parabola coefficient for rpath_init
aa_r = scalar, parabola coefficient for rpath_init
wpath_init = [T+S-1,] vector, initial guess for the time path of
the wage
w1 = scalar > 0, guess for period 1 value of w
cc_w = scalar, parabola coefficient for wpath_init
bb_w = scalar, parabola coefficient for wpath_init
aa_w = scalar, parabola coefficient for wpath_init
tp_params = length 11 tuple, parameters to pass into TP function:
(S, T, alpha_path, beta, sigma, r_ss, w_ss, tp_tol)
alpha_path = [I,T+S-2] matrix, consumption good shares in each
period along time path
c_bar_path = [I,T+S-2] matrix, minimum consumption amounts in each
period along time path
A_path = [M,T+S-2] matrix, TFP for each industry in each
period along time path
gamma_path = [M,T+S-2] matrix, capital's share of output for each industry in each
period along time path
epsilon_path = [M,T+S-2] matrix, elasticity of substitution for each industry in each
period along time path
delta_path = [M,T+S-2] matrix, physical depreciation for each industry in each
period along time path
r_path = [T+S-2,] vector, equilibrium time path of the interest
rate
w_path = [T+S-2,] vector, equilibrium time path of the wage
pm_path = [M, T+S-2] matrix, equilibrium time path of industry
output prices
pc_path = [I, T+S-2] matrix, equilibrium time path of consumption
good prices
p_tilde_path = [T+S-2,] vector, equilibrium time path of the
composite good price
b_path = [S-1, T+S-2] matrix, equilibrium time path of the
distribution of savings. Period 1 is the initial
exogenous distribution
c_tilde_path = [S, T+S-2] matrix, equilibrium time path of the
distribution of composite good consumption
c_path = [S, T+S-2, I] array, equilibrium time path of the
distribution of individual consumption goods
eul_path = [S-1, T+S-2] matrix, equilibrium time path of the
euler errors associated with the distribution of
savings. Period 1 is a column of zeros
C_path = [I, T+S-2] matrix, equilibrium time path of total
demand for each consumption good
X_path = [M, T+S-2] matrix, equilibrium time path of total
output from each industry
K_path = [M, T+S-2] matrix, equilibrium time path of capital
demand for each industry
L_path = [M, T+S-2] matrix, equilibrium time path of labor
demand for each industry
Inv_path = [M,T+S-2] matrix, equilibrium time path for investment
demand from each industry
X_c_path     = [M,T+S-2] matrix, equilibrium time path for demand
               for output from each industry from consumption demand
X_inv_path   = [M,T+S-2] matrix, equilibrium time path for demand
               for output from each industry from investment demand
MCKerr_path = [T+S-2,] vector, equilibrium time path of capital
market clearing errors
MCLerr_path = [T+S-2,] vector, equilibrium time path of labor market
clearing errors
tpi_time = scalar, number of seconds to solve for transition path
ResmDiff = [M, T-1] matrix, errors in the resource constraint
from period 1 to T-1. We don't use T because we are
missing one individual's consumption in that period
--------------------------------------------------------------------
'''
# Solve for the equilibrium transition path (TPI) when requested.
if tp_solve == True:
    print 'BEGIN EQUILIBRIUM TIME PATH COMPUTATION'
    #Gamma1 = b_ss
    # Start the economy slightly below the steady-state savings distribution.
    Gamma1 = 0.95 * b_ss
    # Make sure initial savings distr. is feasible (sum of b_{s}>0)
    if Gamma1.sum() <= 0:
        print 'Initial savings distribution is not feasible (sum of b_{s}<=0)'
    else:
        # Choose initial guesses of path of interest rate and wage.
        # Use parabola specification aa*x^2 + bb*x + cc
        # The parabola runs from r1 at t=0 to r_ss at t=T-S, then is held flat.
        # rpath_init = r_ss * np.ones(T+S-1)
        rpath_init = np.zeros(T+S-1)
        r1 = 1.02 * r_ss
        cc_r = r1
        bb_r = - 2 * (r1 - r_ss) / (T - S)
        aa_r = -bb_r / (2 * (T - S))
        rpath_init[:T-S+1] = (aa_r * (np.arange(0, T-S+1) ** 2) +
            (bb_r * np.arange(0, T-S+1)) + cc_r)
        rpath_init[T-S+1:] = r_ss
        #rpath_init[:] = r_ss
        # Analogous parabolic guess for the wage path, starting below w_ss.
        wpath_init = np.zeros(T+S-1)
        w1 = 0.98 * w_ss
        cc_w = w1
        bb_w = - 2 * (w1 - w_ss) / (T - S)
        aa_w = -bb_w / (2 * (T - S))
        wpath_init[:T-S+1] = (aa_w * (np.arange(0, T-S+1) ** 2) +
            (bb_w * np.arange(0, T-S+1)) + cc_w)
        wpath_init[T-S+1:] = w_ss
        #wpath_init[:] = w_ss

        # Solve for time path
        # Tile arrays of time path parameters so easy to handle in
        # TP functions
        alpha_path = np.tile(np.reshape(alpha,(I,1)),(1,len(rpath_init)))
        c_bar_path = np.tile(np.reshape(c_bar,(I,1)),(1,len(rpath_init)))
        A_path = np.tile(np.reshape(A,(M,1)),(1,len(rpath_init)))
        gamma_path = np.tile(np.reshape(gamma,(M,1)),(1,len(rpath_init)))
        epsilon_path = np.tile(np.reshape(epsilon,(M,1)),(1,len(rpath_init)))
        delta_path = np.tile(np.reshape(delta,(M,1)),(1,len(rpath_init)))
        tp_params = (S, T, alpha_path, beta, sigma, r_ss, w_ss, tp_tol)
        # Stack the first T periods of the r and w guesses into one vector
        # for fsolve; the solution comes back in the same layout.
        guesses = np.append(rpath_init[:T], wpath_init[:T])
        start_time = time.clock()
        solutions = opt.fsolve(tpf.TP_fsolve, guesses, args=(tp_params, K_ss, X_ss,
            Gamma1, c_bar_path, A_path, gamma_path, epsilon_path, delta_path, xi, pi, I, M, S, n,
            tp_graphs), xtol=tp_tol, col_deriv=1)
        #solutions = tpf.TP_fsolve(guesses, tp_params, K_ss, X_ss,
        #    Gamma1, c_bar_path, A_path, gamma_path, epsilon_path, delta_path, xi, pi, I, M, S, n,
        #    tp_graphs)
        tpi_time = time.clock() - start_time
        rpath = solutions[:T].reshape(T)
        wpath = solutions[T:].reshape(T)
        # run one iteration of TP with fsolve solution to get other output
        tp_params = (S, T, alpha_path, beta, sigma, r_ss, w_ss, tp_tol)
        (r_path, w_path, pc_path, p_tilde_path, b_path, c_tilde_path, c_path,
            eul_path, C_path, X_path, K_path, L_path,
            MCKerr_path, MCLerr_path, RCdiff_path) = \
            tpf.TP(tp_params, rpath, wpath, K_ss, X_ss,
            Gamma1, c_bar_path, A_path, gamma_path, epsilon_path, delta_path, xi, pi, I,
            M, S, n, tp_graphs)
        # Print diagnostics
        print 'The max. absolute difference in the resource constraints are:'
        print np.absolute(RCdiff_path).max(axis=1)
        print 'The max. absolute error in the market clearing conditions are:'
        print np.absolute(MCKerr_path).max(), np.absolute(MCLerr_path).max()
# Print TPI computation time
if tpi_time < 60: # seconds
secs = round(tpi_time, 3)
print 'TPI computation time: ', secs, ' sec'
elif tpi_time >= | |
<filename>src/qecsim/cli.py
"""
This module contains the qecsim command line interface (CLI).
Components are integrated into the CLI via entries in the ``[options.entry_points]`` section of ``setup.cfg``. The
format of entries is ``<short_name> = <module_path>:<class_name>``. Codes, error models and decoders appear under the
keys ``qecsim.cli.run.codes``, ``qecsim.cli.run.error_models`` and ``qecsim.cli.run.decoders``, respectively.
Fault-tolerant compatible codes, error models and decoders appear under the keys ``qecsim.cli.run_ftp.codes``,
``qecsim.cli.run_ftp.error_models`` and ``qecsim.cli.run_ftp.decoders``, respectively.
For example, the 5-qubit code appears in ``setup.cfg`` as follows:
.. code-block:: text
[options.entry_points]
qecsim.cli.run.codes =
five_qubit = qecsim.models.basic:FiveQubitCode
Optionally, one-line descriptions for CLI help messages can be provided by decorating implementation classes with
:func:`qecsim.model.cli_description`. For example, see :class:`qecsim.models.basic.FiveQubitCode`.
"""
import ast
import inspect
import json
import logging
import re
import click
import pkg_resources
import qecsim
from qecsim import app
from qecsim import util
from qecsim.model import ATTR_CLI_DESCRIPTION
logger = logging.getLogger(__name__)
class _ConstructorParamType(click.ParamType):
    """
    Click parameter type accepting values formatted as ``name`` or ``name(<args>)``.
    """
    name = 'constructor'

    def __init__(self, constructors):
        """
        Initialise new constructor parameter type.

        :param constructors: Map of constructor names to constructor functions.
        :type constructors: dict of str to function
        """
        self._constructors = constructors

    def _sorted_names(self):
        # Constructor names in stable sorted order, for help and error messages.
        return sorted(self._constructors.keys())

    def get_metavar(self, param):
        """See ``click.ParamType.get_metavar``"""
        return '[{}]'.format('|'.join(self._sorted_names()))

    def get_missing_message(self, param):
        """See ``click.ParamType.get_missing_message``"""
        return '(choose from {})'.format(', '.join(self._sorted_names()))

    def convert(self, value, param, ctx):
        """
        Convert value to a model instance.

        The value must be formatted as ``name`` or ``name(<args>)``. The named
        constructor is resolved via the constructors map, ``<args>`` is parsed
        as a Python literal tuple, and the instance is built with
        ``constructor(*arguments)``.

        See ``click.ParamType.convert`` for more details.

        :param value: Parameter value.
        :type value: str
        :param param: Parameter.
        :type param: click.Parameter
        :param ctx: Context.
        :type ctx: click.Context
        :return: Model instance
        :rtype: object
        :raises BadParameter: if the value cannot be parsed or does not correspond to valid constructor or arguments.
        """
        parsed = re.fullmatch(r'''
            # match 'toric(3,3)' as {'constructor_name': 'toric', 'constructor_args': '3,3'}
            (?P<constructor_name>[\w.]+)  # capture constructor_name, e.g. 'toric'
            (?:\(\s*  # skip opening parenthesis and leading whitespace
            (?P<constructor_args>.*?)  # capture constructor_args, e.g. '3,3'
            ,?\s*\))?  # skip trailing comma, trailing whitespace and closing parenthesis
            ''', value, re.VERBOSE)
        if parsed is None:
            self.fail('{} (format as name(<args>))'.format(value), param, ctx)
        # resolve constructor name (EAFP: a missing key means an unknown model)
        constructor_name = parsed.group('constructor_name')
        try:
            constructor = self._constructors[constructor_name]
        except KeyError:
            self.fail('{} (choose from {})'.format(value, ', '.join(self._sorted_names())), param, ctx)
        # parse arguments, if any, as a literal tuple
        constructor_args = parsed.group('constructor_args')
        if constructor_args:
            try:
                # append a comma so a single argument still evaluates to a tuple
                arguments = ast.literal_eval(constructor_args + ',')
            except Exception as ex:
                self.fail('{} (failed to parse arguments "{}")'.format(value, ex), param, ctx)
        else:
            arguments = ()
        # instantiate the model
        try:
            return constructor(*arguments)
        except Exception as ex:
            self.fail('{} (failed to construct "{}")'.format(value, ex), param, ctx)

    def __repr__(self):
        return '{}({!r})'.format(type(self).__name__, self._constructors)
# custom argument decorators
def _model_argument(model_type):
    """
    Model argument function decorator.

    Applied to a run command, this adds a click argument of the given model
    type (code, error_model or decoder). Argument values and their model
    constructors are loaded from setuptools entry-points under the key
    ``qecsim.cli.<run-command>.<model-type>s``, e.g. ``qecsim.cli.run_ftp.codes``
    with value ``["<model-name> = <model-package>:<model-class>", ...]``.

    The command's doc-string placeholder ``#<MODEL-TYPE>_PARAMETERS#``, e.g.
    ``#CODE_PARAMETERS#``, is replaced by a definition list of model names and
    the one-line descriptions given by :func:`qecsim.model.cli_description`.

    :param model_type: The model type, i.e code, error_model, or decoder.
    :type model_type: str
    :return: Model argument function decorator.
    :rtype: function
    """
    def _decorator(func):
        # resolve entry-points, e.g. {'five_qubit': FiveQubitCode, ...}
        ep_key = 'qecsim.cli.{}.{}s'.format(func.__name__, model_type)  # e.g. qecsim.cli.run_ftp.codes
        eps = sorted(pkg_resources.iter_entry_points(ep_key), key=lambda ep: ep.name)
        constructors = {ep.name: ep.load() for ep in eps}
        # attach the click argument
        func = click.argument(model_type, type=_ConstructorParamType(constructors),
                              metavar=model_type.upper())(func)
        # build (name, cli_description) pairs, e.g. [('five_qubit', '5-qubit'), ...]
        definitions = [(name, getattr(cls, ATTR_CLI_DESCRIPTION, '')) for name, cls in constructors.items()]
        # render the definition list into the command's doc-string
        formatter = click.HelpFormatter()
        formatter.indent()
        if definitions:
            formatter.write_dl(definitions)
        placeholder = '#{}_PARAMETERS#'.format(model_type.upper())  # e.g. #CODE_PARAMETERS#
        func.__doc__ = inspect.getdoc(func).replace(placeholder, formatter.getvalue())
        return func
    return _decorator
# custom parameter validators
def _validate_error_probability(ctx, param, value):
if not (0 <= value <= 1):
raise click.BadParameter('{} is not in [0.0, 1.0]'.format(value), ctx, param)
return value
def _validate_error_probabilities(ctx, param, value):
for v in value:
_validate_error_probability(ctx, param, v)
return value
def _validate_measurement_error_probability(ctx, param, value):
if not (value is None or (0 <= value <= 1)):
raise click.BadParameter('{} is not in [0.0, 1.0]'.format(value), ctx, param)
return value
@click.group()
@click.version_option(version=qecsim.__version__, prog_name='qecsim')
def cli():
    """
    qecsim - quantum error correction simulator using stabilizer codes.
    See qecsim COMMAND --help for command-specific help.
    """
    # Root command group: initialise logging once for all sub-commands.
    util.init_logging()
@cli.command()
@_model_argument('code')
@_model_argument('error_model')
@_model_argument('decoder')
@click.argument('error_probabilities', required=True, nargs=-1, type=float, metavar='ERROR_PROBABILITY...',
                callback=_validate_error_probabilities)
@click.option('--max-failures', '-f', type=click.IntRange(min=1), metavar='INT',
              help='Maximum number of failures for each probability.')
@click.option('--max-runs', '-r', type=click.IntRange(min=1), metavar='INT',
              help='Maximum number of runs for each probability. [default: 1 if max-failures unspecified]')
@click.option('--output', '-o', default='-', type=click.Path(allow_dash=True), metavar='FILENAME',
              help='Output file. (Writes to log if file exists).')
@click.option('--random-seed', '-s', type=click.IntRange(min=0), metavar='INT',
              help='Random seed for qubit error generation. (Re-applied for each probability).')
def run(code, error_model, decoder, error_probabilities, max_failures, max_runs, output, random_seed):
    """
    Simulate quantum error correction.
    Arguments:
    \b
    CODE Stabilizer code in format name(<args>)
    #CODE_PARAMETERS#
    \b
    ERROR_MODEL Error model in format name(<args>)
    #ERROR_MODEL_PARAMETERS#
    \b
    DECODER Decoder in format name(<args>)
    #DECODER_PARAMETERS#
    \b
    ERROR_PROBABILITY... One or more probabilities as FLOAT in [0.0, 1.0]
    Examples:
    qecsim run -r10 "five_qubit" "generic.depolarizing" "generic.naive" 0.1
    qecsim run -f5 -r50 -s13 "steane" "generic.phase_flip" "generic.naive" 0.1
    qecsim run -r20 "planar(7,7)" "generic.bit_flip" "planar.mps(6)" 0.101 0.102 0.103
    qecsim run -r10 "color666(7)" "generic.bit_flip" "color666.mps(16)" 0.09 0.10
    qecsim run -o"data.json" -f9 "toric(3,3)" "generic.bit_flip" "toric.mwpm" 0.1
    """
    # validate the code before starting any simulation work
    code.validate()
    logger.info('RUN STARTING: code={}, error_model={}, decoder={}, error_probabilities={}, max_failures={}, '
                'max_runs={}, random_seed={}.'
                .format(code, error_model, decoder, error_probabilities, max_failures, max_runs, random_seed))
    # one batch of runs per requested error probability
    data = [app.run(code, error_model, decoder, error_probability,
                    max_runs=max_runs, max_failures=max_failures, random_seed=random_seed)
            for error_probability in error_probabilities]
    logger.info('RUN COMPLETE: data={}'.format(data))
    # write results as JSON to the output file (or stdout for '-')
    _write_data(output, data)
@cli.command()
@_model_argument('code')
@click.argument('time_steps', type=click.IntRange(min=1), metavar='TIME_STEPS')
@_model_argument('error_model')
@_model_argument('decoder')
@click.argument('error_probabilities', required=True, nargs=-1, type=float, metavar='ERROR_PROBABILITY...',
                callback=_validate_error_probabilities)
@click.option('--max-failures', '-f', type=click.IntRange(min=1), metavar='INT',
              help='Maximum number of failures for each probability.')
@click.option('--max-runs', '-r', type=click.IntRange(min=1), metavar='INT',
              help='Maximum number of runs for each probability. [default: 1 if max_failures unspecified]')
@click.option('--measurement-error-probability', '-m', type=float, default=None,
              callback=_validate_measurement_error_probability,
              help='Measurement error probability [default: 0.0 if TIME_STEPS == 1 else ERROR_PROBABILITY].')
@click.option('--output', '-o', default='-', type=click.Path(allow_dash=True), metavar='FILENAME',
              help='Output file. (Writes to log if file exists).')
@click.option('--random-seed', '-s', type=click.IntRange(min=0), metavar='INT',
              help='Random seed for qubit error generation. (Re-applied for each probability).')
def run_ftp(code, time_steps, error_model, decoder, error_probabilities, max_failures, max_runs,
            measurement_error_probability, output, random_seed):
    """
    Simulate fault-tolerant (time-periodic) quantum error correction.
    Arguments:
    \b
    CODE Stabilizer code in format name(<args>)
    #CODE_PARAMETERS#
    \b
    TIME_STEPS Number of time steps as INT >= 1
    \b
    ERROR_MODEL Error model in format name(<args>)
    #ERROR_MODEL_PARAMETERS#
    \b
    DECODER Decoder in format name(<args>)
    #DECODER_PARAMETERS#
    \b
    ERROR_PROBABILITY... One or more probabilities as FLOAT in [0.0, 1.0]
    Examples:
    qecsim run-ftp -r5 "rotated_planar(13,13)" 13 "generic.bit_phase_flip" "rotated_planar.smwpm" 0.1 0.2
    qecsim run-ftp -r5 -m0.05 "rotated_toric(6,6)" 4 "generic.bit_phase_flip" "rotated_toric.smwpm" 0.1
    qecsim run-ftp -r5 -o"data.json" "rotated_planar(7,7)" 7 "generic.depolarizing" "rotated_planar.smwpm" 0.1
    """
    # validate the code before starting any simulation work
    code.validate()
    logger.info('RUN STARTING: code={}, time_steps={}, error_model={}, decoder={}, error_probabilities={}, '
                'max_failures={}, max_runs={}, measurement_error_probability={}, random_seed={}.'
                .format(code, time_steps, error_model, decoder, error_probabilities, max_failures, max_runs,
                        measurement_error_probability, random_seed))
    # one batch of fault-tolerant runs per requested error probability
    data = [app.run_ftp(code, time_steps, error_model, decoder, error_probability,
                        measurement_error_probability=measurement_error_probability,
                        max_runs=max_runs, max_failures=max_failures, random_seed=random_seed)
            for error_probability in error_probabilities]
    logger.info('RUN COMPLETE: data={}'.format(data))
    # write results as JSON to the output file (or stdout for '-')
    _write_data(output, data)
@cli.command()
@click.argument('data_file', required=True, nargs=-1, type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.option('--output', '-o', default='-', type=click.Path(allow_dash=True), metavar='FILENAME',
              help='Output file. (Writes to log if file exists).')
def merge(data_file, output):
    """
    Merge simulation data files.
    Arguments:
    \b
    DATA_FILE... One or more data files
    Examples:
    qecsim merge "data1.json" "data2.json" "data3.json"
    qecsim merge -o"merged.json" data*.json
    """
    def _read_json_file(path):
        # Parse one JSON data file; abort the command cleanly on bad JSON.
        try:
            with open(path, 'r') as f:
                return json.load(f)
        except ValueError as ex:
            raise click.ClickException('{} (failed to parse JSON data "{}")'.format(path, ex))

    # INPUT: load every data file
    input_data = [_read_json_file(path) for path in data_file]
    # MERGE
    merged = app.merge(*input_data)
    # OUTPUT
    _write_data(output, merged)
def _write_data(output, data):
"""
Write data in JSON format (sorted keys) to the given output.
Note: If the data cannot be written to the given output, for example if the file already exists, then the data is
written to stderr and an exception is raised.
:param output: Output file path or '-' for | |
desc
class MOPSFeatureDescriptor(FeatureDescriptor):
    def describeFeatures(self, image, keypoints):
        '''
        Compute a simplified MOPS descriptor: an 8x8 intensity patch sampled
        from a 40x40 window around each keypoint, rotated to the keypoint's
        orientation and normalised to zero mean and unit variance.

        Input:
            image -- BGR image with values between [0, 255]
            keypoints -- the detected features, we have to compute the feature
            descriptors at the specified coordinates
        Output:
            desc -- K x W^2 numpy array, where K is the number of keypoints
                    and W is the window size
        '''
        windowSize = 8
        # work on a smoothed grayscale copy with values in [0, 1]
        image = image.astype(np.float32) / 255.
        gray = ndimage.gaussian_filter(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), 0.5)
        desc = np.zeros((len(keypoints), windowSize * windowSize))
        for idx, keypoint in enumerate(keypoints):
            col, row = int(keypoint.pt[0]), int(keypoint.pt[1])
            # Compose the 2D affine map for cv2.warpAffine:
            # move keypoint to origin -> undo orientation -> scale 40px -> 8px
            # -> recentre into the 8x8 patch.
            to_origin = transformations.get_trans_mx(np.array([-col, -row, 0]))
            undo_rotation = transformations.get_rot_mx(0, 0, -keypoint.angle * np.pi / 180)
            shrink = transformations.get_scale_mx(0.2, 0.2, 1)
            recentre = transformations.get_trans_mx(np.array([4, 4, 0]))
            full_mx = recentre @ undo_rotation @ shrink @ to_origin
            # keep only the 2x3 part that warpAffine expects
            transMx = np.zeros((2, 3))
            transMx[:, 0:2] = full_mx[:2, :2]
            transMx[:, 2] = full_mx[:2, 3]
            patch = cv2.warpAffine(gray, transMx,
                                   (windowSize, windowSize), flags=cv2.INTER_LINEAR)
            # normalise to zero mean / unit variance; near-constant patches
            # (std below tolerance) become the zero vector
            spread = np.std(patch)
            if spread <= 1e-5:
                desc[idx] = np.zeros(windowSize * windowSize)
            else:
                desc[idx] = ((patch - np.mean(patch)) / spread).flatten()
        return desc
# NOTE: base class was KeypointDetector — a copy/paste error, since this class
# only implements describeFeatures; changed to FeatureDescriptor for
# consistency with the other descriptor classes in this file.
class ORBFeatureDescriptor(FeatureDescriptor):
    def describeFeatures(self, image, keypoints):
        '''
        Compute ORB descriptors for the given keypoints using OpenCV.

        Input:
            image -- BGR image with values between [0, 255]
            keypoints -- the detected features, we have to compute the feature
            descriptors at the specified coordinates
        Output:
            Descriptor numpy array, dimensions:
                keypoint number x feature descriptor dimension
        '''
        descriptor = cv2.ORB_create()
        kps, desc = descriptor.compute(image, keypoints)
        if desc is None:
            # No descriptors computed: return an empty array with ORB's native
            # descriptor width (32 bytes), not 128, so it can be stacked with
            # real ORB descriptors. (Was np.zeros((0, 128)).)
            desc = np.zeros((0, 32))
        return desc
# Compute Custom descriptors (extra credit)
class CustomFeatureDescriptor(FeatureDescriptor):
    def describeFeatures(self, image, keypoints):
        '''
        Placeholder for a custom (extra-credit) feature descriptor.

        Input:
            image -- BGR image with values between [0, 255]
            keypoints -- the detected features, we have to compute the feature
            descriptors at the specified coordinates
        Output:
            Descriptor numpy array, dimensions:
                keypoint number x feature descriptor dimension
        '''
        raise NotImplementedError('NOT IMPLEMENTED')
## Feature matchers ############################################################
class FeatureMatcher(object):
    def matchFeatures(self, desc1, desc2):
        '''
        Match features between two descriptor sets (implemented by subclasses).

        Input:
            desc1 -- feature descriptors of image 1, numpy array of shape
                     (number of key points) x (descriptor dimension)
            desc2 -- feature descriptors of image 2, same layout as desc1
        Output:
            feature matches: a list of cv2.DMatch objects with
                queryIdx: index of the feature in the first image
                trainIdx: index of the feature in the second image
                distance: distance between the two features
        '''
        raise NotImplementedError

    @staticmethod
    def evaluateMatch(features1, features2, matches, h):
        # Score a match set against a ground-truth homography h: the mean
        # Euclidean distance between each matched point in image 2 and the
        # image-1 point mapped through h. Returns 0 for an empty match set.
        total = 0
        count = 0
        for match in matches:
            predicted = FeatureMatcher.applyHomography(features1[match.queryIdx].pt, h)
            actual = np.array(features2[match.trainIdx].pt)
            total += np.linalg.norm(predicted - actual)
            count += 1
        return total / count if count != 0 else 0

    @staticmethod
    def applyHomography(pt, h):
        # Homogeneous transform: [x', y', w]^T = H [x, y, 1]^T, then divide
        # by w. h holds the 3x3 homography in row-major order.
        x, y = pt
        w = h[6] * x + h[7] * y + h[8]
        return np.array([(h[0] * x + h[1] * y + h[2]) / w,
                         (h[3] * x + h[4] * y + h[5]) / w])
"""class SSDFeatureMatcher(FeatureMatcher):
def matchFeatures(self, desc1, desc2):
'''
Input:
desc1 -- the feature descriptors of image 1 stored in a numpy array,
dimensions: rows (number of key points) x
columns (dimension of the feature descriptor)
desc2 -- the feature descriptors of image 2 stored in a numpy array,
dimensions: rows (number of key points) x
columns (dimension of the feature descriptor)
Output:
features matches: a list of cv2.DMatch objects
How to set attributes:
queryIdx: The index of the feature in the first image
trainIdx: The index of the feature in the second image
distance: The distance between the two features
'''
matches = []
# feature count = n
assert desc1.ndim == 2
# feature count = m
assert desc2.ndim == 2
# the two features should have the type
assert desc1.shape[1] == desc2.shape[1]
if desc1.shape[0] == 0 or desc2.shape[0] == 0:
return []
# TODO 7: Perform simple feature matching. This uses the SSD
# distance between two feature vectors, and matches a feature in
# the first image with the closest feature in the second image.
# Note: multiple features from the first image may match the same
# feature in the second image.
# TODO-BLOCK-BEGIN
for i in range(len(desc1)):
dist = np.array([desc1[i]])
temp = scipy.spatial.distance.cdist(dist, desc2)
sort_distance = sorted(temp[0])
min = list(temp[0]).index(sort_distance[0])
DM = cv2.DMatch()
DM.queryIdx = i
DM.trainIdx = min
DM.distance = temp[0][min]
matches.append(DM)
# raise Exception("TODO in features.py not implemented")
# TODO-BLOCK-END
return matches
"""
class SSDFeatureMatcher(FeatureMatcher):
    def matchFeatures(self, desc1, desc2):
        '''
        Match each descriptor in desc1 to its nearest neighbour in desc2.

        Input:
            desc1 -- the feature descriptors of image 1 stored in a numpy array,
                dimensions: rows (number of key points) x
                columns (dimension of the feature descriptor)
            desc2 -- the feature descriptors of image 2 stored in a numpy array,
                dimensions: rows (number of key points) x
                columns (dimension of the feature descriptor)
        Output:
            features matches: a list of cv2.DMatch objects
                How to set attributes:
                    queryIdx: The index of the feature in the first image
                    trainIdx: The index of the feature in the second image
                    distance: The distance between the two features
        '''
        # both inputs must be 2-D with matching descriptor dimension
        assert desc1.ndim == 2
        assert desc2.ndim == 2
        assert desc1.shape[1] == desc2.shape[1]
        if desc1.shape[0] == 0 or desc2.shape[0] == 0:
            return []
        # Pairwise distances between all descriptors; each query feature is
        # matched to its closest feature in the second image. Multiple query
        # features may match the same train feature.
        distances = scipy.spatial.distance.cdist(desc1, desc2)
        matches = []
        for query_idx, train_idx in enumerate(np.argmin(distances, axis=1)):
            match = cv2.DMatch()
            match.queryIdx = query_idx
            match.trainIdx = int(train_idx)
            match.distance = distances[query_idx, train_idx]
            matches.append(match)
        return matches
class RatioFeatureMatcher(FeatureMatcher):
def matchFeatures(self, desc1, desc2):
'''
Input:
desc1 -- the feature descriptors of image 1 stored in a numpy array,
dimensions: rows (number of key points) x
columns (dimension of the feature descriptor)
desc2 -- the feature descriptors of image 2 stored in a numpy array,
dimensions: rows (number of key points) x
columns (dimension of the feature descriptor)
Output:
features matches: a list of cv2.DMatch objects
How to set attributes:
queryIdx: The index of the feature in the first image
trainIdx: The index of the feature in the second image
distance: The ratio test score
'''
matches = []
# feature count = n
assert desc1.ndim == 2
# feature count = m
assert desc2.ndim == 2
# the two features should have the type
assert desc1.shape[1] == desc2.shape[1]
if desc1.shape[0] == 0 or desc2.shape[0] == 0:
return []
# TODO 8: Perform ratio feature matching.
# This uses the ratio of the SSD distance of the two best matches
# and matches a feature in the first image with the closest feature in the
# second image.
# Note: multiple features from the first image may match the same
# feature in the second | |
<gh_stars>0
import os
import numpy as np
import h5py
from audiomate.feeding import PartitioningFeatureIterator
from audiomate.feeding import partitioning
from audiomate import containers
import pytest
class TestPartitioningContainerLoader:
    """Tests for partitioning.PartitioningContainerLoader."""

    def test_scan_computes_correct_size_for_one_container(self, tmpdir):
        # _scan reports the byte-size of every utterance in a single container.
        c1 = containers.Container(os.path.join(tmpdir.strpath, 'c1.h5'))
        c1.open()
        c1.set('utt-1', np.random.random((6, 6)).astype(np.float32))
        c1.set('utt-2', np.random.random((2, 6)).astype(np.float32))
        c1.set('utt-3', np.random.random((9, 6)).astype(np.float32))
        loader = partitioning.PartitioningContainerLoader(['utt-1', 'utt-2', 'utt-3'],
                                                          c1, '250', shuffle=True, seed=88)
        sizes = loader._scan()
        assert sizes == {
            'utt-1': 6 * 6 * np.dtype(np.float32).itemsize,
            'utt-2': 2 * 6 * np.dtype(np.float32).itemsize,
            'utt-3': 9 * 6 * np.dtype(np.float32).itemsize
        }

    def test_scan_computes_correct_size_for_multiple_containers(self, tmpdir):
        # _scan sums the byte-size of an utterance over all containers.
        c1 = containers.Container(os.path.join(tmpdir.strpath, 'c1.h5'))
        c2 = containers.Container(os.path.join(tmpdir.strpath, 'c2.h5'))
        c3 = containers.Container(os.path.join(tmpdir.strpath, 'c3.h5'))
        c1.open()
        c1.set('utt-1', np.random.random((6, 6)).astype(np.float32))
        c1.set('utt-2', np.random.random((2, 6)).astype(np.float32))
        c1.set('utt-3', np.random.random((9, 6)).astype(np.float32))
        c2.open()
        c2.set('utt-1', np.random.random((2, 6)).astype(np.float32))
        c2.set('utt-2', np.random.random((1, 6)).astype(np.float32))
        c2.set('utt-3', np.random.random((4, 6)).astype(np.float32))
        c3.open()
        c3.set('utt-1', np.random.random((1, 6)).astype(np.float32))
        c3.set('utt-2', np.random.random((3, 6)).astype(np.float32))
        c3.set('utt-3', np.random.random((8, 6)).astype(np.float32))
        loader = partitioning.PartitioningContainerLoader(['utt-1', 'utt-2', 'utt-3'],
                                                          [c1, c2, c3], '1000', shuffle=True, seed=88)
        sizes = loader._scan()
        assert sizes == {
            'utt-1': (6 + 2 + 1) * 6 * np.dtype(np.float32).itemsize,
            'utt-2': (2 + 1 + 3) * 6 * np.dtype(np.float32).itemsize,
            'utt-3': (9 + 4 + 8) * 6 * np.dtype(np.float32).itemsize
        }

    def test_get_lengths_returns_correct_lengths_for_multiple_containers(self, tmpdir):
        # _get_all_lengths returns per-utterance frame counts, one per container.
        c1 = containers.Container(os.path.join(tmpdir.strpath, 'c1.h5'))
        c2 = containers.Container(os.path.join(tmpdir.strpath, 'c2.h5'))
        c3 = containers.Container(os.path.join(tmpdir.strpath, 'c3.h5'))
        c1.open()
        c1.set('utt-1', np.random.random((6, 6)).astype(np.float32))
        c1.set('utt-2', np.random.random((2, 6)).astype(np.float32))
        c1.set('utt-3', np.random.random((9, 6)).astype(np.float32))
        c2.open()
        c2.set('utt-1', np.random.random((2, 6)).astype(np.float32))
        c2.set('utt-2', np.random.random((1, 6)).astype(np.float32))
        c2.set('utt-3', np.random.random((4, 6)).astype(np.float32))
        c3.open()
        c3.set('utt-1', np.random.random((1, 6)).astype(np.float32))
        c3.set('utt-2', np.random.random((3, 6)).astype(np.float32))
        c3.set('utt-3', np.random.random((8, 6)).astype(np.float32))
        loader = partitioning.PartitioningContainerLoader(['utt-1', 'utt-2', 'utt-3'],
                                                          [c1, c2, c3], '1000', shuffle=True, seed=88)
        lengths = loader._get_all_lengths()
        assert len(lengths) == 3
        assert lengths['utt-1'] == (6, 2, 1)
        assert lengths['utt-2'] == (2, 1, 3)
        assert lengths['utt-3'] == (9, 4, 8)

    def test_raises_error_if_utt_is_missing_in_container(self, tmpdir):
        # Requesting an utterance absent from the container must fail fast.
        c1 = containers.Container(os.path.join(tmpdir.strpath, 'c1.h5'))
        c1.open()
        c1.set('utt-1', np.random.random((6, 6)).astype(np.float32))
        c1.set('utt-3', np.random.random((9, 6)).astype(np.float32))
        with pytest.raises(ValueError):
            partitioning.PartitioningContainerLoader(['utt-1', 'utt-2', 'utt-3'],
                                                     c1, '250', shuffle=True, seed=88)

    def test_raises_error_if_utt_is_to_large_for_partition_size(self, tmpdir):
        # An utterance larger than the partition size can never be loaded.
        c1 = containers.Container(os.path.join(tmpdir.strpath, 'c1.h5'))
        c1.open()
        c1.set('utt-1', np.random.random((6, 6)).astype(np.float32))
        # Needs 264
        c1.set('utt-3', np.random.random((11, 6)).astype(np.float32))
        with pytest.raises(ValueError):
            partitioning.PartitioningContainerLoader(['utt-1', 'utt-3'],
                                                     c1, '250', shuffle=True, seed=88)

    def test_reload_creates_correct_partitions(self, tmpdir):
        # Without shuffling, utterances are greedily packed in list order.
        c1 = containers.Container(os.path.join(tmpdir.strpath, 'c1.h5'))
        c1.open()
        c1.set('utt-1', np.random.random((6, 6)).astype(np.float32))
        c1.set('utt-2', np.random.random((2, 6)).astype(np.float32))
        c1.set('utt-3', np.random.random((9, 6)).astype(np.float32))
        c1.set('utt-4', np.random.random((2, 6)).astype(np.float32))
        c1.set('utt-5', np.random.random((5, 6)).astype(np.float32))
        loader = partitioning.PartitioningContainerLoader(['utt-1', 'utt-2', 'utt-3', 'utt-4', 'utt-5'],
                                                          c1, '250', shuffle=False)
        assert len(loader.partitions) == 3
        assert loader.partitions[0].utt_ids == ['utt-1', 'utt-2']
        assert loader.partitions[0].utt_lengths == [(6,), (2,)]
        assert loader.partitions[0].size == 192
        assert loader.partitions[1].utt_ids == ['utt-3']
        assert loader.partitions[1].utt_lengths == [(9,)]
        assert loader.partitions[1].size == 216
        assert loader.partitions[2].utt_ids == ['utt-4', 'utt-5']
        assert loader.partitions[2].utt_lengths == [(2,), (5,)]
        assert loader.partitions[2].size == 168

    def test_reload_creates_no_partition_with_no_utterances(self, tmpdir):
        # An empty utterance list yields no partitions at all.
        c1 = containers.Container(os.path.join(tmpdir.strpath, 'c1.h5'))
        c1.open()
        loader = partitioning.PartitioningContainerLoader([], c1, '250', shuffle=False)
        assert len(loader.partitions) == 0

    def test_reload_creates_different_partitions_on_second_run(self, tmpdir):
        # With shuffling enabled, reload() must produce a different layout:
        # either a different partition count or a different assignment of
        # utterances to partitions.
        c1 = containers.Container(os.path.join(tmpdir.strpath, 'c1.h5'))
        c1.open()
        c1.set('utt-1', np.random.random((6, 6)).astype(np.float32))
        c1.set('utt-2', np.random.random((2, 6)).astype(np.float32))
        c1.set('utt-3', np.random.random((9, 6)).astype(np.float32))
        c1.set('utt-4', np.random.random((2, 6)).astype(np.float32))
        c1.set('utt-5', np.random.random((5, 6)).astype(np.float32))
        loader = partitioning.PartitioningContainerLoader(['utt-1', 'utt-2', 'utt-3', 'utt-4', 'utt-5'],
                                                          c1, '250', shuffle=True, seed=100)
        partitions_one = loader.partitions
        loader.reload()
        partitions_two = loader.partitions
        # BUGFIX: the previous version set len_changed = (len equal) and did a
        # vacuous `assert True` when the partition counts were EQUAL — the
        # common case — so the test could never fail there. A differing count
        # already proves the layout changed; otherwise at least one partition
        # must hold a different utterance assignment.
        if len(partitions_one) != len(partitions_two):
            return
        assert any(x.utt_ids != y.utt_ids
                   for x, y in zip(partitions_one, partitions_two))

    def test_load_partition_data(self, tmpdir):
        # load_partition_data returns each partition's utterances in order,
        # with data matching what was stored in the container.
        c1 = containers.Container(os.path.join(tmpdir.strpath, 'c1.h5'))
        c1.open()
        utt_1_data = np.random.random((6, 6)).astype(np.float32)
        utt_2_data = np.random.random((2, 6)).astype(np.float32)
        utt_3_data = np.random.random((9, 6)).astype(np.float32)
        utt_4_data = np.random.random((2, 6)).astype(np.float32)
        utt_5_data = np.random.random((5, 6)).astype(np.float32)
        c1.set('utt-1', utt_1_data)
        c1.set('utt-2', utt_2_data)
        c1.set('utt-3', utt_3_data)
        c1.set('utt-4', utt_4_data)
        c1.set('utt-5', utt_5_data)
        loader = partitioning.PartitioningContainerLoader(['utt-1', 'utt-2', 'utt-3', 'utt-4', 'utt-5'],
                                                          c1, '250', shuffle=False)
        part_1 = loader.load_partition_data(0)
        assert part_1.info.utt_ids == ['utt-1', 'utt-2']
        assert np.allclose(part_1.utt_data[0], utt_1_data)
        assert np.allclose(part_1.utt_data[1], utt_2_data)
        part_2 = loader.load_partition_data(1)
        assert part_2.info.utt_ids == ['utt-3']
        assert np.allclose(part_2.utt_data[0], utt_3_data)
        part_3 = loader.load_partition_data(2)
        assert part_3.info.utt_ids == ['utt-4', 'utt-5']
        assert np.allclose(part_3.utt_data[0], utt_4_data)
        assert np.allclose(part_3.utt_data[1], utt_5_data)
class TestPartitionInfo:
    """Tests for partitioning.PartitionInfo.total_lengths()."""

    def test_total_length_for_single_container(self):
        # total_lengths sums per-utterance lengths for a single container.
        part_info = partitioning.PartitionInfo()
        part_info.utt_lengths = [(1,), (9,), (13,)]
        assert part_info.total_lengths() == (23,)

    def test_total_length_for_multiple_containers(self):
        # total_lengths sums element-wise across containers.
        part_info = partitioning.PartitionInfo()
        part_info.utt_lengths = [(1, 4), (9, 5), (13, 8)]
        assert part_info.total_lengths() == (23, 17)
class TestPartitioningFeatureIterator(object):
def test_next_emits_no_features_if_file_is_empty(self, tmpdir):
file_path = os.path.join(tmpdir.strpath, 'features.h5')
file = h5py.File(file_path, 'w')
features = tuple(PartitioningFeatureIterator(file, 120))
assert 0 == len(features)
def test_next_emits_no_features_if_data_set_is_empty(self, tmpdir):
file_path = os.path.join(tmpdir.strpath, 'features.h5')
file = h5py.File(file_path, 'w')
file.create_dataset('utt-1', data=np.array([]))
features = tuple(PartitioningFeatureIterator(file, 120))
assert 0 == len(features)
def test_next_emits_all_features_in_sequential_order(self, tmpdir):
ds1 = np.array([[0.1, 0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2, 0.2]])
ds2 = np.array([[0.3, 0.3, 0.3, 0.3, 0.3], [0.4, 0.4, 0.4, 0.4, 0.4], [0.5, 0.5, 0.5, 0.5, 0.5]])
file_path = os.path.join(tmpdir.strpath, 'features.h5')
file = h5py.File(file_path, 'w')
file.create_dataset('utt-1', data=ds1)
file.create_dataset('utt-2', data=ds2)
features = tuple(PartitioningFeatureIterator(file, 120, shuffle=False))
assert 5 == len(features)
self.assert_features_equal(('utt-1', 0, [0.1, 0.1, 0.1, 0.1, 0.1]), features[0])
self.assert_features_equal(('utt-1', 1, [0.2, 0.2, 0.2, 0.2, 0.2]), features[1])
self.assert_features_equal(('utt-2', 0, [0.3, 0.3, 0.3, 0.3, 0.3]), features[2])
self.assert_features_equal(('utt-2', 1, [0.4, 0.4, 0.4, 0.4, 0.4]), features[3])
self.assert_features_equal(('utt-2', 2, [0.5, 0.5, 0.5, 0.5, 0.5]), features[4])
    def test_next_emits_all_features_in_random_order(self, tmpdir):
        """With shuffle=True and a fixed seed, every frame is emitted exactly
        once in a deterministic shuffled order.

        NOTE: the expected ordering below is tied to seed=16; changing the
        seed or the iterator's shuffling implementation reorders these
        assertions.
        """
        ds1 = np.array([[0.1, 0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2, 0.2]])
        ds2 = np.array([[0.3, 0.3, 0.3, 0.3, 0.3], [0.4, 0.4, 0.4, 0.4, 0.4], [0.5, 0.5, 0.5, 0.5, 0.5]])
        ds3 = np.array([[0.6, 0.6, 0.6, 0.6, 0.6]])
        file_path = os.path.join(tmpdir.strpath, 'features.h5')
        file = h5py.File(file_path, 'w')
        file.create_dataset('utt-1', data=ds1)
        file.create_dataset('utt-2', data=ds2)
        file.create_dataset('utt-3', data=ds3)
        features = tuple(PartitioningFeatureIterator(file, 120, shuffle=True, seed=16))
        # 2 + 3 + 1 frames across the three datasets.
        assert 6 == len(features)
        self.assert_features_equal(('utt-1', 1, [0.2, 0.2, 0.2, 0.2, 0.2]), features[0])
        self.assert_features_equal(('utt-3', 0, [0.6, 0.6, 0.6, 0.6, 0.6]), features[1])
        self.assert_features_equal(('utt-1', 0, [0.1, 0.1, 0.1, 0.1, 0.1]), features[2])
        self.assert_features_equal(('utt-2', 2, [0.5, 0.5, 0.5, 0.5, 0.5]), features[3])
        self.assert_features_equal(('utt-2', 0, [0.3, 0.3, 0.3, 0.3, 0.3]), features[4])
        self.assert_features_equal(('utt-2', 1, [0.4, 0.4, 0.4, 0.4, 0.4]), features[5])
def test_next_emits_features_only_from_included_ds_in_sequential_order(self, tmpdir):
ds1 = np.array([[0.1, 0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2, 0.2]])
ds2 = np.array([[0.3, 0.3, 0.3, 0.3, 0.3], [0.4, 0.4, 0.4, 0.4, 0.4], [0.5, 0.5, 0.5, 0.5, 0.5]])
ds3 = np.array([[0.6, 0.6, 0.6, 0.6, 0.6], [0.7, 0.7, 0.7, 0.7, 0.7]])
file_path = os.path.join(tmpdir.strpath, 'features.h5')
file = h5py.File(file_path, 'w')
file.create_dataset('utt-1', data=ds1)
file.create_dataset('utt-2', data=ds2)
file.create_dataset('utt-3', data=ds3)
features = tuple(PartitioningFeatureIterator(file, 120, shuffle=False, includes=['utt-1', 'utt-3', 'unknown']))
assert 4 == len(features)
self.assert_features_equal(('utt-1', 0, [0.1, 0.1, 0.1, 0.1, 0.1]), features[0])
self.assert_features_equal(('utt-1', 1, [0.2, 0.2, 0.2, 0.2, 0.2]), features[1])
self.assert_features_equal(('utt-3', 0, [0.6, 0.6, 0.6, 0.6, 0.6]), features[2])
self.assert_features_equal(('utt-3', 1, [0.7, 0.7, 0.7, 0.7, 0.7]), features[3])
    def test_next_emits_features_only_from_included_ds_in_random_order(self, tmpdir):
        """Shuffled iteration over only the `includes` datasets; the unknown
        name is ignored.

        NOTE: expected ordering is tied to seed=16.
        """
        ds1 = np.array([[0.1, 0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2, 0.2]])
        ds2 = np.array([[0.3, 0.3, 0.3, 0.3, 0.3], [0.4, 0.4, 0.4, 0.4, 0.4], [0.5, 0.5, 0.5, 0.5, 0.5]])
        ds3 = np.array([[0.6, 0.6, 0.6, 0.6, 0.6], [0.7, 0.7, 0.7, 0.7, 0.7]])
        file_path = os.path.join(tmpdir.strpath, 'features.h5')
        file = h5py.File(file_path, 'w')
        file.create_dataset('utt-1', data=ds1)
        file.create_dataset('utt-2', data=ds2)
        file.create_dataset('utt-3', data=ds3)
        features = tuple(PartitioningFeatureIterator(file, 120, shuffle=True, seed=16,
                                                     includes=['utt-1', 'utt-3', 'unknown']))
        # utt-2 is excluded by omission: 2 + 2 frames remain.
        assert 4 == len(features)
        self.assert_features_equal(('utt-3', 0, [0.6, 0.6, 0.6, 0.6, 0.6]), features[0])
        self.assert_features_equal(('utt-1', 0, [0.1, 0.1, 0.1, 0.1, 0.1]), features[1])
        self.assert_features_equal(('utt-1', 1, [0.2, 0.2, 0.2, 0.2, 0.2]), features[2])
        self.assert_features_equal(('utt-3', 1, [0.7, 0.7, 0.7, 0.7, 0.7]), features[3])
def test_next_emits_features_without_excluded_in_sequential_order(self, tmpdir):
ds1 = np.array([[0.1, 0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2, 0.2]])
ds2 = np.array([[0.3, 0.3, 0.3, 0.3, 0.3], [0.4, 0.4, 0.4, 0.4, 0.4], [0.5, 0.5, 0.5, 0.5, 0.5]])
ds3 = np.array([[0.6, 0.6, 0.6, 0.6, 0.6], [0.7, 0.7, 0.7, 0.7, 0.7]])
file_path = os.path.join(tmpdir.strpath, 'features.h5')
file = h5py.File(file_path, 'w')
file.create_dataset('utt-1', data=ds1)
file.create_dataset('utt-2', data=ds2)
file.create_dataset('utt-3', data=ds3)
features = tuple(PartitioningFeatureIterator(file, 120, shuffle=False, excludes=['utt-2', 'unknown']))
assert 4 == len(features)
self.assert_features_equal(('utt-1', 0, [0.1, 0.1, 0.1, 0.1, 0.1]), features[0])
self.assert_features_equal(('utt-1', 1, [0.2, 0.2, 0.2, 0.2, 0.2]), features[1])
self.assert_features_equal(('utt-3', 0, [0.6, 0.6, 0.6, 0.6, 0.6]), features[2])
self.assert_features_equal(('utt-3', 1, [0.7, 0.7, 0.7, 0.7, 0.7]), features[3])
    def test_next_emits_features_without_excluded_in_random_order(self, tmpdir):
        """Shuffled iteration skipping the `excludes` datasets; unknown names
        are ignored.

        NOTE: expected ordering is tied to seed=16.
        """
        ds1 = np.array([[0.1, 0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2, 0.2]])
        ds2 = np.array([[0.3, 0.3, 0.3, 0.3, 0.3], [0.4, 0.4, 0.4, 0.4, 0.4], [0.5, 0.5, 0.5, 0.5, 0.5]])
        ds3 = np.array([[0.6, 0.6, 0.6, 0.6, 0.6], [0.7, 0.7, 0.7, 0.7, 0.7]])
        file_path = os.path.join(tmpdir.strpath, 'features.h5')
        file = h5py.File(file_path, 'w')
        file.create_dataset('utt-1', data=ds1)
        file.create_dataset('utt-2', data=ds2)
        file.create_dataset('utt-3', data=ds3)
        features = tuple(PartitioningFeatureIterator(file, 120, shuffle=True, seed=16, excludes=['utt-2', 'unknown']))
        # utt-2's three frames are excluded: 2 + 2 frames remain.
        assert 4 == len(features)
        self.assert_features_equal(('utt-3', 0, [0.6, 0.6, 0.6, 0.6, 0.6]), features[0])
        self.assert_features_equal(('utt-1', 0, [0.1, 0.1, 0.1, 0.1, 0.1]), features[1])
        self.assert_features_equal(('utt-1', 1, [0.2, 0.2, 0.2, 0.2, 0.2]), features[2])
        self.assert_features_equal(('utt-3', 1, [0.7, 0.7, 0.7, 0.7, 0.7]), features[3])
def test_next_emits_features_only_from_included_ds_ignoring_filter_in_sequential_order(self, tmpdir):
ds1 = np.array([[0.1, 0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2, 0.2]])
ds2 = np.array([[0.3, 0.3, 0.3, 0.3, 0.3], [0.4, 0.4, 0.4, 0.4, 0.4], [0.5, 0.5, 0.5, 0.5, 0.5]])
ds3 = np.array([[0.6, 0.6, 0.6, 0.6, 0.6], [0.7, 0.7, 0.7, 0.7, 0.7]])
file_path = os.path.join(tmpdir.strpath, 'features.h5')
file = h5py.File(file_path, 'w')
| |
College of Applied Technology-Morristown","Tennessee College of Applied Technology-Morristown"),
("Tennessee College of Applied Technology-Murfreesboro","Tennessee College of Applied Technology-Murfreesboro"),
("Tennessee College of Applied Technology-Nashville","Tennessee College of Applied Technology-Nashville"),
("Tennessee College of Applied Technology-Newbern","Tennessee College of Applied Technology-Newbern"),
("Tennessee College of Applied Technology-Oneida-Huntsville","Tennessee College of Applied Technology-Oneida-Huntsville"),
("Tennessee College of Applied Technology-Paris","Tennessee College of Applied Technology-Paris"),
("Tennessee College of Applied Technology-Pulaski","Tennessee College of Applied Technology-Pulaski"),
("Tennessee College of Applied Technology-Ripley","Tennessee College of Applied Technology-Ripley"),
("Tennessee College of Applied Technology-Shelbyville","Tennessee College of Applied Technology-Shelbyville"),
("Tennessee College of Applied Technology-Whiteville","Tennessee College of Applied Technology-Whiteville"),
("Tennessee School of Beauty of Knoxville Inc","Tennessee School of Beauty of Knoxville Inc"),
("Tennessee State University","Tennessee State University"),
("Tennessee Technological University","Tennessee Technological University"),
("Tennessee Temple University","Tennessee Temple University"),
("Tennessee Wesleyan College","Tennessee Wesleyan College"),
("Terra State Community College","Terra State Community College"),
("Teterboro School of Aeronautics","Teterboro School of Aeronautics"),
("Texarkana College","Texarkana College"),
("Texas A & M International University","Texas A & M International University"),
("Texas A & M University Health Science Center","Texas A & M University Health Science Center"),
("Texas A & M University-Central Texas","Texas A & M University-Central Texas"),
("Texas A & M University-College Station","Texas A & M University-College Station"),
("Texas A & M University-Commerce","Texas A & M University-Commerce"),
("Texas A & M University-Corpus Christi","Texas A & M University-Corpus Christi"),
("Texas A & M University-Galveston","Texas A & M University-Galveston"),
("Texas A & M University-Kingsville","Texas A & M University-Kingsville"),
("Texas A & M University-System Office","Texas A & M University-System Office"),
("Texas A & M University-Texarkana","Texas A & M University-Texarkana"),
("Texas Barber Colleges and Hairstyling Schools","Texas Barber Colleges and Hairstyling Schools"),
("Texas Beauty College","Texas Beauty College"),
("Texas Chiropractic College Foundation Inc","Texas Chiropractic College Foundation Inc"),
("Texas Christian University","Texas Christian University"),
("Texas College of Cosmetology-Abilene","Texas College of Cosmetology-Abilene"),
("Texas College of Cosmetology-San Angelo","Texas College of Cosmetology-San Angelo"),
("Texas College","Texas College"),
("Texas County Technical College","Texas County Technical College"),
("Texas Covenant Education","Texas Covenant Education"),
("Texas Health School","Texas Health School"),
("Texas Health and Science University","Texas Health and Science University"),
("Texas Lutheran University","Texas Lutheran University"),
("Texas School of Business-East","Texas School of Business-East"),
("Texas School of Business-Friendswood","Texas School of Business-Friendswood"),
("Texas School of Business-North Campus","Texas School of Business-North Campus"),
("Texas School of Business-Southwest","Texas School of Business-Southwest"),
("Texas Southern University","Texas Southern University"),
("Texas State Technical College-Harlingen","Texas State Technical College-Harlingen"),
("Texas State Technical College-Marshall","Texas State Technical College-Marshall"),
("Texas State Technical College-System","Texas State Technical College-System"),
("Texas State Technical College-Waco","Texas State Technical College-Waco"),
("Texas State Technical College-West Texas","Texas State Technical College-West Texas"),
("Texas State University","Texas State University"),
("Texas Tech University Health Sciences Center","Texas Tech University Health Sciences Center"),
("Texas Tech University System Administration","Texas Tech University System Administration"),
("Texas Tech University","Texas Tech University"),
("Texas Vocational Schools Inc","Texas Vocational Schools Inc"),
("Texas Wesleyan University","Texas Wesleyan University"),
("Texas Woman's University","Texas Woman's University"),
("Thaddeus Stevens College of Technology","Thaddeus Stevens College of Technology"),
("Thanh Le College School of Cosmetology","Thanh Le College School of Cosmetology"),
("The Academy Waukesha","The Academy Waukesha"),
("The Academy of Hair Design Six","The Academy of Hair Design Six"),
("The Academy of Radio and TV Broadcasting","The Academy of Radio and TV Broadcasting"),
("The Ailey School","The Ailey School"),
("The Art Institute of Atlanta","The Art Institute of Atlanta"),
("The Art Institute of Austin","The Art Institute of Austin"),
("The Art Institute of California-Argosy University Hollywood","The Art Institute of California-Argosy University Hollywood"),
("The Art Institute of California-Argosy University Inland Empire","The Art Institute of California-Argosy University Inland Empire"),
("The Art Institute of California-Argosy University Los Angeles","The Art Institute of California-Argosy University Los Angeles"),
("The Art Institute of California-Argosy University Orange County","The Art Institute of California-Argosy University Orange County"),
("The Art Institute of California-Argosy University Sacramento","The Art Institute of California-Argosy University Sacramento"),
("The Art Institute of California-Argosy University San Diego","The Art Institute of California-Argosy University San Diego"),
("The Art Institute of California-Argosy University San Francisco","The Art Institute of California-Argosy University San Francisco"),
("The Art Institute of California-Argosy University-Silicon Valley","The Art Institute of California-Argosy University-Silicon Valley"),
("The Art Institute of Charleston","The Art Institute of Charleston"),
("The Art Institute of Charlotte","The Art Institute of Charlotte"),
("The Art Institute of Cincinnati","The Art Institute of Cincinnati"),
("The Art Institute of Colorado","The Art Institute of Colorado"),
("The Art Institute of Fort Lauderdale","The Art Institute of Fort Lauderdale"),
("The Art Institute of Houston","The Art Institute of Houston"),
("The Art Institute of Indianapolis","The Art Institute of Indianapolis"),
("The Art Institute of Las Vegas","The Art Institute of Las Vegas"),
("The Art Institute of Michigan","The Art Institute of Michigan"),
("The Art Institute of New York City","The Art Institute of New York City"),
("The Art Institute of Ohio-Cincinnati","The Art Institute of Ohio-Cincinnati"),
("The Art Institute of Philadelphia","The Art Institute of Philadelphia"),
("The Art Institute of Phoenix","The Art Institute of Phoenix"),
("The Art Institute of Pittsburgh","The Art Institute of Pittsburgh"),
("The Art Institute of Pittsburgh-Online Division","The Art Institute of Pittsburgh-Online Division"),
("The Art Institute of Portland","The Art Institute of Portland"),
("The Art Institute of Raleigh-Durham","The Art Institute of Raleigh-Durham"),
("The Art Institute of Salt Lake City","The Art Institute of Salt Lake City"),
("The Art Institute of San Antonio","The Art Institute of San Antonio"),
("The Art Institute of Seattle","The Art Institute of Seattle"),
("The Art Institute of St Louis","The Art Institute of St Louis"),
("The Art Institute of Tennessee-Nashville","The Art Institute of Tennessee-Nashville"),
("The Art Institute of Tucson","The Art Institute of Tucson"),
("The Art Institute of Virginia Beach","The Art Institute of Virginia Beach"),
("The Art Institute of Washington","The Art Institute of Washington"),
("The Art Institute of Washington-Dulles","The Art Institute of Washington-Dulles"),
("The Art Institute of Wisconsin","The Art Institute of Wisconsin"),
("The Art Institutes International-Minnesota","The Art Institutes International-Minnesota"),
("The Art Institutes International-Kansas City","The Art Institutes International-Kansas City"),
("The Art Institutes of York-PA","The Art Institutes of York-PA"),
("The Artisan College of Cosmetology","The Artisan College of Cosmetology"),
("The Baptist College of Florida","The Baptist College of Florida"),
("The Barber School","The Barber School"),
("The Beauty Institute","The Beauty Institute"),
("The Boston Conservatory","The Boston Conservatory"),
("The Bryman School of Arizona","The Bryman School of Arizona"),
("The Chicago School of Professional Psychology at Chicago","The Chicago School of Professional Psychology at Chicago"),
("The Chicago School of Professional Psychology at Irvine","The Chicago School of Professional Psychology at Irvine"),
("The Chicago School of Professional Psychology at Los Angeles","The Chicago School of Professional Psychology at Los Angeles"),
("The Chicago School of Professional Psychology at Washington DC","The Chicago School of Professional Psychology at Washington DC"),
("The Chicago School of Professional Psychology at Westwood","The Chicago School of Professional Psychology at Westwood"),
("The Christ College of Nursing and Health Sciences","The Christ College of Nursing and Health Sciences"),
("The Collective School Of Music","The Collective School Of Music"),
("The College of Health Care Professions-Austin","The College of Health Care Professions-Austin"),
("The College of Health Care Professions-Dallas","The College of Health Care Professions-Dallas"),
("The College of Health Care Professions-Fort Worth","The College of Health Care Professions-Fort Worth"),
("The College of Health Care Professions-San Antonio","The College of Health Care Professions-San Antonio"),
("The College of Health Care Professions-Southwest Houston","The College of Health Care Professions-Southwest Houston"),
("The College of Idaho","The College of Idaho"),
("The College of New Jersey","The College of New Jersey"),
("The College of New Rochelle","The College of New Rochelle"),
("The College of Office Technology","The College of Office Technology"),
("The College of Saint Rose","The College of Saint Rose"),
("The College of Saint Scholastica","The College of Saint Scholastica"),
("The College of Westchester","The College of Westchester"),
("The College of Wooster","The College of Wooster"),
("The Colorlab Academy of Hair","The Colorlab Academy of Hair"),
("The Commonwealth Medical College","The Commonwealth Medical College"),
("The Community College of Baltimore County","The Community College of Baltimore County"),
("The Creative Center","The Creative Center"),
("The Creative Circus","The Creative Circus"),
("The Dickinson School of Law of the Pennsylvania State University","The Dickinson School of Law of the Pennsylvania State University"),
("The English Center","The English Center"),
("The Evergreen State College","The Evergreen State College"),
("The General Theological Seminary","The General Theological Seminary"),
("The Hair Academy Inc","The Hair Academy Inc"),
("The Hair Academy","The Hair Academy"),
("The Hair Design School-Charlotte","The Hair Design School-Charlotte"),
("The Hair Design School-Durham","The Hair Design School-Durham"),
("The Hair Design School-E Greensboro","The Hair Design School-E Greensboro"),
("The Hair Design School-N Memphis","The Hair Design School-N Memphis"),
("The Hair Design School-S Memphis","The Hair Design School-S Memphis"),
("The Hair Design School-Winston-Salem","The Hair Design School-Winston-Salem"),
("The Hair School","The Hair School"),
("The Illinois Institute of Art-Chicago","The Illinois Institute of Art-Chicago"),
("The Illinois Institute of Art-Schaumburg","The Illinois Institute of Art-Schaumburg"),
("The Institute for Health Education","The Institute for Health Education"),
("The Institute of Beauty and Wellness","The Institute of Beauty and Wellness"),
("The International Culinary Center","The International Culinary Center"),
("The John Marshall Law School","The John Marshall Law School"),
("The Juilliard School","The Juilliard School"),
("The King's College","The King's College"),
("The Lab-Paul Mitchell Partner School","The Lab-Paul Mitchell Partner School"),
("The Landing School","The Landing School"),
("The Leon Institute of Hair Design","The Leon Institute of Hair Design"),
("The Master's College and Seminary","The Master's College and Seminary"),
("The Medical Arts School","The Medical Arts School"),
("The National Hispanic University","The National | |
"""
gamd.py: Implements the GaMD integration method.
Portions copyright (c) 2020 University of Kansas
Authors: <NAME>, <NAME>
Contributors: <NAME>
"""
from __future__ import absolute_import
__author__ = "<NAME>"
__version__ = "1.0"
from simtk import unit as unit
from abc import ABCMeta, ABC
from abc import abstractmethod
from ..stage_integrator import GamdStageIntegrator
from ..stage_integrator import BoostType
class GamdLangevinIntegrator(GamdStageIntegrator, ABC):
    """Abstract Langevin-dynamics base for GaMD integrators.

    Adds the Langevin-specific global/per-DOF integrator variables
    (thermal energy, collision rate, per-step scale factors) on top of the
    staged GaMD bookkeeping provided by GamdStageIntegrator, and declares
    the abstract hooks that concrete subclasses implement to emit the
    actual integration instructions.
    """

    def __init__(self, system_group, group_name,
                 dt=2.0 * unit.femtoseconds, ntcmdprep=200000, ntcmd=1000000,
                 ntebprep=200000, nteb=1000000, nstlim=3000000, ntave=50000,
                 collision_rate=1.0 / unit.picoseconds,
                 temperature=298.15 * unit.kelvin,
                 restart_filename=None):
        """
        Parameters
        ----------
        :param dt: The Amount of time between each time step.
        :param ntcmdprep: The number of conventional MD steps for system equilibration.
        :param ntcmd: The total number of conventional MD steps (including ntcmdprep). (must be multiple of ntave)
        :param ntebprep: The number of GaMD pre-equilibration steps.
        :param nteb: The number of GaMD equilibration steps (including ntebprep). (must be a multiple of ntave)
        :param nstlim: The total number of simulation steps.
        :param ntave: The number of steps used to smooth the average and sigma of potential energy (corresponds to
        a running average window size).
        :param collision_rate: Collision rate (gamma) compatible with 1/picoseconds, default: 1.0/unit.picoseconds
        :param temperature: "Bath" temperature value compatible with units.kelvin, default: 298.15*unit.kelvin
        :param restart_filename: The file name of the restart file. (default=None indicates new simulation.)
        """
        self.collision_rate = collision_rate  # gamma
        self.temperature = temperature
        self.restart_filename = restart_filename
        # kB * N_A: Boltzmann's constant expressed in molar (per-mole) units.
        self.kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA
        self.thermal_energy = self.kB * self.temperature  # kT
        #self.current_velocity_component = numpy.exp(-self.collision_rate * dt)  # a
        #self.random_velocity_component = numpy.sqrt(1 - numpy.exp(- 2 * self.collision_rate * dt))  # b
        #
        # Generally, I'm trying to put variables here that I know will be used across all implementations WITHOUT the
        # name being overloaded to have another meaning for an object that inherits from this base class. No guarantee
        # I got it perfectly correct, but that is the idea.
        #
        self.global_variables = {"thermal_energy": self.thermal_energy,
                                 #"current_velocity_component": self.current_velocity_component,
                                 #"random_velocity_component": self.random_velocity_component,
                                 "collision_rate": self.collision_rate,
                                 "vscale": 0.0, "fscale": 0.0,
                                 "noisescale": 0.0
                                 }
        self.per_dof_variables = {"sigma": 0}
        #
        # We need to run our super classes constructor last, since it's going to execute our other methods, which
        # have dependencies on our variables above being setup.
        #
        super(GamdLangevinIntegrator, self).__init__(system_group, group_name, dt, ntcmdprep, ntcmd, ntebprep, nteb,
                                                     nstlim, ntave)

    def _add_common_variables(self):
        """Register this class's global and per-DOF integrator variables.

        The set comprehensions are executed purely for their side effects;
        the returned variable indices are deliberately discarded.
        """
        garbage = {self.addGlobalVariable(key, value) for key, value in self.global_variables.items()}
        garbage = {self.addPerDofVariable(key, value) for key, value in self.per_dof_variables.items()}

    @abstractmethod
    def _add_conventional_md_pre_calc_step(self):  # O Step
        """Emit the pre-computation instructions for the conventional-MD stage."""
        raise NotImplementedError("must implement _add_conventional_md_pre_calc_step")
    '''
    @abstractmethod
    def _add_conventional_md_position_update_step(self):  # R Step
        raise NotImplementedError("must implement _add_conventional_md_position_update_step")
    @abstractmethod
    def _add_conventional_md_velocity_update_step(self):  # V Step
        raise NotImplementedError("must implement _add_conventional_md_velocity_update_step")
    @abstractmethod
    def _add_conventional_md_stochastic_velocity_update_step(self):  # O Step
        raise NotImplementedError("must implement _add_conventional_md_stochastic_velocity_update_step")
    '''
    @abstractmethod
    def _add_conventional_md_update_step(self):
        """Emit the position/velocity update instructions for conventional MD."""
        raise NotImplementedError("must implement _add_conventional_md_update_step")
    '''
    @abstractmethod
    def _add_gamd_position_update_step(self):  # R Step
        raise NotImplementedError("must implement _add_gamd_position_update_step")
    @abstractmethod
    def _add_gamd_velocity_update_step(self):  # V Step
        raise NotImplementedError("must implement _add_gamd_velocity_update_step")
    @abstractmethod
    def _add_gamd_stochastic_velocity_update_step(self):  # O Step
        raise NotImplementedError("must implement _add_gamd_stochastic_velocity_update_step")
    '''
    @abstractmethod
    def _add_gamd_update_step(self):
        """Emit the boosted position/velocity update instructions for the GaMD stage."""
        raise NotImplementedError("must implement _add_gamd_update_step")

    @abstractmethod
    def _add_gamd_pre_calc_step(self):
        """Emit the pre-computation instructions (e.g. boost potential) for GaMD."""
        raise NotImplementedError("must implement _add_gamd_pre_calc_step")

    @abstractmethod
    def _add_gamd_boost_calculations_step(self):
        """Emit the instructions that compute the force-scaling boost factors."""
        raise NotImplementedError("must implement _add_gamd_boost_calculations_step")

    @abstractmethod
    def _add_instructions_to_calculate_primary_boost_statistics(self):
        """Emit instructions tracking the primary statistics (e.g. Vmax/Vmin)."""
        raise NotImplementedError("must implement _add_instructions_to_calculate_primary_boost_statistics")

    @abstractmethod
    def _add_instructions_to_calculate_secondary_boost_statistics(self):
        """Emit instructions tracking the secondary statistics (e.g. running average/variance)."""
        raise NotImplementedError("must implement _add_instructions_to_calculate_secondary_boost_statistics")

    def _add_conventional_md_instructions(self):
        """Emit the full conventional-MD stage: pre-calculation, then the update."""
        self._add_conventional_md_pre_calc_step()
        '''
        self._add_conventional_md_velocity_update_step()
        self._add_conventional_md_position_update_step()
        self._add_conventional_md_stochastic_velocity_update_step()
        self._add_conventional_md_position_update_step()
        self._add_conventional_md_velocity_update_step()
        '''
        self._add_conventional_md_update_step()

    def _add_gamd_instructions(self):
        """Emit the full GaMD stage: pre-calculation, boost factors, then the update."""
        self._add_gamd_pre_calc_step()
        self._add_gamd_boost_calculations_step()
        '''
        self._add_gamd_velocity_update_step()
        self._add_gamd_position_update_step()
        self._add_gamd_stochastic_velocity_update_step()
        self._add_gamd_position_update_step()
        #
        # We should only need to calculating the scaling factor once per step, since Vmax, Vmin, the threshold energy,
        # and the effective harmonic constant don't change after being set. It's only a question if the energy changes
        # somehow during the step.
        #
        #self._add_gamd_boost_calculations_step()
        self._add_gamd_velocity_update_step()
        '''
        self._add_gamd_update_step()

    #
    # Debugging Methods
    #
    @staticmethod
    def _get_debug_values_as_dictionary(dictionary, counter, function_to_retrieve_value):
        """Fetch each variable's saved debug value, keyed as "<counter>_<name>"."""
        results = {}
        for key, value in dictionary.items():
            results[str(counter) + "_" + key] = function_to_retrieve_value(counter, key)
        return results

    def _add_debug(self):
        """Snapshot all Langevin variables for this debug step, then defer to the base class."""
        # Side-effect-only set comprehensions; return values are discarded.
        garbage = {self._save_global_debug(key) for key, value in self.global_variables.items()}
        garbage = {self._save_per_dof_debug(key) for key, value in self.per_dof_variables.items()}
        super(GamdLangevinIntegrator, self)._add_debug()

    def get_debug_step(self, counter):
        """Return the debug values recorded at step `counter`, including the base class's."""
        results = super(GamdLangevinIntegrator, self).get_debug_step(counter)
        results.update(self._get_debug_values_as_dictionary(self.global_variables, counter, self._get_global_debug_value))
        results.update(self._get_debug_values_as_dictionary(self.per_dof_variables, counter, self._get_per_dof_debug_value))
        return results
#
# This integrator is the basis for all of our single boost type integrators
# to perform them in a generic way that will work across boost types.
#
class GroupBoostIntegrator(GamdLangevinIntegrator, ABC):
""" This class is an OpenMM Integrator for doing the dihedral boost for Gaussian accelerated molecular dynamics.
"""
    def __init__(self, system_group, group_name, dt, ntcmdprep, ntcmd, ntebprep, nteb, nstlim, ntave, sigma0,
                 collision_rate, temperature, restart_filename):
        """
        Parameters
        ----------
        :param system_group: This value indicates what value should be appended to system names (energy, force) for accessing the correct group's variable.
        :param group_name: This variable along with the system_group is used to create a unique name for each of our variables, so that if you are composing groups for boosts, they do not overwrite.
        :param dt: The Amount of time between each time step.
        :param ntcmdprep: The number of conventional MD steps for system equilibration.
        :param ntcmd: The total number of conventional MD steps (including ntcmdprep). (must be a multiple of ntave)
        :param ntebprep: The number of GaMD pre-equilibration steps.
        :param nteb: The number of GaMD equilibration steps (including ntebprep). (must be a multiple of ntave)
        :param nstlim: The total number of simulation steps.
        :param ntave: The number of steps used to smooth the average and sigma of potential energy (corresponds to a
                      running average window size).
        :param sigma0: The upper limit of the standard deviation of the potential boost that allows for
                       accurate reweighting.
        :param collision_rate: Collision rate (gamma) compatible with 1/picoseconds, default: 1.0/unit.picoseconds
        :param temperature: "Bath" temperature value compatible with units.kelvin, default: 298.15*unit.kelvin
        :param restart_filename: The file name of the restart file. (default=None indicates new simulation.)
        """
        #
        # These variables are generated per type of boost being performed
        #
        # Vmax/Vmin start at -1E99/1E99 so the first observed energy always
        # replaces them; threshold_energy likewise starts at -1E99.
        self.global_variables_by_boost_type = {"Vmax": -1E99, "Vmin": 1E99, "Vavg": 0,
                                               "oldVavg": 0, "sigmaV": 0, "M2": 0, "wVavg": 0, "k0": 0,
                                               "k0prime": 0, "k0doubleprime": 0, "k0doubleprime_window": 0,
                                               "boosted_energy": 0, "check_boost": 0, "sigma0": sigma0,
                                               "threshold_energy": -1E99}
        #
        # These variables are always kept for reporting, regardless of boost type
        #
        self.boost_global_variables = {}
        # newx: pre-step positions; coordinates: a per-DOF copy of x for reporting.
        self.boost_per_dof_variables = {"newx": 0, "coordinates": 0}
        self.debug_per_dof_variables = []
        # self.debug_per_dof_variables = ["x", "v", "f", "m"]
        self.debug_global_variables = ["dt", "energy", "energy0", "energy1", "energy2", "energy3", "energy4"]
        self.sigma0 = sigma0
        self.debuggingIsEnabled = True
        # The superclass constructor invokes the _add_* hooks, so all of the
        # dictionaries above must be populated before this call.
        super(GroupBoostIntegrator, self).__init__(system_group, group_name, dt, ntcmdprep, ntcmd, ntebprep, nteb, nstlim, ntave, collision_rate,
                                                   temperature, restart_filename)
        #
        # We have to set this value separate from the others, so that when we do a non-total boost, we will still
        # have a total boost to report back. In that condition, the above ForceScalingFactor will get setup for
        # appropriate boost type.
        #
        # NOTE: THIS VALUE WILL NEED TO BE FIXED SOMEHOW FOR DUAL BOOST.
        #
        self.addGlobalVariable(self._append_group_name_by_type("ForceScalingFactor", BoostType.TOTAL), 1.0)
        self.addGlobalVariable(self._append_group_name_by_type("BoostPotential", BoostType.TOTAL), 0.0)
        # NOTE(review): for TOTAL/DIHEDRAL boosts the DIHEDRAL-named reporting
        # variables are pre-registered here; presumably _append_group_name
        # resolves to one of these for those boost types -- confirm.
        if self.get_boost_type() == BoostType.TOTAL or self.get_boost_type() == BoostType.DIHEDRAL:
            self.addGlobalVariable(self._append_group_name_by_type("ForceScalingFactor", BoostType.DIHEDRAL), 1.0)
            self.addGlobalVariable(self._append_group_name_by_type("BoostPotential", BoostType.DIHEDRAL), 0.0)
        else:
            self.addGlobalVariable(self._append_group_name("ForceScalingFactor"), 1.0)
            self.addGlobalVariable(self._append_group_name("BoostPotential"), 0.0)
        # Keep a per-DOF copy of the current positions.
        self.addComputePerDof("coordinates", "x")
#
#
#
#
# def get_starting_energy(self):
# return self.getGlobalVariableByName("starting_energy")
# def get_current_state(self):
# results = {"step": self.getGlobalVariableByName("stepCount")}
# return results
# pass
def _add_common_variables(self):
unused_return_values = {self.addGlobalVariable(key, value) for key, value in
self.boost_global_variables.items()}
unused_return_values = {self.addPerDofVariable(key, value) for key, value in
self.boost_per_dof_variables.items()}
unused_return_values = {self.addGlobalVariable(self._append_group_name(key), value) for key, value in
self.global_variables_by_boost_type.items()}
super(GroupBoostIntegrator, self)._add_common_variables()
def _update_potential_state_values_with_window_potential_state_values(self):
# Update window variables
self.addComputeGlobal(self._append_group_name("Vavg"), self._append_group_name("wVavg"))
self.addComputeGlobal(self._append_group_name("sigmaV"), "sqrt({0}/(windowCount-1))".format(
self._append_group_name("M2")))
# Reset variables
self.addComputeGlobal(self._append_group_name("M2"), "0")
self.addComputeGlobal(self._append_group_name("wVavg"), "0.0")
self.addComputeGlobal(self._append_group_name("oldVavg"), "0.0")
def _add_instructions_to_calculate_primary_boost_statistics(self):
self.addComputeGlobal(self._append_group_name("Vmax"), "max({0}, {1})".format(self._append_group_name("StartingPotentialEnergy"),
self._append_group_name("Vmax")))
self.addComputeGlobal(self._append_group_name("Vmin"), "min({0}, {1})".format(self._append_group_name("StartingPotentialEnergy"),
self._append_group_name("Vmin")))
    def _add_instructions_to_calculate_secondary_boost_statistics(self):
        """Accumulate the running mean (wVavg) and M2 sum of squared deviations
        of the group's starting potential energy via Welford's online algorithm.
        """
        #
        # The following calculations are used to calculate the average and variance/standard deviation,
        # rather than calculating the average at the ntave % 0 step
        #
        # Algorithm Description:
        #
        # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
        #
        #
        # oldVavg must hold the pre-update mean: M2's update below uses both
        # the old and the new mean.
        self.addComputeGlobal(self._append_group_name("oldVavg"), self._append_group_name("wVavg"))
        self.addComputeGlobal(self._append_group_name("wVavg"), "{0} + ({1}-{0})/windowCount".format(
            self._append_group_name("wVavg"), self._append_group_name("StartingPotentialEnergy")))
        self.addComputeGlobal(self._append_group_name("M2"), "{0} + ({1}-{2})*({1}-{3})".format(
            self._append_group_name("M2"),
            self._append_group_name("StartingPotentialEnergy"),
            self._append_group_name("oldVavg"),
            self._append_group_name("wVavg")))
def _add_conventional_md_pre_calc_step(self):
self.addComputeGlobal("vscale", "exp(-dt*collision_rate)")
self.addComputeGlobal("fscale", "(1-vscale)/collision_rate")
self.addComputeGlobal("noisescale", "sqrt(thermal_energy*(1-vscale*vscale))")
    def _add_conventional_md_update_step(self):
        """Emit one Langevin velocity/position update with constraint handling."""
        # Remember pre-step positions so velocities can be recomputed after
        # constraints move x.
        self.addComputePerDof("newx", "x")
        self.addComputePerDof("v", "vscale*v + fscale*f/m + noisescale*gaussian/sqrt(m)")
        self.addComputePerDof("x", "x+dt*v")
        self.addConstrainPositions()
        # Make velocities consistent with the constrained displacement.
        self.addComputePerDof("v", "(x-newx)/dt")
def _add_gamd_pre_calc_step(self):
self.addComputeGlobal("vscale", "exp(-dt*collision_rate)")
self.addComputeGlobal("fscale", "(1-vscale)/collision_rate")
self.addComputeGlobal("noisescale", "sqrt(thermal_energy*(1-vscale*vscale))")
#
# We do not apply the boost potential to the energy value since energy is read only.
#
self.addComputeGlobal(self._append_group_name("BoostPotential"), "0.5 * {0} * ({1} - {2})^2 / ({3} - {4})".
format(self._append_group_name("k0"), self._append_group_name("threshold_energy"),
self._append_group_name("StartingPotentialEnergy"),
self._append_group_name("Vmax"), self._append_group_name("Vmin")))
#
# | |
# Source repository: choderalab/gin
# =============================================================================
# imports
# =============================================================================
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
tf.autograph.set_verbosity(3)
from sklearn import metrics
import gin
import lime
import pandas as pd
import numpy as np
# import qcportal as ptl
# client = ptl.FractalClient()
# Unit-conversion constants (atomic units -> chemists' units).
HARTREE_TO_KCAL_PER_MOL = 627.509
# NOTE(review): name says "BORN" but the value is the Bohr radius in
# Angstrom -- presumably Bohr-to-Angstrom; confirm.
BORN_TO_ANGSTROM = 0.529177
HARTREE_PER_BORN_TO_KCAL_PER_MOL_PER_ANGSTROM = 1185.993
from openeye import oechem
# OpenEye input molecule stream (opened with default settings).
ifs = oechem.oemolistream()
# Dense re-indexing of the supported atomic numbers
# (C, N, O, S, P, F, Cl, Br, I, H) into feature indices 0-9.
TRANSLATION = {
    6: 0,
    7: 1,
    8: 2,
    16: 3,
    15: 4,
    9: 5,
    17: 6,
    35: 7,
    53: 8,
    1: 9
}
def data_generator():
    """Yield ``(atoms, adjacency_map, energy)`` examples from a QCArchive dataset.

    For every record of the module-level ``ds_qc`` dataset, walk its
    optimization trajectory and emit one example per snapshot:

    * ``atoms`` -- per-atom features concatenated with xyz coordinates
      (Angstrom) and the energy Jacobian (kcal/mol/Angstrom) along axis 1.
    * ``adjacency_map`` -- dense ``(n_atoms, n_atoms)`` float matrix holding
      the bond order for each connected pair from ``mol.connectivity``.
    * ``energy`` -- scalar SCF total energy converted to kcal/mol.

    NOTE(review): ``ds_qc`` is not defined anywhere in this file (the
    qcportal client import above is commented out) -- confirm it is bound
    at module level before this generator is consumed.
    """
    for record_name in list(ds_qc.data.records):
        try:
            r = ds_qc.get_record(record_name, specification='default')
            if r is not None:
                traj = r.get_trajectory()
                if traj is not None:
                    for snapshot in traj:
                        energy = tf.convert_to_tensor(
                            snapshot.properties.scf_total_energy * HARTREE_TO_KCAL_PER_MOL,
                            dtype=tf.float32)

                        mol = snapshot.get_molecule()

                        # Atomic numbers -> dense feature indices.
                        atoms = tf.convert_to_tensor(
                            [TRANSLATION[atomic_number] for atomic_number in mol.atomic_numbers],
                            dtype=tf.int64)

                        # Scatter bond orders into a dense adjacency matrix;
                        # connectivity rows are (i, j, bond_order).
                        adjacency_map = tf.tensor_scatter_nd_update(
                            tf.zeros(
                                (
                                    tf.shape(atoms, tf.int64)[0],
                                    tf.shape(atoms, tf.int64)[0]
                                ),
                                dtype=tf.float32),
                            tf.convert_to_tensor(
                                np.array(mol.connectivity)[:, :2],
                                dtype=tf.int64),
                            tf.convert_to_tensor(
                                np.array(mol.connectivity)[:, 2],
                                dtype=tf.float32))

                        features = gin.probabilistic.featurization.featurize_atoms(
                            atoms, adjacency_map)

                        xyz = tf.convert_to_tensor(
                            mol.geometry * BORN_TO_ANGSTROM,
                            dtype=tf.float32)

                        jacobian = tf.convert_to_tensor(
                            snapshot.return_result * HARTREE_PER_BORN_TO_KCAL_PER_MOL_PER_ANGSTROM,
                            dtype=tf.float32)

                        # Pack features, coordinates and Jacobian side by side.
                        atoms = tf.concat(
                            [
                                features,
                                xyz,
                                jacobian
                            ],
                            axis=1)

                        yield (atoms, adjacency_map, energy)
        except Exception:
            # Best-effort: skip records that fail to download or parse.
            # (Was a bare ``except:``, which would also swallow
            # KeyboardInterrupt and SystemExit.)
            pass
def data_loader(idx):
    """Load one pre-processed example from the on-disk ``data/`` cache.

    Args:
        idx: scalar integer tensor; index of the example on disk.

    Returns:
        Tuple ``(atoms, adjacency_map, energy)`` of tensors read from
        ``data/{atoms,adjacency_map,energy}/<idx>.npy``.
    """
    # Resolve the tensor index to a file name once instead of three times.
    file_name = str(idx.numpy()) + '.npy'
    atoms = tf.convert_to_tensor(
        np.load(os.path.join('data', 'atoms', file_name)))
    adjacency_map = tf.convert_to_tensor(
        np.load(os.path.join('data', 'adjacency_map', file_name)))
    energy = tf.convert_to_tensor(
        np.load(os.path.join('data', 'energy', file_name)))
    return atoms, adjacency_map, energy
'''
ds = tf.data.Dataset.from_generator(
    data_generator,
    (tf.float32, tf.float32, tf.float32))
'''
# Build the dataset from the first 500 pre-saved .npy examples via
# data_loader (wrapped in py_function since it does eager file I/O).
ds_path = tf.data.Dataset.from_tensor_slices(list(range(500)))
ds = ds_path.map(
    lambda idx: tf.py_function(
        data_loader,
        [idx],
        [tf.float32, tf.float32, tf.float32]))
# NOTE(review): shuffling before take/skip reshuffles every epoch by
# default, so ds_te / ds_vl / ds_tr may leak examples across splits --
# confirm this is intended or pass reshuffle_each_iteration=False.
ds = ds.shuffle(10000, seed=2666)
# Batch molecules into graph-net batches of up to 128 atoms.
ds = gin.probabilistic.gn.GraphNet.batch(
    ds, 128, feature_dimension=18, atom_dtype=tf.float32)
n_batches = int(gin.probabilistic.gn.GraphNet.get_number_batches(ds))
# 10% test, 10% validation, remainder training.
n_te = n_batches // 10
ds_te = ds.take(n_te)
ds_vl = ds.skip(n_te).take(n_te)
ds_tr = ds.skip(2 * n_te)
def params_to_potential(
    y_v, y_e, y_a, y_t, y_p,
    bond_idxs, angle_idxs, torsion_idxs,
    coordinates,
    atom_in_mol=tf.constant(False),
    bond_in_mol=tf.constant(False),
    attr_in_mol=tf.constant(False)):
    """Assemble a molecular-mechanics-style potential from predicted coefficients.

    Each ``y_*`` tensor holds per-entity polynomial coefficients predicted by
    the readout network: ``y_e`` for bonds (in bond length), ``y_a`` for
    angles (in cos(angle)), ``y_t`` for torsions (in cos(dihedral), last
    column is a constant offset), and ``y_p`` for nonbonded pairs (in inverse
    distance, last slice is a constant offset). The ``*_in_mol`` boolean
    masks assign entities to molecules in a batched graph; when a mask is
    all-False (the default sentinel) everything is treated as one molecule.

    Returns:
        u_tot: per-molecule total energy (squeezed), the sum of bond, angle,
        dihedral and pairwise contributions.

    NOTE(review): ``y_v`` (per-atom coefficients) is only used for its
    length; it does not contribute to the energy here -- confirm intended.
    """
    n_atoms = tf.shape(y_v, tf.int64)[0]
    n_angles = tf.shape(angle_idxs, tf.int64)[0]
    n_torsions = tf.shape(torsion_idxs, tf.int64)[0]
    n_bonds = tf.shape(bond_idxs, tf.int64)[0]

    # Default masks: treat everything as belonging to a single molecule.
    if tf.logical_not(tf.reduce_any(atom_in_mol)):
        atom_in_mol = tf.tile(
            [[True]],
            [n_atoms, 1])

    if tf.logical_not(tf.reduce_any(bond_in_mol)):
        bond_in_mol = tf.tile(
            [[True]],
            [n_bonds, 1])

    if tf.logical_not(tf.reduce_any(attr_in_mol)):
        attr_in_mol = tf.constant([[True]])

    # (n_atoms, n_atoms) mask: 1 where two atoms belong to the same molecule.
    # stop_gradient: the mask is structural, no gradients should flow through it.
    per_mol_mask = tf.stop_gradient(tf.matmul(
        tf.where(
            atom_in_mol,
            tf.ones_like(atom_in_mol, dtype=tf.float32),
            tf.zeros_like(atom_in_mol, dtype=tf.float32),
            name='per_mol_mask_0'),
        tf.transpose(
            tf.where(
                atom_in_mol,
                tf.ones_like(atom_in_mol, dtype=tf.float32),
                tf.zeros_like(atom_in_mol, dtype=tf.float32),
                name='per_mol_mask_1'))))

    # Geometry terms derived from the coordinates.
    distance_matrix = gin.deterministic.md.get_distance_matrix(
        coordinates)

    bond_distances = tf.gather_nd(
        distance_matrix,
        bond_idxs)

    angle_angles = gin.deterministic.md.get_angles_cos(
        coordinates,
        angle_idxs)

    torsion_dihedrals = gin.deterministic.md.get_dihedrals_cos(
        coordinates,
        torsion_idxs)

    # Per-bond energy: polynomial in bond length with coefficients y_e
    # (powers 0 .. n_coeff-1).
    u_bond = tf.reduce_sum(
        tf.math.multiply(
            y_e,
            tf.math.pow(
                tf.expand_dims(
                    bond_distances,
                    1),
                tf.expand_dims(
                    tf.range(
                        tf.shape(y_e)[1],
                        dtype=tf.float32),
                    0))),
        axis=1)

    # Per-angle energy: polynomial in cos(angle) with coefficients y_a.
    u_angle = tf.reduce_sum(
        tf.math.multiply(
            y_a,
            tf.math.pow(
                tf.expand_dims(
                    angle_angles,
                    1),
                tf.expand_dims(
                    tf.range(
                        tf.shape(y_a)[1],
                        dtype=tf.float32),
                    0))),
        axis=1)

    # Per-torsion energy: polynomial (powers 1..) in cos(dihedral) with
    # coefficients y_t[:, :-1]; the last column of y_t is a constant offset.
    u_dihedral = tf.reduce_sum(
        tf.math.multiply(
            y_t[:, :-1],
            tf.math.pow(
                tf.expand_dims(
                    torsion_dihedrals,
                    1),
                tf.expand_dims(
                    tf.range(
                        1,
                        tf.shape(y_t)[1],
                        dtype=tf.float32),
                    0))),
        axis=1) + y_t[:, -1]

    # (n_angles, n_atoms) boolean: angle k involves atom j
    # (any of its three member indices matches j).
    angle_is_connected_to_atoms = tf.reduce_any(
        [
            tf.equal(
                tf.tile(
                    tf.expand_dims(
                        tf.range(n_atoms),
                        0),
                    [n_angles, 1]),
                tf.tile(
                    tf.expand_dims(
                        angle_idxs[:, 0],
                        1),
                    [1, n_atoms])),
            tf.equal(
                tf.tile(
                    tf.expand_dims(
                        tf.range(n_atoms),
                        0),
                    [n_angles, 1]),
                tf.tile(
                    tf.expand_dims(
                        angle_idxs[:, 1],
                        1),
                    [1, n_atoms])),
            tf.equal(
                tf.tile(
                    tf.expand_dims(
                        tf.range(n_atoms),
                        0),
                    [n_angles, 1]),
                tf.tile(
                    tf.expand_dims(
                        angle_idxs[:, 2],
                        1),
                    [1, n_atoms]))
        ],
        axis=0)

    # (n_torsions, n_atoms) boolean: torsion k involves atom j
    # (any of its four member indices matches j).
    torsion_is_connected_to_atoms = tf.reduce_any(
        [
            tf.equal(
                tf.tile(
                    tf.expand_dims(
                        tf.range(n_atoms),
                        0),
                    [n_torsions, 1]),
                tf.tile(
                    tf.expand_dims(
                        torsion_idxs[:, 0],
                        1),
                    [1, n_atoms])),
            tf.equal(
                tf.tile(
                    tf.expand_dims(
                        tf.range(n_atoms),
                        0),
                    [n_torsions, 1]),
                tf.tile(
                    tf.expand_dims(
                        torsion_idxs[:, 1],
                        1),
                    [1, n_atoms])),
            tf.equal(
                tf.tile(
                    tf.expand_dims(
                        tf.range(n_atoms),
                        0),
                    [n_torsions, 1]),
                tf.tile(
                    tf.expand_dims(
                        torsion_idxs[:, 2],
                        1),
                    [1, n_atoms])),
            tf.equal(
                tf.tile(
                    tf.expand_dims(
                        tf.range(n_atoms),
                        0),
                    [n_torsions, 1]),
                tf.tile(
                    tf.expand_dims(
                        torsion_idxs[:, 3],
                        1),
                    [1, n_atoms]))
        ],
        axis=0)

    # Propagate atom->molecule membership to angles via the incidence matrix.
    angle_in_mol = tf.greater(
        tf.matmul(
            tf.where(
                angle_is_connected_to_atoms,
                tf.ones_like(
                    angle_is_connected_to_atoms,
                    tf.int64),
                tf.zeros_like(
                    angle_is_connected_to_atoms,
                    tf.int64)),
            tf.where(
                atom_in_mol,
                tf.ones_like(
                    atom_in_mol,
                    tf.int64),
                tf.zeros_like(
                    atom_in_mol,
                    tf.int64))),
        tf.constant(0, dtype=tf.int64))

    # Same propagation for torsions.
    torsion_in_mol = tf.greater(
        tf.matmul(
            tf.where(
                torsion_is_connected_to_atoms,
                tf.ones_like(
                    torsion_is_connected_to_atoms,
                    tf.int64),
                tf.zeros_like(
                    torsion_is_connected_to_atoms,
                    tf.int64)),
            tf.where(
                atom_in_mol,
                tf.ones_like(
                    atom_in_mol,
                    tf.int64),
                tf.zeros_like(
                    atom_in_mol,
                    tf.int64))),
        tf.constant(0, dtype=tf.int64))

    # Nonbonded pair mask: same-molecule pairs, minus bonded (1-2) pairs ...
    u_pair_mask = tf.tensor_scatter_nd_update(
        per_mol_mask,
        bond_idxs,
        tf.zeros(
            shape=(
                tf.shape(bond_idxs, tf.int32)[0]),
            dtype=tf.float32))

    # ... minus 1-3 pairs (the two outer atoms of each angle) ...
    u_pair_mask = tf.tensor_scatter_nd_update(
        u_pair_mask,
        tf.stack(
            [
                angle_idxs[:, 0],
                angle_idxs[:, 2]
            ],
            axis=1),
        tf.zeros(
            shape=(
                tf.shape(angle_idxs, tf.int32)[0]),
            dtype=tf.float32))

    # ... minus self-pairs, keeping only the upper triangle so each pair
    # is counted once.
    u_pair_mask = tf.linalg.set_diag(
        u_pair_mask,
        tf.zeros(
            shape=tf.shape(u_pair_mask)[0],
            dtype=tf.float32))

    u_pair_mask = tf.linalg.band_part(
        u_pair_mask,
        0, -1)

    # Replace masked-out distances with 1 so the inverse below is finite.
    _distance_matrix = tf.where(
        tf.greater(
            u_pair_mask,
            tf.constant(0, dtype=tf.float32)),
        distance_matrix,
        tf.ones_like(distance_matrix))

    # Masked inverse distance; +1e-3 guards against division by ~0.
    _distance_matrix_inverse = tf.multiply(
        u_pair_mask,
        tf.pow(
            tf.math.add(
                _distance_matrix,
                tf.constant(1e-3, dtype=tf.float32)),
            tf.constant(-1, dtype=tf.float32)))

    # Pairwise energy: polynomial (powers 1..) in inverse distance with
    # coefficients y_p[:, :, :-1]; the last slice of y_p is a constant offset.
    u_pair = tf.reduce_sum(
        tf.multiply(
            tf.multiply(
                tf.tile(
                    tf.expand_dims(
                        u_pair_mask,
                        2),
                    [1, 1, tf.shape(y_p)[2]-1]),
                y_p[:, :, :-1]),
            tf.pow(
                tf.expand_dims(
                    _distance_matrix_inverse,
                    2),
                tf.expand_dims(
                    tf.expand_dims(
                        tf.range(
                            1, tf.shape(y_p)[2],
                            dtype=tf.float32),
                        0),
                    0))),
        axis=2) + y_p[:, :, -1]

    # Reduce each term over its entities into per-molecule totals.
    u_bond_tot = tf.matmul(
        tf.transpose(
            tf.where(
                bond_in_mol,
                tf.ones_like(bond_in_mol, dtype=tf.float32),
                tf.zeros_like(bond_in_mol, dtype=tf.float32))),
        tf.expand_dims(
            u_bond,
            axis=1))

    u_angle_tot = tf.matmul(
        tf.transpose(
            tf.where(
                angle_in_mol,
                tf.ones_like(angle_in_mol, dtype=tf.float32),
                tf.zeros_like(angle_in_mol, dtype=tf.float32))),
        tf.expand_dims(
            u_angle,
            axis=1))

    u_dihedral_tot = tf.matmul(
        tf.transpose(
            tf.where(
                torsion_in_mol,
                tf.ones_like(torsion_in_mol, dtype=tf.float32),
                tf.zeros_like(torsion_in_mol, dtype=tf.float32))),
        tf.expand_dims(
            u_dihedral,
            axis=1))

    u_pair_tot = tf.matmul(
        tf.transpose(
            tf.where(
                atom_in_mol,
                tf.ones_like(atom_in_mol, dtype=tf.float32),
                tf.zeros_like(atom_in_mol, dtype=tf.float32))),
        tf.reduce_sum(
            u_pair,
            axis=1,
            keepdims=True))

    u_tot = tf.squeeze(
        u_pair_tot + u_bond_tot + u_angle_tot + u_dihedral_tot)

    return u_tot
# NOTE(review): this rebinds ``ds`` and expects data_generator to yield SIX
# tensors (atoms, adjacency_map, angles, bonds, torsions, particle_params),
# but the data_generator defined above yields only three -- this looks like
# a leftover from a different experiment; confirm which pipeline is live.
ds = tf.data.Dataset.from_generator(
    data_generator,
    (
        tf.float32,
        tf.float32,
        tf.float32,
        tf.float32,
        tf.float32,
        tf.float32))

# Re-featurize atoms eagerly (py_function), pass everything else through,
# and cache the result on disk under the prefix 'ds'.
ds = ds.map(
    lambda atoms, adjacency_map, angles, bonds, torsions, particle_params:\
    tf.py_function(
        lambda atoms, adjacency_map, angles, bonds, torsions, particle_params:\
        [
            gin.probabilistic.featurization.featurize_atoms(
                tf.cast(atoms, dtype=tf.int64), adjacency_map),
            adjacency_map,
            angles,
            bonds,
            torsions,
            particle_params
        ],
        [atoms, adjacency_map, angles, bonds, torsions, particle_params],
        [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32,
        tf.float32])).cache('ds')
# Hyperparameter search space: hidden widths (D_*) and activation choices
# for the graph-net update functions (phi_*) and readout (f_r), plus the
# learning rate. ``obj_fn`` receives one point from this grid.
config_space = {
    'D_V': [32, 64, 128],
    'D_E': [32, 64, 128],
    'D_U': [32, 64, 128],
    'phi_e_0': [32, 64, 128],
    'phi_e_a_0': ['elu', 'relu', 'leaky_relu', 'tanh', 'sigmoid'],
    'phi_e_a_1': ['elu', 'relu', 'leaky_relu', 'tanh', 'sigmoid'],
    'phi_v_0': [32, 64, 128],
    'phi_v_a_0': ['elu', 'relu', 'leaky_relu', 'tanh', 'sigmoid'],
    'phi_v_a_1': ['elu', 'relu', 'leaky_relu', 'tanh', 'sigmoid'],
    'phi_u_0': [32, 64, 128],
    'phi_u_a_0': ['elu', 'relu', 'leaky_relu', 'tanh', 'sigmoid'],
    'phi_u_a_1': ['elu', 'relu', 'leaky_relu', 'tanh', 'sigmoid'],
    'f_r_0': [32, 64, 128],
    'f_r_1': [32, 64, 128],
    'f_r_a_0': ['elu', 'relu', 'leaky_relu', 'tanh', 'sigmoid'],
    'f_r_a_1': ['elu', 'relu', 'leaky_relu', 'tanh', 'sigmoid'],
    'learning_rate': [1e-5, 1e-4, 1e-3]
}
def init(point):
    """Build the graph net ``gn`` and optimizer ``opt`` for one config point.

    Args:
        point: dict mapping config_space keys to chosen hyperparameter values.

    Side effects:
        Rebinds the module-level globals ``gn`` and ``opt``.
    """
    global gn
    global opt

    class f_v(tf.keras.Model):
        # Node featurizer: a single Dense projection to D_V.
        def __init__(self, units=point['D_V']):
            super(f_v, self).__init__()
            self.d = tf.keras.layers.Dense(units)

        @tf.function
        def call(self, x):
            return self.d(x)

    class f_r(tf.keras.Model):
        """ Readout function
        """
        def __init__(self, units=point['f_r_0'], f_r_a=point['f_r_a_0']):
            super(f_r, self).__init__()
            # One two-layer head per term type (v: atoms, e: bonds, a: angles,
            # t: torsions, p: pairs); second layer outputs 16 polynomial
            # coefficients, initialized near zero.
            self.d_v_0 = tf.keras.layers.Dense(units, activation='tanh')
            self.d_v_1 = tf.keras.layers.Dense(
                16,
                kernel_initializer=tf.random_normal_initializer(0, 1e-5),
                bias_initializer=tf.random_normal_initializer(0, 1e-5))
            self.d_e_0 = tf.keras.layers.Dense(units, activation='tanh')
            self.d_e_1 = tf.keras.layers.Dense(
                16,
                kernel_initializer=tf.random_normal_initializer(0, 1e-5),
                bias_initializer=tf.random_normal_initializer(0, 1e-5))
            self.d_a_0 = tf.keras.layers.Dense(units, activation='tanh')
            self.d_a_1 = tf.keras.layers.Dense(
                16,
                kernel_initializer=tf.random_normal_initializer(0, 1e-5),
                bias_initializer=tf.random_normal_initializer(0, 1e-5))
            self.d_t_0 = tf.keras.layers.Dense(units, activation='tanh')
            self.d_t_1 = tf.keras.layers.Dense(
                16,
                kernel_initializer=tf.random_normal_initializer(0, 1e-5),
                bias_initializer=tf.random_normal_initializer(0, 1e-5))
            self.d_p_0 = tf.keras.layers.Dense(units, activation='tanh')
            self.d_p_1 = tf.keras.layers.Dense(
                16,
                kernel_initializer=tf.random_normal_initializer(0, 1e-5),
                bias_initializer=tf.random_normal_initializer(0, 1e-5))

            self.units = units
            self.d_v = point['D_V']
            self.d_e = point['D_E']
            self.d_a = point['D_E']
            self.d_t = point['D_E']
            self.d_u = point['D_U']

        # @tf.function
        def call(self,
                h_e, h_v, h_u,
                h_e_history, h_v_history, h_u_history,
                atom_in_mol, bond_in_mol, attr_in_mol,
                bond_idxs, angle_idxs, torsion_idxs,
                coordinates):
            n_atoms = tf.shape(h_v, tf.int64)[0]

            # Flatten the 6 rounds of node-state history into one vector
            # per atom.
            h_v_history.set_shape([None, 6, self.d_v])

            h_v = tf.reshape(
                h_v_history,
                [-1, 6 * self.d_v])

            # Symmetric bond representation: sum of the two endpoint states.
            h_e = tf.math.add(
                tf.gather(
                    h_v,
                    bond_idxs[:, 0]),
                tf.gather(
                    h_v,
                    bond_idxs[:, 1]))

            # Angle representation: center atom plus symmetric sum of the
            # two outer atoms.
            h_a = tf.concat(
                [
                    tf.gather(
                        h_v,
                        angle_idxs[:, 1]),
                    tf.math.add(
                        tf.gather(
                            h_v,
                            angle_idxs[:, 0]),
                        tf.gather(
                            h_v,
                            angle_idxs[:, 2]))
                ],
                axis=1)

            # Torsion representation: symmetric sums of the outer and the
            # inner atom pairs.
            h_t = tf.concat(
                [
                    tf.math.add(
                        tf.gather(
                            h_v,
                            torsion_idxs[:, 0]),
                        tf.gather(
                            h_v,
                            torsion_idxs[:, 3])),
                    tf.math.add(
                        tf.gather(
                            h_v,
                            torsion_idxs[:, 1]),
                        tf.gather(
                            h_v,
                            torsion_idxs[:, 2])),
                ],
                axis=1)

            # Pair representation: all ordered atom pairs, (n_atoms, n_atoms, 2*d).
            h_p = tf.concat(
                [
                    tf.tile(
                        tf.expand_dims(
                            h_v,
                            0),
                        [n_atoms, 1, 1]),
                    tf.tile(
                        tf.expand_dims(
                            h_v,
                            1),
                        [1, n_atoms, 1])
                ],
                axis=2)

            # Predict polynomial coefficients per entity.
            y_v = self.d_v_1(
                self.d_v_0(
                    h_v))

            y_e = self.d_e_1(
                self.d_e_0(
                    h_e))

            y_a = self.d_a_1(
                self.d_a_0(
                    h_a))

            y_t = self.d_t_1(
                self.d_t_0(
                    h_t))

            y_p = self.d_p_1(
                self.d_p_0(
                    h_p))

            # Turn the coefficients into a per-molecule potential energy.
            u = params_to_potential(
                y_v, y_e, y_a, y_t, y_p,
                bond_idxs, angle_idxs, torsion_idxs,
                coordinates,
                atom_in_mol,
                bond_in_mol,
                attr_in_mol)

            return u

    gn = gin.probabilistic.gn_plus.GraphNet(
        f_e=lime.nets.for_gn.ConcatenateThenFullyConnect(
            (point['D_E'], 'elu', point['D_E'], 'tanh')),
        f_v=f_v(),
        # Global attribute initializer: one zero vector of width D_U per
        # molecule in the batch.
        f_u=(lambda atoms, adjacency_map, batched_attr_in_mol: \
            tf.tile(
                tf.zeros((1, point['D_U'])),
                [
                    tf.math.count_nonzero(batched_attr_in_mol),
                    1
                ]
            )),
        phi_e=lime.nets.for_gn.ConcatenateThenFullyConnect(
            (point['phi_e_0'], point['phi_e_a_0'], point['D_E'],
            point['phi_e_a_1'])),
        phi_v=lime.nets.for_gn.ConcatenateThenFullyConnect(
            (point['phi_v_0'], point['phi_v_a_0'], point['D_V'],
            point['phi_v_a_1'])),
        # NOTE(review): phi_u's second activation reads point['phi_v_a_1'],
        # not point['phi_u_a_1'] -- looks like a copy-paste slip; confirm.
        phi_u=lime.nets.for_gn.ConcatenateThenFullyConnect(
            (point['phi_u_0'], point['phi_u_a_0'], point['D_U'],
            point['phi_v_a_1'])),
        f_r=f_r(),
        repeat=5)

    opt = tf.keras.optimizers.Ftrl(1e-3)
def obj_fn(point):
point = dict(zip(config_space.keys(), point))
init(point)
for dummy_idx in range(50):
for atoms_, adjacency_map, atom_in_mol, bond_in_mol, u, attr_in_mol in ds_tr:
atoms = atoms_[:, :12]
coordinates = tf.Variable(atoms_[:, 12:15] * BORN_TO_ANGSTROM)
jacobian = atoms_[:, 15:] * HARTREE_PER_BORN_TO_KCAL_PER_MOL_PER_ANGSTROM
with tf.GradientTape() as tape:
bond_idxs, angle_idxs, torsion_idxs = gin.probabilistic.gn_hyper\
.get_geometric_idxs(atoms, adjacency_map)
with tf.GradientTape() as tape1:
u_hat = gn(
atoms, adjacency_map, atom_in_mol,
bond_in_mol,
attr_in_mol,
attr_in_mol=attr_in_mol,
bond_idxs=bond_idxs,
angle_idxs=angle_idxs,
torsion_idxs=torsion_idxs,
coordinates=coordinates)
jacobian_hat = tape1.gradient(u_hat, coordinates)
jacobian_hat = -tf.boolean_mask(
jacobian_hat,
tf.reduce_any(
atom_in_mol,
axis=1))
jacobian = tf.boolean_mask(
jacobian,
tf.reduce_any(
atom_in_mol,
axis=1))
u = tf.boolean_mask(
u,
attr_in_mol)
'''
loss_0 = tf.reduce_sum(tf.keras.losses.MAE(
tf.math.log(
tf.norm(
jacobian,
axis=1)),
tf.math.log(
tf.norm(
jacobian_hat,
axis=1))))
loss_1 = tf.reduce_sum(tf.losses.cosine_similarity(
jacobian,
jacobian_hat,
axis=1))
print(loss_0, loss_1)
loss = loss_0 + loss_1
'''
loss = tf.reduce_sum(tf.keras.losses.MSE(jacobian,
jacobian_hat))
print(loss, flush=True)
variables = gn.variables
grad = tape.gradient(loss, variables)
# if not tf.reduce_any([tf.reduce_any(tf.math.is_nan(_grad)) for _grad | |
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
    """Thrift-generated service interface (see the DO NOT EDIT header above).

    Declares the RPC surface of an HDFS-like filesystem service: file
    lifecycle (create/open/append/close), I/O (read/write), namespace
    operations (rm/rename/mkdirs/exists/stat/listStatus), metadata
    (chmod/chown/setReplication/getFileBlockLocations) and server control
    (setInactivityTimeoutPeriod/shutdown). All methods are stubs; concrete
    behavior lives in the generated Client/Processor classes.
    """
    def setInactivityTimeoutPeriod(self, periodInSeconds):
        """
        Parameters:
        - periodInSeconds
        """
        pass

    def shutdown(self, status):
        """
        Parameters:
        - status
        """
        pass

    def create(self, path):
        """
        Parameters:
        - path
        """
        pass

    def createFile(self, path, mode, overwrite, bufferSize, block_replication, blocksize):
        """
        Parameters:
        - path
        - mode
        - overwrite
        - bufferSize
        - block_replication
        - blocksize
        """
        pass

    def open(self, path):
        """
        Parameters:
        - path
        """
        pass

    def append(self, path):
        """
        Parameters:
        - path
        """
        pass

    def write(self, handle, data):
        """
        Parameters:
        - handle
        - data
        """
        pass

    def read(self, handle, offset, size):
        """
        Parameters:
        - handle
        - offset
        - size
        """
        pass

    def close(self, out):
        """
        Parameters:
        - out
        """
        pass

    def rm(self, path, recursive):
        """
        Parameters:
        - path
        - recursive
        """
        pass

    def rename(self, path, dest):
        """
        Parameters:
        - path
        - dest
        """
        pass

    def mkdirs(self, path):
        """
        Parameters:
        - path
        """
        pass

    def exists(self, path):
        """
        Parameters:
        - path
        """
        pass

    def stat(self, path):
        """
        Parameters:
        - path
        """
        pass

    def listStatus(self, path):
        """
        Parameters:
        - path
        """
        pass

    def chmod(self, path, mode):
        """
        Parameters:
        - path
        - mode
        """
        pass

    def chown(self, path, owner, group):
        """
        Parameters:
        - path
        - owner
        - group
        """
        pass

    def setReplication(self, path, replication):
        """
        Parameters:
        - path
        - replication
        """
        pass

    def getFileBlockLocations(self, path, start, length):
        """
        Parameters:
        - path
        - start
        - length
        """
        pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot != None:
self._oprot = oprot
self._seqid = 0
def setInactivityTimeoutPeriod(self, periodInSeconds):
"""
Parameters:
- periodInSeconds
"""
self.send_setInactivityTimeoutPeriod(periodInSeconds)
self.recv_setInactivityTimeoutPeriod()
def send_setInactivityTimeoutPeriod(self, periodInSeconds):
self._oprot.writeMessageBegin('setInactivityTimeoutPeriod', TMessageType.CALL, self._seqid)
args = setInactivityTimeoutPeriod_args()
args.periodInSeconds = periodInSeconds
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setInactivityTimeoutPeriod(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = setInactivityTimeoutPeriod_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def shutdown(self, status):
"""
Parameters:
- status
"""
self.send_shutdown(status)
self.recv_shutdown()
def send_shutdown(self, status):
self._oprot.writeMessageBegin('shutdown', TMessageType.CALL, self._seqid)
args = shutdown_args()
args.status = status
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_shutdown(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = shutdown_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def create(self, path):
"""
Parameters:
- path
"""
self.send_create(path)
return self.recv_create()
def send_create(self, path):
self._oprot.writeMessageBegin('create', TMessageType.CALL, self._seqid)
args = create_args()
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_create(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = create_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "create failed: unknown result");
def createFile(self, path, mode, overwrite, bufferSize, block_replication, blocksize):
"""
Parameters:
- path
- mode
- overwrite
- bufferSize
- block_replication
- blocksize
"""
self.send_createFile(path, mode, overwrite, bufferSize, block_replication, blocksize)
return self.recv_createFile()
def send_createFile(self, path, mode, overwrite, bufferSize, block_replication, blocksize):
self._oprot.writeMessageBegin('createFile', TMessageType.CALL, self._seqid)
args = createFile_args()
args.path = path
args.mode = mode
args.overwrite = overwrite
args.bufferSize = bufferSize
args.block_replication = block_replication
args.blocksize = blocksize
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_createFile(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = createFile_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "createFile failed: unknown result");
def open(self, path):
"""
Parameters:
- path
"""
self.send_open(path)
return self.recv_open()
def send_open(self, path):
self._oprot.writeMessageBegin('open', TMessageType.CALL, self._seqid)
args = open_args()
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_open(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = open_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "open failed: unknown result");
def append(self, path):
"""
Parameters:
- path
"""
self.send_append(path)
return self.recv_append()
def send_append(self, path):
self._oprot.writeMessageBegin('append', TMessageType.CALL, self._seqid)
args = append_args()
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_append(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = append_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "append failed: unknown result");
def write(self, handle, data):
"""
Parameters:
- handle
- data
"""
self.send_write(handle, data)
return self.recv_write()
def send_write(self, handle, data):
self._oprot.writeMessageBegin('write', TMessageType.CALL, self._seqid)
args = write_args()
args.handle = handle
args.data = data
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_write(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = write_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "write failed: unknown result");
def read(self, handle, offset, size):
"""
Parameters:
- handle
- offset
- size
"""
self.send_read(handle, offset, size)
return self.recv_read()
def send_read(self, handle, offset, size):
self._oprot.writeMessageBegin('read', TMessageType.CALL, self._seqid)
args = read_args()
args.handle = handle
args.offset = offset
args.size = size
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_read(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = read_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "read failed: unknown result");
def close(self, out):
"""
Parameters:
- out
"""
self.send_close(out)
return self.recv_close()
def send_close(self, out):
self._oprot.writeMessageBegin('close', TMessageType.CALL, self._seqid)
args = close_args()
args.out = out
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_close(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = close_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "close failed: unknown result");
def rm(self, path, recursive):
"""
Parameters:
- path
- recursive
"""
self.send_rm(path, recursive)
return self.recv_rm()
def send_rm(self, path, recursive):
self._oprot.writeMessageBegin('rm', TMessageType.CALL, self._seqid)
args = rm_args()
args.path = path
args.recursive = recursive
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_rm(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = rm_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "rm failed: unknown result");
def rename(self, path, dest):
"""
Parameters:
- path
- dest
"""
self.send_rename(path, dest)
return self.recv_rename()
def send_rename(self, path, dest):
self._oprot.writeMessageBegin('rename', TMessageType.CALL, self._seqid)
args = rename_args()
args.path = path
args.dest = dest
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_rename(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = rename_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "rename failed: unknown result");
def mkdirs(self, path):
"""
Parameters:
- path
"""
self.send_mkdirs(path)
return self.recv_mkdirs()
def send_mkdirs(self, path):
self._oprot.writeMessageBegin('mkdirs', TMessageType.CALL, self._seqid)
args = mkdirs_args()
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_mkdirs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = mkdirs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "mkdirs failed: unknown result");
def exists(self, path):
"""
Parameters:
- path
"""
self.send_exists(path)
return self.recv_exists()
def send_exists(self, path):
self._oprot.writeMessageBegin('exists', TMessageType.CALL, self._seqid)
args = exists_args()
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_exists(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = exists_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "exists failed: unknown result");
def stat(self, path):
"""
Parameters:
- path
"""
self.send_stat(path)
return self.recv_stat()
def send_stat(self, path):
self._oprot.writeMessageBegin('stat', TMessageType.CALL, self._seqid)
args = stat_args()
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_stat(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = stat_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "stat failed: unknown result");
def listStatus(self, path):
"""
| |
window length of `STFT`
hop_length (int): hop length of `STFT`
window_name (str or None): *Name* of `tf.signal` function that returns a 1D tensor window that is used in analysis.
Defaults to `hann_window` which uses `tf.signal.hann_window`.
Window availability depends on Tensorflow version. More details are at `kapre.backend.get_window()`.
pad_begin(bool): Whether to pad with zeros along time axis (length: win_length - hop_length). Defaults to `False`.
pad_end (bool): whether to pad the input signal at the end in `STFT`.
sample_rate (int): sample rate of the input audio
log_n_bins (int): number of the bins in the log-frequency filterbank
log_f_min (float): lowest frequency of the filterbank
log_bins_per_octave (int): number of bins in each octave in the filterbank
log_spread (float): spread constant (Q value) in the log filterbank.
return_decibel (bool): whether to apply decibel scaling at the end
db_amin (float): noise floor of decibel scaling input. See `MagnitudeToDecibel` for more details.
db_ref_value (float): reference value of decibel scaling. See `MagnitudeToDecibel` for more details.
db_dynamic_range (float): dynamic range of the decibel scaling result.
input_data_format (str): the audio data format of input waveform batch.
`'channels_last'` if it's `(batch, time, channels)`
`'channels_first'` if it's `(batch, channels, time)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
output_data_format (str): the data format of output mel spectrogram.
`'channels_last'` if you want `(batch, time, frequency, channels)`
`'channels_first'` if you want `(batch, channels, time, frequency)`
Defaults to the setting of your Keras configuration. (tf.keras.backend.image_data_format())
name (str): name of the returned layer
Note:
Log-frequency spectrogram is similar to melspectrogram but its frequency axis is perfectly linear to octave scale.
For some pitch-related applications, a log-frequency spectrogram can be a good choice.
Example:
::
input_shape = (2048, 2) # stereo signal, audio is channels_last
logfreq_stft_mag = get_log_frequency_spectrogram_layer(
input_shape=input_shape, n_fft=1024, return_decibel=True,
log_n_bins=84, input_data_format='channels_last', output_data_format='channels_last')
model = Sequential()
model.add(logfreq_stft_mag)
# now the shape is (batch, n_frame=3, n_bins=84, n_ch=2) because output_data_format is 'channels_last'
# and the dtype is float
"""
backend.validate_data_format_str(input_data_format)
backend.validate_data_format_str(output_data_format)
stft_kwargs = {}
if input_shape is not None:
stft_kwargs['input_shape'] = input_shape
waveform_to_stft = STFT(
**stft_kwargs,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
window_name=window_name,
pad_begin=pad_begin,
pad_end=pad_end,
input_data_format=input_data_format,
output_data_format=output_data_format,
)
stft_to_stftm = Magnitude()
_log_filterbank = backend.filterbank_log(
sample_rate=sample_rate,
n_freq=n_fft // 2 + 1,
n_bins=log_n_bins,
bins_per_octave=log_bins_per_octave,
f_min=log_f_min,
spread=log_spread,
)
kwargs = {
'sample_rate': sample_rate,
'n_freq': n_fft // 2 + 1,
'n_bins': log_n_bins,
'bins_per_octave': log_bins_per_octave,
'f_min': log_f_min,
'spread': log_spread,
}
stftm_to_loggram = ApplyFilterbank(
type='log', filterbank_kwargs=kwargs, data_format=output_data_format
)
layers = [waveform_to_stft, stft_to_stftm, stftm_to_loggram]
if return_decibel:
mag_to_decibel = MagnitudeToDecibel(
ref_value=db_ref_value, amin=db_amin, dynamic_range=db_dynamic_range
)
layers.append(mag_to_decibel)
return Sequential(layers, name=name)
def get_perfectly_reconstructing_stft_istft(
    stft_input_shape=None,
    istft_input_shape=None,
    n_fft=2048,
    win_length=None,
    hop_length=None,
    forward_window_name=None,
    waveform_data_format='default',
    stft_data_format='default',
    stft_name='stft',
    istft_name='istft',
):
    """Return a matched pair of layers, (`STFT`, `InverseSTFT`), forming a
    perfectly reconstructing analysis/synthesis chain.

    Args:
        stft_input_shape (tuple): input shape of a single waveform. Required only
            when the STFT layer is the first layer of a Sequential model.
        istft_input_shape (tuple): input shape of a single STFT. Required only
            when the ISTFT layer is the first layer of a Sequential model.
        n_fft (int): number of FFT points. Defaults to 2048.
        win_length (int or None): window length in samples. Defaults to `n_fft`.
        hop_length (int or None): hop length in samples between analysis windows.
            Defaults to `win_length // 4` following librosa.
        forward_window_name (str or None): *name* of the `tf.signal` window
            function used in the forward transform. Defaults to `hann_window`.
            Availability depends on the Tensorflow version; see
            `kapre.backend.get_window()`.
        waveform_data_format (str): data format of waveform batches;
            `'channels_last'` for `(batch, time, channels)`, `'channels_first'`
            for `(batch, channels, time)`, or the Keras default.
        stft_data_format (str): data format of STFT batches;
            `'channels_last'` for `(batch, time, frequency, channels)`,
            `'channels_first'` for `(batch, channels, time, frequency)`.
        stft_name (str): name of the returned STFT layer.
        istft_name (str): name of the returned ISTFT layer.

    Returns:
        A tuple ``(stft_layer, istft_layer)``.

    Raises:
        RuntimeError: if the `win_length` / `hop_length` ratio does not satisfy
            the reconstruction constraint.

    Note:
        Without a careful setting, `tf.signal.stft` and `tf.signal.istft` are
        not perfectly reconstructing.

    Note:
        For `x` --> `STFT` --> `InverseSTFT` --> `y`, `y` is longer than `x`
        because of the begin/end padding. With `channels_last`, compare using
        `trim_begin = win_length - hop_length` and
        `y_trimmed = y[trim_begin: trim_begin + len_signal, :]`.

    Example:
        ::

            stft_input_shape = (2048, 2)  # stereo and channels_last
            stft_layer, istft_layer = get_perfectly_reconstructing_stft_istft(
                stft_input_shape=stft_input_shape
            )
            unet = get_unet()  # input: stft (complex value), output: stft (complex value)
            model = Sequential()
            model.add(stft_layer)  # input is waveform
            model.add(unet)
            model.add(istft_layer)  # output is also waveform
    """
    backend.validate_data_format_str(waveform_data_format)
    backend.validate_data_format_str(stft_data_format)
    # Resolve librosa-style defaults before validating the ratio.
    win_length = n_fft if win_length is None else win_length
    hop_length = win_length // 4 if hop_length is None else hop_length
    if (win_length / hop_length) % 2 != 0:
        raise RuntimeError(
            'The ratio of win_length and hop_length must be power of 2 to get a '
            'perfectly reconstructing stft-istft pair.'
        )
    stft_kwargs = {} if stft_input_shape is None else {'input_shape': stft_input_shape}
    istft_kwargs = {} if istft_input_shape is None else {'input_shape': istft_input_shape}
    stft_layer = STFT(
        **stft_kwargs,
        n_fft=n_fft,
        win_length=win_length,
        hop_length=hop_length,
        window_name=forward_window_name,
        pad_begin=True,
        pad_end=True,
        input_data_format=waveform_data_format,
        output_data_format=stft_data_format,
        name=stft_name,
    )
    istft_layer = InverseSTFT(
        **istft_kwargs,
        n_fft=n_fft,
        win_length=win_length,
        hop_length=hop_length,
        forward_window_name=forward_window_name,
        input_data_format=stft_data_format,
        output_data_format=waveform_data_format,
        name=istft_name,
    )
    return stft_layer, istft_layer
def get_stft_mag_phase(
    input_shape,
    n_fft=2048,
    win_length=None,
    hop_length=None,
    window_name=None,
    pad_begin=False,
    pad_end=False,
    return_decibel=False,
    db_amin=1e-5,
    db_ref_value=1.0,
    db_dynamic_range=80.0,
    input_data_format='default',
    output_data_format='default',
    name='stft_mag_phase',
):
    """Build a Keras model that outputs the STFT magnitude and phase of input
    audio, concatenated along the channel axis.

    Args:
        input_shape (None or tuple of integers): input shape of the STFT layer.
            Required because the result is a keras Functional model.
            E.g., `(44100, 2)` for 44100-sample stereo audio with
            `input_data_format == 'channels_last'`.
        n_fft (int): number of FFT points in `STFT`.
        win_length (int): window length of `STFT`.
        hop_length (int): hop length of `STFT`.
        window_name (str or None): *name* of the `tf.signal` window function
            used in analysis. Defaults to `hann_window`. Availability depends on
            the Tensorflow version; see `kapre.backend.get_window()`.
        pad_begin (bool): whether to pad with zeros along the time axis
            (length: win_length - hop_length). Defaults to `False`.
        pad_end (bool): whether to pad the input signal at the end in `STFT`.
        return_decibel (bool): whether to apply decibel scaling to the magnitude.
        db_amin (float): noise floor of the decibel scaling input. See
            `MagnitudeToDecibel` for details.
        db_ref_value (float): reference value of the decibel scaling. See
            `MagnitudeToDecibel` for details.
        db_dynamic_range (float): dynamic range of the decibel scaling result.
        input_data_format (str): audio data format of the input waveform batch;
            `'channels_last'` for `(batch, time, channels)`, `'channels_first'`
            for `(batch, channels, time)`, or the Keras default.
        output_data_format (str): data format of the output;
            `'channels_last'` for `(batch, time, frequency, channels)`,
            `'channels_first'` for `(batch, channels, time, frequency)`.
        name (str): name of the returned model.

    Example:
        ::

            input_shape = (2048, 3)  # stereo and channels_last
            model = Sequential()
            model.add(
                get_stft_mag_phase(input_shape=input_shape, return_decibel=True, n_fft=1024)
            )
            # now output shape is (batch, n_frame=3, freq=513, ch=6). 6 channels = [3 mag ch; 3 phase ch]
    """
    backend.validate_data_format_str(input_data_format)
    backend.validate_data_format_str(output_data_format)
    stft_layer = STFT(
        n_fft=n_fft,
        win_length=win_length,
        hop_length=hop_length,
        window_name=window_name,
        pad_begin=pad_begin,
        pad_end=pad_end,
        input_data_format=input_data_format,
        output_data_format=output_data_format,
    )
    waveforms = keras.Input(shape=input_shape)
    stfts = stft_layer(waveforms)
    mag = Magnitude()(stfts)
    if return_decibel:
        mag = MagnitudeToDecibel(
            ref_value=db_ref_value, amin=db_amin, dynamic_range=db_dynamic_range
        )(mag)
    phase = Phase()(stfts)
    # Magnitude channels first, then phase channels, stacked on the channel axis.
    channel_axis = 1 if output_data_format == _CH_FIRST_STR else 3
    outputs = keras.layers.Concatenate(axis=channel_axis)([mag, phase])
    return Model(inputs=waveforms, outputs=outputs, name=name)
def get_frequency_aware_conv2d(
data_format='default', freq_aware_name='frequency_aware_conv2d', *args, **kwargs
):
"""Returns a frequency-aware conv2d layer.
Args:
data_format (str): specifies the data format of batch input/output.
freq_aware_name (str): name of the returned layer
*args: position args for `keras.layers.Conv2D`.
**kwargs: keyword args for `keras.layers.Conv2D`.
Returns:
A sequential model of ConcatenateFrequencyMap and Conv2D.
References:
<NAME>., <NAME>., & <NAME>. (2019).
`Receptive-Field-Regularized CNN Variants for Acoustic Scene Classification | |
from base64 import b64encode, b64decode
from bs4 import BeautifulSoup as soup
from bz2 import BZ2File
from collections import Counter, OrderedDict
from copy import deepcopy
from datetime import datetime as dt, timedelta
try:
from etk.extractors.date_extractor import DateExtractor
except OSError:
from spacy.cli import download
download('en_core_web_sm')
from etk.extractors.date_extractor import DateExtractor
from etk.extractors.spacy_ner_extractor import SpacyNerExtractor
from hashlib import sha256
from json import load, dump, loads, dumps
from math import sqrt, ceil, floor
from nltk import word_tokenize, pos_tag, ne_chunk, download as nltk_download
from nltk.corpus import stopwords
from numpy import array
from os import makedirs, listdir, rename, remove, chmod
from os.path import dirname, abspath, exists, join
from pandas import DataFrame
from pickle import load as pload, dump as pdump
from pprint import pprint
from random import choices, shuffle, seed
from regex import findall, sub, search, compile, match, DOTALL, MULTILINE, VERBOSE
from requests import get, post, head
from selenium.common.exceptions import TimeoutException
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import WebDriverException
from shutil import rmtree
from sklearn.cluster import KMeans
from sys import stdout, exc_info
from tarfile import open as tar_open
from threading import Thread
from time import strftime, sleep, time
from traceback import print_exc, format_exc
from urllib.parse import urljoin, quote
from hashlib import md5
from xml.etree.cElementTree import iterparse
from wikipediaapi import Wikipedia
# --- constants ---------------------------------------------------------------
PATH_RESOURCES = join(dirname(__file__), 'resources')
PATH_LOG = join(PATH_RESOURCES, 'log_%s.txt')
PATH_ALL_TABLES = join(PATH_RESOURCES, 'all_tables.jsonl')
PATTERN_LOG = '[%s] %s\n'
SCRIPT_ADD_RENDER = """
function pathTo(element) {
if (element === document) return ""
var ix = 0
var siblings = element.parentNode.childNodes
for (var i = 0; i < siblings.length; i++) {
if (siblings[i] === element) return pathTo(element.parentNode) + '/' + element.tagName + '[' + (ix + 1) + ']'
if (siblings[i].nodeType === 1 && siblings[i].tagName === element.tagName) ix++
}
}
var removeElements = []
function addRender(subtree) {
var style = getComputedStyle(subtree)
if (subtree.tagName == "TR" && subtree.children.length == 0 || subtree.offsetWidth == undefined || style["display"] == "none" || subtree.tagName == "SUP" && subtree.className == "reference") {
removeElements.push(subtree)
return
}
var serialStyle = ""
for (let prop of style) {
if (prop[0] != "-") {
serialStyle += prop + ":" + style[prop].replace(/:/g, "") + "|"
}
}
serialStyle += "width:" + subtree.offsetWidth / document.body.offsetWidth + "|height:" + subtree.offsetHeight / document.body.offsetHeight
if (subtree.tagName == "TD" || subtree.tagName == "TH") {
serialStyle += "|colspan:" + subtree.colSpan + "|rowspan:" + subtree.rowSpan
}
subtree.setAttribute("data-computed-style", serialStyle)
subtree.setAttribute("data-xpath", pathTo(subtree).toLowerCase())
for (let child of subtree.children) addRender(child)
}
function preprocess() {
var elements = document.querySelectorAll(injected_script_selector)
for (let subtree of elements) addRender(subtree)
for (let elem of removeElements) elem.remove()
}
const injected_script_selector = arguments[0]
if (document.readyState == 'complete') {
preprocess()
} else {
window.onload = function(){preprocess()}
}
"""
# --- import directives -------------------------------------------------------
makedirs(PATH_RESOURCES, exist_ok=True)
try:
stopwords.words("english")
except:
nltk_download('stopwords')
# --- format ------------------------------------------------------------------
def date_stamp():
    ''' Return the current timestamp as 'YYYY-MM-DD, HH:MM:SS'. '''
    fmt = '%Y-%m-%d, %H:%M:%S'
    return strftime(fmt)
def bytes_to_human(size, decimal_places=2):
    ''' Returns a human readable file size from a number of bytes. '''
    units = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    steps = 0
    # Divide by 1024 at most once per unit, exactly like walking the unit list.
    while steps < len(units) and size >= 1024:
        size /= 1024
        steps += 1
    unit = units[min(steps, len(units) - 1)]
    return f'{size:.{decimal_places}f}{unit}B'
def seconds_to_human(seconds):
    ''' Returns a human readable string from a number of seconds. '''
    delta = timedelta(seconds=int(seconds))
    # zfill keeps short durations in a fixed 'HH:MM:SS' width.
    return str(delta).zfill(8)
def hashed(text):
    ''' Returns the md5 hash of a text. Not recommended for security concerns. '''
    digest = md5(text.encode())
    return digest.hexdigest()
def fname_escape(text):
    ''' Escape characters that are unsafe in file names as _<ord>_ tokens;
    the underscore itself is protected first so the escaping is reversible. '''
    protected = text.replace('_', '_95_')
    return sub(r'([^\w\s\.])', lambda m: '_%d_' % ord(m.group()), protected)
def fname_unescape(text):
    ''' Reverse fname_escape: turn every _<ord>_ token back into its character. '''
    def restore(match):
        return chr(int(match.group()[1:-1]))
    return sub(r'(_\d+_)', restore, text)
# --- log ---------------------------------------------------------------------
def log(log_name, text):
    ''' Logs the given text to the log specified, and prints it. '''
    stamped = PATTERN_LOG % (date_stamp(), text)
    # stamped already ends with a newline, hence end=''.
    print('[%s] %s' % (log_name, stamped), end='')
    with open(PATH_LOG % log_name, 'a', encoding='utf-8') as handle:
        handle.write(stamped)
def log_error():
    ''' Used inside an except sentence, logs the error to the error log. '''
    # format_exc() captures the traceback of the exception currently handled.
    log('error', format_exc())
def cache(target, args, identifier=None, cache_life=3 * 24 * 3600):
    ''' Run the target function with the given args, and store it to a pickled
    cache folder using the given identifier or the name of the function. The
    next time it is executed, the cached output is returned unless cache_life
    time expires.

    target: callable to run.
    args: positional argument tuple/list passed to target.
    identifier: cache key; defaults to target.__name__.
    cache_life: freshness window in seconds (default: 3 days). '''
    # Identity comparison (is None) instead of == None; fall back to the
    # function's name when no explicit identifier is given.
    if identifier is None:
        identifier = target.__name__
    # Strip characters that are unsafe in file names.
    identifier = sub(r'[/\\\*;\[\]\'\":=,<>]', '_', identifier)
    path = join(PATH_RESOURCES, f'.pickled/{identifier}.pk')
    makedirs(dirname(path), exist_ok=True)
    now = time()
    if exists(path):
        # Close the file before the freshness check; no need to hold it open.
        with open(path, 'rb') as fp:
            save_time, value = pload(fp)
        if now - save_time <= cache_life:
            return value
    res = target(*args)
    with open(path, 'wb') as fp:
        pdump((now, res), fp, protocol=3)
    return res
# --- network -----------------------------------------------------------------
def download_file(url, path=None, chunk_size=10**5):
    ''' Downloads a file keeping track of the progress.

    url: URL to fetch (streamed with requests).
    path: destination file; defaults to the last URL segment.
    chunk_size: bytes read per iteration. '''
    if path is None:
        path = url.split('/')[-1]
    r = get(url, stream=True)
    # The server may omit content-length (e.g. chunked transfer); fall back to
    # 0 instead of crashing on int(None).
    total_bytes = int(r.headers.get('content-length') or 0)
    bytes_downloaded = 0
    start = time()
    print('Downloading %s (%s)' % (url, bytes_to_human(total_bytes)))
    with open(path, 'wb') as fp:
        for chunk in r.iter_content(chunk_size=chunk_size):
            if not chunk: continue
            fp.write(chunk)
            bytes_downloaded += len(chunk)
            if total_bytes:
                # Progress bar only makes sense with a known total size.
                percent = bytes_downloaded / total_bytes
                bar = ('█' * int(percent * 32)).ljust(32)
                # Guard against a zero elapsed time on very fast first chunks.
                time_delta = max(time() - start, 1e-9)
                eta = seconds_to_human((total_bytes - bytes_downloaded) * time_delta / bytes_downloaded)
                avg_speed = bytes_to_human(bytes_downloaded / time_delta).rjust(9)
                stdout.flush()
                stdout.write('\r %6.02f%% |%s| %s/s eta %s' % (100 * percent, bar, avg_speed, eta))
    print()
_driver = None
def get_driver(headless=True, disable_images=True, open_links_same_tab=False):
    ''' Returns a Firefox webdriver, and run one if there is no any active. '''
    # Module-level singleton: the option arguments only take effect on the
    # first call that actually creates the driver.
    global _driver
    if _driver == None:
        opts = Options()
        opts.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
        if open_links_same_tab:
            # Force links that would open a new window/tab into the current tab.
            opts.set_preference('browser.link.open_newwindow.restriction', 0)
            opts.set_preference('browser.link.open_newwindow', 1)
        if headless: opts.set_headless()  # NOTE(review): deprecated in newer selenium; verify installed version
        if disable_images: opts.set_preference('permissions.default.image', 2)
        _driver = Firefox(options=opts)
        # Abort page loads that take longer than 15 seconds.
        _driver.set_page_load_timeout(15)
    return _driver
def close_driver():
    ''' Close the current Firefox webdriver, if any. '''
    global _driver
    if _driver is not None:
        print('Closing Firefox driver')
        _driver.close()
        # Reset the singleton so a later get_driver() call launches a fresh
        # browser instead of returning the already-closed instance.
        _driver = None
def get_with_render(url, render_selector='table', headless=True, disable_images=True, open_links_same_tab=False):
    ''' Downloads a page and renders it to return the page source, the width,
    and the height in pixels. Elements on the subtree selected using
    render_selector contain a data-computed-style attribute and a data-xpath. '''
    driver = get_driver(headless, disable_images, open_links_same_tab)
    driver.get(url)
    # Inject the annotation script; the selector is passed as arguments[0].
    driver.execute_script(SCRIPT_ADD_RENDER, render_selector)
    # Small grace period so the injected script finishes annotating the DOM
    # before the page source is captured.
    sleep(.5)
    return driver.page_source
# --- vector ------------------------------------------------------------------
def vectors_average(vectors):
    ''' Given a list of mixed feature vectors, returns the average of all them.
    For numerical features, aritmetic average is used. For categorical ones,
    the most common is used. '''
    non_empty = [vec for vec in vectors if len(vec)]
    if not non_empty:
        return {}
    averaged = {}
    for feat in non_empty[0]:
        column = [vec[feat] for vec in non_empty]
        if type(column[0]) is str:
            # Categorical: pick the most frequent value.
            averaged[feat] = Counter(column).most_common(1)[0][0]
        else:
            averaged[feat] = sum(column) / len(column)
    return averaged
def vectors_weighted_average(vectors):
    ''' Given a list of tuples of type <weight, mixed feature vector>, returns
    the weighted average of all them. For numerical features, aritmetic average
    is used. For categorical ones, weighted frequencies are used to return the
    most common.

    When every weight is zero, all vectors are given equal weight. '''
    if len(vectors) == 1: return vectors[0][1]
    total_weight = sum(w for w, _ in vectors)
    if total_weight == 0:
        # Rebuild the list instead of mutating it in place: the documented
        # input is a list of *tuples*, and tuples do not support item
        # assignment (the old code raised TypeError here).
        vectors = [(1, fs) for _, fs in vectors]
        total_weight = len(vectors)
    # Normalize so that the weights sum to 1.
    vectors = [(w / total_weight, fs) for w, fs in vectors]
    res = {}
    for f in vectors[0][1]:
        if type(vectors[0][1][f]) == str:
            # Categorical: accumulate the weight of each seen value and keep
            # the heaviest one.
            sum_feat = {}
            for weight, features in vectors:
                if features[f] in sum_feat:
                    sum_feat[features[f]] += weight
                else:
                    sum_feat[features[f]] = weight
            res[f] = max(sum_feat.items(), key=lambda v: v[1])[0]
        else:
            # Numerical: weighted sum of the normalized weights.
            val = 0
            for weight, features in vectors:
                val += weight * features[f]
            res[f] = val
    return res
def vectors_difference(v1, v2, prefix=''):
    ''' Given two mixed feature vectors, return another vector with the
    differences amongst them. For numerical features, absolute value difference
    is computed. For categorical features, Gower distance is used. '''
    diff = {}
    for feat, value in v1.items():
        if type(value) is str:
            # Gower distance: 0 when equal, 1 otherwise.
            diff[prefix + feat] = 0 if value == v2[feat] else 1
        else:
            diff[prefix + feat] = abs(value - v2[feat])
    return diff
def vector_module(vector):
    ''' Given a mixed feature vector, return the norm of their numerical
    attributes. '''
    squared = sum(v * v for v in vector.values() if type(v) is not str)
    return sqrt(squared)
def binarize_categorical(vectors):
    ''' Given a 2-D list of mixed feature vectors, transform every categorical
    feature into a binary one, using the seen values of all the vectors.

    The set of categorical feature names is taken from the first non-empty
    cell; empty cells are left untouched. The input is not modified. '''
    vectors = deepcopy(vectors)
    # Default to an empty feature list when every cell is empty; the old
    # next(...) without a default raised StopIteration in that case.
    cat_vector = next(
        ([k for k, v in cell.items() if type(v) == str]
         for row in vectors for cell in row if len(cell)),
        [])
    for f in cat_vector:
        # One binary feature per distinct value seen anywhere in the grid.
        values = list(set(cell[f] for row in vectors for cell in row if len(cell)))
        for r, row in enumerate(vectors):
            for c, cell in enumerate(row):
                if len(cell) == 0: continue
                for v in values:
                    vectors[r][c][f'{f}-{v}'] = 1 if v == cell[f] else 0
                del vectors[r][c][f]
    return vectors
# --- parsing -----------------------------------------------------------------
_find_dates_extractor = DateExtractor()
def find_dates(text):
try:
| |
works on file streams.
Args:
fh: The stream to get the length of.
Returns:
The length of the stream.
"""
pos = fh.tell()
fh.seek(0, 2)
length = fh.tell()
fh.seek(pos, 0)
return length
def GetUserAgent(get_version=sdk_update_checker.GetVersionObject,
                 get_platform=appengine_rpc.GetPlatformToken,
                 sdk_product=SDK_PRODUCT):
  """Determines the value of the 'User-agent' header to use for HTTP requests.

  If the 'APPCFG_SDK_NAME' environment variable is present, that will be
  used as the first product token in the user-agent.

  Args:
    get_version: Used for testing.
    get_platform: Used for testing.
    sdk_product: Used as part of sdk/version product token.

  Returns:
    String containing the 'user-agent' header value, which includes the SDK
    version, the platform information, and the version of Python;
    e.g., 'appcfg_py/1.0.1 Darwin/9.2.0 Python/2.5.2'.
  """
  tokens = []
  # Environment override wins over the sdk/version token.
  sdk_name = os.environ.get('APPCFG_SDK_NAME')
  if sdk_name:
    tokens.append(sdk_name)
  else:
    version = get_version()
    release = 'unknown' if version is None else version['release']
    tokens.append('%s/%s' % (sdk_product, release))
  tokens.append(get_platform())
  tokens.append('Python/%s' % '.'.join(str(i) for i in sys.version_info))
  return ' '.join(tokens)
def GetSourceName(get_version=sdk_update_checker.GetVersionObject):
  """Gets the name of this source version."""
  version = get_version()
  if version is None:
    return 'Google-appcfg-unknown'
  return 'Google-appcfg-%s' % (version['release'],)
def _ReadUrlContents(url):
  """Reads the contents of a URL into a string.

  Args:
    url: a string that is the URL to read.

  Returns:
    A string that is the contents read from the URL.

  Raises:
    urllib2.URLError: If the URL cannot be read.
  """
  request = urllib2.Request(url)
  response = urllib2.urlopen(request)
  return response.read()
class AppCfgApp(object):
"""Singleton class to wrap AppCfg tool functionality.
This class is responsible for parsing the command line and executing
the desired action on behalf of the user. Processing files and
communicating with the server is handled by other classes.
Attributes:
actions: A dictionary mapping action names to Action objects.
action: The Action specified on the command line.
parser: An instance of optparse.OptionParser.
options: The command line options parsed by 'parser'.
argv: The original command line as a list.
args: The positional command line args left over after parsing the options.
error_fh: Unexpected HTTPErrors are printed to this file handle.
Attributes for testing:
parser_class: The class to use for parsing the command line. Because
OptionsParser will exit the program when there is a parse failure, it
is nice to subclass OptionsParser and catch the error before exiting.
read_url_contents: A function to read the contents of a URL.
"""
  def __init__(self, argv, parser_class=optparse.OptionParser,
               rpc_server_class=None,
               out_fh=sys.stdout,
               error_fh=sys.stderr,
               update_check_class=sdk_update_checker.SDKUpdateChecker,
               throttle_class=None,
               opener=open,
               file_iterator=FileIterator,
               time_func=time.time,
               wrap_server_error_message=True,
               oauth_client_id=APPCFG_CLIENT_ID,
               oauth_client_secret=APPCFG_CLIENT_NOTSOSECRET,
               oauth_scopes=APPCFG_SCOPES):
    """Initializer.  Parses the cmdline and selects the Action to use.

    Initializes all of the attributes described in the class docstring.
    Prints help or error messages if there is an error parsing the cmdline.

    Args:
      argv: The list of arguments passed to this program.
      parser_class: Options parser to use for this application.
      rpc_server_class: RPC server class to use for this application.
      out_fh: All normal output is printed to this file handle.
      error_fh: Unexpected HTTPErrors are printed to this file handle.
      update_check_class: sdk_update_checker.SDKUpdateChecker class (can be
        replaced for testing).
      throttle_class: A class to use instead of ThrottledHttpRpcServer
        (only used in the bulkloader).
      opener: Function used for opening files.
      file_iterator: Callable that takes (basepath, skip_files, file_separator)
        and returns a generator that yields all filenames in the file tree
        rooted at that path, skipping files that match the skip_files compiled
        regular expression.
      time_func: A time.time() compatible function, which can be overridden for
        testing.
      wrap_server_error_message: If true, the error messages from
        urllib2.HTTPError exceptions in Run() are wrapped with
        '--- begin server output ---' and '--- end server output ---',
        otherwise the error message is printed as is.
      oauth_client_id: The client ID of the project providing Auth. Defaults to
        the SDK default project client ID, the constant APPCFG_CLIENT_ID.
      oauth_client_secret: The client secret of the project providing Auth.
        Defaults to the SDK default project client secret, the constant
        APPCFG_CLIENT_NOTSOSECRET.
      oauth_scopes: The scope or set of scopes to be accessed by the OAuth2
        token retrieved. Defaults to APPCFG_SCOPES. Can be a string or
        iterable of strings, representing the scope(s) to request.
    """
    self.parser_class = parser_class
    self.argv = argv
    self.rpc_server_class = rpc_server_class
    self.out_fh = out_fh
    self.error_fh = error_fh
    self.update_check_class = update_check_class
    self.throttle_class = throttle_class
    self.time_func = time_func
    self.wrap_server_error_message = wrap_server_error_message
    self.oauth_client_id = oauth_client_id
    self.oauth_client_secret = oauth_client_secret
    self.oauth_scopes = oauth_scopes
    self.read_url_contents = _ReadUrlContents
    # Build the generic parser first so every action can register its options
    # before the command line is parsed.
    self.parser = self._GetOptionParser()
    for action in self.actions.itervalues():
      action.options(self, self.parser)
    self.options, self.args = self.parser.parse_args(argv[1:])
    if len(self.args) < 1:
      self._PrintHelpAndExit()
    # Validate the runtime name unless --allow_any_runtime was given; with a
    # known runtime list, widen the appinfo schema to accept any of them.
    if not self.options.allow_any_runtime:
      if self.options.runtime:
        if self.options.runtime not in appinfo.GetAllRuntimes():
          _PrintErrorAndExit(self.error_fh,
                             '"%s" is not a supported runtime\n' %
                             self.options.runtime)
      else:
        appinfo.AppInfoExternal.ATTRIBUTES[appinfo.RUNTIME] = (
            '|'.join(appinfo.GetAllRuntimes()))
    if self.options.redundant_oauth2:
      print >>sys.stderr, (
          '\nNote: the --oauth2 flag is now the default and can be omitted.\n')
    action = self.args.pop(0)
    def RaiseParseError(actionname, action):
      # Rebuild the parser for this specific action so its usage is printed.
      self.parser, self.options = self._MakeSpecificParser(action)
      error_desc = action.error_desc
      if not error_desc:
        error_desc = "Expected a <directory> argument after '%s'." % (
            actionname.split(' ')[0])
      self.parser.error(error_desc)
    # 'backends' is a two-word action; fold the sub-command into the action
    # name whether it appears before or after the directory argument.
    if action == BACKENDS_ACTION:
      if len(self.args) < 1:
        RaiseParseError(action, self.actions[BACKENDS_ACTION])
      backend_action_first = BACKENDS_ACTION + ' ' + self.args[0]
      if backend_action_first in self.actions:
        self.args.pop(0)
        action = backend_action_first
      elif len(self.args) > 1:
        backend_directory_first = BACKENDS_ACTION + ' ' + self.args[1]
        if backend_directory_first in self.actions:
          self.args.pop(1)
          action = backend_directory_first
      if len(self.args) < 1 or action == BACKENDS_ACTION:
        RaiseParseError(action, self.actions[action])
    if action not in self.actions:
      self.parser.error("Unknown action: '%s'\n%s" %
                        (action, self.parser.get_description()))
    self.action = self.actions[action]
    if not self.action.uses_basepath or self.options.help:
      self.basepath = None
    else:
      if not self.args:
        RaiseParseError(action, self.action)
      self.basepath = self.args.pop(0)
    # Re-parse the command line with the action-specific parser so that
    # per-action options take effect.
    self.parser, self.options = self._MakeSpecificParser(self.action)
    if self.options.help:
      self._PrintHelpAndExit()
    if self.options.verbose == 2:
      logging.getLogger().setLevel(logging.INFO)
    elif self.options.verbose == 3:
      logging.getLogger().setLevel(logging.DEBUG)
    global verbosity
    verbosity = self.options.verbose
    # Command-line OAuth overrides beat the constructor defaults.
    if self.options.oauth2_client_id:
      self.oauth_client_id = self.options.oauth2_client_id
    if self.options.oauth2_client_secret:
      self.oauth_client_secret = self.options.oauth2_client_secret
    self.opener = opener
    self.file_iterator = file_iterator
  def Run(self):
    """Executes the requested action.

    Catches any HTTPErrors raised by the action and prints them to stderr.

    Returns:
      1 on error, 0 if successful.
    """
    try:
      self.action(self)
    except urllib2.HTTPError, e:
      # Surface the server's response body; optionally wrap it in markers so
      # it is easy to tell apart from local output.
      body = e.read()
      if self.wrap_server_error_message:
        error_format = ('Error %d: --- begin server output ---\n'
                        '%s\n--- end server output ---')
      else:
        error_format = 'Error %d: %s'
      print >>self.error_fh, (error_format % (e.code, body.rstrip('\n')))
      return 1
    except yaml_errors.EventListenerError, e:
      print >>self.error_fh, ('Error parsing yaml file:\n%s' % e)
      return 1
    except CannotStartServingError:
      print >>self.error_fh, 'Could not start serving the given version.'
      return 1
    return 0
def _GetActionDescriptions(self):
"""Returns a formatted string containing the short_descs for all actions."""
action_names = self.actions.keys()
action_names.sort()
desc = ''
for action_name in action_names:
if not self.actions[action_name].hidden:
desc += ' %s: %s\n' % (action_name,
self.actions[action_name].short_desc)
return desc
def _GetOptionParser(self):
"""Creates an OptionParser with generic usage and description strings.
Returns:
An OptionParser instance.
"""
def AppendSourceReference(option, opt_str, value, parser):
"""Validates the source reference string and appends it to the list."""
try:
appinfo.ValidateSourceReference(value)
except validation.ValidationError, e:
raise optparse.OptionValueError('option %s: %s' % (opt_str, e.message))
getattr(parser.values, option.dest).append(value)
class Formatter(optparse.IndentedHelpFormatter):
"""Custom help formatter that does not reformat the description."""
def format_description(self, description):
"""Very simple formatter."""
return description + '\n'
class AppCfgOption(optparse.Option):
"""Custom Option for AppCfg.
Adds an 'update' action for storing key-value pairs as a dict.
"""
_ACTION = 'update'
ACTIONS = optparse.Option.ACTIONS + (_ACTION,)
STORE_ACTIONS = optparse.Option.STORE_ACTIONS + (_ACTION,)
TYPED_ACTIONS = optparse.Option.TYPED_ACTIONS + (_ACTION,)
ALWAYS_TYPED_ACTIONS = optparse.Option.ALWAYS_TYPED_ACTIONS + (_ACTION,)
def take_action(self, action, dest, opt, value, values, parser):
if action != self._ACTION:
return optparse.Option.take_action(
self, action, dest, opt, value, values, parser)
try:
key, value = value.split(':', 1)
except ValueError:
raise optparse.OptionValueError(
'option %s: invalid value: %s (must match NAME:VALUE)' % (
opt, value))
values.ensure_value(dest, {})[key] = value
desc = self._GetActionDescriptions()
desc = ('Action must be one of:\n%s'
'Use \'help <action>\' for a detailed description.') % desc
parser = self.parser_class(usage='%prog [options] <action>',
description=desc,
formatter=Formatter(),
conflict_handler='resolve',
option_class=AppCfgOption)
parser.add_option('-h', '--help', action='store_true',
dest='help', help='Show the help message and exit.')
parser.add_option('-q', '--quiet', action='store_const', const=0,
dest='verbose', help='Print errors only.')
parser.add_option('-v', '--verbose', action='store_const', const=2,
dest='verbose', default=1,
help='Print info level logs.')
parser.add_option('--noisy', action='store_const', const=3,
dest='verbose', help='Print all logs.')
parser.add_option('-s', '--server', action='store', dest='server',
default='appengine.google.com',
metavar='SERVER', help='The App Engine server.')
parser.add_option('--secure', action='store_true', dest='secure',
default=True, help=optparse.SUPPRESS_HELP)
parser.add_option('--ignore_bad_cert', action='store_true',
dest='ignore_certs', default=False,
help=optparse.SUPPRESS_HELP)
parser.add_option('--insecure', action='store_false', dest='secure',
help=optparse.SUPPRESS_HELP)
parser.add_option('-e', '--email', action='store', dest='email',
metavar='EMAIL', default=None,
help='The username to use. Will prompt if omitted.')
parser.add_option('-H', '--host', action='store', dest='host',
metavar='HOST', default=None,
| |
<reponame>dbmi-pitt/DIKB-Evidence-analytics
"""Low-level interface to NCBI's EUtils for Entrez search and retrieval.
For higher-level interfaces, see DBIdsClient (which works with a set
of database identifiers) and HistoryClient (which does a much better
job of handling history).
There are five classes of services:
ESearch - search a database
EPost - upload a list of indices for further use
ESummary - get document summaries for a given set of records
EFetch - get the records translated to a given format
ELink - find related records in other databases
You can find more information about them at
http://www.ncbi.nlm.nih.gov/entrez/query/static/eutils_help.html
but that document isn't very useful. Perhaps the following is better.
EUtils offers a structured way to query Entrez, get the results in
various formats, and get information about related documents. The way
to start off is create an EUtils object.
>>> from Bio import EUtils
>>> from Bio.EUtils.ThinClient import ThinClient
>>> eutils = ThinClient.ThinClient()
>>>
You can search Entrez with the "esearch" method. This does a query on
the server, which generates a list of identifiers for records that
matched the query. However, not all the identifiers are returned.
You can request only a subset of the matches (using the 'retstart' and
'retmax') terms. This is useful because searches like 'cancer' can
have over 1.4 million matches. Most people would rather change the
query or look at more details about the first few hits than wait to
download all the identifiers before doing anything else.
The esearch method, and indeed all these methods, returns a
'urllib.addinfourl' which is an HTTP socket connection that has
already parsed the HTTP header and is ready to read the data from the
server.
For example, here's a query and how to use it
Search in PubMed for the term cancer for the entrez date from the
last 60 days and retrieve the first 10 IDs and translations using
the history parameter.
>>> infile = eutils.esearch("cancer",
... daterange = EUtils.WithinNDays(60, "edat"),
... retmax = 10)
>>>
>>> print infile.read()
<?xml version="1.0"?>
<!DOCTYPE eSearchResult PUBLIC "-//NLM//DTD eSearchResult, 11 May 2002//EN" "http://www.ncbi.nlm.nih.gov/entrez/query/DTD/eSearch_020511.dtd">
<eSearchResult>
<Count>7228</Count>
<RetMax>10</RetMax>
<RetStart>0</RetStart>
<IdList>
<Id>12503096</Id>
<Id>12503075</Id>
<Id>12503073</Id>
<Id>12503033</Id>
<Id>12503030</Id>
<Id>12503028</Id>
<Id>12502932</Id>
<Id>12502925</Id>
<Id>12502881</Id>
<Id>12502872</Id>
</IdList>
<TranslationSet>
<Translation>
<From>cancer%5BAll+Fields%5D</From>
<To>(%22neoplasms%22%5BMeSH+Terms%5D+OR+cancer%5BText+Word%5D)</To>
</Translation>
</TranslationSet>
<TranslationStack>
<TermSet>
<Term>"neoplasms"[MeSH Terms]</Term>
<Field>MeSH Terms</Field>
<Count>1407151</Count>
<Explode>Y</Explode>
</TermSet>
<TermSet>
<Term>cancer[Text Word]</Term>
<Field>Text Word</Field>
<Count>382919</Count>
<Explode>Y</Explode>
</TermSet>
<OP>OR</OP>
<TermSet>
<Term>2002/10/30[edat]</Term>
<Field>edat</Field>
<Count>-1</Count>
<Explode>Y</Explode>
</TermSet>
<TermSet>
<Term>2002/12/29[edat]</Term>
<Field>edat</Field>
<Count>-1</Count>
<Explode>Y</Explode>
</TermSet>
<OP>RANGE</OP>
<OP>AND</OP>
</TranslationStack>
</eSearchResult>
>>>
You get a raw XML input stream which you can process in many ways.
(The appropriate DTDs are included in the subdirectory "DTDs" and see
also the included POM reading code.)
WARNING! As of this writing (2002/12/3) NCBI returns their
XML encoded as Latin-1 but their processing instruction says
it is UTF-8 because they leave out the "encoding" attribute.
Until they fix it you will need to recode the input stream
before processing it with XML tools, like this
import codecs
infile = codecs.EncodedFile(infile, "utf-8", "iso-8859-1")
The XML fields are mostly understandable:
Count -- the total number of matches from this search
RetMax -- the number of <ID> values returned in this subset
RetStart -- the start position of this subset in the list of
all matches
IDList and ID -- the identifiers in this subset
TranslationSet / Translation -- if the search field is not
explicitly specified ("qualified"), then the server will
apply a set of heuristics to improve the query. Eg, in
this case "cancer" is first parsed as
cancer[All Fields]
then turned into the query
"neoplasms"[MeSH Terms] OR cancer[Text Word]
Note that these terms are URL escaped.
For details on how the translation is done, see
http://www.ncbi.nlm.nih.gov/entrez/query/static/help/pmhelp.html#AutomaticTermMapping
TranslationStack -- The (possibly 'improved' query) fully
parsed out and converted into a postfix (RPN) notation.
The above example is written in the Entrez query language as
("neoplasms"[MeSH Terms] OR cancer[Text Word]) AND
2002/10/30:2002/12/29[edat]
Note that these terms are *not* URL escaped. Nothing like
a bit of inconsistency for the soul.
The "Count" field shows how many matches were found for each
term of the expression. I don't know what "Explode" does.
Let's get more information about the first record, which has an id of
12503096. There are two ways to query for information, one uses a set
of identifiers and the other uses the history. I'll talk about the
history one in a bit. To use a set of identifiers you need to make a
DBIds object containing that list.
>>> dbids = EUtils.DBIds("pubmed", ["12503096"])
>>>
Now get the summary using dbids
>>> infile = eutils.esummary_using_dbids(dbids)
>>> print infile.read()
<?xml version="1.0"?>
<!DOCTYPE eSummaryResult PUBLIC "-//NLM//DTD eSummaryResult, 11 May 2002//EN" "http://www.ncbi.nlm.nih.gov/entrez/query/DTD/eSummary_020511.dtd">
<eSummaryResult>
<DocSum>
<Id>12503096</Id>
<Item Name="PubDate" Type="Date">2003 Jan 30</Item>
<Item Name="Source" Type="String">Am J Med Genet</Item>
<Item Name="Authors" Type="String"><NAME>, <NAME>, <NAME>, <NAME>, <NAME></Item>
<Item Name="Title" Type="String">What do ratings of cancer-specific distress mean among women at high risk of breast and ovarian cancer?</Item>
<Item Name="Volume" Type="String">116</Item>
<Item Name="Pages" Type="String">222-8</Item>
<Item Name="EntrezDate" Type="Date">2002/12/28 04:00</Item>
<Item Name="PubMedId" Type="Integer">12503096</Item>
<Item Name="MedlineId" Type="Integer">22390532</Item>
<Item Name="Lang" Type="String">English</Item>
<Item Name="PubType" Type="String"></Item>
<Item Name="RecordStatus" Type="String">PubMed - in process</Item>
<Item Name="Issue" Type="String">3</Item>
<Item Name="SO" Type="String">2003 Jan 30;116(3):222-8</Item>
<Item Name="DOI" Type="String">10.1002/ajmg.a.10844</Item>
<Item Name="JTA" Type="String">3L4</Item>
<Item Name="ISSN" Type="String">0148-7299</Item>
<Item Name="PubId" Type="String"></Item>
<Item Name="PubStatus" Type="Integer">4</Item>
<Item Name="Status" Type="Integer">5</Item>
<Item Name="HasAbstract" Type="Integer">1</Item>
<Item Name="ArticleIds" Type="List">
<Item Name="PubMedId" Type="String">12503096</Item>
<Item Name="DOI" Type="String">10.1002/ajmg.a.10844</Item>
<Item Name="MedlineUID" Type="String">22390532</Item>
</Item>
</DocSum>
</eSummaryResult>
>>>
This is just a summary. To get the full details, including an
abstract (if available) use the 'efetch' method. I'll only print a
bit to convince you it has an abstract.
>>> s = eutils.efetch_using_dbids(dbids).read()
>>> print s[587:860]
<ArticleTitle>What do ratings of cancer-specific distress mean among women at high risk of breast and ovarian cancer?</ArticleTitle>
<Pagination>
<MedlinePgn>222-8</MedlinePgn>
</Pagination>
<Abstract>
<AbstractText>Women recruited from a hereditary cancer registry provided
>>>
Suppose instead you want the data in a text format. Different
databases have different text formats. For example, PubMed has a
"docsum" format which gives just the summary of a document and
"medline" format as needed for a citation database. To get these, use
a "text" "retmode" ("return mode") and select the appropriate
"rettype" ("return type").
Here are examples of those two return types
>>> print eutils.efetch_using_dbids(dbids, "text", "docsum").read()[:497]
1: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
What do ratings of cancer-specific distress mean among women at high risk of breast and ovarian cancer?
Am J Med Genet. 2003 Jan 30;116(3):222-8.
PMID: 12503096 [PubMed - in process]
>>> print eutils.efetch_using_dbids(dbids, "text", "medline").read()[:369]
UI - 22390532
PMID- 12503096
DA - 20021227
IS - 0148-7299
VI - 116
IP - 3
DP - 2003 Jan 30
TI - What do ratings of cancer-specific distress mean among women at high risk
of breast and ovarian cancer?
PG - 222-8
AB - Women recruited from a hereditary cancer registry provided ratings of
distress associated with different aspects of high-risk status
>>>
It's also possible to get a list of records related to a given
article. This is done through the "elink" method. For example,
here's how to get the list of PubMed articles related to the above
PubMed record. (Again, truncated because otherwise there is a lot of
data.)
>>> print eutils.elink_using_dbids(dbids).read()[:590]
<?xml version="1.0"?>
<!DOCTYPE eLinkResult PUBLIC "-//NLM//DTD eLinkResult, 11 May 2002//EN" "http://www.ncbi.nlm.nih.gov/entrez/query/DTD/eLink_020511.dtd">
<eLinkResult>
<LinkSet>
<DbFrom>pubmed</DbFrom>
<IdList>
<Id>12503096</Id>
</IdList>
<LinkSetDb>
<DbTo>pubmed</DbTo>
<LinkName>pubmed_pubmed</LinkName>
<Link>
<Id>12503096</Id>
<Score>2147483647</Score>
</Link>
<Link>
<Id>11536413</Id>
<Score>30817790</Score>
</Link>
<Link>
<Id>11340606</Id>
<Score>29939219</Score>
</Link>
<Link>
<Id>10805955</Id>
<Score>29584451</Score>
</Link>
>>>
For a change of pace, let's work with the protein database to learn
how to work with history. Suppose I want to do a multiple sequence
alignment of bacteriorhodopsin with all of its neighbors, where
"neighbors" is defined by NCBI. There are good programs for this -- I
just need to get the records in the right format, like FASTA.
The bacteriorhodopsin I'm interested in is BAA75200, which is
GI:4579714, so I'll start by asking for its neighbors.
>>> results = eutils.elink_using_dbids(
... EUtils.DBIds("protein", ["4579714"]),
... db = "protein").read()
>>> print results[:454]
<?xml version="1.0"?>
<!DOCTYPE eLinkResult PUBLIC "-//NLM//DTD eLinkResult, 11 May 2002//EN" "http://www.ncbi.nlm.nih.gov/entrez/query/DTD/eLink_020511.dtd">
<eLinkResult>
<LinkSet>
<DbFrom>protein</DbFrom>
<IdList>
<Id>4579714</Id>
</IdList>
<LinkSetDb>
<DbTo>protein</DbTo>
<LinkName>protein_protein</LinkName>
<Link>
<Id>4579714</Id>
<Score>2147483647</Score>
</Link>
<Link>
<Id>11277596</Id>
<Score>1279</Score>
</Link>
>>>
Let's get all the <Id> fields. (While the following isn't a good way
to parse XML, it is easy to understand and works well enough for this
example.) Note that I remove the first <Id> because that's from the
query and not from the results.
>>> import re
>>> ids = re.findall(r"<Id>(\d+)</Id>", results)
>>> ids = ids[1:]
>>> len(ids)
222
>>> dbids = EUtils.DBIds("protein", ids)
>>>
That's a lot of records. I could use 'efetch_using_dbids' but there's
a problem with that. Efetch uses the HTTP GET protocol to | |
# Script for Da-Tacos cover song identification from Feature Fused Matrices
#Importing
import librosa
import numpy as np
import scipy
from scipy.spatial.distance import pdist, squareform
from scipy.interpolate import interp2d
from scipy.sparse.csgraph import laplacian
from scipy.spatial.distance import directed_hausdorff
from scipy.cluster import hierarchy
from scipy.linalg import eigh
from scipy.ndimage import median_filter
from sklearn.metrics import average_precision_score
from sklearn.preprocessing import normalize
import cv2
from sklearn import metrics
import dill
import sys
import glob
import os
import random
import json
import deepdish as dd
#change matplotlib backend to save rendered plots correctly on linux
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
# #--supress warnings--#
# import warnings
# warnings.filterwarnings("ignore")
#---Load metadata---#
# Presumably maps work IDs (W) to dicts keyed by performance IDs (P);
# only the key structure is used below — TODO confirm against the dataset docs.
with open('/home/ismir/Documents/ISMIR/Datasets/da-tacos/da-tacos_benchmark_subset_metadata.json') as f:
    benchmark_metadata = json.load(f)
#---Segmentation parameters---#
rs_size = 128  # each SSM is resampled to rs_size x rs_size before the Laplacian
kmin = 8       # smallest number of Laplacian eigenvectors used per approximation
kmax = 12      # exclusive upper bound, so kmax-kmin approximations per song
#---Counters---#
count = 0      # NOTE(review): never used below — candidate for removal
W_count=0      # number of works loaded so far
P_count = 0    # number of performances loaded for the current work
#---Loading limits---#
min_covers = 5 #load works for which there are at least min_covers performances
max_covers = 5 #stop loading performances if over max_covers per work
max_works = 15
#---Storage---#
all_sets = []  # per performance: list of (kmax-kmin) structure-distance matrices
#all_shapeDNAs = []
all_WP = []    # per performance: [work ID, performance ID]
y = []         # work ID per loaded performance (parallel to all_sets)
#for all Works
for W in benchmark_metadata.keys():
    if len(benchmark_metadata[W].keys()) >= min_covers: #if it contains at least 5 covers
        P_count = 0
        #for all performances
        for P in benchmark_metadata[W].keys():
            P_count += 1
            #Computations
            try:
                # Flattened upper triangle of the fused self-similarity matrix.
                SSM = dd.io.load("/home/ismir/Documents/ISMIR/Datasets/da-tacosSSMs/StructureLaplacian_datacos_crema_" + P + ".h5")['WFused']
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
                # consider catching (IOError, KeyError) explicitly.
                print("Couldn't load " + P + ".")
                continue
            N = dd.io.load("/home/ismir/Documents/ISMIR/Datasets/da-tacosSSMs/StructureLaplacian_datacos_crema_" + P + ".h5")['N']
            #Construct square matrix from flattened upper triangle
            A = np.zeros((N,N))
            iN = np.triu_indices(N) #return indices for upper-triangle of (N,N) matrix
            for i in range(len(SSM)):
                A[iN[0][i]][iN[1][i]] = SSM[i]
            B = np.transpose(A)
            # NOTE(review): the main diagonal is present in both A and B, so it
            # is doubled in square_SSM — confirm this is intended.
            square_SSM = A+B
            #Resample
            SSM_ds = cv2.resize(square_SSM, (rs_size,rs_size))
            #Compute the Laplacian
            L = laplacian(SSM_ds, normed=True)
            #Laplacian eigenvalues and eigenvectors
            evals, evecs = eigh(L)
            # #Shape DNA
            # shapeDNA = evals[:30]
            # all_shapeDNAs.append(shapeDNA)
            #Hierarchical structure: smooth eigenvectors along time, then build
            #one normalized distance matrix per k in [kmin, kmax).
            evecs = median_filter(evecs, size=(9, 1))
            Cnorm = np.cumsum(evecs**2, axis=1)**0.5
            # #temporary replacement for bug
            # a_min_value = 3.6934424e-08
            # Cnorm[Cnorm == 0.0] = a_min_value
            # if (np.isnan(np.sum(Cnorm))):
            #     print("WOOOOOAH")
            dist_set = []
            for k in range(kmin, kmax):
                X = evecs[:, :k] / Cnorm[:, k-1:k]
                distance = squareform(pdist(X, metric='euclidean'))
                dist_set.append(distance)
            all_sets.append(dist_set)
            y.append(W)
            #append W and P
            all_WP.append([W, P])
            #plt.matshow()
            #plt.colorbar()
            #plt.show()
            if (P_count >=max_covers):
                break
        W_count +=1
        sys.stdout.write("\rLoading %i works." % W_count)
        sys.stdout.flush()
        if (W_count >= max_works):
            break
# Shape: (performances, kmax-kmin, rs_size, rs_size)
all_sets = np.asarray(all_sets)
file_no = all_sets.shape[0]
# all_shapeDNAs = np.asarray(all_shapeDNAs)
print("\nLoaded Da-TACOS SMMs.")
print("Data shape:", all_sets.shape)
#------------#
#-Formatting-#
#------------#
all_flat = []      # per song: (kmax-kmin) flattened distance matrices
all_merged = []    # per song: one vector concatenating all flattened matrices
all_shingled2 = [] # per song: shingles of adjacent pairs of flat approximations
all_shingled3 = [] # per song: shingles of adjacent triples of flat approximations
n_k = kmax - kmin
for song in range(file_no):
    flats = [all_sets[song][idx].flatten() for idx in range(n_k)]
    all_flat.append(np.asarray(flats))
    all_merged.append(np.concatenate(flats) if flats else np.empty((0)))
    #shingling per 2
    all_shingled2.append(np.asarray(
        [np.concatenate((flats[idx], flats[idx + 1]), axis=None)
         for idx in range(n_k - 1)]))
    #shingling per 3
    all_shingled3.append(np.asarray(
        [np.concatenate((flats[idx], flats[idx + 1], flats[idx + 2]), axis=None)
         for idx in range(n_k - 2)]))
    #progress
    sys.stdout.write("\rFormatted %i/%s approximation sets." % ((song+1), str(file_no)))
    sys.stdout.flush()
print('')
all_flat = np.asarray(all_flat)
all_merged = np.asarray(all_merged)
all_shingled2 = np.asarray(all_shingled2)
all_shingled3 = np.asarray(all_shingled3)
#----------------------#
#-Covers vs Non-covers-#
#----------------------#
#True if cover, False if non-cover
# Two performances are covers of each other iff they share the same work ID.
num_perf = len(all_WP)
covers = np.zeros((num_perf, num_perf), dtype=np.bool_)
for row in range(num_perf):
    for col in range(num_perf):
        covers[row][col] = (all_WP[row][0] == all_WP[col][0])
#-----------#
#-Distances-#
#-----------#
fig_dir = '/home/ismir/Documents/ISMIR/figures/datacos/'  # output dir for (disabled) histogram plots
#---L1---#
# L1 distance between each query's merged vector (one query per work: its
# first performance) and every loaded performance.
L1_distances = np.zeros((max_works, max_works*max_covers))
for i in range(max_works):
    for j in range(max_covers*max_works):
        L1_distances[i][j] = np.linalg.norm(all_merged[i*max_covers]-all_merged[j], ord=1)
#Mean position of first hit
all_cvrs = []
hit_positions = []
for i in range(max_works):
    d = np.argsort(L1_distances[i])  # ranking of all performances for query i
    cvrs = [(i*max_covers)+j+1 for j in range(max_covers-1)]  # other covers of work i
    hits = [np.where(d == c)[0][0] for c in cvrs]  # rank positions of the true covers
    hit_positions.append(min(hits))
    cvrs.insert(0, cvrs[0]-1)  # prepend the query itself
    all_cvrs.append(cvrs)
L1_average_hit = np.mean(hit_positions)
print('L1 mean position of first hit:', L1_average_hit)
#Mean Average Precision
# BUG FIX: the original summed the same AP value max_works times in an inner
# loop and the final print reported only the LAST query's AP; accumulate the
# per-query AP across queries so the printed value is a true mean.
ap_sum = 0.0
for i in range(max_works):
    #get all distances to selected song, normalize [0,1], convert to similarity metric, not dissimilarity
    d = 1-(L1_distances[i]/np.linalg.norm(L1_distances[i]))
    cr = np.zeros((max_works*max_covers)) #get all cover relationships to selected song
    for c in all_cvrs[i]:
        cr[c] = 1
    ap_sum += average_precision_score(cr, d)
mAP = ap_sum/float(max_works)
print('L1 mean average precision:', mAP)
#---Frobenius norm---#
# L2 (Frobenius) distance between each query's merged vector and every
# loaded performance.
fro_distances = np.zeros((max_works, max_works*max_covers))
for i in range(max_works):
    for j in range(max_covers*max_works):
        fro_distances[i][j] = np.linalg.norm(all_merged[i*max_covers]-all_merged[j])
#Mean position of first hit
all_cvrs = []
hit_positions = []
for i in range(max_works):
    d = np.argsort(fro_distances[i])  # ranking of all performances for query i
    cvrs = [(i*max_covers)+j+1 for j in range(max_covers-1)]  # other covers of work i
    hits = [np.where(d == c)[0][0] for c in cvrs]  # rank positions of the true covers
    hit_positions.append(min(hits))
    cvrs.insert(0, cvrs[0]-1)  # prepend the query itself
    all_cvrs.append(cvrs)
fro_average_hit = np.mean(hit_positions)
print('fro mean position of first hit:', fro_average_hit)
#Mean Average Precision
# BUG FIX: accumulate AP over all queries instead of printing only the last
# query's AP (the original inner loop added the same value max_works times).
ap_sum = 0.0
for i in range(max_works):
    #get all distances to selected song, normalize [0,1], convert to similarity metric, not dissimilarity
    d = 1-(fro_distances[i]/np.linalg.norm(fro_distances[i]))
    cr = np.zeros((max_works*max_covers)) #get all cover relationships to selected song
    for c in all_cvrs[i]:
        cr[c] = 1
    ap_sum += average_precision_score(cr, d)
mAP = ap_sum/float(max_works)
print('fro mean average precision:', mAP)
#---Sub-sequence Dynamic Time Warping Cost---#
# For each query/candidate pair, run DTW on each of the (kmax-kmin)
# structure-distance matrices and average the final alignment costs.
dtw_cost = np.zeros((max_works, max_works*max_covers))
for i in range(max_works):
    for j in range(max_covers*max_works):
        costs = []
        for k in range(kmax-kmin):
            costs.append(librosa.sequence.dtw(all_sets[i*max_covers][k], all_sets[j][k], subseq=False, metric='euclidean')[0][rs_size-1,rs_size-1])
        dtw_cost[i][j] = sum(costs)/len(costs)
#Mean position of first hit
all_cvrs = []
hit_positions = []
for i in range(max_works):
    d = np.argsort(dtw_cost[i])  # ranking of all performances for query i
    cvrs = [(i*max_covers)+j+1 for j in range(max_covers-1)]  # other covers of work i
    hits = [np.where(d == c)[0][0] for c in cvrs]  # rank positions of the true covers
    hit_positions.append(min(hits))
    cvrs.insert(0, cvrs[0]-1)  # prepend the query itself
    all_cvrs.append(cvrs)
dtw_average_hit = np.mean(hit_positions)
print('dtw mean position of first hit:', dtw_average_hit)
#Mean Average Precision
# BUG FIX: accumulate AP over all queries instead of printing only the last
# query's AP (the original inner loop added the same value max_works times).
ap_sum = 0.0
for i in range(max_works):
    #get all distances to selected song, normalize [0,1], convert to similarity metric, not dissimilarity
    d = 1-(dtw_cost[i]/np.linalg.norm(dtw_cost[i]))
    cr = np.zeros((max_works*max_covers)) #get all cover relationships to selected song
    for c in all_cvrs[i]:
        cr[c] = 1
    ap_sum += average_precision_score(cr, d)
mAP = ap_sum/float(max_works)
print('dtw mean average precision:', mAP)
#---Directed Hausdorff distance---#
# Directed Hausdorff distance between the point sets of flattened
# approximations of each query and every loaded performance.
hausdorff_distances = np.zeros((max_works, max_works*max_covers))
for i in range(max_works):
    for j in range(max_covers*max_works):
        hausdorff_distances[i][j] = (directed_hausdorff(all_flat[i*max_covers], all_flat[j]))[0]
#Mean position of first hit
all_cvrs = []
hit_positions = []
for i in range(max_works):
    d = np.argsort(hausdorff_distances[i])  # ranking of all performances for query i
    cvrs = [(i*max_covers)+j+1 for j in range(max_covers-1)]  # other covers of work i
    hits = [np.where(d == c)[0][0] for c in cvrs]  # rank positions of the true covers
    hit_positions.append(min(hits))
    cvrs.insert(0, cvrs[0]-1)  # prepend the query itself
    all_cvrs.append(cvrs)
hau_average_hit = np.mean(hit_positions)
print('hau mean position of first hit:', hau_average_hit)
#Mean Average Precision
# BUG FIX: accumulate AP over all queries instead of printing only the last
# query's AP (the original inner loop added the same value max_works times).
ap_sum = 0.0
for i in range(max_works):
    #get all distances to selected song, normalize [0,1], convert to similarity metric, not dissimilarity
    d = 1-(hausdorff_distances[i]/np.linalg.norm(hausdorff_distances[i]))
    cr = np.zeros((max_works*max_covers)) #get all cover relationships to selected song
    for c in all_cvrs[i]:
        cr[c] = 1
    ap_sum += average_precision_score(cr, d)
mAP = ap_sum/float(max_works)
print('hau mean average precision:', mAP)
#---Minimum distance across all pairs---#
# Smallest L2 distance over all (kmax-kmin)^2 pairings of the query's and
# the candidate's structure-distance matrices.
min_distances = np.zeros((max_works, max_works*max_covers))
for q in range(max_works):
    for t in range(max_covers*max_works):
        min_distances[q][t] = min(
            np.linalg.norm(all_sets[q*max_covers][a] - all_sets[t][b])
            for a in range(kmax-kmin) for b in range(kmax-kmin))
#Mean position of first hit
all_cvrs = []
hit_positions = []
for q in range(max_works):
    order = np.argsort(min_distances[q])
    cvrs = [q*max_covers + j + 1 for j in range(max_covers-1)]
    first_hits = [np.where(order == c)[0][0] for c in cvrs]
    hit_positions.append(min(first_hits))
    cvrs.insert(0, cvrs[0]-1)
    all_cvrs.append(cvrs)
pair_average_hit = np.mean(hit_positions)
print('pair mean position of first hit:', pair_average_hit)
#Mean Average Precision
for i in range(max_works):
#get | |
op2.MixedDataSet(dsets)
assert not op2.MixedDataSet(dsets) != op2.MixedDataSet(dsets)
def test_mixed_dset_ne(self, dset, diterset, dtoset):
"MixedDataSets created from different DataSets should not compare equal."
mds1 = op2.MixedDataSet((dset, diterset, dtoset))
mds2 = op2.MixedDataSet((dset, dtoset, diterset))
assert mds1 != mds2
assert not mds1 == mds2
def test_mixed_dset_ne_dset(self, diterset, dtoset):
"MixedDataSets should not compare equal to a scalar DataSet."
assert op2.MixedDataSet((diterset, dtoset)) != diterset
assert not op2.MixedDataSet((diterset, dtoset)) == diterset
def test_mixed_dset_repr(self, mdset):
"MixedDataSet repr should produce a MixedDataSet object when eval'd."
from pyop2.op2 import Set, DataSet, MixedDataSet # noqa: needed by eval
assert isinstance(eval(repr(mdset)), base.MixedDataSet)
def test_mixed_dset_str(self, mdset):
"MixedDataSet should have the expected string representation."
assert str(mdset) == "OP2 MixedDataSet composed of DataSets: %s" % (mdset._dsets,)
class TestDatAPI:
"""
Dat API unit tests
"""
def test_dat_illegal_set(self):
"Dat set should be DataSet."
with pytest.raises(exceptions.DataSetTypeError):
op2.Dat('illegalset', 1)
def test_dat_illegal_name(self, dset):
"Dat name should be string."
with pytest.raises(exceptions.NameTypeError):
op2.Dat(dset, name=2)
def test_dat_initialise_data(self, dset):
"""Dat initilialised without the data should initialise data with the
correct size and type."""
d = op2.Dat(dset)
assert d.data.size == dset.size * dset.cdim and d.data.dtype == np.float64
def test_dat_initialise_data_type(self, dset):
"""Dat intiialised without the data but with specified type should
initialise its data with the correct type."""
d = op2.Dat(dset, dtype=np.int32)
assert d.data.dtype == np.int32
def test_dat_subscript(self, dat):
"""Extracting component 0 of a Dat should yield self."""
assert dat[0] is dat
def test_dat_illegal_subscript(self, dat):
"""Extracting component 0 of a Dat should yield self."""
with pytest.raises(exceptions.IndexValueError):
dat[1]
def test_dat_arg_default_map(self, dat):
"""Dat __call__ should default the Arg map to None if not given."""
assert dat(op2.READ).map is None
def test_dat_arg_illegal_map(self, dset):
"""Dat __call__ should not allow a map with a toset other than this
Dat's set."""
d = op2.Dat(dset)
set1 = op2.Set(3)
set2 = op2.Set(2)
to_set2 = op2.Map(set1, set2, 1, [0, 0, 0])
with pytest.raises(exceptions.MapValueError):
d(op2.READ, to_set2)
def test_dat_on_set_builds_dim_one_dataset(self, set):
"""If a Set is passed as the dataset argument, it should be
converted into a Dataset with dim=1"""
d = op2.Dat(set)
assert d.cdim == 1
assert isinstance(d.dataset, op2.DataSet)
assert d.dataset.cdim == 1
def test_dat_dtype_type(self, dset):
"The type of a Dat's dtype property should by numpy.dtype."
d = op2.Dat(dset)
assert type(d.dtype) == np.dtype
d = op2.Dat(dset, [1.0] * dset.size * dset.cdim)
assert type(d.dtype) == np.dtype
def test_dat_split(self, dat):
"Splitting a Dat should yield a tuple with self"
for d in dat.split:
d == dat
def test_dat_dtype(self, dset):
"Default data type should be numpy.float64."
d = op2.Dat(dset)
assert d.dtype == np.double
def test_dat_float(self, dset):
"Data type for float data should be numpy.float64."
d = op2.Dat(dset, [1.0] * dset.size * dset.cdim)
assert d.dtype == np.double
def test_dat_int(self, dset):
"Data type for int data should be numpy.int."
d = op2.Dat(dset, [1] * dset.size * dset.cdim)
assert d.dtype == np.int
def test_dat_convert_int_float(self, dset):
"Explicit float type should override NumPy's default choice of int."
d = op2.Dat(dset, [1] * dset.size * dset.cdim, np.double)
assert d.dtype == np.float64
def test_dat_convert_float_int(self, dset):
"Explicit int type should override NumPy's default choice of float."
d = op2.Dat(dset, [1.5] * dset.size * dset.cdim, np.int32)
assert d.dtype == np.int32
def test_dat_illegal_dtype(self, dset):
"Illegal data type should raise DataTypeError."
with pytest.raises(exceptions.DataTypeError):
op2.Dat(dset, dtype='illegal_type')
def test_dat_illegal_length(self, dset):
"Mismatching data length should raise DataValueError."
with pytest.raises(exceptions.DataValueError):
op2.Dat(dset, [1] * (dset.size * dset.cdim + 1))
def test_dat_reshape(self, dset):
"Data should be reshaped according to the set's dim."
d = op2.Dat(dset, [1.0] * dset.size * dset.cdim)
shape = (dset.size,) + (() if dset.cdim == 1 else dset.dim)
assert d.data.shape == shape
def test_dat_properties(self, dset):
"Dat constructor should correctly set attributes."
d = op2.Dat(dset, [1] * dset.size * dset.cdim, 'double', 'bar')
assert d.dataset.set == dset.set and d.dtype == np.float64 and \
d.name == 'bar' and d.data.sum() == dset.size * dset.cdim
def test_dat_iter(self, dat):
"Dat should be iterable and yield self."
for d in dat:
assert d is dat
def test_dat_len(self, dat):
"Dat len should be 1."
assert len(dat) == 1
def test_dat_repr(self, dat):
    """Evaluating a Dat's repr should reconstruct a Dat object."""
    from pyop2.op2 import Dat, DataSet, Set  # noqa: needed by eval
    from numpy import dtype  # noqa: needed by eval
    clone = eval(repr(dat))
    assert isinstance(clone, op2.Dat)
def test_dat_str(self, dset):
    """A Dat should have the expected string representation."""
    d = op2.Dat(dset, dtype='double', name='bar')
    expected = "OP2 Dat: %s on (%s) with datatype %s" \
        % (d.name, d.dataset, d.data.dtype.name)
    assert str(d) == expected
def test_dat_ro_accessor(self, dat):
    """Writing through the read-only accessor should raise an error."""
    readonly = dat.data_ro
    with pytest.raises((RuntimeError, ValueError)):
        readonly[0] = 1
def test_dat_ro_write_accessor(self, dat):
    """Re-accessing the data in writeable form should be allowed."""
    view = dat.data_ro
    with pytest.raises((RuntimeError, ValueError)):
        view[0] = 1
    # The writeable accessor must still work after a read-only access.
    view = dat.data
    view[0] = -100
    assert (dat.data_ro[0] == -100).all()
def test_dat_lazy_allocation(self, dset):
    """A temporary Dat should not allocate storage until accessed."""
    assert not op2.Dat(dset)._is_allocated
def test_dat_zero_cdim(self, set):
    """A Dat built on a DataSet with zero dim should be allowed."""
    zero_dset = set**0
    d = op2.Dat(zero_dset)
    expected_shape = (set.total_size, 0)
    assert d.shape == expected_shape
    assert d.data.size == 0
    assert d.data.shape == expected_shape
class TestMixedDatAPI:

    """
    MixedDat API unit tests
    """

    def test_mixed_dat_illegal_arg(self):
        """Constructing a MixedDat from anything other than a MixedSet, a
        MixedDataSet or an iterable of Dats should fail."""
        with pytest.raises(exceptions.DataSetTypeError):
            op2.MixedDat('illegalarg')

    def test_mixed_dat_illegal_dtype(self, set):
        """Constructing a MixedDat from Dats of different dtype should fail."""
        with pytest.raises(exceptions.DataValueError):
            # ``int`` replaces the deprecated ``np.int`` alias (removed in
            # NumPy 1.24); both select NumPy's default integer dtype.
            op2.MixedDat((op2.Dat(set, dtype=int), op2.Dat(set)))

    def test_mixed_dat_dats(self, dats):
        """Constructing a MixedDat from an iterable of Dats should leave them
        unchanged."""
        assert op2.MixedDat(dats).split == dats

    def test_mixed_dat_dsets(self, mdset):
        """Constructing a MixedDat from an iterable of DataSets should leave
        them unchanged."""
        assert op2.MixedDat(mdset).dataset == mdset

    def test_mixed_dat_upcast_sets(self, mset):
        "Constructing a MixedDat from an iterable of Sets should upcast."
        assert op2.MixedDat(mset).dataset == op2.MixedDataSet(mset)

    def test_mixed_dat_getitem(self, mdat):
        "MixedDat should return the corresponding Dat when indexed."
        for i, d in enumerate(mdat):
            assert mdat[i] == d
        # Slicing should also work and yield a tuple of Dats.
        assert mdat[:-1] == tuple(mdat)[:-1]

    def test_mixed_dat_dim(self, mdset):
        "MixedDat dim should return a tuple of the DataSet dims."
        assert op2.MixedDat(mdset).dim == mdset.dim

    def test_mixed_dat_cdim(self, mdset):
        "MixedDat cdim should return a tuple of the DataSet cdims."
        assert op2.MixedDat(mdset).cdim == mdset.cdim

    def test_mixed_dat_data(self, mdat):
        "MixedDat data should return a tuple of the Dat data arrays."
        assert all((d1 == d2.data).all() for d1, d2 in zip(mdat.data, mdat))

    def test_mixed_dat_data_ro(self, mdat):
        "MixedDat data_ro should return a tuple of the Dat data_ro arrays."
        assert all((d1 == d2.data_ro).all() for d1, d2 in zip(mdat.data_ro, mdat))

    def test_mixed_dat_data_with_halos(self, mdat):
        """MixedDat data_with_halos should return a tuple of the Dat
        data_with_halos arrays."""
        assert all((d1 == d2.data_with_halos).all() for d1, d2 in zip(mdat.data_with_halos, mdat))

    def test_mixed_dat_data_ro_with_halos(self, mdat):
        """MixedDat data_ro_with_halos should return a tuple of the Dat
        data_ro_with_halos arrays."""
        assert all((d1 == d2.data_ro_with_halos).all() for d1, d2 in zip(mdat.data_ro_with_halos, mdat))

    def test_mixed_dat_needs_halo_update(self, mdat):
        """MixedDat needs_halo_update should indicate if at least one contained
        Dat needs a halo update."""
        assert mdat.halo_valid
        mdat[0].halo_valid = False
        assert not mdat.halo_valid

    def test_mixed_dat_needs_halo_update_setter(self, mdat):
        """Setting MixedDat needs_halo_update should set the property for all
        contained Dats."""
        assert mdat.halo_valid
        mdat.halo_valid = False
        assert not any(d.halo_valid for d in mdat)

    def test_mixed_dat_iter(self, mdat, dats):
        "MixedDat should be iterable and yield the Dats."
        assert tuple(s for s in mdat) == dats

    def test_mixed_dat_len(self, dats):
        """MixedDat should have length equal to the number of contained Dats."""
        assert len(op2.MixedDat(dats)) == len(dats)

    def test_mixed_dat_eq(self, dats):
        "MixedDats created from the same Dats should compare equal."
        assert op2.MixedDat(dats) == op2.MixedDat(dats)
        # Also exercise __ne__ explicitly.
        assert not op2.MixedDat(dats) != op2.MixedDat(dats)

    def test_mixed_dat_ne(self, dats):
        "MixedDats created from different Dats should not compare equal."
        mdat1 = op2.MixedDat(dats)
        mdat2 = op2.MixedDat(reversed(dats))
        assert mdat1 != mdat2
        assert not mdat1 == mdat2

    def test_mixed_dat_ne_dat(self, dats):
        "A MixedDat should not compare equal to a Dat."
        assert op2.MixedDat(dats) != dats[0]
        assert not op2.MixedDat(dats) == dats[0]

    def test_mixed_dat_repr(self, mdat):
        "MixedDat repr should produce a MixedDat object when eval'd."
        from pyop2.op2 import Set, DataSet, MixedDataSet, Dat, MixedDat  # noqa: needed by eval
        from numpy import dtype  # noqa: needed by eval
        assert isinstance(eval(repr(mdat)), base.MixedDat)

    def test_mixed_dat_str(self, mdat):
        "MixedDat should have the expected string representation."
        assert str(mdat) == "OP2 MixedDat composed of Dats: %s" % (mdat.split,)
class TestSparsityAPI:
"""
Sparsity API unit tests
"""
@pytest.fixture
def | |
as error:
LOG.debug('Unmount attempt %(attempt)s failed: %(error)s, '
'retrying unmount NFS share %(share)s mounted '
'at %(mntpoint)s',
{'attempt': attempt, 'error': error,
'share': share, 'mntpoint': mntpoint})
if attempt == attempts:
LOG.error('Failed to unmount NFS share %(share)s '
'mounted at %(mntpoint)s after %(attempt)s '
'attempts: %(error)s',
{'share': share, 'mntpoint': mntpoint,
'attempt': attempt, 'error': error})
raise
greenthread.sleep(DEFAULT_RETRY_DELAY)
else:
LOG.debug('NFS share %(share)s has been unmounted at '
'%(mntpoint)s after %(attempt)s attempts',
{'share': share, 'mntpoint': mntpoint,
'attempt': attempt})
break
self._delete(mntpoint)
def _delete(self, mntpoint):
    """Override parent method for safe remove mountpoint.

    Uses ``rm -d`` so only an empty directory can be removed; any
    failure is logged at debug level and otherwise ignored (best
    effort cleanup).

    :param mntpoint: path of the mountpoint directory to remove
    """
    try:
        self._execute('rm', '-d', mntpoint, run_as_root=True)
    except processutils.ProcessExecutionError as error:
        # Best effort: a stale or busy mountpoint is not fatal.
        LOG.debug('Failed to remove mountpoint %(mntpoint)s: %(error)s',
                  {'mntpoint': mntpoint, 'error': error})
    else:
        LOG.debug('The mountpoint %(mntpoint)s has been removed',
                  {'mntpoint': mntpoint})
def _mount_volume(self, volume):
    """Ensure that volume is activated and mounted on the host."""
    self._ensure_share_mounted(self._get_volume_share(volume))
def _unmount_volume(self, volume):
    """Ensure that volume is unmounted on the host.

    A missing volume (ENOENT) means there is nothing to unmount, so it
    is silently ignored. Any other NMS error is re-raised: previously it
    was swallowed, which left ``share`` unbound and caused a confusing
    NameError on the following line.

    :param volume: a volume object
    :raises jsonrpc.NmsException: on any NMS failure other than ENOENT
    """
    try:
        share = self._get_volume_share(volume)
    except jsonrpc.NmsException as error:
        if error.code == 'ENOENT':
            return
        raise
    self._ensure_share_unmounted(share)
def create_export(self, ctxt, volume, connector):
    """Driver entry point to get the export info for a new volume.

    Intentionally a no-op for this driver: the NFS share is resolved
    and mounted on demand (see ``initialize_connection`` and
    ``_mount_volume``).

    :param ctxt: request context
    :param volume: a volume object
    :param connector: a connector object
    """
    pass
def ensure_export(self, ctxt, volume):
    """Driver entry point to get the export info for an existing volume.

    Intentionally a no-op: no persistent export state is kept for this
    driver, so there is nothing to re-create on service restart.

    :param ctxt: request context
    :param volume: a volume object
    """
    pass
@coordination.synchronized('{self.nms.lock}-{volume[id]}')
def remove_export(self, ctxt, volume):
    """Driver entry point to remove an export for a volume.

    Serialized per volume via the coordination lock; simply makes sure
    the volume's NFS share is no longer mounted on this host.

    :param ctxt: request context
    :param volume: a volume object
    """
    self._unmount_volume(volume)
def terminate_connection(self, volume, connector, **kwargs):
    """Terminate a connection to a volume.

    Intentionally a no-op: the share is unmounted in ``remove_export``
    instead of here.

    :param volume: a volume object
    :param connector: a connector object
    :returns: dictionary of connection information
    """
    pass
@coordination.synchronized('{self.nms.lock}-{volume[id]}')
def initialize_connection(self, volume, connector):
    """Allow connection to connector and return connection info.

    :param volume: volume reference
    :param connector: connector reference
    :returns: dictionary of connection information
    """
    LOG.debug('Initialize volume connection for %(volume)s',
              {'volume': volume['name']})
    data = {
        'export': self._get_volume_share(volume),
        'name': 'volume'
    }
    if self.mount_options:
        data['options'] = '-o %s' % self.mount_options
    return {
        'driver_volume_type': self.driver_volume_type,
        'mount_point_base': self.mount_point_base,
        'data': data
    }
def _get_bound_host(self, host):
    """Get user@host:port from SSH bindings."""
    try:
        bindings = self.nms.appliance.ssh_list_bindings()
    except jsonrpc.NmsException as error:
        LOG.error('Failed to get SSH bindings: %(error)s',
                  {'error': error})
        return None
    for user_host_port, binding in bindings.items():
        # A usable binding is a 4-element list; skip anything else.
        if not (isinstance(binding, list) and len(binding) == 4):
            LOG.warning('Skip incompatible SSH binding: %(binding)s',
                        {'binding': binding})
            continue
        bound_hosts = (item.strip() for item in binding[2].split(','))
        if host in bound_hosts:
            return user_host_port
    return None
def _svc_state(self, fmri, state):
    """Drive the SMF migration service toward the requested state.

    Polls the service state and issues enable/disable requests until
    the service reports ``state`` ('online' or 'disabled') or the
    retry budget runs out.

    :param fmri: SMF service identifier
    :param state: desired service state, 'online' or 'disabled'
    :returns: True if the service reached ``state``, False otherwise
    """
    retries = DEFAULT_RETRY_COUNT
    while retries:
        greenthread.sleep(DEFAULT_RETRY_DELAY)
        retries -= 1
        try:
            status = self.nms.autosvc.get_state(fmri)
        except jsonrpc.NmsException as error:
            LOG.error('Failed to get state of migration '
                      'service %(fmri)s: %(error)s',
                      {'fmri': fmri, 'error': error})
            continue
        if status == 'uninitialized':
            # Service not ready yet; poll again on the next iteration.
            continue
        elif status == state:
            return True
        # Wrong state: request a transition and re-check next iteration.
        if state == 'online':
            method = getattr(self.nms.autosvc, 'enable')
        elif state == 'disabled':
            method = getattr(self.nms.autosvc, 'disable')
        else:
            LOG.error('Request unknown service state: %(state)s',
                      {'state': state})
            return False
        try:
            method(fmri)
        except jsonrpc.NmsException as error:
            LOG.error('Failed to change state of migration service '
                      '%(fmri)s to %(state)s: %(error)s',
                      {'fmri': fmri, 'state': state, 'error': error})
    # NOTE(review): `retries` is always 0 here, so the logged value does
    # not reflect the number of attempts actually made.
    LOG.error('Unable to change state of migration service %(fmri)s '
              'to %(state)s: maximum retries exceeded (%(retries)s)',
              {'fmri': fmri, 'state': state, 'retries': retries})
    return False
def _svc_progress(self, fmri):
    """Get progress for SMF service."""
    try:
        estimations = self.nms.autosync.get_estimations(fmri)
    except jsonrpc.NmsException as error:
        LOG.error('Failed to get estimations for migration '
                  'service %(fmri)s: %(error)s',
                  {'fmri': fmri, 'error': error})
        return 0
    try:
        total = float(estimations.get('curt_siz'))
        sent = float(estimations.get('curt_sen'))
    except (TypeError, ValueError) as error:
        LOG.error('Failed to parse estimations statistics '
                  '%(estimations)s for migration service '
                  '%(fmri)s: %(error)s',
                  {'estimations': estimations,
                   'fmri': fmri, 'error': error})
        return 0
    if total > 0:
        return int(100 * sent / total)
    return 0
def _svc_result(self, fmri):
    """Report whether the migration service's latest run succeeded.

    Reads the service's ``zfs/run_history`` property, whose whitespace
    separated entries have the form ``start::stop::code``, and checks
    the result code of the most recent entry.

    :param fmri: SMF service identifier
    :returns: True if the latest replication session succeeded
    """
    try:
        props = self.nms.autosvc.get_child_props(fmri, '')
    except jsonrpc.NmsException as error:
        LOG.error('Failed to get properties of migration service '
                  '%(fmri)s: %(error)s',
                  {'fmri': fmri, 'error': error})
        return False
    history = props.get('zfs/run_history')
    if not history:
        LOG.error('Failed to get history of migration service '
                  '%(fmri)s: %(props)s',
                  {'fmri': fmri, 'props': props})
        return False
    results = history.split()
    if len(results) > 1:
        # Only one session is expected; extras are logged but the most
        # recent entry still decides the outcome.
        LOG.warning('Found unexpected replication sessions for '
                    'migration service %(fmri)s: %(history)s',
                    {'fmri': fmri, 'history': history})
    latest = results.pop()
    # Entry format: start::stop::code (epoch seconds and a result code).
    start, stop, code = latest.split('::')
    try:
        start = int(start)
        stop = int(stop)
        code = int(code)
    except (TypeError, ValueError) as error:
        LOG.error('Failed to parse history %(history)s for migration '
                  'service %(fmri)s: %(error)s',
                  {'history': history, 'fmri': fmri, 'error': error})
        return False
    delta = stop - start
    # NOTE(review): code 1 is treated as success here — confirm against
    # the auto-sync service documentation.
    if code != 1:
        LOG.error('Migration service %(fmri)s failed after %(delta)s '
                  'seconds, please check the service log below',
                  {'fmri': fmri, 'delta': delta})
        return False
    LOG.info('Migration service %(fmri)s successfully finished in '
             '%(delta)s seconds',
             {'fmri': fmri, 'delta': delta})
    return True
def _svc_cleanup(self, fmri, migrated=False):
    """Tear down a migration service and its replication artifacts.

    Unschedules, disables and destroys the SMF service, then asks the
    auto-sync plugin to clean up source/destination snapshots and
    properties and waits for the cleanup jobs to finish. When the
    migration failed (``migrated=False``), the destination datasets are
    removed as well and the tail of the service log is dumped for
    diagnostics.

    :param fmri: SMF service identifier
    :param migrated: True if the volume was migrated successfully
    """
    props = None
    flags = {
        'src_properties': '1',
        'dst_properties': '1',
        'src_snapshots': '1',
        'dst_snapshots': '1'
    }
    if not migrated:
        # On failure, also destroy the (incomplete) destination datasets.
        flags['dst_datasets'] = '1'
    try:
        props = self.nms.autosvc.get_child_props(fmri, '')
        props['zfs/sync-recursive'] = '1'
    except jsonrpc.NmsException as error:
        LOG.error('Failed to get properties of migration '
                  'service %(fmri)s: %(error)s',
                  {'fmri': fmri, 'error': error})
    try:
        self.nms.autosvc.unschedule(fmri)
    except jsonrpc.NmsException as error:
        LOG.error('Failed to unschedule migration service '
                  '%(fmri)s: %(error)s',
                  {'fmri': fmri, 'error': error})
    self._svc_state(fmri, 'disabled')
    try:
        self.nms.autosvc.destroy(fmri)
    except jsonrpc.NmsException as error:
        LOG.error('Failed to destroy migration service '
                  '%(fmri)s: %(error)s',
                  {'fmri': fmri, 'error': error})
    if not props:
        # Without the service properties we can neither run the
        # auto-sync cleanup nor locate the log file.
        return
    try:
        src_pid, dst_pid = self.nms.autosync.cleanup(props, flags)
    except jsonrpc.NmsException as error:
        src_pid = dst_pid = 0
        LOG.error('Failed to cleanup migration service %(fmri)s: '
                  '%(error)s',
                  {'fmri': fmri, 'error': error})
    for pid in [src_pid, dst_pid]:
        # Poll until the cleanup job disappears (ENOENT); other NMS
        # errors are ignored and polling continues.
        while pid:
            try:
                self.nms.job.get_jobparams(pid)
            except jsonrpc.NmsException as error:
                if error.code == 'ENOENT':
                    break
            greenthread.sleep(DEFAULT_RETRY_DELAY)
    if migrated:
        # Successful migration: no need to dump the service log.
        return
    path = props.get('restarter/logfile')
    if not path:
        return
    try:
        content = self.nms.logviewer.get_tail(path, units.Mi)
    except jsonrpc.NmsException as error:
        LOG.error('Failed to get log file content for migration '
                  'service %(fmri)s: %(error)s',
                  {'fmri': fmri, 'error': error})
        return
    log = '\n'.join(content)
    LOG.error('Migration service %(fmri)s log: %(log)s',
              {'fmri': fmri, 'log': log})
def _migrate_volume(self, volume, host, path):
src_path = self._get_volume_path(volume)
dst_path = posixpath.join(path, volume['name'])
hosts = self._get_host_addresses()
if host in hosts:
dst_host = 'localhost'
service_direction = '0'
service_proto = 'zfs'
if src_path == dst_path:
LOG.info('Skip local to local replication: source '
'volume %(src_path)s and destination volume '
'%(dst_path)s are the same local volume',
{'src_path': src_path, 'dst_path': dst_path})
return True
else:
service_direction = '1'
service_proto = 'zfs+rr'
dst_host = self._get_bound_host(host)
if not dst_host:
LOG.error('Storage assisted volume migration is '
'unavailable: the destination host '
'%(host)s should be SSH bound',
{'host': host})
return False
service_name = '%(prefix)s-%(volume)s' % {
'prefix': self.migration_service_prefix,
'volume': volume['name']
}
comment = 'Migrate %(src)s to %(host)s:%(dst)s' % {
'src': src_path,
'host': dst_host,
'dst': dst_path
}
yesterday = timeutils.utcnow() - datetime.timedelta(days=1)
dst_path = path
rate_limit = 0
if self.migration_throttle:
rate_limit = self.migration_throttle * units.Ki
payload = {
'comment': comment,
'custom_name': service_name,
'from-fs': src_path,
'to-host': dst_host,
'to-fs': dst_path,
'direction': service_direction,
'marker_name': self.migration_snapshot_prefix,
'proto': service_proto,
'day': six.text_type(yesterday.day),
'rate_limit': six.text_type(rate_limit),
'_unique': 'type from-host from-fs to-host to-fs',
'method': 'sync',
'from-host': 'localhost',
'period_multiplier': '1',
'keep_src': '1',
'keep_dst': '1',
'trace_level': '30',
'type': 'monthly',
'nconn': '2',
'period': '12',
'mbuffer_size': '16',
'minute': '0',
'hour': '0',
'flags': '0',
'estimations': '0',
'force': '0',
'reverse_capable': '0',
'sync-recursive': '0',
'auto-clone': '0',
'flip_options': '0',
'direction_flipped': '0',
'retry': '0',
'success_counter': '0',
'dircontent': '0',
'zip_level': '0',
'auto-mount': '0',
'marker': '',
'exclude': '',
'run_history': '',
'progress-marker': '',
'from-snapshot': '',
'latest-suffix': '',
'trunk': '',
'options': ''
}
try:
fmri = self.nms.autosvc.fmri_create('auto-sync', comment,
src_path, 0, payload)
except jsonrpc.NmsException as error:
LOG.error('Failed to create migration service '
'with payload %(payload)s: %(error)s',
{'payload': payload, 'error': error})
return False
if not self._svc_state(fmri, 'online'):
self._svc_cleanup(fmri)
return False
service_running = False
try:
self.nms.autosvc.execute(fmri)
service_running = True
LOG.info('Migration service %(fmri)s successfully started',
{'fmri': fmri})
except jsonrpc.NmsException as error:
LOG.error('Failed to start migration service %(fmri)s: %(error)s',
{'fmri': fmri, 'error': error})
if not service_running:
LOG.error('Migration service %(fmri)s is offline',
{'fmri': fmri})
self._svc_cleanup(fmri)
return False
service_history = None
service_retries = DEFAULT_RETRY_COUNT
service_progress = 0
while service_retries and not service_history:
greenthread.sleep(DEFAULT_RETRY_DELAY)
service_retries -= 1
try:
service_props = self.nms.autosvc.get_child_props(fmri, '')
except jsonrpc.NmsException as error:
LOG.error('Failed to get properties of migration service '
'%(fmri)s: %(error)s',
{'fmri': fmri, 'error': error})
continue
service_history = service_props.get('zfs/run_history')
service_started = service_props.get('zfs/time_started')
if service_started == 'N/A':
continue
progress = self._svc_progress(fmri)
if progress | |
<filename>model/validation.py
import os
import numpy as np
from sklearn.metrics import jaccard_score, precision_score, recall_score, f1_score
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold
from data_processing.sliding_window import apply_sliding_window
from model.DeepConvLSTM import DeepConvLSTM
from model.evaluate import evaluate_participant_scores
from model.train import train, init_optimizer, init_loss
def cross_participant_cv(data, custom_net, custom_loss, custom_opt, args, log_date, log_timestamp):
    """
    Method to apply cross-participant cross-validation (also known as leave-one-subject-out cross-validation).

    :param data: data used for applying cross-validation; column 0 holds the
        subject identifier, the last column the label
    :param custom_net: custom network object (used when args.network == 'custom')
    :param custom_loss: custom loss object (used when args.loss == 'custom')
    :param custom_opt: custom optimizer object (used when args.optimizer == 'custom')
    :param args: args object containing all relevant hyperparameters and settings
    :param log_date: date information needed for saving
    :param log_timestamp: timestamp information needed for saving
    :raises ValueError: if args.network is not a recognized network name
    :return: trained network (from the last fold)
    """
    print('\nCALCULATING CROSS-PARTICIPANT SCORES USING LOSO CV.\n')
    # rows: jaccard/precision/recall/f1; columns: classes; depth: subjects
    cp_scores = np.zeros((4, args.nb_classes, int(np.max(data[:, 0]) + 1)))
    train_val_gap = np.zeros((4, int(np.max(data[:, 0]) + 1)))
    all_eval_output = None
    orig_lr = args.lr
    for i, sbj in enumerate(np.unique(data[:, 0])):
        print('\n VALIDATING FOR SUBJECT {0} OF {1}'.format(int(sbj) + 1, int(np.max(data[:, 0])) + 1))
        train_data = data[data[:, 0] != sbj]
        val_data = data[data[:, 0] == sbj]
        # reset learning rate each fold (train() may have modified it)
        args.lr = orig_lr
        # sensor data is segmented using a sliding window mechanism
        X_train, y_train = apply_sliding_window(train_data[:, :-1], train_data[:, -1],
                                                sliding_window_size=args.sw_length,
                                                unit=args.sw_unit,
                                                sampling_rate=args.sampling_rate,
                                                sliding_window_overlap=args.sw_overlap,
                                                )
        X_val, y_val = apply_sliding_window(val_data[:, :-1], val_data[:, -1],
                                            sliding_window_size=args.sw_length,
                                            unit=args.sw_unit,
                                            sampling_rate=args.sampling_rate,
                                            sliding_window_overlap=args.sw_overlap,
                                            )
        # drop the subject identifier channel
        X_train, X_val = X_train[:, :, 1:], X_val[:, :, 1:]
        args.window_size = X_train.shape[1]
        args.nb_channels = X_train.shape[2]
        # network initialization; an invalid name previously only printed a
        # warning and left `net` unbound (or stale from the previous fold)
        if args.network == 'deepconvlstm':
            net = DeepConvLSTM(config=vars(args))
        elif args.network == 'custom':
            net = custom_net
        else:
            raise ValueError("Did not provide a valid network name!")
        # optimizer initialization ('custom' uses the supplied object)
        if args.optimizer == 'custom':
            opt = custom_opt
        else:
            opt = init_optimizer(net, args)
        # loss initialization ('custom' uses the supplied object)
        if args.loss == 'custom':
            loss = custom_loss
        else:
            loss = init_loss(args)
        net, val_output, train_output = train(X_train, y_train, X_val, y_val,
                                              network=net, optimizer=opt, loss=loss,
                                              config=vars(args), log_date=log_date, log_timestamp=log_timestamp)
        if all_eval_output is None:
            all_eval_output = val_output
        else:
            all_eval_output = np.concatenate((all_eval_output, val_output), axis=0)
        # fill values for normal evaluation
        cls = np.array(range(args.nb_classes))
        cp_scores[0, :, int(sbj)] = jaccard_score(val_output[:, 1], val_output[:, 0], average=None,
                                                  labels=cls)
        cp_scores[1, :, int(sbj)] = precision_score(val_output[:, 1], val_output[:, 0], average=None,
                                                    labels=cls)
        cp_scores[2, :, int(sbj)] = recall_score(val_output[:, 1], val_output[:, 0], average=None, labels=cls)
        cp_scores[3, :, int(sbj)] = f1_score(val_output[:, 1], val_output[:, 0], average=None, labels=cls)
        # fill values for train val gap evaluation
        train_val_gap[0, int(sbj)] = jaccard_score(train_output[:, 1], train_output[:, 0], average='macro') - \
            jaccard_score(val_output[:, 1], val_output[:, 0], average='macro')
        train_val_gap[1, int(sbj)] = precision_score(train_output[:, 1], train_output[:, 0], average='macro') - \
            precision_score(val_output[:, 1], val_output[:, 0], average='macro')
        train_val_gap[2, int(sbj)] = recall_score(train_output[:, 1], train_output[:, 0], average='macro') - \
            recall_score(val_output[:, 1], val_output[:, 0], average='macro')
        train_val_gap[3, int(sbj)] = f1_score(train_output[:, 1], train_output[:, 0], average='macro') - \
            f1_score(val_output[:, 1], val_output[:, 0], average='macro')
        print("SUBJECT {0} VALIDATION RESULTS: ".format(int(sbj) + 1))
        print("Accuracy: {0}".format(
            jaccard_score(val_output[:, 1], val_output[:, 0], average=None, labels=cls)))
        print("Precision: {0}".format(
            precision_score(val_output[:, 1], val_output[:, 0], average=None, labels=cls)))
        print(
            "Recall: {0}".format(recall_score(val_output[:, 1], val_output[:, 0], average=None, labels=cls)))
        print("F1: {0}".format(f1_score(val_output[:, 1], val_output[:, 0], average=None, labels=cls)))
    evaluate_participant_scores(participant_scores=cp_scores,
                                gen_gap_scores=train_val_gap,
                                input_cm=all_eval_output,
                                class_names=args.class_names,
                                nb_subjects=int(np.max(data[:, 0]) + 1),
                                filepath=os.path.join('logs', log_date, log_timestamp),
                                filename='cross-participant',
                                args=args
                                )
    return net
def per_participant_cv(data, custom_net, custom_loss, custom_opt, args, log_date, log_timestamp):
    """
    Method to apply per-participant cross-validation.

    :param data: data used for applying cross-validation; column 0 holds the
        subject identifier, the last column the label
    :param custom_net: custom network object (used when args.network == 'custom')
    :param custom_loss: custom loss object (used when args.loss == 'custom')
    :param custom_opt: custom optimizer object (used when args.optimizer == 'custom')
    :param args: args object containing all relevant hyperparameters and settings
    :param log_date: date information needed for saving
    :param log_timestamp: timestamp information needed for saving
    :raises ValueError: if args.network is not a recognized network name
    :return: trained network (from the last subject/split)
    """
    print('\nCALCULATING PER-PARTICIPANT SCORES USING STRATIFIED SHUFFLE SPLIT.\n')
    # rows: jaccard/precision/recall/f1; columns: classes; depth: subjects
    pp_scores = np.zeros((4, args.nb_classes, int(np.max(data[:, 0]) + 1)))
    all_eval_output = None
    train_val_gap = np.zeros((4, int(np.max(data[:, 0]) + 1)))
    orig_lr = args.lr
    for i, sbj in enumerate(np.unique(data[:, 0])):
        print('\n VALIDATING FOR SUBJECT {0} OF {1}'.format(int(sbj) + 1, int(np.max(data[:, 0])) + 1))
        sss = StratifiedShuffleSplit(train_size=args.size_sss,
                                     n_splits=args.splits_sss,
                                     random_state=args.seed)
        subject_data = data[data[:, 0] == sbj]
        # sensor data is segmented using a sliding window mechanism
        X, y = apply_sliding_window(subject_data[:, :-1], subject_data[:, -1],
                                    sliding_window_size=args.sw_length,
                                    unit=args.sw_unit,
                                    sampling_rate=args.sampling_rate,
                                    sliding_window_overlap=args.sw_overlap,
                                    )
        # drop the subject identifier channel
        X = X[:, :, 1:]
        classes = np.array(range(args.nb_classes))
        subject_accuracy = np.zeros(args.nb_classes)
        subject_precision = np.zeros(args.nb_classes)
        subject_recall = np.zeros(args.nb_classes)
        subject_f1 = np.zeros(args.nb_classes)
        subject_accuracy_gap = 0
        subject_precision_gap = 0
        subject_recall_gap = 0
        subject_f1_gap = 0
        for j, (train_index, test_index) in enumerate(sss.split(X, y)):
            print('SPLIT {0}/{1}'.format(j + 1, args.splits_sss))
            X_train, X_val = X[train_index], X[test_index]
            y_train, y_val = y[train_index], y[test_index]
            # reset learning rate each split (train() may have modified it)
            args.lr = orig_lr
            args.window_size = X_train.shape[1]
            args.nb_channels = X_train.shape[2]
            # network initialization; an invalid name previously only printed
            # a warning and left `net` unbound (or stale from a prior split)
            if args.network == 'deepconvlstm':
                net = DeepConvLSTM(config=vars(args))
            elif args.network == 'custom':
                net = custom_net
            else:
                raise ValueError("Did not provide a valid network name!")
            # optimizer initialization ('custom' uses the supplied object)
            if args.optimizer == 'custom':
                opt = custom_opt
            else:
                opt = init_optimizer(net, args)
            # loss initialization ('custom' uses the supplied object)
            if args.loss == 'custom':
                loss = custom_loss
            else:
                loss = init_loss(args)
            net, val_output, train_output = train(X_train, y_train, X_val, y_val,
                                                  network=net, optimizer=opt, loss=loss,
                                                  config=vars(args), log_date=log_date, log_timestamp=log_timestamp)
            if all_eval_output is None:
                all_eval_output = val_output
            else:
                all_eval_output = np.concatenate((all_eval_output, val_output), axis=0)
            # accumulate per-class scores across splits
            subject_accuracy += jaccard_score(val_output[:, 1], val_output[:, 0], average=None,
                                              labels=classes)
            subject_precision += precision_score(val_output[:, 1], val_output[:, 0], average=None,
                                                 labels=classes)
            subject_recall += recall_score(val_output[:, 1], val_output[:, 0], average=None, labels=classes)
            subject_f1 += f1_score(val_output[:, 1], val_output[:, 0], average=None, labels=classes)
            # accumulate train/val generalization gaps across splits
            subject_accuracy_gap += jaccard_score(train_output[:, 1], train_output[:, 0], average='macro') - \
                jaccard_score(val_output[:, 1], val_output[:, 0], average='macro')
            subject_precision_gap += precision_score(train_output[:, 1], train_output[:, 0], average='macro') - \
                precision_score(val_output[:, 1], val_output[:, 0], average='macro')
            subject_recall_gap += recall_score(train_output[:, 1], train_output[:, 0], average='macro') - \
                recall_score(val_output[:, 1], val_output[:, 0], average='macro')
            subject_f1_gap += f1_score(train_output[:, 1], train_output[:, 0], average='macro') - \
                f1_score(val_output[:, 1], val_output[:, 0], average='macro')
        # average the accumulated scores over the number of splits
        pp_scores[0, :, int(sbj)] = subject_accuracy / args.splits_sss
        pp_scores[1, :, int(sbj)] = subject_precision / args.splits_sss
        pp_scores[2, :, int(sbj)] = subject_recall / args.splits_sss
        pp_scores[3, :, int(sbj)] = subject_f1 / args.splits_sss
        train_val_gap[0, int(sbj)] = subject_accuracy_gap / args.splits_sss
        train_val_gap[1, int(sbj)] = subject_precision_gap / args.splits_sss
        train_val_gap[2, int(sbj)] = subject_recall_gap / args.splits_sss
        train_val_gap[3, int(sbj)] = subject_f1_gap / args.splits_sss
        # report 1-based subject number, consistent with cross_participant_cv
        print("SUBJECT {0} VALIDATION RESULTS: ".format(int(sbj) + 1))
        print("Accuracy: {0}".format(pp_scores[0, :, int(sbj)]))
        print("Precision: {0}".format(pp_scores[1, :, int(sbj)]))
        print("Recall: {0}".format(pp_scores[2, :, int(sbj)]))
        print("F1: {0}".format(pp_scores[3, :, int(sbj)]))
    evaluate_participant_scores(participant_scores=pp_scores,
                                gen_gap_scores=train_val_gap,
                                input_cm=all_eval_output,
                                class_names=args.class_names,
                                nb_subjects=int(np.max(data[:, 0]) + 1),
                                filepath=os.path.join('logs', log_date, log_timestamp),
                                filename='per-participant',
                                args=args
                                )
    return net
def train_valid_split(train_data, valid_data, custom_net, custom_loss, custom_opt, args, log_date, log_timestamp):
"""
Method to apply normal cross-validation, i.e. one set split into train, validation and testing data.
:param train_data: train features & labels used for applying cross-validation
:param valid_data: validation features & labels used for applying cross-validation
:param custom_net: custom network object
:param custom_loss: custom loss object
:param custom_opt: custom optimizer object
:param args: args object containing all relevant hyperparameters and settings
:param log_date: date information needed for saving
:param log_timestamp: timestamp information needed for saving
:return trained network
"""
print('\nCALCULATING TRAIN-VALID-SPLIT SCORES.\n')
# Sensor data is segmented using a sliding window mechanism
X_train, y_train = apply_sliding_window(train_data[:, :-1], train_data[:, -1],
sliding_window_size=args.sw_length,
unit=args.sw_unit,
sampling_rate=args.sampling_rate,
sliding_window_overlap=args.sw_overlap,
)
X_val, y_val = apply_sliding_window(valid_data[:, :-1], valid_data[:, -1],
sliding_window_size=args.sw_length,
unit=args.sw_unit,
sampling_rate=args.sampling_rate,
sliding_window_overlap=args.sw_overlap,
)
X_train, X_val = X_train[:, :, 1:], X_val[:, :, 1:]
args.window_size = X_train.shape[1]
args.nb_channels = X_train.shape[2]
# network initialization
if args.network == 'deepconvlstm':
net = DeepConvLSTM(config=vars(args))
elif args.network == 'custom':
net = custom_net
else:
print("Did not provide a valid network name!")
# optimizer initialization
if args.optimizer != 'custom':
opt = init_optimizer(net, args)
elif args.optimizer == 'custom':
opt = custom_opt
else:
print("Did not provide a valid optimizer name!")
# optimizer initialization
if args.loss != 'custom':
loss = init_loss(args)
elif args.loss == 'custom':
loss = custom_loss
else:
print("Did not provide a valid loss name!")
net, val_output, train_output = train(X_train, y_train, X_val, y_val,
network=net, optimizer=opt, loss=loss,
config=vars(args), log_date=log_date, log_timestamp=log_timestamp)
cls = np.array(range(args.nb_classes))
print('VALIDATION RESULTS: ')
print("Avg. Accuracy: {0}".format(jaccard_score(val_output[:, 1], val_output[:, 0], average='macro')))
print("Avg. Precision: {0}".format(precision_score(val_output[:, 1], val_output[:, 0], average='macro')))
print("Avg. Recall: {0}".format(recall_score(val_output[:, 1], val_output[:, 0], average='macro')))
print("Avg. F1: {0}".format(f1_score(val_output[:, 1], val_output[:, 0], average='macro')))
print("VALIDATION RESULTS (PER CLASS): ")
print("Accuracy: {0}".format(jaccard_score(val_output[:, 1], val_output[:, 0], average=None, labels=cls)))
print("Precision: {0}".format(precision_score(val_output[:, 1], val_output[:, 0], average=None, labels=cls)))
print("Recall: {0}".format(recall_score(val_output[:, 1], val_output[:, 0], average=None, labels=cls)))
print("F1: {0}".format(f1_score(val_output[:, 1], val_output[:, 0], average=None, labels=cls)))
print("GENERALIZATION GAP ANALYSIS: | |
<filename>OpticalRS/AlbedoIndex.py
# -*- coding: utf-8 -*-
"""
AlbedoIndex
===========
Code for generating a water column corrected image from multispectral imagery.
This is a method of water column correction for habitat mapping. It is based on
Maritorena et al. 1994 and is described in detail in Chapters 4 and 5 of my PhD
thesis (Kibele, In Review).
References
----------
<NAME>. (In Review). Submerged habitats from space: Increasing map production
capacity with new methods and software. University of Auckland. PhD Thesis
<NAME>., <NAME>., <NAME>., 1994. Diffuse Reflectance of Oceanic
Shallow Waters: Influence of Water Depth and Bottom Albedo. Limnology and
Oceanography 39, 1689–1703.
<NAME>., 1987. Radiative transfer in stratified waters: a single-
scattering approximation for irradiance. Applied Optics 26, 4123.
doi:10.1364/AO.26.004123
<NAME>., 1989. Bathymetric mapping with passive multispectral imagery.
Appl. Opt. 28, 1569–1578. doi:10.1364/AO.28.001569
<NAME>., <NAME>., 2003. Integrating in situ reef-top reflectance
spectra with Landsat TM imagery to aid shallow-tropical benthic habitat mapping.
Coral Reefs 23, 5–20. doi:10.1007/s00338-003-0351-0
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., 2010. Using bottom surface reflectance to map coastal marine
areas: a new application method for Lyzenga’s model. International Journal of
Remote Sensing 31, 3051–3064. doi:10.1080/01431160903154341
"""
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from pylab import subplots
from matplotlib.pyplot import tight_layout
from Const import wv2_center_wavelength, jerlov_Kd
def myR0(z, Rinf, Ad, Kg):
    """Singly scattering irradiance (SSI) reflectance model.

    Irradiance reflectance just below the water surface for optically
    shallow, homogeneous water (Philpot 1987, eq. 2 of Philpot 1989;
    best described in Maritorena et al. 1994). Equivalent to the model in
    appendix A of Lyzenga 1978.

    Parameters
    ----------
    z : array-like
        Water column depth.
    Rinf : float
        Irradiance reflectance of an optically deep water column.
    Ad : float or array-like matching `z`
        Bottom irradiance reflectance (albedo).
    Kg : float
        Two-way effective attenuation: attenuation coefficient (K) times
        geometric factor (g).

    Returns
    -------
    array of floats
        R(0-), irradiance reflectance immediately below the surface.
    """
    # The bottom signal decays toward the deep-water signal exponentially
    # with depth: R = Rinf + (Ad - Rinf) * exp(-Kg * z).
    attenuation = np.exp(-1 * Kg * z)
    return Rinf + (Ad - Rinf) * attenuation
def param_df(zsand, Rsand, p0=None, geometric_factor=2.0):
    """Fit the SSI model per band and return the parameters as a DataFrame.

    Wraps `est_curve_params` and labels the result with WorldView-2 center
    wavelengths.

    Parameters
    ----------
    zsand : array-like
        Water column depth.
    Rsand : array-like
        Below-surface reflectance (or plain radiance) values; last axis is
        bands when the array has more than 2 dimensions.
    p0 : None, scalar, or N-length sequence, optional
        Initial guess for the curve-fit parameters (all 1 when None).
    geometric_factor : float
        Factor 'g' used to derive K from the fitted Kg; see
        `OpticalRS.ParameterEstimator.geometric_factor`.

    Returns
    -------
    pandas.DataFrame
        One row per band with columns 'Rinf', 'Ad', 'Kg', and 'K', indexed
        by WorldView-2 center wavelength. Other sensors would need a
        different index source.
    """
    # A 2-D input is treated as a single band.
    nbands = Rsand.shape[-1] if Rsand.ndim > 2 else 1
    wavelengths = wv2_center_wavelength[:nbands]
    fitted = est_curve_params(zsand, Rsand, p0=p0)
    paramdf = pd.DataFrame(fitted, columns=['Rinf', 'Ad', 'Kg'], index=wavelengths)
    # Convert the fitted two-way coefficient into a one-way K.
    paramdf['K'] = paramdf.Kg / geometric_factor
    return paramdf
def est_curve_params(zsand, Rsand, p0=None):
    """Fit `Rinf`, `Ad`, and `Kg` for every band of `Rsand`.

    Delegates each band to `est_curve_params_one_band`
    (scipy.optimize.curve_fit under the hood).

    Parameters
    ----------
    zsand : array-like
        Water column depth.
    Rsand : array-like
        Reflectance/radiance values; the last axis indexes bands.
    p0 : None, scalar, or N-length sequence, optional
        Initial guess for the curve-fit parameters (all 1 when None).

    Returns
    -------
    np.array
        One row per band; columns are (Rinf, Ad, Kg).
    """
    return np.array([
        est_curve_params_one_band(zsand, Rsand[..., band], p0=p0)
        for band in range(Rsand.shape[-1])
    ])
def est_curve_params_one_band(zsand, Rsand, p0=None):
    """Fit `Rinf`, `Ad`, and `Kg` for a single band by least squares.

    Parameters
    ----------
    zsand : array-like
        Water column depth (masked arrays are compressed to valid values).
    Rsand : array-like
        Reflectance/radiance values for one band (masked arrays are
        compressed to valid values).
    p0 : None, scalar, or N-length sequence, optional
        Initial guess for the curve-fit parameters (all 1 when None).

    Returns
    -------
    estRinf : float
        Estimated deep-water irradiance reflectance.
    estAd : float
        Estimated bottom albedo.
    est_Kg : float
        Estimated two-way effective attenuation (K times geometric factor g).

    Notes
    -----
    `curve_fit` failed to find a solution when the image array had dtype
    float64; casting to float32 works. `curve_fit` wraps `leastsq`/MINPACK
    (old Fortran), so the root cause remains unexplained.
    """
    # Drop masked pixels; curve_fit needs plain arrays.
    if np.ma.is_masked(zsand):
        zsand = zsand.compressed()
    if np.ma.is_masked(Rsand):
        Rsand = Rsand.compressed()
    # float32 cast per the note above.
    popt, _pcov = curve_fit(myR0, zsand.astype('float32'), Rsand.astype('float32'), p0)
    estRinf, estAd, est_Kg = popt
    return estRinf, estAd, est_Kg
def estAd_single_band(z, L, Rinf, Kg):
    """Invert the SSI model for bottom albedo `Ad` in one band.

    Given radiance `L` observed over depth `z`, and assumed `Rinf` and `Kg`,
    solve myR0 for Ad.

    Parameters
    ----------
    z : array-like
        Water column depth.
    L : array-like
        Below-surface reflectance (or plain radiance) for a single band.
    Rinf : float
        Deep-water irradiance reflectance.
    Kg : float
        Two-way effective attenuation (K times geometric factor g).

    Returns
    -------
    Ad : float or array-like of same size as `z`
        Bottom irradiance reflectance (albedo index).
    """
    attenuation = np.exp(-1 * Kg * z)
    # Rearranged myR0: Ad = (L - Rinf + Rinf * e^{-Kg z}) / e^{-Kg z}
    return (L - Rinf + Rinf * attenuation) / attenuation
def estAd(z, L, Rinf, Kg):
    """Invert the SSI model for bottom albedo `Ad`, all bands at once.

    Parameters
    ----------
    z : array-like
        Water column depth, shape (rows, columns).
    L : array-like
        Below-surface reflectance (or plain radiance), shape
        (rows, columns, bands).
    Rinf : array-like
        Per-band deep-water irradiance reflectance (extra trailing entries
        beyond L's band count are ignored).
    Kg : array-like
        Per-band two-way effective attenuation (likewise trimmed).

    Returns
    -------
    Ad : array of same shape as `L`
        Bottom irradiance reflectance (albedo) per band.
    """
    nbands = L.shape[-1]
    # Trim the per-band parameters to the bands actually present.
    Rinf = Rinf[:nbands]
    Kg = Kg[:nbands]
    # Broadcast the 2-D depth raster across the band axis.
    zcube = np.repeat(np.atleast_3d(z), nbands, axis=2)
    attenuation = np.exp(-1 * Kg * zcube)
    return (L - Rinf + Rinf * attenuation) / attenuation
def surface_reflectance_correction(imarr, nir_bands=[6, 7]):
    """Subtract the mean NIR signal from the visible bands.

    Assumes the NIR bands sit at the end of the band axis; the defaults
    (bands 6 and 7) match WorldView-2 ordering. The mutable default list is
    never modified, so it is safe here.

    Parameters
    ----------
    imarr : array-like
        Image array of shape (rows, columns, bands).
    nir_bands : list of int
        Band indices averaged to form the correction term.

    Returns
    -------
    array
        The visible bands (all bands except the last len(nir_bands)) with
        the per-pixel NIR mean subtracted.
    """
    n_visible = imarr.shape[-1] - len(nir_bands)
    nir_mean = imarr[..., nir_bands].mean(2)
    # Broadcast the 2-D NIR mean across every visible band.
    correction = np.repeat(np.atleast_3d(nir_mean), n_visible, axis=2)
    return imarr[..., :n_visible] - correction
def surface_refraction_correction(imarr):
    """Scale the image by the constant 0.54 to account for transmission
    across the air/water interface (see the module references)."""
    scale = 0.54
    return scale * imarr
## Visualization #############################################################
def albedo_parameter_plots(imarr, darr, params=None, plot_params=True,
ylabel='Reflectance', visible_only=True,
figsize=(12,7)):
# from matplotlib import style
# style.use('ggplot')
if params is None:
params = est_curve_params(darr, imarr)
if visible_only:
fig, axs = subplots(2, 3, figsize=figsize, sharey=False, sharex=True)
else:
fig, axs = subplots(2, 4, figsize=figsize, sharey=False, sharex=True)
for i, ax in enumerate(axs.ravel()):
if i >= imarr.shape[-1]:
# This means | |
import tkinter as tk
from tkinter import ttk
from tkinter.messagebox import showinfo
import random
from Color import color
# Clearer Ui using ctypes
import ctypes
# Opt this process into per-monitor DPI awareness so tkinter widgets render
# crisply on high-DPI Windows displays. NOTE(review): shcore is a
# Windows-only API — this line will raise on other platforms; confirm the
# app is Windows-only or guard it.
ctypes.windll.shcore.SetProcessDpiAwareness(1)
class SAVApp(tk.Tk):
    def __init__(self):
        """Build the main window: controls frame on top, drawing canvas below."""
        super().__init__()
        # The bar heights currently being visualized.
        self.array = []
        # Number of bars to generate (bound to the entry widget below).
        self.no_of_items = tk.IntVar()
        self.no_of_items.set(100)
        # Run/stop flag. NOTE(review): created as a tk.BooleanVar here, but
        # switch_on()/stop() later rebind it to a plain bool, and the sort
        # loops compare it with `== False` — see drawArray for the effect.
        self.switch = tk.BooleanVar()
        self.switch.set(False)
        self.algo_name = tk.StringVar()
        self.algo_list = ['Quick Sort', 'Merge Sort', "Odd Even Sort", 'Bubble Sort', 'Selection Sort', "Insertion Sort", 'Bogo Sort']
        self.speed_name = tk.StringVar()
        self.speed_list = ["Real-Time", 'Fast', 'Medium', 'Slow', 'Slowest']
        # Animation delay in milliseconds; refreshed from the combobox by sort().
        self.timespeed = 10
        # configure the root window
        self.title('Sorting Algorithms Visualizer')
        self.geometry('980x710')
        self.resizable(0,0)
        self.config(bg = color["LIGHT_GRAY"])
        self.grid_columnconfigure(0, weight = 1)
        self.grid_rowconfigure(0, weight = 1)
        # --- control panel (top frame) ---
        self.controller_ui = tk.Frame(self, bg=color["WHITE"])
        self.controller_ui.grid(row=0, column=0, padx = 10, pady = 5)
        self.controller_ui.grid_columnconfigure(0, weight = 1)
        self.algo_ui = tk.Label(self.controller_ui, text = 'Algorithms: ', bg = color['WHITE'])
        self.algo_ui.grid(row=0, column=0, padx=12 , pady=10, sticky = tk.W)
        self.algo_menu = ttk.Combobox(self.controller_ui, textvariable = self.algo_name, values = self.algo_list)
        self.algo_menu.grid(row = 0, column = 2, padx = 12, pady = 10)
        self.algo_menu.current(0)
        self.speed_ui = tk.Label(self.controller_ui, text = 'Speed:', bg = color['WHITE'])
        self.speed_ui.grid(row = 1, column = 0, padx = 12, pady = 10, sticky = tk.W)
        self.speed_menu = ttk.Combobox(self.controller_ui, textvariable = self.speed_name, values = self.speed_list)
        self.speed_menu.grid(row = 1, column = 2, padx = 12, pady = 10)
        self.speed_menu.current(1)
        self.no_of_columns_label = tk.Label(self.controller_ui, text = 'Number of Columns:', bg = color['WHITE'])
        self.no_of_columns_label.grid(row = 2, column = 0, padx = 12, pady = 10, sticky = tk.W)
        self.no_of_columns_entry = tk.Entry(self.controller_ui, textvariable = self.no_of_items, bg = color["WHITE"],width = 23)
        self.no_of_columns_entry.grid(row = 2, column = 2, padx = 12, pady = 10, sticky = tk.W)
        # --- action buttons ---
        self.generate_Button = tk.Button(self.controller_ui, text = 'Generate', command = self.generate, bg = color['WHITE'], width = 14)
        self.generate_Button.grid(row = 3, column = 0, padx = 12, pady = 10)
        self.sort_Button = tk.Button(self.controller_ui, text = 'Sort', command = self.switch_on, bg = color['WHITE'], width = 14)
        self.sort_Button.grid(row = 3, column = 1, padx = 12, pady = 10)
        self.stop_Button = tk.Button(self.controller_ui, text = 'Stop', command = self.stop, bg = color['WHITE'], width = 14)
        self.stop_Button.grid(row = 3, column = 2, padx = 12, pady = 10)
        # NOTE(review): bound to the *builtin* exit, not self.exit — confirm
        # which is intended (self.exit would destroy only the window).
        self.exit_Button = tk.Button(self.controller_ui, text = 'Exit', command = exit, bg = color['WHITE'], background = color['RED'], fg = color['WHITE'], width = 14)
        self.exit_Button.grid(row = 3, column = 3, padx = 12, pady = 10)
        # --- drawing surface ---
        self.visual_Canvas = tk.Canvas(self, width = 980, height = 480, bg = color['WHITE'])
        self.visual_Canvas.grid(row = 4 , column = 0, padx = 10, pady = (0, 10))
# <--- Functions/definations --->
    def drawArray (self, colorArray):
        """Redraw the whole array as bars; colorArray gives one fill per bar.

        Aborts early when the stop flag is set so a running animation can be
        interrupted mid-frame.
        """
        self.update()
        # self.switch starts life as a tk.BooleanVar (see __init__), which a
        # `== False` comparison does not equal — so drawing proceeds until
        # switch_on()/stop() rebind the attribute to a plain bool.
        if(self.switch == False):
            return
        self.visual_Canvas.delete("all")
        canvas_width = 960
        canvas_height = 480
        # +1 leaves a margin so the last bar does not touch the right edge.
        x_width = canvas_width / (len(self.array) + 1)
        offset = 4
        spacing = 2
        # Scale heights to [0, 1] relative to the tallest bar.
        # NOTE(review): raises ValueError on an empty array and
        # ZeroDivisionError if every value is 0 (possible with randint(0, 200)).
        normalized_array = [i / max(self.array) for i in self.array]
        for i, height in enumerate(normalized_array):
            if(self.switch == False):
                return
            x0 = i * x_width + offset + spacing
            # Bars grow upward from the bottom edge; 470 keeps a small top margin.
            y0 = canvas_height - height * 470
            x1 = (i + 1) * x_width + offset
            y1 = canvas_height
            self.visual_Canvas.create_rectangle(x0, y0, x1, y1, fill = colorArray[i])
        self.update_idletasks()
def generate(self):
self.update()
array_size = self.no_of_items.get()
self.array = []
for col in range(0, array_size):
randomval = random.randint(0, 200)
self.array.append(randomval)
self.drawArray([color['BLUE'] for x in range(len(self.array))])
def setspeed(self):
speed = self.speed_menu.get()
if speed == 'Slowest':
return 1000
elif speed == 'Slow':
return 500
elif speed == 'Medium':
return 100
elif speed == 'Fast':
return 10
else:
return 0
def sort(self):
self.timespeed = self.setspeed()
algo = self.algo_menu.get()
if algo == 'Bubble Sort':
self.bubble_sort()
elif algo == 'Merge Sort':
self.merge_sort(0, len(self.array) - 1, )
elif algo == 'Bogo Sort':
self.bogo_sort()
elif algo == 'Selection Sort':
self.selection_sort()
elif algo == 'Insertion Sort':
self.insertion_sort()
elif algo == 'Quick Sort':
self.quick_sort(0, len(self.array) - 1, )
elif algo == 'Odd Even Sort':
self.odd_even_sort()
    def switch_on(self):
        # Arm the run flag, then start the selected sort. NOTE(review): this
        # rebinds self.switch from the tk.BooleanVar created in __init__ to a
        # plain bool; the `self.switch == False` checks in the sort loops only
        # behave as a real on/off flag after this rebinding.
        self.switch = True
        self.sort()
    def stop(self):
        # Rebind the flag to plain False; every sort/draw loop polls it and
        # returns early, aborting the animation.
        self.switch = False
    def exit(self):
        # Destroy the root window. NOTE(review): the Exit button in __init__
        # is bound to the *builtin* exit, not this method — confirm intent.
        self.destroy()
    def odd_even_sort(self):
        """Animated odd-even (brick) transposition sort, ascending.

        Repeats an odd-indexed pass then an even-indexed pass until a full
        double pass makes no swap (isSorted stays 1).
        """
        isSorted = 0
        while isSorted == 0:
            isSorted = 1
            # Odd phase: compare/swap the pairs (1,2), (3,4), ...
            for i in range(1, len(self.array) - 1, 2):
                self.update()
                if(self.switch == False):
                    return
                if self.array[i] > self.array[i + 1]:
                    self.array[i], self.array[i + 1] = self.array[i + 1], self.array[i]
                    isSorted = 0
                    # drawArray is *called immediately* (argument evaluation);
                    # after() then simply waits `timespeed` ms because its
                    # callback is the None drawArray returned.
                    self.after(self.timespeed, self.drawArray([color['ORANGE'] if x == i else color["YELLOW"] if x == i + 1
                                                    else color["BLUE"] for x in range(len(self.array))]))
            # Even phase: pairs (0,1), (2,3), ...
            for i in range(0, len(self.array) - 1, 2):
                self.update()
                if(self.switch == False):
                    return
                if self.array[i] > self.array[i + 1]:
                    self.array[i], self.array[i + 1] = self.array[i + 1], self.array[i]
                    isSorted = 0
                    self.after(self.timespeed, self.drawArray([color['ORANGE'] if x == i else color["YELLOW"] if x == i + 1
                                                    else color["BLUE"] for x in range(len(self.array))]))
        # Final repaint: everything blue once sorted.
        self.drawArray([color['BLUE'] for x in range(len(self.array))])
    def bubble_sort(self):
        """Animated bubble sort, ascending; each pass sinks the current
        maximum to the end of the unsorted region."""
        size = len(self.array)
        for i in range(size - 1):
            # After pass i the last i elements are in place, hence the
            # shrinking inner range.
            for j in range(size - i - 1):
                self.update()
                if(self.switch == False):
                    return
                if self.array[j] > self.array[j + 1]:
                    self.array[j], self.array[j + 1] = self.array[j + 1], self.array[j]
                # drawArray runs immediately; after() just delays (callback is
                # drawArray's None return).
                self.after(self.timespeed, self.drawArray([color['ORANGE'] if x == j
                                            else color["YELLOW"] if x == j + 1
                                            else color["BLUE"] for x in range(len(self.array))]))
        # Final repaint in blue.
        self.drawArray([color['BLUE'] for x in range(len(self.array))])
    def bogo_sort(self):
        """Animated bogo sort: shuffle until is_sorted() says done.

        Expected runtime is factorial in the array length — demo only.
        """
        size = len(self.array)  # NOTE(review): unused local
        self.update()
        while(self.is_sorted() == False):
            self.shuffle()
def is_sorted(self):
size = len(self.array)
self.update()
for i in range (0, size - 1):
if(self.array[i] > self.array[i - 1]):
return False
return True
    def shuffle(self):
        """Animated naive shuffle: swap each index with a uniformly random
        index (not Fisher–Yates, but adequate for bogo sort)."""
        size = len(self.array)
        self.update()
        for i in range(0, size):
            self.update()
            if(self.switch == False):
                return
            r = random.randint(0, size - 1)
            self.array[i], self.array[r] = self.array[r], self.array[i]
            # drawArray runs immediately; after() just delays (callback is None).
            self.after(self.timespeed, self.drawArray([color["ORANGE"] if x == r
                                    else color['BLUE'] for x in range(len(self.array))]))
        self.drawArray([color['BLUE'] for x in range(len(self.array))])
    def quick_sort(self, start, end):
        """Animated recursive quicksort over self.array[start..end]
        (inclusive), using self.partition (last element as pivot).

        NOTE(review): if Stop is pressed mid-run, partition() returns None
        and the `x < pivot` comparisons in the drawArray argument (evaluated
        *before* the early-return check can fire) would raise TypeError —
        confirm/guard.
        """
        self.update()
        if(self.switch == False):
            return
        if start < end :
            self.update()
            if(self.switch == False):
                return
            pivot = self.partition( start, end)
            # Left span purple, pivot orange, right span yellow, rest blue.
            self.after(self.timespeed, self.drawArray([color["PURPLE"] if x >= start
                                and x < pivot else color['ORANGE'] if x == pivot
                                else color['YELLOW'] if x > pivot and x <= end
                                else color['BLUE'] for x in range(len(self.array))]))
            self.quick_sort(start, pivot - 1)
            self.update()
            if(self.switch == False):
                return
            self.quick_sort(pivot + 1, end)
            self.after(self.timespeed, self.drawArray([color["PURPLE"] if x >=start
                                and x < pivot else color['ORANGE'] if x == pivot
                                else color['YELLOW'] if x > pivot and x<=end
                                else color['BLUE'] for x in range(len(self.array))]))
        # Final repaint in blue (also runs after every recursive call).
        self.drawArray([color['BLUE'] for x in range(len(self.array))])
    def partition(self, start, end):
        """Lomuto partition: last element is the pivot; elements smaller than
        the pivot are moved left of it. Returns the pivot's final index.

        NOTE(review): returns None when interrupted (self.switch False),
        which the caller quick_sort does not guard against.
        """
        self.update()
        if(self.switch == False):
            return
        pivot = end
        pi = self.array[end]
        # i trails the boundary of the "smaller than pivot" region.
        i = start - 1
        for j in range(start, end):
            self.update()
            if(self.switch == False):
                return
            if self.array[j] < pi:
                i += 1
                self.array[i], self.array[j] = self.array[j], self.array[i]
        # Drop the pivot just after the smaller-than region.
        self.array[i + 1], self.array[pivot] = self.array[pivot], self.array[i + 1]
        return i + 1
def insertion_sort(self):
self.update()
size = len(self.array)
for i in range(1, size):
key = self.array[i]
j = i - 1
while key < self.array[j] and j >= 0:
self.update()
if(self.switch == False):
return
self.array[j + 1] = self.array[j]
j -= 1
self.after(self.timespeed, self.drawArray([color['ORANGE'] if x == i else color["YELLOW"] if x == j
else color["BLUE"] for x in range(len(self.array))]))
self.array[j + 1] = key
self.drawArray([color['BLUE'] for x in range(len(self.array))])
    def selection_sort(self):
        """Animated selection sort, ascending: find the minimum of the
        unsorted tail and swap it to position i."""
        for i in range(len(self.array) - 1):
            min_index = i
            for j in range(i + 1, len(self.array)):
                self.update()
                if(self.switch == False):
                    return
                if self.array[j] < self.array[min_index]:
                    min_index = j
                # Scanner purple, target slot orange, current minimum yellow.
                self.drawArray([color['PURPLE'] if x == j
                            else color['ORANGE'] if x == i
                            else color['YELLOW'] if x == min_index
                            else color['BLUE'] for x in range(len(self.array))])
            # Show the chosen minimum before swapping it into place.
            self.drawArray([color['ORANGE'] if x == i
                        else color['YELLOW'] if x == min_index
                        else color["BLUE"] for x in range(len(self.array))])
            self.array[i], self.array[min_index] = self.array[min_index], self.array[i]
            # drawArray runs immediately; after() just delays (callback is None).
            self.after(self.timespeed, self.drawArray([color['YELLOW'] if x == i
                                    else color['ORANGE'] if x == min_index
                                    else color["BLUE"] for x in range(len(self.array))]))
        self.drawArray([color['BLUE'] for x in range(len(self.array))])
def merge_sort(self, start, end):
self.update()
if(self.switch == False):
return
if start<end:
mid = int((start + end) / 2)
self.after(self.timespeed, self.drawArray([color["PURPLE"] if x >=start
and x < mid else color['ORANGE'] if x == mid
else color['YELLOW'] if x > mid and x <= end
else color['BLUE'] for x in range(len(self.array))]))
self.merge_sort(start, mid)
self.update()
if(self.switch == False):
return
self.merge_sort(mid + 1, | |
<filename>python_scripts/run_compute_precision_recall.py<gh_stars>1-10
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2015--, <NAME>.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
This script computes the true-positive (TP), false-positive (FP),
false-negative (FN), precision, recall, F-measure, FP-chimeric
(taxa fully comprising of chimeric OTUs), FP-known (taxa fully
comprising of OTUs mapping to BLAST's NT database with >=97% id
and coverage and FP-other (taxa fully comprising of OTUs mapping
to BLAST's NT database with <97% id and coverage.
Dependencies: QIIME 1.9.0, BIOM-format >=2.1.3, <2.2.0, BLAST 2.2.29+,
Blast NT database indexed, USEARCH Uchime (7.0.1090)
usage: python run_compute_precision_recall.py [16S, 18S] study \
taxonomy_level expected_taxa.txt
"""
import sys
import os
from biom import load_table
from subprocess import Popen, PIPE
from skbio.parse.sequences import parse_fasta
import copy
import numpy as np
import matplotlib.pyplot as plt
import brewer2mpl
def graph_abundance_func(true_positive_otus,
                         false_positive_known_otus,
                         false_positive_other_otus,
                         false_positive_chimeric_otus,
                         datatype,
                         tool,
                         study,
                         method,
                         results_dir,
                         taxonomy_mean,
                         taxonomy_stdev):
    '''Accumulate per-category read-count statistics for one
    gene/method/tool/study combination.

    For each of the four OTU categories — TP, FP-known, FP-other,
    FP-chimeric, in that fixed order — appends the mean and standard
    deviation of the reads-per-taxon counts to taxonomy_mean[tool] and
    taxonomy_stdev[tool]. The dictionaries are later written to files that
    OTU-picking/python_scripts/plot_tp_fp_distribution.py consumes.

    Fixes relative to the original:
    - the four copy-pasted accumulation loops are factored into one helper;
    - the OTU map is opened with mode 'r' instead of the long-deprecated
      'U' (removed in Python 3.11) — lines are .strip()ed, so a trailing
      carriage return is discarded either way;
    - the duplicate-key diagnostic uses the parenthesised single-argument
      print form, which prints identically on Python 2 and 3;
    - the unused `total_taxa` local was dropped.
    '''
    # Each OTU-picking pipeline lays its results out differently.
    if (method == "de_novo" and (tool == "uparse_q3" or tool == "uparse_q16")):
        otu_map_f = os.path.join(
            results_dir, datatype, method, "%s_%s" % (tool, study), "seqs_otus.txt")
    elif (method == "closed_ref" and (tool == "uclust" or tool == "usearch" or tool == "usearch61")):
        otu_map_f = os.path.join(
            results_dir, datatype, method, "%s_%s" % (tool, study), "%s_ref_picked_otus" % tool, "seqs_otus.txt")
    elif method == "open_ref":
        otu_map_f = os.path.join(
            results_dir, datatype, method, "%s_%s" % (tool, study), "final_otu_map_mc2.txt")
    else:
        otu_map_f = os.path.join(
            results_dir, datatype, method, "%s_%s" % (tool, study), "%s_picked_otus" % tool, "seqs_otus.txt")
    # Load the OTU map: OTU id -> list of read ids (tab-separated lines).
    otu_map_dict = {}
    with open(otu_map_f, 'r') as otu_map_fp:
        for line in otu_map_fp:
            fields = line.strip().split("\t")
            if fields[0] not in otu_map_dict:
                otu_map_dict[fields[0]] = fields[1:]
            else:
                print("ERROR: %s is already in dict" % fields[0])
                exit(1)
    if tool not in taxonomy_mean:
        taxonomy_mean[tool] = []
    if tool not in taxonomy_stdev:
        taxonomy_stdev[tool] = []

    def _append_read_stats(taxa_to_otus):
        # Append mean/stdev of the reads-per-taxon counts for one category.
        # An empty category gives an empty array; np.mean/np.std then return
        # nan, which nan_to_num maps to 0.0 (matching the original behaviour).
        totals = []
        for taxa in taxa_to_otus:
            reads = 0
            for otu_id in taxa_to_otus[taxa]:
                if otu_id in otu_map_dict:
                    reads += len(otu_map_dict[otu_id])
            totals.append(reads)
        arr = np.array(totals)
        taxonomy_mean[tool].append(np.rint(np.nan_to_num(np.mean(arr, axis=0))))
        taxonomy_stdev[tool].append(np.rint(np.nan_to_num(np.std(arr, axis=0))))

    # Fixed category order: TP, FP-known, FP-other, FP-chimeric — the
    # plotting script relies on this ordering.
    _append_read_stats(true_positive_otus)
    _append_read_stats(false_positive_known_otus)
    _append_read_stats(false_positive_other_otus)
    _append_read_stats(false_positive_chimeric_otus)
def compute_fp_other(results_dir,
out_dir,
filter_otus_dir,
taxonomy_mean,
taxonomy_stdev,
blast_nt_index,
actual_tax,
expected_tax,
tool,
study,
datatype,
method,
tax_level,
chimera_db_18S=None,
chimera_db_16S=None,
threads="1"):
'''This function completes the following steps,
1. Load original OTU table (excl. singleton OTUs) into dict with taxonomies as
keys and OTUs representing them as values in a list (only L5 and L6 supported)
2. Load all true-positive taxonomies as keys in a dict and all OTUs representing
them (known from dict in 1.) as values in a list
3. Load all false-positive taxonomies as keys in a dict and all OTUs representing
them (known from dict in 1.) as values in a list
4. Using results from 3. output all false-positive OTUs to a FASTA file and
run UCHIME chimera filter on them
5. Remove all chimeric OTUs from 3., if all OTUs representing a taxa have
been removed, count this taxa chimeric (FP-chimeric)
6. Write all non-chimeric OTUs to a file and run MEGABLAST on them against
BLAST's NT database
7. Remove all OTUs from 3. if mapping with >=97% identity and coverage to BLAST's
NT database. If all OTUs representing a taxa have been removed, count
this taxa as a known species (FP-known)
8. The remaining taxa in 3. comprise of OTUs mapping with <97% identity and coverage
to BLAST's NT database (FP-other)
9. Pass the true-positive dict, FP-chimeric dict, FP-known dict and FP-other dict
to graph_abundance_func() to compute the mean number of reads representing each
taxa in each of those dicts.
'''
if datatype == "16S" and chimera_db_16S is None:
raise ValueError("A chimera database for 16S must be passed")
elif datatype == "18S" and chimera_db_18S is None:
raise ValueError("A chimera database for 18S must be passed")
fp_chimera = 0
fp_known = 0
fp_other = 0
# load taxonomies from OTU table as keys into dictionary,
# and all OTU ids that share that taxonomy as values in a list
otu_table_dict = {}
biom_table_f = os.path.join(
filter_otus_dir, datatype, method, "%s_%s" % (tool, study), "otu_table_mc2.biom")
if not os.path.exists(biom_table_f):
print "%s does not exist, cannot search for contaminants" % biom_table_f
else:
#print "loading %s" % biom_table_f
biom_table = load_table(biom_table_f)
obs_ids_list = biom_table._observation_ids
for obs_id in obs_ids_list:
obs_data = biom_table.metadata(obs_id,'observation')
# taxonomy must be up to genus level (L6)
if tax_level == "L6":
if len(obs_data['taxonomy']) == 7:
del obs_data['taxonomy'][-1]
# taxonomy must be up to family level (L5)
elif tax_level == "L5":
if len(obs_data['taxonomy']) > 5:
obs_data['taxonomy'] = obs_data['taxonomy'][0:5]
else:
print "ERROR: taxonomy level %s not supported" % tax_level
exit(1)
assignment = ";".join(obs_data['taxonomy'])
if assignment not in otu_table_dict:
otu_table_dict[assignment] = [obs_id]
else:
otu_table_dict[assignment].append(obs_id)
# collect all OTUs representing summarized taxa true positive matches
# (this info is for the graph)
true_positive_otus = {}
lis_tp = actual_tax & expected_tax
for l in lis_tp:
if l not in otu_table_dict:
print "Error: TP %s not in OTU table" % l
exit(1)
else:
if l not in true_positive_otus:
true_positive_otus[l] = otu_table_dict[l]
else:
true_positive_otus[l].extend(otu_table_dict[l])
# collect all OTUs representing summarized taxa false positive matches
false_positive_otus = {}
lis = actual_tax - expected_tax
for l in lis:
if ";Other" in l:
l = l.replace(";Other", "")
if l not in otu_table_dict:
print "Error: FP %s not in OTU table" % l
exit(1)
else:
if l not in false_positive_otus:
false_positive_otus[l] = otu_table_dict[l]
else:
false_positive_otus[l].extend(otu_table_dict[l])
#print "Total true positive taxa = ", len(true_positive_otus)
#print "Total false positive taxa = ", len(false_positive_otus)
false_positive_otus_count = 0
for s in false_positive_otus:
false_positive_otus_count += len(false_positive_otus[s])
#print "Total false positive otus = ", false_positive_otus_count
true_positive_otus_count = 0
for s in true_positive_otus:
true_positive_otus_count += len(true_positive_otus[s])
#print "Total true positive otus = ", true_positive_otus_count
# create list file of false positive OTUs
fp_otus_ids_f = os.path.join(out_dir, datatype, method, "%s_%s_fp_ids.txt" % (tool, study))
with open(fp_otus_ids_f, 'w') as out_file:
for tax in false_positive_otus:
for otu in false_positive_otus[tax]:
out_file.write("%s\n" % otu)
# create FASTA file of false positive OTUs
if method == "closed_ref":
rep_set_fasta = os.path.join(
results_dir, datatype, method, "%s_%s" % (tool, study), "rep_set", "seqs_rep_set.fasta")
elif method == "de_novo":
if (tool == "uparse_q3" or tool == "uparse_q16"):
rep_set_fasta = os.path.join(
results_dir, datatype, method, "%s_%s" % (tool, study), "otus.fa")
else:
rep_set_fasta = os.path.join(
results_dir, datatype, method, "%s_%s" % (tool, study), "rep_set", "seqs_rep_set.fasta")
else:
rep_set_fasta = os.path.join(
results_dir, datatype, method, "%s_%s" % (tool, study), "rep_set.fna")
otus_fasta = os.path.join(
out_dir, datatype, method, "%s_%s_fp.fasta" % (tool, study))
filter_fasta_command = ["filter_fasta.py",
"-f", rep_set_fasta, "-o", otus_fasta,
"-s", fp_otus_ids_f]
#print "command = ", filter_fasta_command
proc = Popen(filter_fasta_command, stdout=PIPE, stderr=PIPE, close_fds=True)
proc.wait()
stdout, stderr = proc.communicate()
if stderr:
print stderr
# search for chimeras in all false positive OTUs using UCHIME
if datatype == "16S":
chimera_db = chimera_db_16S
elif datatype == "18S":
chimera_db = chimera_db_18S
else:
raise ValueError("%s not supported" % datatype)
chimeric_otus_fasta = os.path.join(
out_dir, datatype, method, "%s_%s_fp_chimeras.fasta" % (tool, study))
uchime_command = ["usearch70", "-uchime_ref", otus_fasta,
"-db", chimera_db,
"-strand", "plus", "-chimeras", chimeric_otus_fasta]
#print "command = ", uchime_command
proc = Popen(uchime_command, stdout=PIPE, stderr=PIPE, close_fds=True)
proc.wait()
stdout, stderr = proc.communicate()
#if stderr:
# print stderr
# get list of chimeric OTU ids
chimeric_ids = []
with open(chimeric_otus_fasta, "U") as identified_chimeras_fp:
for label, seq in parse_fasta(identified_chimeras_fp):
chimeric_ids.append(label)
#print "Total chimeric OTUs identified in false-positive set: %s" | |
import logging
import uuid
import re
logger = logging.getLogger(__name__)
# Fixed trustzone UUID constant. NOTE(review): no consumer is visible in this
# chunk — presumably the default applied when a mapping declares no
# trustzone; confirm against the rest of the module.
DEFAULT_TRUSTZONE = "b61d6911-338d-46a8-9f39-8dcd24abfe91"
def format_source_objects(source_objects):
    """Normalize a source-model search result to a list.

    Lists pass through untouched; any other value (including strings and
    dicts) is wrapped in a single-element list.
    """
    return source_objects if isinstance(source_objects, list) else [source_objects]
def get_mappings_for_name_and_tags(mapping_definition):
    """Extract the 'name' mapping (mandatory) and 'tags' mapping (optional)
    from a mapping definition.

    Returns a ``(mapping_name, mapping_tags)`` tuple; either element is None
    when the corresponding key is absent.

    Bug fix: the original never initialized ``mapping_name``, so when 'name'
    was missing it logged the problem and then immediately crashed with an
    UnboundLocalError at the return statement. Initialize it to None so the
    (already logged) condition degrades gracefully.
    """
    mapping_name = None
    mapping_tags = None
    if "name" in mapping_definition:
        mapping_name = mapping_definition["name"]
    else:
        logger.debug(f"Required mandatory field: 'name' in mapping definition: {mapping_definition}")
    if "tags" in mapping_definition:
        mapping_tags = mapping_definition["tags"]
    return mapping_name, mapping_tags
def get_tags(source_model, source_object, mapping):
    """Resolve one or many tag mappings against a source object.

    Returns the list of ``source_model.search`` results, one per mapping
    entry; a None mapping yields an empty list.
    """
    if mapping is None:
        return []
    entries = mapping if isinstance(mapping, list) else [mapping]
    return [source_model.search(entry, source=source_object) for entry in entries]
def set_optional_parameters_to_resource(resource, mapping_tags, resource_tags, singleton_multiple_name=None,
                                        singleton_multiple_tags=None):
    """Attach optional tag / singleton metadata to a resource dict in place.

    Tags are stored only when a tags mapping exists AND at least one resolved
    tag is neither None nor empty; note the *full*, unfiltered tag list is
    stored in that case. Returns the same ``resource`` dict for chaining.

    Bug fix: the filter used ``tag is not ''`` — an *identity* comparison
    against a string literal, which is a SyntaxWarning on CPython >= 3.8 and
    only works by the accident of small-string interning. Use ``!=``.
    """
    has_real_tag = (
        mapping_tags is not None
        and resource_tags is not None
        and any(tag is not None and tag != '' for tag in resource_tags)
    )
    if has_real_tag:
        resource["tags"] = resource_tags
    if singleton_multiple_name is not None:
        resource["singleton_multiple_name"] = singleton_multiple_name
    if mapping_tags is not None and singleton_multiple_tags is not None:
        resource["singleton_multiple_tags"] = singleton_multiple_tags
    return resource
def get_altsource_mapping_path_value(source_model, alt_source_object, mapping_path):
    """Resolve a mapping path against an altsource object.

    Plain strings pass through unchanged. CloudFormation-style intrinsic
    dicts are handled specially: ``Fn::Join`` joins its string parts with the
    given separator (non-string parts, e.g. nested refs, are skipped) and
    ``Fn::Sub`` returns the raw template string. Any other result yields
    None.
    """
    raw = source_model.search(mapping_path, source=alt_source_object)
    if isinstance(raw, str):
        return raw
    if isinstance(raw, dict):
        if "Fn::Join" in raw:
            separator = raw["Fn::Join"][0]
            parts = raw["Fn::Join"][1]
            return separator.join(p for p in parts if isinstance(p, str))
        if "Fn::Sub" in raw:
            return raw["Fn::Sub"]
    return None
class TrustzoneMapper:
    """Builds trustzone dicts from a source model using one mapping definition."""

    def __init__(self, mapping):
        self.mapping = mapping
        # Source-id -> source-id bookkeeping (kept for parity with the other
        # mappers' id_map attribute).
        self.id_map = {}

    def run(self, source_model):
        """Return the list of trustzones this mapping describes.

        When the mapping has a ``$source`` selector, each matched source
        object becomes a trustzone; otherwise the mapping itself is treated
        as the single source object.
        """
        if "$source" in self.mapping:
            candidates = format_source_objects(source_model.search(self.mapping["$source"]))
        else:
            candidates = [self.mapping]
        trustzones = []
        for candidate in candidates:
            trustzone = {
                "name": source_model.search(self.mapping["name"], source=candidate),
                "source": candidate,
            }
            if "properties" in self.mapping:
                trustzone["properties"] = self.mapping["properties"]
            # The id mapping is resolved against the partially built
            # trustzone (so it can reference its name/source).
            source_id = source_model.search(self.mapping["id"], source=trustzone)
            self.id_map[source_id] = source_id
            trustzone["id"] = source_id
            logger.debug(f"Added trustzone: [{trustzone['id']}][{trustzone['name']}]")
            trustzones.append(trustzone)
        return trustzones
class ComponentMapper:
    def __init__(self, mapping):
        # Mapping definition that drives component extraction in run().
        self.mapping = mapping
        # NOTE(review): set to None and never rebound in the visible portion
        # of the class — presumably assigned by external callers; confirm.
        self.source = None
        # Bookkeeping of generated component ids; populated elsewhere in the
        # class (exact keying not visible in this chunk).
        self.id_map = {}
def run(self, source_model, id_parents):
"""
Iterates through the source model and returns the parameters to create the components
:param source_model:
:param id_parents:
:return:
"""
components = []
source_objects = format_source_objects(source_model.search(self.mapping["$source"], source=None))
mapping_name, mapping_tags = get_mappings_for_name_and_tags(self.mapping)
for source_object in source_objects:
component_type = source_model.search(self.mapping["type"], source=source_object)
component_name, singleton_multiple_name = self.__get_component_names(source_model, source_object,
mapping_name)
parents, parents_from_component = self.__get_parent_resources_ids(source_model, source_object, id_parents,
component_name)
component_tags, singleton_multiple_tags = self.__get_component_tags(source_model, source_object,
mapping_tags)
for parent_number, parent_element in enumerate(parents):
# If there is more than one parent (i.e. subnets), the component will replicated inside each
component = {"name": component_name, "type": component_type, "source": source_object,
"parent": self.__get_parent_id(parent_element, parents_from_component, component_name)}
if "properties" in self.mapping:
component["properties"] = self.mapping["properties"]
component = set_optional_parameters_to_resource(component, mapping_tags, component_tags,
singleton_multiple_name, singleton_multiple_tags)
component_id = self.__generate_id(source_model, component, component_name, parent_number)
component["id"] = component_id
# If the component is defining child components the ID must be saved in a parent dict
if "$children" in self.mapping["$source"]:
logger.debug("Component is defining child components...")
children = source_model.search(self.mapping["$source"]["$children"], source=source_object)
# TODO: Alternative options for $path when nothing is returned
if children not in id_parents:
id_parents[children] = list()
id_parents[children].append(component_id)
logger.debug(
f"Added component: [{component['id']}][{component['type']}] | Parent: [{component['parent']}]")
components.append(component)
logger.debug("")
# Here we should already have all the components
if "$altsource" in self.mapping and components == []:
logger.debug("No components found. Trying to find components from alternative source")
alt_source = self.mapping["$altsource"]
for alt in alt_source:
mapping_type = alt["$mappingType"]
mapping_path = alt["$mappingPath"]
mapping_lookups = alt["$mappingLookups"]
alt_source_objects = format_source_objects(source_model.search(mapping_type, source=None))
for alt_source_object in alt_source_objects:
value = get_altsource_mapping_path_value(source_model, alt_source_object, mapping_path)
for mapping_lookup in mapping_lookups:
result = re.match(mapping_lookup["regex"], value)
if result is not None:
if DEFAULT_TRUSTZONE not in self.id_map:
self.id_map[DEFAULT_TRUSTZONE] = str(uuid.uuid4())
mapping_name, mapping_tags = get_mappings_for_name_and_tags(mapping_lookup)
component_name, singleton_multiple_name = self.__get_component_names(source_model,
alt_source_object,
mapping_name)
component_tags, singleton_multiple_tags = self.__get_component_tags(source_model,
alt_source_object,
mapping_tags)
component = {"id": str(uuid.uuid4()), "name": component_name,
"type": mapping_lookup["type"], "parent": self.id_map[DEFAULT_TRUSTZONE]}
component = set_optional_parameters_to_resource(component, mapping_tags, component_tags,
singleton_multiple_name,
singleton_multiple_tags)
components.append(component)
return components
def __get_component_names(self, source_model, source_object, mapping):
singleton_multiple_name = None
if self.__multiple_sources_mapping_inside(mapping):
component_name, singleton_multiple_name = self.__get_component_singleton_names(source_model, source_object,
mapping)
else:
component_name = self.__get_component_individual_name(source_model, source_object, mapping)
return component_name, singleton_multiple_name
def __get_component_individual_name(self, source_model, source_object, mapping):
if "name" in self.mapping:
source_component_name = source_model.search(mapping, source=source_object)
logger.debug(f"+Found source object with name {source_component_name}")
else:
source_component_name = None
logger.error(f"+Found source object with name None")
return source_component_name
def __get_component_singleton_names(self, source_model, source_object, mapping):
if "name" in self.mapping:
source_component_name, source_component_multiple_name = source_model.search(mapping, source=source_object)
logger.debug(f"+Found singleton source object with multiple name {source_component_name}")
else:
source_component_name = None
logger.error(f"+Found singleton source object with name None")
return source_component_name, source_component_multiple_name
def __get_component_tags(self, source_model, source_object, mapping):
component_tags = None
singleton_multiple_tags = None
if mapping is not None:
if self.__multiple_sources_mapping_inside(mapping):
component_tags, singleton_multiple_tags = self.__get_component_singleton_tags(source_model, source_object,
mapping)
else:
component_tags = get_tags(source_model, source_object, mapping)
return component_tags, singleton_multiple_tags
def __get_component_singleton_tags(self, source_model, source_object, mapping):
c_tags = []
c_multiple_tags = []
if "tags" in self.mapping:
if isinstance(mapping, list):
for tag in mapping:
if self.__multiple_sources_mapping_inside(tag):
c_temp_tags, c_temp_multiple_tags = source_model.search(tag, source=source_object)
c_tags.append(c_temp_tags)
c_multiple_tags.append(c_temp_multiple_tags)
else:
c_temp_tags = source_model.search(tag, source=source_object)
c_tags.append(c_temp_tags)
c_multiple_tags.append(c_temp_tags)
else:
c_temp_tags, c_temp_multiple_tags = source_model.search(mapping, source=source_object)
c_tags.append(c_temp_tags)
c_multiple_tags.append(c_temp_multiple_tags)
return c_tags, c_multiple_tags
def __multiple_sources_mapping_inside(self, mapping_definition):
return "$singleton" in self.mapping["$source"] and \
len(list(filter(lambda obj: "$numberOfSources" in obj, mapping_definition))) > 0
def __get_parent_resources_ids(self, source_model, source_object, id_parents, component_name):
# Retrieves a list of parent resources (components or trustZones) of the element.
parents_from_component = False
if "parent" in self.mapping:
if "$parent" in self.mapping["parent"]:
# In this case the parent component is the one in charge of defining which components
# are their children, so it's ID should be stored before reaching the child components
# With $parent, it will check if the supposed id_parents exist,
# otherwise performing a standard search using action inside $parent
if len(id_parents) > 0:
parent = id_parents[component_name]
parents_from_component = True
else:
parent = source_model.search(self.mapping["parent"]["$parent"], source=source_object)
else:
# Just takes the parent component from the "parent" field in the mapping file
# TODO: What if the object can't find a parent component? Should it have a default parent in case the path didn't find anything?
parent = source_model.search(self.mapping["parent"], source=source_object)
else:
parent = ""
if isinstance(parent, list):
if len(parent) == 0:
parent = [DEFAULT_TRUSTZONE]
if isinstance(parent, str):
if parent == "":
parent = [DEFAULT_TRUSTZONE]
else:
parent = [parent]
return parent, parents_from_component
def __get_parent_id(self, parent_element, parents_from_component, component_name):
if parents_from_component:
# If the parent component was detected outside the component we need to look at the parent dict
parent_id = parent_element
self.id_map[parent_element] = parent_id
logger.debug(f"Component {component_name} gets parent ID from existing component")
else:
found = False
logger.debug("Trying to get parent ID from existing component...")
if parent_element in self.id_map:
parent_id = self.id_map[parent_element]
found = True
logger.debug(f"Parent ID detected: [{parent_id}][{parent_element}]")
if not found:
logger.debug("ID not found. Trying to get parent ID from parent substring...")
for key in self.id_map:
if key in parent_element:
parent_id = self.id_map[key]
found = True
logger.debug(f"Parent ID detected: [{parent_id}][{key}]")
break
if not found:
logger.debug("No ID found. Creating new parent ID...")
parent_id = str(uuid.uuid4())
self.id_map[parent_element] = parent_id
return parent_id
def __generate_id(self, source_model, component, component_name, parent_number):
if "id" in self.mapping:
source_id = source_model.search(self.mapping["id"], source=component)
else:
source_id = str(uuid.uuid4())
# make a previous lookup on the list of parent mappings
c_id = None
if source_id is not None and len(self.id_map) > 0:
if component_name in self.id_map.keys():
c_id = self.id_map[component_name]
# a new ID can be generated if there is more a parent and this is not the first one
if c_id is None or parent_number > 0:
c_id = str(uuid.uuid4())
self.id_map[source_id] = c_id
return c_id
class DataflowNodeMapper:
    """Resolves one endpoint (source or destination) of a dataflow from the source model."""
    def __init__(self, mapping):
        self.mapping = mapping
        self.id_map = {}
    def run(self, source_model, source):
        """Search the model for the endpoint node(s); a lone string result is wrapped in a list."""
        found = source_model.search(self.mapping, source=source)
        return [found] if isinstance(found, str) else found
class DataflowMapper:
def __init__(self, mapping):
self.mapping = mapping
self.id_map = {}
def run(self, source_model):
dataflows = []
source_objs = format_source_objects(source_model.search(self.mapping["$source"], source=None))
mapping_name, mapping_tags = get_mappings_for_name_and_tags(self.mapping)
for source_obj in source_objs:
df_name = source_model.search(mapping_name, source=source_obj)
source_mapper = DataflowNodeMapper(self.mapping["source"])
destination_mapper = DataflowNodeMapper(self.mapping["destination"])
source_nodes = source_mapper.run(source_model, source_obj)
if source_nodes is not None and len(source_nodes) > 0:
for source_node in source_nodes:
destination_nodes = destination_mapper.run(source_model, source_obj)
if destination_nodes is not None and len(destination_nodes) > 0:
for destination_node in destination_nodes:
# skip self referencing dataflows
if source_node == destination_node:
continue
dataflow = {"name": df_name}
if source_node in self.id_map:
source_node_id = self.id_map[source_node]
else:
# not generate component IDs that may have been generated on component mapping
continue
if destination_node in self.id_map:
destination_node_id = self.id_map[destination_node]
else:
# not generate components that may have been generated on components mapping
continue
dataflow["source_node"] = source_node_id
dataflow["destination_node"] = destination_node_id
dataflow["source"] = source_obj
if "properties" in self.mapping:
dataflow["properties"] = self.mapping["properties"]
if "bidirectional" | |
"""
Author: <NAME>, Phd Student @ Ishida Laboratory, Department of Computer Science, Tokyo Institute of Technology
Created on: February 21st, 2020
Description: This file contains necessary functions for the generation and splitting of the raw original dataset.
"""
import os
import random
import numpy as np
import pandas as pd
from collections import Counter
from tqdm import tqdm
from chemistry_methods.reactions import parse_reaction_roles
from chemistry_methods.fingerprints import construct_ecfp, construct_hsfp
from chemistry_methods.reaction_analysis import extract_info_from_reaction, extract_info_from_molecule
from chemistry_methods.reaction_cores import get_reaction_core_atoms, get_separated_cores
from chemistry_methods.molecules import get_atom_environment, get_bond_environment
from data_methods.helpers import get_n_most_frequent_rows, encode_one_hot
def _aggregate_reaction_classes(pool_smiles, reaction_classes, description):
    """ Replaces each entry of 'reaction_classes' with the set of class values observed across all
    occurrences of the same SMILES string in 'pool_smiles'. Groups indices by SMILES first, so it
    runs in O(n) instead of the previous O(n^2) pairwise scan. Mutates 'reaction_classes' in place. """
    same_smiles_rows = {}
    for row_ind, smiles in enumerate(pool_smiles):
        same_smiles_rows.setdefault(smiles, []).append(row_ind)
    for row_inds in tqdm(same_smiles_rows.values(), total=len(same_smiles_rows), ascii=True, desc=description):
        aggregated_class_values = set(reaction_classes[row_ind] for row_ind in row_inds)
        for row_ind in row_inds:
            reaction_classes[row_ind] = aggregated_class_values
def generate_unique_compound_pools(args):
    """ Generates and stores unique (RDKit Canonical SMILES) chemical compound pools of the reactants and products for a
    chemical reaction dataset. The dataset needs to contain a column named 'rxn_smiles' in which the values for the
    mapped reaction SMILES strings are stored. Writes 'unique_reactants_pool.pkl' and
    'unique_products_pool.pkl' to args.dataset_config.output_folder. """
    reactant_pool_smiles, product_pool_smiles, reactant_pool_mol, product_pool_mol = [], [], [], []
    reactant_reaction_class, product_reaction_class = [], []
    # Read the raw original chemical reaction dataset.
    raw_dataset = pd.read_csv(args.dataset_config.raw_dataset)
    # Iterate through the chemical reaction entries and generate unique canonical SMILES reactant and product pools.
    # Reagents are skipped in this research.
    for row_ind, row in tqdm(raw_dataset.iterrows(), total=len(raw_dataset.index), ascii=True,
                             desc="Generating unique reactant and product compound representations"):
        # Extract and save the canonical SMILES from the reaction.
        reactants, _, products = parse_reaction_roles(row["rxn_smiles"], as_what="canonical_smiles_no_maps")
        reactant_pool_smiles.extend(reactants)
        product_pool_smiles.extend(products)
        # Extract and save the RDKit Mol objects from the reaction.
        reactants, _, products = parse_reaction_roles(row["rxn_smiles"], as_what="mol_no_maps")
        reactant_pool_mol.extend(reactants)
        product_pool_mol.extend(products)
        # Save the reaction class of the entry, once per reactant/product compound.
        reactant_reaction_class.extend([row["class"]] * len(reactants))
        product_reaction_class.extend([row["class"]] * len(products))
    # Aggregate the saved reaction classes for compounds sharing the same SMILES string.
    _aggregate_reaction_classes(reactant_pool_smiles, reactant_reaction_class,
                                "Aggregating reaction class values for the reactant compounds")
    _aggregate_reaction_classes(product_pool_smiles, product_reaction_class,
                                "Aggregating reaction class values for the product compounds")
    print("Filtering unique reactant and product compounds...", end="")
    # Filter out duplicate reactant molecules from the reactant and product sets.
    reactant_pool_smiles, reactants_uq_ind = np.unique(reactant_pool_smiles, return_index=True)
    product_pool_smiles, products_uq_ind = np.unique(product_pool_smiles, return_index=True)
    # Apply the unique indices to the list of RDKit Mol objects.
    reactant_pool_mol = np.array(reactant_pool_mol)[reactants_uq_ind].tolist()
    product_pool_mol = np.array(product_pool_mol)[products_uq_ind].tolist()
    # Apply the unique indices to the list of reaction classes.
    reactant_reaction_class = np.array(reactant_reaction_class)[reactants_uq_ind].tolist()
    product_reaction_class = np.array(product_reaction_class)[products_uq_ind].tolist()
    print("done.")
    # Pre-generate the reactant molecular fingerprint descriptors for similarity searching purposes.
    ecfp_1024 = []
    for uq_reactant in tqdm(reactant_pool_smiles, total=len(reactant_pool_smiles), ascii=True,
                            desc="Generating reactant compound fingerprints"):
        ecfp_1024.append(construct_ecfp(uq_reactant, radius=args.descriptor_config.similarity_search["radius"],
                                        bits=args.descriptor_config.similarity_search["bits"]))
    print("Saving the processed reactant compound data...", end="", flush=True)
    # Store all of the generated reactant fingerprints in a .pkl file.
    pd.DataFrame({"mol_id": list(range(0, len(reactant_pool_smiles))), "canonical_smiles": reactant_pool_smiles,
                  "mol_object": reactant_pool_mol, "ecfp_1024": ecfp_1024, "reaction_class": reactant_reaction_class}).\
        to_pickle(args.dataset_config.output_folder + "unique_reactants_pool.pkl")
    print("done.")
    # Pre-generate the product molecular fingerprint descriptors for similarity searching purposes.
    ecfp_1024 = []
    for uq_product in tqdm(product_pool_smiles, total=len(product_pool_smiles), ascii=True,
                           desc="Generating product compound fingerprints"):
        ecfp_1024.append(construct_ecfp(uq_product, radius=args.descriptor_config.similarity_search["radius"],
                                        bits=args.descriptor_config.similarity_search["bits"]))
    print("Saving the processed product compound data...", end="", flush=True)
    # Store all of the generated product fingerprints in a .pkl file.
    pd.DataFrame({"mol_id": list(range(0, len(product_pool_smiles))), "canonical_smiles": product_pool_smiles,
                  "mol_object": product_pool_mol, "ecfp_1024": ecfp_1024, "reaction_class": product_reaction_class}).\
        to_pickle(args.dataset_config.output_folder + "unique_products_pool.pkl")
    print("done.")
def extract_relevant_information(reaction_smiles, uq_reactant_mols_pool, uq_product_mols_pool, fp_params):
    """ Extracts the necessary information from a single mapped reaction SMILES string.

    :param reaction_smiles: atom-mapped reaction SMILES string
    :param uq_reactant_mols_pool: list of unique reactant canonical SMILES (index = molecule id)
    :param uq_product_mols_pool: list of unique product canonical SMILES (index = molecule id)
    :param fp_params: dict with "radius" and "bits" keys for ECFP generation
    :return: 18-tuple of lists — reactant values first (uq-pool indices, reactive/non-reactive
        SMILES, SMILES Mols, SMARTS Mols, fingerprints), then the same 9 for products.
    """
    # Extract the canonical SMILES and RDKit Mol objects from the reaction SMILES string.
    reactant_smiles, _, product_smiles = parse_reaction_roles(reaction_smiles, as_what="canonical_smiles_no_maps")
    reactants, _, products = parse_reaction_roles(reaction_smiles, as_what="mol_no_maps")
    # Sort the reactants and products in descending order by number of atoms so the largest reactants is always first.
    reactants, reactant_smiles = zip(*sorted(zip(reactants, reactant_smiles), key=lambda k: len(k[0].GetAtoms()),
                                             reverse=True))
    products, product_smiles = zip(*sorted(zip(products, product_smiles), key=lambda k: len(k[0].GetAtoms()),
                                           reverse=True))
    # Accumulators: r*/p* prefix = reactant/product; *r_* = reactive part, *nr_* = non-reactive part;
    # suffixes: smiles (strings), smols (SMILES Mols), smals (SMARTS Mols), fps (fingerprints).
    r_uq_mol_maps, rr_smiles, rr_smols, rr_smals, rr_fps, rnr_smiles, rnr_smols, rnr_smals, rnr_fps = \
        [], [], [], [], [], [], [], [], []
    p_uq_mol_maps, pr_smiles, pr_smols, pr_smals, pr_fps, pnr_smiles, pnr_smols, pnr_smals, pnr_fps = \
        [], [], [], [], [], [], [], [], []
    # Extract the reactive and non-reactive parts of the reactant and product molecules.
    # NOTE(review): frags[i][0] appears to be the reactive part and frags[i][1] the non-reactive
    # part, with indices [0]=SMILES, [2]=SMILES Mol, [3]=SMARTS Mol — confirm against
    # extract_info_from_reaction.
    reactant_frags, product_frags = extract_info_from_reaction(reaction_smiles)
    # Iterate through all of the reactants and aggregate the specified data.
    for r_ind, reactant in enumerate(reactants):
        # Map the reactant back to its index in the unique-compound pool.
        r_uq_mol_maps.append(uq_reactant_mols_pool.index(reactant_smiles[r_ind]))
        rr_smiles.append(reactant_frags[r_ind][0][0])
        rnr_smiles.append(reactant_frags[r_ind][1][0])
        rr_smols.append(reactant_frags[r_ind][0][2])
        rnr_smols.append(reactant_frags[r_ind][1][2])
        rr_smals.append(reactant_frags[r_ind][0][3])
        rnr_smals.append(reactant_frags[r_ind][1][3])
        rr_fps.append(construct_ecfp(reactant_frags[r_ind][0][2], radius=fp_params["radius"], bits=fp_params["bits"]))
        rnr_fps.append(construct_ecfp(reactant_frags[r_ind][1][2], radius=fp_params["radius"], bits=fp_params["bits"]))
    # Iterate through all of the products and aggregate the specified data.
    # Note: product fragments are extended (not appended), so each product may contribute
    # multiple fragment entries.
    for p_ind, product in enumerate(products):
        p_uq_mol_maps.append(uq_product_mols_pool.index(product_smiles[p_ind]))
        pr_smiles.extend(product_frags[p_ind][0][0])
        pnr_smiles.extend(product_frags[p_ind][1][0])
        pr_smols.extend(product_frags[p_ind][0][2])
        pnr_smols.extend(product_frags[p_ind][1][2])
        pr_smals.extend(product_frags[p_ind][0][3])
        pnr_smals.extend(product_frags[p_ind][1][3])
        for pf in product_frags[p_ind][0][2]:
            pr_fps.append(construct_ecfp(pf, radius=fp_params["radius"], bits=fp_params["bits"]))
        for pf in product_frags[p_ind][1][2]:
            pnr_fps.append(construct_ecfp(pf, radius=fp_params["radius"], bits=fp_params["bits"]))
    # Return the extracted information.
    return r_uq_mol_maps, rr_smiles, rr_smols, rr_smals, rr_fps, rnr_smiles, rnr_smols, rnr_smals, rnr_fps,\
        p_uq_mol_maps, pr_smiles, pr_smols, pr_smals, pr_fps, pnr_smiles, pnr_smols, pnr_smals, pnr_fps
def expand_reaction_dataset(args):
    """ Standardizes and expands the original dataset with additional, useful information. The raw dataset needs to
    contain columns named 'id', 'rxn_smiles' and 'class' in which the values for the reaction identification, mapped
    reaction SMILES and reaction class are stored, respectively."""
    # Read the raw chemical reaction dataset and rename the fetched columns.
    raw_dataset = pd.read_csv(args.dataset_config.raw_dataset)[["id", "rxn_smiles", "class"]]
    raw_dataset.columns = ["patent_id", "reaction_smiles", "reaction_class"]
    # Pairs of new (reactant, product) columns holding: the unique molecule id's, followed by the
    # SMILES strings, SMILES Mol objects, SMARTS Mol objects and fingerprints of the reactive and
    # non-reactive parts of the molecules.
    new_column_pairs = [
        ("reactants_uq_mol_maps", "products_uq_mol_maps"),
        ("reactants_reactive_smiles", "products_reactive_smiles"),
        ("reactants_reactive_smols", "products_reactive_smols"),
        ("reactants_reactive_smals", "products_reactive_smals"),
        ("reactants_reactive_fps", "products_reactive_fps"),
        ("reactants_non_reactive_smiles", "products_non_reactive_smiles"),
        ("reactants_non_reactive_smols", "products_non_reactive_smols"),
        ("reactants_non_reactive_smals", "products_non_reactive_smals"),
        ("reactants_non_reactive_fps", "products_non_reactive_fps"),
    ]
    for reactant_column, product_column in new_column_pairs:
        raw_dataset[reactant_column], raw_dataset[product_column] = None, None
    # Read the previously generated unique molecule pools.
    reactant_pool = pd.read_pickle(args.dataset_config.output_folder +
                                   "unique_reactants_pool.pkl")["canonical_smiles"].values.tolist()
    product_pool = pd.read_pickle(args.dataset_config.output_folder +
                                  "unique_products_pool.pkl")["canonical_smiles"].values.tolist()
    # `extract_relevant_information` returns all reactant values first, then all product values,
    # in exactly the per-group order of `new_column_pairs`.
    extracted_value_columns = [pair[0] for pair in new_column_pairs] + [pair[1] for pair in new_column_pairs]
    # Iterate through all of the reactions and generate their unique molecule mapping for easier reactant retrieval in
    # the later stages of the approach.
    for row_ind, row in tqdm(raw_dataset.iterrows(), total=len(raw_dataset.index), ascii=True,
                             desc="Generating unique reactant and product compound representations"):
        # Extract the needed values from the reaction SMILES string.
        extracted_values = extract_relevant_information(row["reaction_smiles"], reactant_pool, product_pool,
                                                        args.descriptor_config.similarity_search)
        # Assign the extracted values to the data frame.
        for column_name, value in zip(extracted_value_columns, extracted_values):
            raw_dataset.at[row_ind, column_name] = value
    print("Saving the generated compound data...", end="", flush=True)
    # Save the final reaction dataset in .pkl format.
    raw_dataset.to_pickle(args.dataset_config.output_folder + "final_training_dataset.pkl")
    print("done.")
def generate_dataset_splits(args):
""" Generates training and test splits for the n-fold cross validation process in the ratio 80:20. """
# Read the processed chemical | |
# src/decoding.py
from collections import namedtuple
import torch
import util
from dataloader import BOS_IDX, EOS_IDX, STEP_IDX
from model import Categorical, HardMonoTransducer, HMMTransducer, dummy_mask
from transformer import Transformer
# Device used for all tensors created during decoding: GPU when available, else CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Decode(util.NamedEnum):
    """Decoding strategies supported by `Decoder` / `get_decode_fn`."""
    greedy = "greedy"
    sample = "sample"
    beam = "beam"
class Decoder(object):
    """Caching front-end that dispatches to a greedy / beam / sampling decode routine.

    Deterministic results (greedy, beam) are memoized per source sentence;
    sampling results are never cached since they are stochastic.
    """
    def __init__(
        self,
        decoder_type,
        max_len=100,
        beam_size=5,
        trg_bos=BOS_IDX,
        trg_eos=EOS_IDX,
        skip_attn=True,
    ):
        self.type = decoder_type
        self.max_len = max_len
        self.beam_size = beam_size
        self.trg_bos = trg_bos
        self.trg_eos = trg_eos
        self.skip_attn = skip_attn
        self.cache = {}
    def reset(self):
        """Drop all memoized decoding results."""
        self.cache = {}
    def src2str(self, src_sentence):
        """Serialize a source tensor (or tuple of tensors) into a hashable cache key."""
        def tensor2str(tensor):
            return str(tensor.view(-1).cpu().numpy())
        if isinstance(src_sentence, torch.Tensor):
            return tensor2str(src_sentence)
        if isinstance(src_sentence, tuple) and all(
            isinstance(part, torch.Tensor) for part in src_sentence
        ):
            return str([tensor2str(part) for part in src_sentence])
        raise ValueError(src_sentence)
    def __call__(self, transducer, src_sentence):
        key = self.src2str(src_sentence)
        if key in self.cache:
            return self.cache[key]
        common_kwargs = dict(max_len=self.max_len, trg_bos=self.trg_bos, trg_eos=self.trg_eos)
        if self.type == Decode.greedy:
            output, attns = decode_greedy(transducer, src_sentence, **common_kwargs)
        elif self.type == Decode.beam:
            output, attns = decode_beam_search(
                transducer, src_sentence, nb_beam=self.beam_size, **common_kwargs
            )
        elif self.type == Decode.sample:
            output, attns = decode_sample(transducer, src_sentence, **common_kwargs)
        else:
            raise ValueError
        result = (output, None if self.skip_attn else attns)
        # don't cache sampling results
        if self.type == Decode.sample:
            return result
        self.cache[key] = result
        return self.cache[key]
def get_decode_fn(decode, max_len=100, beam_size=5):
    """Factory: build a `Decoder` for the given `Decode` strategy.

    :param decode: a `Decode` enum member (greedy / sample / beam)
    :param max_len: maximum number of decoding steps
    :param beam_size: beam width (only used by beam decoding)
    """
    return Decoder(decode, max_len=max_len, beam_size=beam_size)
def decode_sample(
    transducer, src_sentence, max_len=100, trg_bos=BOS_IDX, trg_eos=EOS_IDX
):
    """
    Sample one output sequence from the transducer, token by token.

    src_sentence: [seq_len]
    max_len: maximum number of decoding steps
    trg_bos / trg_eos: token indices used to start and stop decoding
    Returns (list of sampled token ids, list of per-step attention tensors).
    """
    assert not isinstance(transducer, HardMonoTransducer)
    if isinstance(transducer, HMMTransducer):
        # Bug fix: forward the caller-supplied BOS/EOS indices instead of always
        # re-using the module-level defaults (the old code passed trg_bos=BOS_IDX,
        # trg_eos=EOS_IDX, silently discarding any override).
        return decode_sample_hmm(
            transducer, src_sentence, max_len=max_len, trg_bos=trg_bos, trg_eos=trg_eos
        )
    transducer.eval()
    src_mask = dummy_mask(src_sentence)
    enc_hs = transducer.encode(src_sentence)
    output, attns = [], []
    hidden = transducer.dec_rnn.get_init_hx(1)
    input_ = torch.tensor([trg_bos], device=DEVICE)
    input_ = transducer.dropout(transducer.trg_embed(input_))
    for _ in range(max_len):
        word_logprob, hidden, attn = transducer.decode_step(
            enc_hs, src_mask, input_, hidden
        )
        # Sample the next token from the predicted distribution (stochastic decoding).
        word = Categorical(word_logprob.exp()).sample_n(1)[0]
        attns.append(attn)
        if word == trg_eos:
            break
        input_ = transducer.dropout(transducer.trg_embed(word))
        output.append(word.item())
    return output, attns
def decode_sample_hmm(
    transducer, src_sentence, max_len=100, trg_bos=BOS_IDX, trg_eos=EOS_IDX
):
    """Sample one output sequence from an HMM transducer.

    Maintains the HMM forward probabilities over source positions in log space,
    marginalizes them out to get a word distribution at each step, and samples
    the next token from it. Returns (token id list, per-step attention list).
    """
    transducer.eval()
    src_mask = dummy_mask(src_sentence)
    enc_hs = transducer.encode(src_sentence)
    T = src_mask.shape[0]  # number of source positions (HMM states)
    output, attns = [], []
    hidden = transducer.dec_rnn.get_init_hx(1)
    input_ = torch.tensor([trg_bos], device=DEVICE)
    input_ = transducer.dropout(transducer.trg_embed(input_))
    for idx in range(max_len):
        trans, emiss, hidden = transducer.decode_step(enc_hs, src_mask, input_, hidden)
        if idx == 0:
            # Initialize the forward recursion from the first row of the transition matrix.
            initial = trans[:, 0].unsqueeze(1)
            attns.append(initial)
            forward = initial
        else:
            attns.append(trans)
            # Log-space equivalent of the matrix product below (logsumexp replaces the sum):
            # forward = torch.bmm(forward, trans)
            forward = forward + trans.transpose(1, 2)
            forward = forward.logsumexp(dim=-1, keepdim=True).transpose(1, 2)
        # Marginalize source positions out of the emission scores, again in log space:
        # wordprob = torch.bmm(forward, emiss)
        log_wordprob = forward + emiss.transpose(1, 2)
        log_wordprob = log_wordprob.logsumexp(dim=-1)
        # Sample instead of argmax (cf. decode_greedy_hmm):
        # word = torch.max(log_wordprob, dim=-1)[1]
        word = Categorical(log_wordprob.exp()).sample_n(1)[0]
        if word == trg_eos:
            break
        input_ = transducer.dropout(transducer.trg_embed(word))
        output.append(word.item())
        # Condition the forward probabilities on the emitted word for the next step.
        word_idx = word.view(-1, 1).expand(1, T).unsqueeze(-1)
        word_emiss = torch.gather(emiss, -1, word_idx).view(1, 1, T)
        forward = forward + word_emiss
    return output, attns
def decode_greedy(
    transducer, src_sentence, max_len=100, trg_bos=BOS_IDX, trg_eos=EOS_IDX
):
    """
    Greedily decode one output sequence (argmax at every step).

    src_sentence: [seq_len]
    max_len: maximum number of decoding steps
    trg_bos / trg_eos: token indices used to start and stop decoding
    Dispatches to the model-specific routine for HardMono / HMM / Transformer
    models; otherwise runs the generic RNN decoding loop.
    Returns (list of token ids, list of per-step attention tensors).
    """
    # Bug fix in all three dispatches below: forward the caller-supplied BOS/EOS
    # indices instead of the module defaults (the old code passed trg_bos=BOS_IDX,
    # trg_eos=EOS_IDX, silently discarding any override).
    if isinstance(transducer, HardMonoTransducer):
        return decode_greedy_mono(
            transducer, src_sentence, max_len=max_len, trg_bos=trg_bos, trg_eos=trg_eos
        )
    if isinstance(transducer, HMMTransducer):
        return decode_greedy_hmm(
            transducer, src_sentence, max_len=max_len, trg_bos=trg_bos, trg_eos=trg_eos
        )
    if isinstance(transducer, Transformer):
        return decode_greedy_transformer(
            transducer, src_sentence, max_len=max_len, trg_bos=trg_bos, trg_eos=trg_eos
        )
    transducer.eval()
    src_mask = dummy_mask(src_sentence)
    enc_hs = transducer.encode(src_sentence)
    output, attns = [], []
    hidden = transducer.dec_rnn.get_init_hx(1)
    input_ = torch.tensor([trg_bos], device=DEVICE)
    input_ = transducer.dropout(transducer.trg_embed(input_))
    for _ in range(max_len):
        word_logprob, hidden, attn = transducer.decode_step(
            enc_hs, src_mask, input_, hidden
        )
        # Greedy choice: take the highest-probability token.
        word = torch.max(word_logprob, dim=1)[1]
        attns.append(attn)
        if word == trg_eos:
            break
        input_ = transducer.dropout(transducer.trg_embed(word))
        output.append(word.item())
    return output, attns
def decode_greedy_mono(
    transducer, src_sentence, max_len=100, trg_bos=BOS_IDX, trg_eos=EOS_IDX
):
    """
    src_sentence: [seq_len]
    Greedy decoding for the hard monotonic transducer: whenever the model emits
    the special STEP token, the hard attention position advances by one source
    position (clamped at the end of the source sequence).
    Returns (list of token ids, list of per-step attention tensors).
    """
    assert isinstance(transducer, HardMonoTransducer)
    attn_pos = 0  # current hard-attention position over the source
    transducer.eval()
    # src_sentence may be a (tensor, ...) tuple; the first element carries the sequence.
    if isinstance(src_sentence, tuple):
        seq_len = src_sentence[0].shape[0]
    else:
        seq_len = src_sentence.shape[0]
    src_mask = dummy_mask(src_sentence)
    enc_hs = transducer.encode(src_sentence)
    output, attns = [], []
    hidden = transducer.dec_rnn.get_init_hx(1)
    input_ = torch.tensor([trg_bos], device=DEVICE)
    input_ = transducer.dropout(transducer.trg_embed(input_))
    for _ in range(max_len):
        word_logprob, hidden, attn = transducer.decode_step(
            enc_hs, src_mask, input_, hidden, attn_pos
        )
        word = torch.max(word_logprob, dim=1)[1]
        attns.append(attn)
        if word == STEP_IDX:
            # STEP advances the monotonic attention; clamp at the last source position.
            attn_pos += 1
            if attn_pos == seq_len:
                attn_pos = seq_len - 1
        if word == trg_eos:
            break
        input_ = transducer.dropout(transducer.trg_embed(word))
        output.append(word.item())
    return output, attns
def decode_greedy_hmm(
    transducer, src_sentence, max_len=100, trg_bos=BOS_IDX, trg_eos=EOS_IDX
):
    """Greedy decoding for an HMM transducer.

    Identical forward recursion to `decode_sample_hmm` (log-space forward
    probabilities over source positions), but picks the argmax word at each
    step instead of sampling. Returns (token id list, attention list).
    """
    transducer.eval()
    src_mask = dummy_mask(src_sentence)
    enc_hs = transducer.encode(src_sentence)
    T = src_mask.shape[0]  # number of source positions (HMM states)
    output, attns = [], []
    hidden = transducer.dec_rnn.get_init_hx(1)
    input_ = torch.tensor([trg_bos], device=DEVICE)
    input_ = transducer.dropout(transducer.trg_embed(input_))
    for idx in range(max_len):
        trans, emiss, hidden = transducer.decode_step(enc_hs, src_mask, input_, hidden)
        if idx == 0:
            # Initialize the forward recursion from the first row of the transition matrix.
            initial = trans[:, 0].unsqueeze(1)
            attns.append(initial)
            forward = initial
        else:
            attns.append(trans)
            # Log-space equivalent of the matrix product below (logsumexp replaces the sum):
            # forward = torch.bmm(forward, trans)
            forward = forward + trans.transpose(1, 2)
            forward = forward.logsumexp(dim=-1, keepdim=True).transpose(1, 2)
        # Marginalize source positions out of the emission scores, again in log space:
        # wordprob = torch.bmm(forward, emiss)
        log_wordprob = forward + emiss.transpose(1, 2)
        log_wordprob = log_wordprob.logsumexp(dim=-1)
        # Greedy choice: highest marginal word probability.
        word = torch.max(log_wordprob, dim=-1)[1]
        if word == trg_eos:
            break
        input_ = transducer.dropout(transducer.trg_embed(word))
        output.append(word.item())
        # Condition the forward probabilities on the emitted word for the next step.
        word_idx = word.view(-1, 1).expand(1, T).unsqueeze(-1)
        word_emiss = torch.gather(emiss, -1, word_idx).view(1, 1, T)
        forward = forward + word_emiss
    return output, attns
def decode_greedy_transformer(
    transducer, src_sentence, max_len=100, trg_bos=BOS_IDX, trg_eos=EOS_IDX
):
    """
    src_sentence: [seq_len]

    Greedy decoding for a Transformer: re-run the decoder on the prefix
    generated so far and append the argmax symbol, until trg_eos appears
    or max_len steps have been taken.
    """
    assert isinstance(transducer, Transformer)
    transducer.eval()
    src_mask = (dummy_mask(src_sentence) == 0).transpose(0, 1)
    enc_hs = transducer.encode(src_sentence, src_mask)
    decoded, attns = [trg_bos], []
    for _ in range(max_len):
        prefix = torch.tensor(decoded, device=DEVICE).view(len(decoded), 1)
        trg_mask = (dummy_mask(prefix) == 0).transpose(0, 1)
        # Only the distribution at the final time step matters here.
        step_logprob = transducer.decode(enc_hs, src_mask, prefix, trg_mask)[-1]
        word = step_logprob.argmax(dim=1)
        if word == trg_eos:
            break
        decoded.append(word.item())
    return decoded[1:], attns
# Search state for beam decoding: cumulative log_prob over seq_len steps,
# the decoder hidden state and next input, the space-separated index string
# built so far, and the attention tensors collected along the way.
Beam = namedtuple("Beam", "seq_len log_prob hidden input partial_sent attn")
def decode_beam_search(
    transducer,
    src_sentence,
    max_len=50,
    nb_beam=5,
    norm=True,
    trg_bos=BOS_IDX,
    trg_eos=EOS_IDX,
):
    """Beam-search decode src_sentence with the given transducer.

    src_sentence: [seq_len]

    Dispatches to a specialized decoder for HardMonoTransducer,
    HMMTransducer and Transformer models; otherwise runs a generic RNN
    beam search.

    Args:
        transducer: the model to decode with.
        src_sentence: source tensor of shape [seq_len] (or a tuple).
        max_len: maximum number of decoding steps.
        nb_beam: beam width.
        norm: if True, rank hypotheses by length-normalized log-prob.
        trg_bos: begin-of-sequence index fed as the first decoder input.
        trg_eos: end-of-sequence index that finishes a hypothesis.

    Returns:
        (sequence, attns): the best decoded index list and its attention
        tensors.
    """
    if isinstance(transducer, HardMonoTransducer):
        # BUGFIX: forward the caller's trg_bos/trg_eos — previously the
        # module defaults were always passed, silently ignoring the
        # arguments given to this function.
        return decode_beam_mono(
            transducer,
            src_sentence,
            max_len=max_len,
            nb_beam=nb_beam,
            norm=norm,
            trg_bos=trg_bos,
            trg_eos=trg_eos,
        )
    if isinstance(transducer, HMMTransducer):
        return decode_beam_hmm(
            transducer,
            src_sentence,
            max_len=max_len,
            nb_beam=nb_beam,
            norm=norm,
            trg_bos=trg_bos,
            trg_eos=trg_eos,
        )
    if isinstance(transducer, Transformer):
        return decode_beam_transformer(
            transducer,
            src_sentence,
            max_len=max_len,
            nb_beam=nb_beam,
            norm=norm,
            trg_bos=trg_bos,
            trg_eos=trg_eos,
        )

    def score(beam):
        """Sort key: negated (optionally length-normalized) log-prob."""
        assert isinstance(beam, Beam)
        if norm:
            return -beam.log_prob / beam.seq_len
        return -beam.log_prob

    transducer.eval()
    src_mask = dummy_mask(src_sentence)
    enc_hs = transducer.encode(src_sentence)
    hidden = transducer.dec_rnn.get_init_hx(1)
    input_ = torch.tensor([trg_bos], device=DEVICE)
    input_ = transducer.dropout(transducer.trg_embed(input_))
    start = Beam(1, 0, hidden, input_, "", [])
    beams = [start]
    finish_beams = []
    for _ in range(max_len):
        next_beams = []
        for beam in sorted(beams, key=score)[:nb_beam]:
            word_logprob, hidden, attn = transducer.decode_step(
                enc_hs, src_mask, beam.input, beam.hidden
            )
            topk_log_prob, topk_word = word_logprob.topk(nb_beam)
            topk_log_prob = topk_log_prob.view(nb_beam, 1)
            topk_word = topk_word.view(nb_beam, 1)
            for log_prob, word in zip(topk_log_prob, topk_word):
                if word == trg_eos:
                    # Finished hypothesis: freeze it (no decoder state kept).
                    new_beam = Beam(
                        beam.seq_len + 1,
                        beam.log_prob + log_prob.item(),
                        None,
                        None,
                        beam.partial_sent,
                        beam.attn + [attn],
                    )
                    finish_beams.append(new_beam)
                else:
                    new_beam = Beam(
                        beam.seq_len + 1,
                        beam.log_prob + log_prob.item(),
                        hidden,
                        transducer.dropout(transducer.trg_embed(word)),
                        " ".join([beam.partial_sent, str(word.item())]),
                        beam.attn + [attn],
                    )
                    next_beams.append(new_beam)
        beams = next_beams
        # If nothing has finished yet, fall back to the live beams so a
        # best hypothesis always exists after the loop.
        finish_beams = finish_beams if finish_beams else next_beams
    max_output = sorted(finish_beams, key=score)[0]
    return list(map(int, max_output.partial_sent.split())), max_output.attn
def decode_beam_transformer(
    transducer,
    src_sentence,
    max_len=50,
    nb_beam=5,
    norm=True,
    trg_bos=BOS_IDX,
    trg_eos=EOS_IDX,
):
    """
    src_sentence: [seq_len]

    Beam search for a Transformer: each hypothesis carries its full
    decoded prefix in Beam.input and is re-decoded from scratch every
    step (no recurrent state).
    """
    assert isinstance(transducer, Transformer)

    def rank(candidate):
        """Sort key: negated (optionally length-normalized) log-prob."""
        assert isinstance(candidate, Beam)
        if norm:
            return -candidate.log_prob / candidate.seq_len
        return -candidate.log_prob

    transducer.eval()
    src_mask = (dummy_mask(src_sentence) == 0).transpose(0, 1)
    enc_hs = transducer.encode(src_sentence, src_mask)
    seed_input = torch.tensor([trg_bos], device=DEVICE).view(1, 1)
    live = [Beam(1, 0, None, seed_input, "", None)]
    done = []
    for _ in range(max_len):
        expanded = []
        for cand in sorted(live, key=rank)[:nb_beam]:
            trg_mask = (dummy_mask(cand.input) == 0).transpose(0, 1)
            # Only the distribution at the final time step is expanded.
            step_logprob = transducer.decode(
                enc_hs, src_mask, cand.input, trg_mask
            )[-1]
            top_lp, top_word = step_logprob.topk(nb_beam)
            top_lp = top_lp.view(nb_beam, 1)
            top_word = top_word.view(nb_beam, 1)
            for lp, word in zip(top_lp, top_word):
                if word == trg_eos:
                    done.append(
                        Beam(
                            cand.seq_len + 1,
                            cand.log_prob + lp.item(),
                            None,
                            None,
                            cand.partial_sent,
                            None,
                        )
                    )
                else:
                    expanded.append(
                        Beam(
                            cand.seq_len + 1,
                            cand.log_prob + lp.item(),
                            None,
                            torch.cat((cand.input, word.view(1, 1))),
                            " ".join([cand.partial_sent, str(word.item())]),
                            None,
                        )
                    )
        live = expanded
        # Fall back to live beams until at least one hypothesis finishes.
        done = done if done else expanded
    best = min(done, key=rank)
    return list(map(int, best.partial_sent.split())), []
# Beam state for the hard monotonic transducer: the same fields as Beam
# plus attn_pos, the source position the monotonic attention currently
# points at.
BeamHard = namedtuple(
    "BeamHard", "seq_len log_prob hidden input partial_sent attn attn_pos"
)
def decode_beam_mono(
transducer,
src_sentence,
max_len=50,
nb_beam=5,
norm=True,
trg_bos=BOS_IDX,
trg_eos=EOS_IDX,
):
assert isinstance(transducer, HardMonoTransducer)
def score(beam):
"""
compute score based on logprob
"""
assert isinstance(beam, BeamHard)
if norm:
return -beam.log_prob / beam.seq_len
return -beam.log_prob
transducer.eval()
if isinstance(src_sentence, tuple):
seq_len = src_sentence[0].shape[0]
else:
seq_len = src_sentence.shape[0]
src_mask = dummy_mask(src_sentence)
enc_hs = transducer.encode(src_sentence)
hidden = transducer.dec_rnn.get_init_hx(1)
input_ = torch.tensor([trg_bos], device=DEVICE)
input_ = transducer.dropout(transducer.trg_embed(input_))
start = BeamHard(1, 0, hidden, input_, "", [], 0)
beams = [start]
finish_beams = []
for _ in range(max_len):
next_beams = []
for beam in sorted(beams, key=score)[:nb_beam]:
word_logprob, hidden, attn = transducer.decode_step(
enc_hs, | |
"Charleston",
"ru_RU": "Чарльстон"
},
"CHARLOTTETOWN": {
"de_DE": "Charlottetown",
"es_ES": "Charlottetown",
"fr_FR": "Charlottetown",
"it_IT": "Charlottetown",
"ja_JP": "シャーロットタウン",
"ko_KR": "샬럿타운",
"pl_PL": "Charlottetown",
"pt_BR": "Charlottetown",
"ru_RU": "Шарлоттаун"
},
"CHARTES": {
"de_DE": "Chartres",
"es_ES": "Chartres",
"fr_FR": "Chartres",
"it_IT": "Chartres",
"ja_JP": "シャルトル",
"ko_KR": "샤르트르",
"pl_PL": "Chartres",
"pt_BR": "Chartres",
"ru_RU": "Шартр"
},
"CHARTRES": {
"de_DE": "Chartres",
"es_ES": "Chartres",
"fr_FR": "Chartres",
"it_IT": "Chartres",
"ja_JP": "シャルトル",
"ko_KR": "샤르트르",
"pl_PL": "Chartres",
"pt_BR": "Chartres",
"ru_RU": "Шартр"
},
"CHATHAM": {
"de_DE": "Chatham",
"es_ES": "Chatham",
"fr_FR": "Chatham",
"it_IT": "Chatham",
"ja_JP": "チャタム",
"ko_KR": "채텀",
"pl_PL": "Chatham",
"pt_BR": "Chatham",
"ru_RU": "Чатем"
},
"CHATURMUKHA": {
"de_DE": "Chaturmukha",
"es_ES": "Chaturmukha",
"fr_FR": "Chaturmukha",
"it_IT": "Chaturmukha",
"ja_JP": "チャトルムカ",
"ko_KR": "차투르무카",
"pl_PL": "Czaturmukha",
"pt_BR": "Chaturmukha",
"ru_RU": "Чатурмукха"
},
"CHAU_DOC": {
"de_DE": "Châu Đốc",
"es_ES": "Châu Đốc",
"fr_FR": "Châu Đốc",
"it_IT": "Châu Đốc",
"ja_JP": "チャウドック",
"ko_KR": "짜우적",
"pl_PL": "Châu Đốc",
"pt_BR": "Chau Doc",
"ru_RU": "Тяудок"
},
"CHAVES": {
"de_DE": "Chaves",
"es_ES": "Chaves",
"fr_FR": "Chaves",
"it_IT": "Chaves",
"ja_JP": "シャヴェス",
"ko_KR": "차베스",
"pl_PL": "Chaves",
"pt_BR": "Chaves",
"ru_RU": "Шавиш"
},
"CHEMAWAWIN": {
"de_DE": "Chemawawin",
"es_ES": "Chemawawin",
"fr_FR": "Chemawawin",
"it_IT": "Chemawawin",
"ja_JP": "チェマワウィン",
"ko_KR": "체마와윈",
"pl_PL": "Czemawawin",
"pt_BR": "Chemawawin",
"ru_RU": "Чемававин"
},
"CHEN": {
"de_DE": "Chen",
"es_ES": "Chen",
"fr_FR": "Chen",
"it_IT": "Chen",
"ja_JP": "陳",
"ko_KR": "첸",
"pl_PL": "Chen",
"pt_BR": "Chen",
"ru_RU": "Чэнь"
},
"CHENCHA": {
"de_DE": "Chencha",
"es_ES": "Chencha",
"fr_FR": "Chencha",
"it_IT": "Chencha",
"ja_JP": "チェンチャ",
"ko_KR": "첸차",
"pl_PL": "Chencha",
"pt_BR": "Chencha",
"ru_RU": "Ченча"
},
"CHENGDU": {
"de_DE": "Chengdu",
"es_ES": "Chengdu",
"fr_FR": "Chengdu",
"it_IT": "Chengdu",
"ja_JP": "成都",
"ko_KR": "청두",
"pl_PL": "Chengdu",
"pt_BR": "Chengdu",
"ru_RU": "Чэнду"
},
"CHENNAI": {
"de_DE": "Chennai",
"es_ES": "Chennai",
"fr_FR": "Chennai",
"it_IT": "Chennai",
"ja_JP": "チェンナイ",
"ko_KR": "첸나이",
"pl_PL": "Chennai",
"pt_BR": "Chennai",
"ru_RU": "Ченнаи"
},
"CHEONGJU": {
"de_DE": "Cheongju",
"es_ES": "Cheongju",
"fr_FR": "Cheongju",
"it_IT": "Cheongju",
"ja_JP": "清州",
"ko_KR": "청주",
"pl_PL": "Czeongju",
"pt_BR": "Cheongju",
"ru_RU": "Чхонджу"
},
"CHERSON": {
"de_DE": "Cherson",
"es_ES": "Cherson",
"fr_FR": "Kherson",
"it_IT": "Cherson",
"ja_JP": "ケルソン",
"ko_KR": "체르손",
"pl_PL": "Cherson",
"pt_BR": "Cherson",
"ru_RU": "Херсонес"
},
"CHERTOMLYK": {
"de_DE": "Chertomlyk",
"es_ES": "Chertomlyk",
"fr_FR": "Tchortomlyk",
"it_IT": "Chertomlyk",
"ja_JP": "チェルトムリク",
"ko_KR": "체르토믈리크",
"pl_PL": "Czertomłyk",
"pt_BR": "Chertomlyk",
"ru_RU": "Чертомлык"
},
"CHICAGO": {
"de_DE": "Chicago",
"es_ES": "Chicago",
"fr_FR": "Chicago",
"it_IT": "Chicago",
"ja_JP": "シカゴ",
"ko_KR": "시카고",
"pl_PL": "Chicago",
"pt_BR": "Chicago",
"ru_RU": "Чикаго"
},
"CHICHEN_ITZA": {
"de_DE": "Chichén Itzá",
"es_ES": "Chichen Itzá",
"fr_FR": "Chichén Itzá",
"it_IT": "Chichén Itzá",
"ja_JP": "チチェン・イツァ",
"ko_KR": "치첸 이트사",
"pl_PL": "Chichen Itza",
"pt_BR": "Chichén Itzá",
"ru_RU": "Чичен-Ица"
},
"CHINGUETTI": {
"de_DE": "Chinguetti",
"es_ES": "Chinguetti",
"fr_FR": "Chinguetti",
"it_IT": "Chinguetti",
"ja_JP": "シンゲッティ",
"ko_KR": "싱게티",
"pl_PL": "Szinkit",
"pt_BR": "Chinguetti",
"ru_RU": "Шингетти"
},
"CHINKULTIC": {
"de_DE": "Chinkultic",
"es_ES": "Chinkultic",
"fr_FR": "Chinkultic",
"it_IT": "Chinkultic",
"ja_JP": "チンクルティック",
"ko_KR": "칭쿨틱",
"pl_PL": "Chinkultic",
"pt_BR": "Chinkultic",
"ru_RU": "Чинкультик"
},
"CHIOS": {
"de_DE": "Chios",
"es_ES": "Quíos",
"fr_FR": "Chios",
"it_IT": "Chio",
"ja_JP": "ヒオス",
"ko_KR": "키오스",
"pl_PL": "Chios",
"pt_BR": "Chios",
"ru_RU": "Хиос"
},
"CHIQUITOY": {
"de_DE": "Chiquitoy",
"es_ES": "Chiquitoy",
"fr_FR": "Chiquitoy",
"it_IT": "Chiquitoy",
"ja_JP": "チキトイ",
"ko_KR": "치키토이",
"pl_PL": "Chiquitoy",
"pt_BR": "Chiquitoy",
"ru_RU": "Чикитой"
},
"CHOIR": {
"de_DE": "Tschoir",
"es_ES": "Choir",
"fr_FR": "Choyr",
"it_IT": "Choir",
"ja_JP": "クワイア",
"ko_KR": "처이르",
"pl_PL": "Czojr",
"pt_BR": "Choir",
"ru_RU": "Хор"
},
"CHOL_CHOL_MAPU": {
"de_DE": "Chol Chol Mapu",
"es_ES": "Chol Chol Mapu",
"fr_FR": "Chol Chol Mapu",
"it_IT": "Chol Chol Mapu",
"ja_JP": "チョル・チョル・マプ",
"ko_KR": "철철 마푸",
"pl_PL": "Czol Czol Mapu",
"pt_BR": "Chol Chol Mapu",
"ru_RU": "Чол-Чол-Мапу"
},
"CHUCUITO": {
"de_DE": "Chucuito",
"es_ES": "Chucuito",
"fr_FR": "Chucuito",
"it_IT": "Chucuito",
"ja_JP": "チュクイト",
"ko_KR": "추쿠이토",
"pl_PL": "Chucuito",
"pt_BR": "Chucuito",
"ru_RU": "Чукуито"
},
"CHUNCHEON": {
"de_DE": "Chuncheon",
"es_ES": "Chuncheon",
"fr_FR": "Chuncheon",
"it_IT": "Chuncheon",
"ja_JP": "春川",
"ko_KR": "춘천",
"pl_PL": "Chuncheon",
"pt_BR": "Chuncheon",
"ru_RU": "Чхунчхон"
},
"CHUQUIABO": {
"de_DE": "Chuquiabo",
"es_ES": "Chuquiabo",
"fr_FR": "Chuquiabo",
"it_IT": "Chuquiabo",
"ja_JP": "チュキアボ",
"ko_KR": "추키아보",
"pl_PL": "Chuquiabo",
"pt_BR": "Chuquiabo",
"ru_RU": "Чукиабо"
},
"CIDADE_VELHA": {
"de_DE": "Cidade Velha",
"es_ES": "Cidade Velha",
"fr_FR": "Cidade Velha",
"it_IT": "Cidade Velha",
"ja_JP": "シダーデ・ヴェーリャ",
"ko_KR": "시다데 벨라",
"pl_PL": "Cidade Velha",
"pt_BR": "Cidade Velha",
"ru_RU": "Сидади-Велья"
},
"CINCINNATI": {
"de_DE": "Cincinnati",
"es_ES": "Cincinnati",
"fr_FR": "Cincinnati",
"it_IT": "Cincinnati",
"ja_JP": "シンシナティ",
"ko_KR": "신시내티",
"pl_PL": "Cincinnati",
"pt_BR": "Cincinnati",
"ru_RU": "Цинциннати"
},
"CLACKMANNAN": {
"de_DE": "Clackmannan",
"es_ES": "Clackmannan",
"fr_FR": "Clackmannan",
"it_IT": "Clackmannan",
"ja_JP": "クラックマナン",
"ko_KR": "클라크매넌",
"pl_PL": "Clackmannan",
"pt_BR": "Clackmannan",
"ru_RU": "Клэкманнан"
},
"CLEVELAND": {
"de_DE": "Cleveland",
"es_ES": "Cleveland",
"fr_FR": "Cleveland",
"it_IT": "Cleveland",
"ja_JP": "クリーブランド",
"ko_KR": "클리블랜드",
"pl_PL": "Cleveland",
"pt_BR": "Cleveland",
"ru_RU": "Кливленд"
},
"COBA": {
"de_DE": "Cobá",
"es_ES": "Coba",
"fr_FR": "Cobá",
"it_IT": "Coba",
"ja_JP": "コバー",
"ko_KR": "코바",
"pl_PL": "Coba",
"pt_BR": "Coba",
"ru_RU": "Коба"
},
"COFFS_HARBOUR": {
"de_DE": "Coffs Harbour",
"es_ES": "Coffs Harbour",
"fr_FR": "Coffs Harbour",
"it_IT": "Coffs Harbour",
"ja_JP": "コフスハーバー",
"ko_KR": "코프스하버",
"pl_PL": "Coffs Harbour",
"pt_BR": "Coffs Harbour",
"ru_RU": "Кофс-Харбор"
},
"COIMBRA": {
"de_DE": "Coimbra",
"es_ES": "Coímbra",
"fr_FR": "Coïmbre",
"it_IT": "Coimbra",
"ja_JP": "コインブラ",
"ko_KR": "코임브라",
"pl_PL": "Coimbra",
"pt_BR": "Coimbra",
"ru_RU": "Коимбра"
},
"COLMAR": {
"de_DE": "Colmar",
"es_ES": "Colmar",
"fr_FR": "Colmar",
"it_IT": "Colmar",
"ja_JP": "コルマール",
"ko_KR": "콜마르",
"pl_PL": "Colmar",
"pt_BR": "Colmar",
"ru_RU": "Кольмар"
},
"COLOGNE": {
"de_DE": "Köln",
"es_ES": "Colonia",
"fr_FR": "Cologne",
"it_IT": "Colonia",
"ja_JP": "コロン",
"ko_KR": "퀼른",
"pl_PL": "Kolonia",
"pt_BR": "Colônia",
"ru_RU": "Кельн"
},
"COLONIA_AGRIPPINA": {
"de_DE": "Colonia Agrippina",
"es_ES": "Colonia Agrippina",
"fr_FR": "Colonia Agrippina",
"it_IT": "Colonia Agrippina",
"ja_JP": "コロニア・アグリッピナ",
"ko_KR": "콜로니아아그리피나",
"pl_PL": "Colonia Agrippina",
"pt_BR": "Colônia Agripina",
"ru_RU": "Колония Агриппины"
},
"COMALCALCO": {
"de_DE": "Comalcalco",
"es_ES": "Comalcalco",
"fr_FR": "Comalcalco",
"it_IT": "Comalcalco",
"ja_JP": "コマルカルコ",
"ko_KR": "코말칼코",
"pl_PL": "Comalcalco",
"pt_BR": "Comalcalco",
"ru_RU": "Комалькалько"
},
"CONDATE": {
"de_DE": "Condate",
"es_ES": "Condate",
"fr_FR": "Condate",
"it_IT": "Condate",
"ja_JP": "コンダテ",
"ko_KR": "콘다테",
"pl_PL": "Condate",
"pt_BR": "Condate",
"ru_RU": "Кондат"
},
"CONSTANTINOPLE": {
"de_DE": "Konstantinopel",
"es_ES": "Constantinopla",
"fr_FR": "Constantinople",
"it_IT": "Costantinopoli",
"ja_JP": "コンスタンティノープル",
"ko_KR": "콘스탄티노플",
"pl_PL": "Konstantynopol",
"pt_BR": "Constantinopla",
"ru_RU": "Константинополь"
},
"CONTAGEM": {
"de_DE": "Contagem",
"es_ES": "Contagem",
"fr_FR": "Contagem",
"it_IT": "Contagem",
"ja_JP": "コンタジェム",
"ko_KR": "콘타젱",
"pl_PL": "Contagem",
"pt_BR": "Contagem",
"ru_RU": "Контажен"
},
"COPAN": {
"de_DE": "Copán",
"es_ES": "Copán",
"fr_FR": "Copán",
"it_IT": "Copan",
"ja_JP": "コパン",
"ko_KR": "코판",
"pl_PL": "Copan",
"pt_BR": "Copan",
"ru_RU": "Копан"
},
"CORDOBA": {
"de_DE": "Córdoba",
"es_ES": "Córdoba",
"fr_FR": "Cordoue",
"it_IT": "Córdoba",
"ja_JP": "コルドバ",
"ko_KR": "코르도바",
"pl_PL": "Kordoba",
"pt_BR": "Córdoba",
"ru_RU": "Кордова"
},
"CORINTH": {
"de_DE": "Korinth",
"es_ES": "Corinto",
"fr_FR": "Corinthe",
"it_IT": "Corinto",
"ja_JP": "コリント",
"ko_KR": "코린트",
"pl_PL": "Korynt",
"pt_BR": "Corinto",
"ru_RU": "Коринф"
},
"COVENTRY": {
"de_DE": "Coventry",
"es_ES": "Coventry",
"fr_FR": "Coventry",
"it_IT": "Coventry",
"ja_JP": "コベントリ",
"ko_KR": "코번트리",
"pl_PL": "Coventry",
"pt_BR": "Coventry",
"ru_RU": "Ковентри"
},
"COYOTEPEC": {
"de_DE": "Coyotepec",
"es_ES": "Coyotepec",
"fr_FR": "Coyotepec",
"it_IT": "Coyotepec",
"ja_JP": "コヨテペック",
"ko_KR": "코요테팩",
"pl_PL": "Coyotepec",
"pt_BR": "Coyotepec",
"ru_RU": "Койотепек"
},
"CROCIATONUM": {
"de_DE": "Crociatonum",
"es_ES": "Crociatonum",
"fr_FR": "Crociatonum",
"it_IT": "Crociatonum",
"ja_JP": "クロキアトヌム",
"ko_KR": "크로시아토눔",
"pl_PL": "Crociatonum",
"pt_BR": "Crociatonum",
"ru_RU": "Кроциатонум"
},
"CSESZNEK": {
"de_DE": "Csesznek",
"es_ES": "Csesznek",
"fr_FR": "Csesznek",
"it_IT": "Csesznek",
"ja_JP": "テスネク",
"ko_KR": "크제스츠네크",
"pl_PL": "Csesznek",
"pt_BR": "Csesznek",
"ru_RU": "Чеснек"
},
"CUCUTA": {
"de_DE": "Cúcuta",
"es_ES": "Cúcuta",
"fr_FR": "Cúcuta",
"it_IT": "Cúcuta",
"ja_JP": "ククタ",
"ko_KR": "쿠쿠타",
"pl_PL": "Cúcuta",
"pt_BR": "Cúcuta",
"ru_RU": "Кукута"
},
"CUENCA": {
"de_DE": "Cuenca",
"es_ES": "La Cuenca",
"fr_FR": "Cuenca",
"it_IT": "Cuenca",
"ja_JP": "クエンカ",
"ko_KR": "쿠엥카",
"pl_PL": "Cuenca",
"pt_BR": "Cuenca",
"ru_RU": "Куэнка"
},
"CUIABA": {
"de_DE": "Cuiabá",
"es_ES": "Cuiabá",
"fr_FR": "Cuiabá",
"it_IT": "Cuiabá",
"ja_JP": "クイアバ",
"ko_KR": "쿠이아바",
"pl_PL": "Cuiaba",
"pt_BR": "Cuiabá",
"ru_RU": "Куяба"
},
"CULLEN": {
"de_DE": "Cullen",
"es_ES": "Cullen",
"fr_FR": "Cullen",
"it_IT": "Cullen",
"ja_JP": "カレン",
"ko_KR": "컬렌",
"pl_PL": "Cullen",
"pt_BR": "Cullen",
"ru_RU": "Каллен"
},
"CUMAE": {
"de_DE": "Cumae",
"es_ES": "Cumas",
"fr_FR": "Cumae",
"it_IT": "Cuma",
"ja_JP": "クマエ",
"ko_KR": "쿠마이",
"pl_PL": "Cumae",
"pt_BR": "Cumae",
"ru_RU": "Кумы"
},
"CUMANA": {
"de_DE": "Cumaná",
"es_ES": "Cumaná",
"fr_FR": "Cumaná",
"it_IT": "Cumaná",
"ja_JP": "クマナ",
"ko_KR": "쿠마나",
"pl_PL": "Cumaná",
"pt_BR": "Cumaná",
"ru_RU": "Кумана"
},
"CUMBERNAULD": {
"de_DE": "Cumbernauld",
"es_ES": "Cumbernauld",
"fr_FR": "Cumbernauld",
"it_IT": "Cumbernauld",
"ja_JP": "カンバーノールド",
"ko_KR": "컴버놀드",
"pl_PL": "Cumbernauld",
"pt_BR": "Cumbernauld",
"ru_RU": "Камберналд"
},
"CUPAR": {
"de_DE": "Cupar",
"es_ES": "Cupar",
"fr_FR": "Cupar",
"it_IT": "Cupar",
"ja_JP": "クーパー",
"ko_KR": "쿠퍼",
"pl_PL": "Cupar",
"pt_BR": "Cupar",
"ru_RU": "Капар"
},
"CURACAO": {
"de_DE": "Curaçao",
"es_ES": "Curazao",
"fr_FR": "Curaçao",
"it_IT": "Curacao",
"ja_JP": "キュラソー",
"ko_KR": "퀴라소",
"pl_PL": "Curacao",
"pt_BR": "Curaçao",
"ru_RU": "Кюрасао"
},
"CURITIBA": {
"de_DE": "Curitiba",
"es_ES": "Curitiba",
"fr_FR": "Curitiba",
"it_IT": "Curitiba",
"ja_JP": "クリティバ",
"ko_KR": "쿠리티바",
"pl_PL": "Kurytyba",
"pt_BR": "Curitiba",
"ru_RU": "Куритиба"
},
"CUTTACK": {
"de_DE": "Cuttack",
"es_ES": "Cuttack",
"fr_FR": "Cuttack",
"it_IT": "Cuttack",
"ja_JP": "カタック",
"ko_KR": "쿠타크",
"pl_PL": "Cuttack",
"pt_BR": "Cuttack",
"ru_RU": "Каттак"
},
"CYRA": {
"de_DE": "Cyra",
"es_ES": "Cyra",
"fr_FR": "Cyra",
"it_IT": "Cyra",
"ja_JP": "サイラ",
"ko_KR": "싸이라",
"pl_PL": "Cyra",
"pt_BR": | |
<reponame>khromiumos/chromiumos-chromite
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run lint checks on the specified files."""
from __future__ import print_function
import errno
import functools
import json
import multiprocessing
import os
import re
import sys
from six.moves import urllib
from chromite.lib import constants
from chromite.cli import command
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import parallel
# Fail fast on unsupported interpreters instead of with obscure errors later.
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'

# Extract a script's shebang: the interpreter path plus an optional single
# argument (e.g. b'#!/usr/bin/env python3').
SHEBANG_RE = re.compile(br'^#!\s*([^\s]+)(\s+([^\s]+))?')
def _GetProjectPath(path):
    """Find the absolute path of the git checkout that contains |path|."""
    if not git.FindRepoCheckoutRoot(path):
        # Maybe they're running on a file outside of a checkout.
        # e.g. cros lint ~/foo.py /tmp/test.py
        return os.path.dirname(path)
    manifest = git.ManifestCheckout.Cached(path)
    return manifest.FindCheckoutFromPath(path).GetPath(absolute=True)
def _GetPylintrc(path):
    """Locate pylintrc or .pylintrc file that applies to |path|.

    Walks up from |path|'s directory to the enclosing project root,
    preferring the closest config.  If none is found, falls back to
    chromite's default pylintrc.

    Args:
      path: The file being linted.

    Returns:
      The path to the pylintrc file to use.
    """
    path = os.path.realpath(path)
    project_path = _GetProjectPath(path)
    parent = os.path.dirname(path)

    def _in_project(directory):
        # BUGFIX: compare with a trailing separator so a sibling such as
        # /foo/bar-baz is not mistaken for a subdirectory of /foo/bar
        # (a plain startswith() is a string-prefix test, not a path test).
        return directory == project_path or directory.startswith(
            project_path.rstrip(os.sep) + os.sep)

    while project_path and _in_project(parent):
        pylintrc = os.path.join(parent, 'pylintrc')
        dotpylintrc = os.path.join(parent, '.pylintrc')
        # Only allow one of these to exist to avoid confusing which one is used.
        if os.path.isfile(pylintrc) and os.path.isfile(dotpylintrc):
            cros_build_lib.Die('%s: Only one of "pylintrc" or ".pylintrc" is allowed',
                               parent)
        if os.path.isfile(pylintrc):
            return pylintrc
        if os.path.isfile(dotpylintrc):
            return dotpylintrc
        parent = os.path.dirname(parent)
    return os.path.join(constants.SOURCE_ROOT, 'chromite', 'pylintrc')
def _GetPylintGroups(paths):
    """Return a dictionary mapping pylintrc files to lists of paths."""
    groups = {}
    for file_path in paths:
        rc_file = _GetPylintrc(file_path)
        if not rc_file:
            continue
        groups.setdefault(rc_file, []).append(file_path)
    return groups
def _GetPythonPath(paths):
    """Return the set of Python library paths to use."""
    # Carry through custom PYTHONPATH that the host env has set.
    result = os.environ.get('PYTHONPATH', '').split(os.pathsep)
    result += [
        # Add the Portage installation inside the chroot to the Python path.
        # This ensures that scripts that need to import portage can do so.
        os.path.join(constants.SOURCE_ROOT, 'chroot', 'usr', 'lib', 'portage',
                     'pym'),
        # Allow platform projects to be imported by name (e.g. crostestutils).
        os.path.join(constants.SOURCE_ROOT, 'src', 'platform'),
        # Ideally we'd modify meta_path in pylint to handle our virtual chromite
        # module, but that's not possible currently. We'll have to deal with
        # that at some point if we want `cros lint` to work when the dir is not
        # named 'chromite'.
        constants.SOURCE_ROOT,
    ]
    # Also allow scripts to import from their current directory.
    result += list({os.path.dirname(x) for x in paths})
    return result
# The mapping between the "cros lint" --output-format flag and cpplint.py
# --output flag.  Both "colorized" and "parseable" fall back to cpplint's
# emacs-style output.
CPPLINT_OUTPUT_FORMAT_MAP = {
    'colorized': 'emacs',
    'msvs': 'vs7',
    'parseable': 'emacs',
}

# The mapping between the "cros lint" --output-format flag and shellcheck
# flags.
# Note that the msvs mapping here isn't quite VS format, but it's closer than
# the default output.
SHLINT_OUTPUT_FORMAT_MAP = {
    'colorized': ['--color=always'],
    'msvs': ['--format=gcc'],
    'parseable': ['--format=gcc'],
}
def _LinterRunCommand(cmd, debug, **kwargs):
    """Run the linter with common run args set as higher levels expect.

    Args:
      cmd: The linter command line (argv list).
      debug: Whether to print the command being run.
      **kwargs: Passed through to cros_build_lib.run; must not repeat the
        arguments fixed here (check / print_cmd / debug_level).

    Returns:
      The CommandResult; never raises on non-zero exit (check=False).
    """
    return cros_build_lib.run(cmd, check=False, print_cmd=debug,
                              debug_level=logging.NOTICE, **kwargs)
def _WhiteSpaceLintData(path, data):
"""Run basic whitespace checks on |data|.
Args:
path: The name of the file (for diagnostics).
data: The file content to lint.
Returns:
True if everything passed.
"""
ret = True
# Make sure files all have a trailing newline.
if not data.endswith('\n'):
ret = False
logging.warning('%s: file needs a trailing newline', path)
# Disallow leading & trailing blank lines.
if data.startswith('\n'):
ret = False
logging.warning('%s: delete leading blank lines', path)
if data.endswith('\n\n'):
ret = False
logging.warning('%s: delete trailing blank lines', path)
for i, line in enumerate(data.splitlines(), start=1):
if line.rstrip() != line:
ret = False
logging.warning('%s:%i: trim trailing whitespace: %s', path, i, line)
return ret
def _CpplintFile(path, output_format, debug):
    """Returns result of running cpplint on |path|."""
    cmd = [os.path.join(constants.DEPOT_TOOLS_DIR, 'cpplint.py')]
    if output_format != 'default':
        cmd += ['--output=' + CPPLINT_OUTPUT_FORMAT_MAP[output_format]]
    cmd += [path]
    return _LinterRunCommand(cmd, debug)
def _PylintFile(path, output_format, debug, interp):
    """Returns result of running pylint on |path|.

    Args:
      path: The Python file to lint.
      output_format: 'default' or a pylint --output-format value.
      debug: Whether to print the command being run.
      interp: Which interpreter flavor to lint for ('python2'/'python3').

    Returns:
      The CommandResult of the pylint invocation.
    """
    # vpython3 isn't actually Python 3. But maybe it will be someday.
    if interp != 'python3':
        vpython = os.path.join(constants.DEPOT_TOOLS_DIR, 'vpython')
    else:
        vpython = os.path.join(constants.DEPOT_TOOLS_DIR, 'vpython3')
    # Chromite's pylint-2 wrapper is always used.  (A dead assignment to
    # the depot_tools pylint-1.9 wrapper used to precede this and was
    # immediately overwritten; it has been removed.)
    pylint = os.path.join(constants.CHROMITE_DIR, 'cli', 'cros', 'pylint-2')
    pylintrc = _GetPylintrc(path)
    cmd = [vpython, pylint, '--rcfile=%s' % pylintrc]
    if interp == 'python3':
        cmd += ['--disable=old-division']
    if output_format != 'default':
        cmd.append('--output-format=%s' % output_format)
    cmd.append(path)
    extra_env = {
        # When inside the SDK, Gentoo's python wrappers (i.e. `python`, `python2`,
        # and `python3`) will select a version based on $EPYTHON first. Make sure
        # we run through the right Python version when switching.
        # We can drop this once we are Python 3-only.
        'EPYTHON': interp,
        'PYTHONPATH': ':'.join(_GetPythonPath([path])),
    }
    return _LinterRunCommand(cmd, debug, extra_env=extra_env)
def _Pylint2File(path, output_format, debug):
    """Returns result of running pylint via python2 on |path|."""
    # Thin wrapper: fixes the interp argument of _PylintFile.
    return _PylintFile(path, output_format, debug, 'python2')
def _Pylint3File(path, output_format, debug):
    """Returns result of running pylint via python3 on |path|."""
    # Thin wrapper: fixes the interp argument of _PylintFile.
    return _PylintFile(path, output_format, debug, 'python3')
def _Pylint23File(path, output_format, debug):
    """Returns result of running pylint via python2 & python3 on |path|."""
    # Always run both so both sets of diagnostics are emitted.
    py2_result = _Pylint2File(path, output_format, debug)
    py3_result = _Pylint3File(path, output_format, debug)
    # Caller only checks returncode atm.
    if py2_result.returncode:
        return py2_result
    return py3_result
def _PylintProbeFile(path, output_format, debug):
    """Returns result of running pylint based on the interp."""
    try:
        with open(path, 'rb') as fp:
            head = fp.read(128)
    except FileNotFoundError:
        # Missing file: fall through to the default below.
        head = b''
    if head.startswith(b'#!'):
        # Check the most specific marker first: b'python' would also match
        # python2/python3 shebangs.
        if b'python3' in head:
            return _Pylint3File(path, output_format, debug)
        if b'python2' in head:
            return _Pylint2File(path, output_format, debug)
        if b'python' in head:
            return _Pylint23File(path, output_format, debug)
    # TODO(vapier): Change the unknown default to Python 2+3 compat.
    return _Pylint2File(path, output_format, debug)
def _GolintFile(path, _, debug):
    """Returns result of running golint on |path|."""
    try:
        # Try using golint if it exists.
        return _LinterRunCommand(['golint', '-set_exit_status', path], debug)
    except cros_build_lib.RunCommandError:
        logging.notice('Install golint for additional go linting.')
        # Fall back to a no-op success result.
        return cros_build_lib.CommandResult('gofmt "%s"' % path,
                                            returncode=0)
def _JsonLintFile(path, _output_format, _debug):
    """Returns result of running json lint checks on |path|."""
    result = cros_build_lib.CommandResult(
        'python -mjson.tool "%s"' % path, returncode=0)
    data = osutils.ReadFile(path)
    # Strip off leading UTF-8 BOM if it exists.
    if data.startswith(u'\ufeff'):
        data = data[1:]
    # Strip out comments for JSON parsing.
    comment_free = re.sub(r'^\s*#.*', '', data, flags=re.M)
    # See if it validates.
    try:
        json.loads(comment_free)
    except ValueError as e:
        result.returncode = 1
        logging.notice('%s: %s', path, e)
    # Check whitespace.
    if not _WhiteSpaceLintData(path, data):
        result.returncode = 1
    return result
def _MarkdownLintFile(path, _output_format, _debug):
    """Returns result of running lint checks on |path|."""
    data = osutils.ReadFile(path)
    result = cros_build_lib.CommandResult(
        'mdlint(internal) "%s"' % path, returncode=0)
    # Check whitespace.
    if not _WhiteSpaceLintData(path, data):
        result.returncode = 1
    return result
def _ShellLintFile(path, output_format, debug, gentoo_format=False):
"""Returns result of running lint checks on |path|.
Args:
path: The path to the script on which to run the linter.
output_format: The format of the output that the linter should emit. See
|SHLINT_OUTPUT_FORMAT_MAP|.
debug: Whether to print out the linter command.
gentoo_format: Whether to treat this file as an ebuild style script.
Returns:
A CommandResult object.
"""
# TODO: Try using `checkbashisms`.
syntax_check = _LinterRunCommand(['bash', '-n', path], debug)
if syntax_check.returncode != 0:
return syntax_check
# Try using shellcheck if it exists, with a preference towards finding it
# inside the chroot. This is OK as it is statically linked.
shellcheck = (
osutils.Which('shellcheck', path='/usr/bin',
root=os.path.join(constants.SOURCE_ROOT, 'chroot'))
or osutils.Which('shellcheck'))
if not shellcheck:
logging.notice('Install shellcheck for additional shell linting.')
return syntax_check
# Instruct shellcheck to run itself from the shell script's dir. Note that
# 'SCRIPTDIR' is a special string that shellcheck rewrites to the dirname of
# the given path.
extra_checks = [
'avoid-nullary-conditions', # SC2244
'check-unassigned-uppercase', # Include uppercase in SC2154
'require-variable-braces', # SC2250
]
if not gentoo_format:
extra_checks.append('quote-safe-variables') # SC2248
cmd = [shellcheck, '--source-path=SCRIPTDIR',
'--enable=%s' % ','.join(extra_checks)]
if output_format != 'default':
cmd.extend(SHLINT_OUTPUT_FORMAT_MAP[output_format])
cmd.append('-x')
if gentoo_format:
# ebuilds don't explicitly export variables or contain a shebang.
cmd.append('--exclude=SC2148')
# ebuilds always use bash.
cmd.append('--shell=bash')
cmd.append(path)
lint_result = _LinterRunCommand(cmd, debug)
# During testing, we don't want to fail the linter for shellcheck errors,
# so override the return code.
if lint_result.returncode != 0:
bug_url = (
'https://bugs.chromium.org/p/chromium/issues/entry?' +
urllib.parse.urlencode({
'template':
'Defect report from Developer',
'summary':
'Bad shellcheck warnings for %s' % os.path.basename(path),
'components':
'Infra>Client>ChromeOS>Build,',
'cc':
'<EMAIL>,<EMAIL>',
'comment':
'Shellcheck output from file:\n%s\n\n<paste output here>\n\n'
"What is wrong with shellcheck's findings?\n" % path,
}))
logging.warning('Shellcheck found problems. These | |
<reponame>ksmit799/POTCO-PS<filename>pirates/piratesgui/NewTutorialPanel.py
# File: N (Python 2.4)
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from pirates.piratesgui import GuiPanel, PiratesGuiGlobals
from pirates.piratesbase import PLocalizer, PiratesGlobals
from direct.interval.IntervalGlobal import *
from pirates.battle import CannonGlobals
from pirates.audio import SoundGlobals
from pirates.audio.SoundGlobals import loadSfx
import DialogButton
import string
class NewTutorialPanel(GuiPanel.GuiPanel):
# Maps a tutorial stage name to the event that dismisses its panel.
# Consumed in __init__: self.STAGE_TO_EVENTS.get(mode, 'closeTutorialWindow').
STAGE_TO_EVENTS = {
    'exitCannon': 'cannonExited',
    'drawSword': 'weaponEquipped',
    'attackSword': 'properHit',
    'comboSword': 'didSlash',
    'cutlassLvl': 'seachestOpened',
    'cutlassSkillOpen': 'skillPanelOpened',
    'cutlassSkillUnlock': 'skillImprovementAttempted',
    'cutlassDoneLvl': 'closePointSpendPanel',
    'compassActiveQuest': 'closeCompassActiveQuest',
    'compassIconsBearing': 'closeCompassIconsBearing',
    'compassIconsPeople': 'closeCompassIconsPeople',
    'specialMenu': 'usedSpecialAttack',
    'specialUse': 'usedSpecialAttack',
    'sheatheSword': 'weaponSheathed',
    'questPageOpen': 'questPageOpened',
    'questPageClose': 'seachestClosed',
    'seachestOpen': 'seachestOpened',
    'pistolAim': 'pistolMoved',
    'pistolTarget': 'pistolAimedTarget',
    'pistolHit': 'pistolHitTarget',
    'pistolPractice': 'weaponSheathed',
    'lookoutChestOpen': 'seachestOpened',
    'lookoutOpen': 'lookoutOpened',
    'lookoutClose': 'lookoutClosed' }
# Panel geometry per tutorial stage, unpacked in __init__ as
# (width, height, x, y, z).  Float literals normalized from decompiler
# output (e.g. 0.20000000000000001 -> 0.2); the short forms parse to the
# exact same double values, so behavior is unchanged.
DIMENSION_TO_EVENTS = {
    'seachestOpen': (1.25, 0.25, 0.2, 0, 0.2),
    'questPageOpen': (1.25, 0.25, 0.2, 0, 0.2),
    'questPageClose': (1.25, 0.25, 0.2, 0, 0.2),
    'boardShip': (1.25, 0.25, 0.2, 0, 0.2),
    'useCannon': (1.25, 0.25, 0.2, 0, 1.4),
    'moveCannon': (1.25, 0.25, 0.2, 0, 1.4),
    'fireCannon': (1.25, 0.25, 0.2, 0, 1.4),
    'exitCannon': (1.25, 0.25, 0.2, 0, 1.4),
    'leaveJail': (1.25, 0.25, 0.2, 0, 0.2),
    'showBlacksmith': (1.27, 0.25, 0.2, 0, 0.2),
    'doCutlassTutorial': (1.25, 0.25, 0.2, 0, 1.2),
    'drawSword': (1.25, 0.25, 0.2, 0, 1.2),
    'attackSword': (1.25, 0.25, 0.2, 0, 1.2),
    'comboSword': (1.25, 0.25, 0.2, 0, 1.2),
    'bonusSword': (1.25, 0.25, 0.2, 0, 1.2),
    'cutlassLvl': (1.25, 0.25, 0.2, 0, 0.25),
    'cutlassSkillOpen': (1.25, 0.25, 0.2, 0, 0.25),
    'cutlassSkillUnlock': (1.25, 0.25, 0.2, 0, 0.25),
    'cutlassDoneLvl': (1.25, 0.25, 0.2, 0, 0.25),
    'specialMenu': (1.25, 0.25, 0.2, 0, 1.2),
    'skillLearning': (1.27, 0.25, 0.2, 0, 1.2),
    'sheatheSword': (1.25, 0.25, 0.2, 0, 1.2),
    'showSkeleton': (1.27, 0.25, 0.2, 0, 0.2),
    'showJungleTia': (1.25, 0.25, 0.2, 0, 0.2),
    'receiveCompass': (1.25, 0.25, 0.2, 0, 0.2),
    'compassActiveQuest': (1.25, 0.25, 0.2, 0, 1.2),
    'compassIconsBearing': (1.25, 0.25, 0.2, 0, 1.2),
    'compassIconsPeople': (1.25, 0.25, 0.2, 0, 1.2),
    'showNavy': (1.25, 0.25, 0.2, 0, 0.2),
    'showGovMansion': (1.25, 0.25, 0.2, 0, 0.2),
    'showDarby': (1.25, 0.25, 0.2, 0, 0.2),
    'showDinghy': (1.25, 0.25, 0.2, 0, 0.2),
    'showBarbossa': (1.25, 0.25, 0.2, 0, 0.2),
    'pistolAim': (1.25, 0.25, 0.2, 0, 0.25),
    'pistolTarget': (1.25, 0.25, 0.2, 0, 0.25),
    'pistolHit': (1.25, 0.25, 0.2, 0, 0.25),
    'pistolPractice': (1.25, 0.25, 0.2, 0, 0.25),
    'learnLookout': (1.25, 0.25, 0.2, 0, 0.2),
    'lookoutChestOpen': (1.25, 0.25, 0.2, 0, 0.2),
    'lookoutOpen': (1.25, 0.25, 0.2, 0, 0.2),
    'lookoutClose': (1.25, 0.25, 0.2, 0, 0.2),
    'showTortugaJack': (1.25, 0.25, 0.2, 0, 0.2),
    'teleport_tut1': (1.25, 0.25, 0.2, 0, 0.2),
    'teleport_tut2': (1.25, 0.25, 0.2, 0, 0.2),
    'teleport_tut3': (1.25, 0.25, 0.2, 0, 0.2),
    'chat_tut1': (1.25, 0.25, 0.2, 0, 1.2),
    'chat_tut2': (1.25, 0.25, 0.2, 0, 1.2),
    'chat_tut3': (1.27, 0.25, 0.2, 0, 1.2),
    'chat_tut4': (1.27, 0.25, 0.2, 0, 0.2),
    'chat_tut5': (1.27, 0.25, 0.2, 0, 0.2),
    'chat_tut6': (1.25, 0.25, 0.2, 0, 1.2),
    'chat_tut7': (1.25, 0.25, 0.2, 0, 1.2),
    'chat_tut8': (1.25, 0.25, 0.2, 0, 1.2),
    'chat_tut_alt1': (1.25, 0.25, 0.2, 0, 1.2),
    'chat_tut_alt2': (1.25, 0.25, 0.2, 0, 1.2),
    'chat_tut_alt3': (1.25, 0.25, 0.2, 0, 1.2),
    'chat_tut_alt4': (1.27, 0.25, 0.2, 0, 1.2),
    'chat_tut_alt5': (1.27, 0.25, 0.2, 0, 0.2),
    'chat_tut_alt6': (1.27, 0.25, 0.2, 0, 0.2),
    'chat_tut_alt7': (1.25, 0.25, 0.2, 0, 1.2),
    'chat_tut_alt8': (1.25, 0.25, 0.2, 0, 1.2) }
# Pointer-arrow placement per tutorial stage, unpacked in __init__ as
# (arrowParent, ax, ay, az, ar): screen-corner parent node, position,
# and roll angle in degrees.  Float literals normalized from decompiler
# output; the short forms are the exact same double values.
ARROW_TO_EVENTS = {
    'seachestOpen': (base.a2dBottomRight, -0.25, 0, 0.25, 135),
    'questPageOpen': (base.a2dBottomRight, -0.225, 0, 0.43, 135),
    'questPageClose': (base.a2dBottomRight, -0.25, 0, 0.25, 135),
    'drawSword': (base.a2dBottomRight, -0.675, 0, 0.25, 135),
    'cutlassLvl': (base.a2dBottomRight, -0.25, 0, 0.25, 135),
    'cutlassSkillOpen': (base.a2dBottomRight, -0.25, 0, 1.0, 135),
    'cutlassSkillUnlock': (base.a2dBottomRight, -1.0, 0, 1.05, 135),
    'cutlassDoneLvl': (base.a2dBottomRight, -0.25, 0, 0.25, 135),
    'specialMenu': (base.a2dBottomCenter, -0.425, 0, 0.325, 135),
    'chat_tut1': (base.a2dBottomLeft, 0.4, 0, 0.2, 225),
    'chat_tut2': (base.a2dBottomLeft, 0.5, 0, 0.2, 225),
    'chat_tut5': (base.a2dBottomLeft, 1.45, 0, 1.55, 225),
    'chat_tut_alt2': (base.a2dBottomLeft, 0.3, 0, 0.2, 225),
    'chat_tut_alt6': (base.a2dBottomLeft, 1.45, 0, 1.55, 225),
    'lookoutChestOpen': (base.a2dBottomRight, -0.225, 0, 0.25, 135),
    'lookoutOpen': (base.a2dBottomRight, -0.225, 0, 0.545, 135),
    'lookoutClose': (base.a2dBottomRight, -0.25, 0, 0.25, 135) }
# Stage icon art, unpacked in __init__ as (iconFile, iconName, iconScale):
# model file to load, node name to find inside it, and display scale.
# Float literals normalized from decompiler output (identical doubles).
ICON_TO_EVENTS = {
    'seachestOpen': ('models/gui/toplevel_gui', 'treasure_chest_closed', 0.17),
    'questPageOpen': ('models/gui/toplevel_gui', 'topgui_icon_journal', 0.286),
    'questPageClose': ('models/gui/toplevel_gui', 'treasure_chest_open', 0.17),
    'moveCannon': ('models/gui/toplevel_gui', 'icon_mouse_right', 0.17),
    'fireCannon': ('models/gui/toplevel_gui', 'icon_mouse_left', 0.17),
    'showBlacksmith': ('models/gui/toplevel_gui', 'icon_warehouse', 0.225),
    'doCutlassTutorial': ('models/gui/gui_icons_weapon', 'pir_t_ico_swd_cutlass_a', 0.17),
    'drawSword': ('models/gui/gui_icons_weapon', 'pir_t_ico_swd_cutlass_a', 0.17),
    'attackSword': ('models/gui/toplevel_gui', 'icon_mouse_left', 0.17),
    'comboSword': ('models/gui/toplevel_gui', 'icon_mouse_double_left', 0.285),
    'bonusSword': ('models/gui/toplevel_gui', 'icon_mouse_double_left', 0.285),
    'cutlassLvl': ('models/gui/toplevel_gui', 'treasure_chest_closed', 0.17),
    'cutlassSkillOpen': ('models/gui/toplevel_gui', 'topgui_icon_skills', 0.17),
    'cutlassSkillUnlock': ('models/textureCards/skillIcons', 'tutorial_sweep', 0.1),
    'cutlassDoneLvl': ('models/gui/toplevel_gui', 'treasure_chest_open', 0.17),
    'specialMenu': ('models/textureCards/skillIcons', 'tutorial_sweep', 0.1),
    'skillLearning': ('models/gui/toplevel_gui', 'topgui_icon_skills', 0.17),
    'showSkeleton': ('models/gui/toplevel_gui', 'icon_grave_yard', 0.225),
    'showJungleTia': ('models/gui/toplevel_gui', 'icon_jungle_entrance', 0.225),
    'receiveCompass': ('models/gui/toplevel_gui', 'compass_small_button_open', 0.25),
    'compassActiveQuest': ('models/gui/toplevel_gui', 'compass_small_button_open', 0.25),
    'compassIconsBearing': ('models/gui/toplevel_gui', 'compass_small_button_open', 0.25),
    'compassIconsPeople': ('models/gui/toplevel_gui', 'compass_small_button_open', 0.25),
    'showNavy': ('models/gui/toplevel_gui', 'icon_navy', 0.225),
    'showGovMansion': ('models/gui/toplevel_gui', 'icon_gov_mansion', 0.225),
    'showDarby': ('models/gui/toplevel_gui', 'icon_darby', 0.225),
    'showDinghy': ('models/gui/toplevel_gui', 'icon_dinghy', 0.225),
    'showBarbossa': ('models/gui/toplevel_gui', 'icon_cave_entrance', 0.225),
    'chat_tut1': ('models/gui/triangle', 'triangle_over', 0.085),
    'chat_tut2': ('models/gui/triangle', 'triangle_over', 0.085),
    'chat_tut3': ('models/gui/chat_frame_skull', 'chat_frame_skull_over', 0.3),
    'chat_tut4': ('models/gui/chat_frame_skull', 'chat_frame_skull_over', 0.3),
    'chat_tut5': ('models/gui/chat_frame_skull', 'chat_frame_skull_over', 0.3),
    'chat_tut6': ('models/gui/chat_frame_skull', 'chat_frame_skull_over', 0.3),
    'chat_tut7': ('models/gui/chat_frame_skull', 'chat_frame_skull_over', 0.3),
    'chat_tut8': ('models/gui/chat_frame_skull', 'chat_frame_skull_over', 0.3),
    'chat_tut_alt2': ('models/gui/chat_frame_skull', 'chat_frame_skull_over', 0.3),
    'chat_tut_alt4': ('models/gui/chat_frame_skull', 'chat_frame_skull_over', 0.3),
    'chat_tut_alt5': ('models/gui/chat_frame_skull', 'chat_frame_skull_over', 0.3),
    'chat_tut_alt6': ('models/gui/chat_frame_skull', 'chat_frame_skull_over', 0.3),
    'chat_tut_alt7': ('models/gui/chat_frame_skull', 'chat_frame_skull_over', 0.3),
    'chat_tut_alt8': ('models/gui/chat_frame_skull', 'chat_frame_skull_over', 0.3),
    'pistolAim': ('models/gui/toplevel_gui', 'icon_mouse_right', 0.17),
    'pistolHit': ('models/gui/toplevel_gui', 'icon_mouse_left', 0.17),
    'learnLookout': ('models/gui/toplevel_gui', 'telescope_button', 0.225),
    'lookoutChestOpen': ('models/gui/toplevel_gui', 'treasure_chest_closed', 0.17),
    'lookoutOpen': ('models/gui/toplevel_gui', 'telescope_button', 0.225),
    'lookoutClose': ('models/gui/toplevel_gui', 'treasure_chest_open', 0.17),
    'showTortugaJack': ('models/gui/toplevel_gui', 'icon_faithful_bride', 0.225) }
# Stages flagged True here get interval-based treatment; the consumer is
# not visible in this chunk -- presumably createShowPanelIval (called in
# __init__).  TODO(review): confirm against the rest of the class.
IVALS_TO_EVENTS = {
    'compassActiveQuest': True,
    'compassIconsBearing': True,
    'compassIconsPeople': True }
def __init__(self, tutorialList, ignoreEscape = True, title = None):
    """
    Build a tutorial popup panel for one tutorial stage.

    tutorialList -- list whose first element is the stage name; a length
        of 2 adds an OK button, exactly 3 adds Yes/No buttons, and more
        than 3 appends the extra entries' localized text to the panel.
    ignoreEscape -- when True the panel ignores the escape key.
    title -- NOTE(review): this parameter is dead; it is unconditionally
        overwritten below before any use.

    Float literals were normalized from decompiler output (e.g.
    0.40000000000000002 -> 0.4); values are bit-identical doubles.
    """
    mode = tutorialList[0]
    aspectRatio = 1.32
    showClose = False
    # Per-stage close event, with a catchall fallback.
    self.closeMessage = self.STAGE_TO_EVENTS.get(mode, 'closeTutorialWindow')
    self.closeMessageCatchall = 'closeTutorialWindowAll'
    self.mode = mode
    (width, height, x, y, z) = self.DIMENSION_TO_EVENTS.get(mode, (PiratesGuiGlobals.TutorialPanelWidth, PiratesGuiGlobals.TutorialPanelHeight, 0.03, 0, 1.0))
    GuiPanel.GuiPanel.__init__(self, '', width * aspectRatio, height * aspectRatio, showClose, modelName = 'general_frame_e', borderScale = 0.4, bgBuffer = 0.15)
    self.initialiseoptions(NewTutorialPanel)
    self.reparentTo(base.a2dBottomLeft)
    self.setPos(x, y, z)
    self.setBin('gui-popup', 0)
    # Optional stage icon (falls back to a 'not_defined' node lookup).
    (iconFile, iconName, iconScale) = self.ICON_TO_EVENTS.get(self.mode, ('models/gui/toplevel_gui', 'not_defined', 0.17))
    guiFile = loader.loadModel(iconFile)
    flip = 0
    # Right-mouse icons reuse the left-mouse art, mirrored via HPR below.
    if iconName == 'icon_mouse_right':
        iconName = 'icon_mouse_left'
        flip = 1
    if iconName == 'icon_mouse_left':
        iconScale = 0.75
    self.icon = guiFile.find('**/' + iconName)
    if self.icon.isEmpty():
        self.icon = None
    if self.icon:
        self.icon.reparentTo(self)
        self.icon.setPos(0.17, 0, 0.12 * aspectRatio)
        self.icon.setScale(iconScale * aspectRatio)
        if flip:
            self.icon.setHpr(180, 0, 0)
        # Push the text right to make room for the icon.
        textXOffset = 0.3
    else:
        textXOffset = 0.1
    # Optional pointer arrow anchored to a screen corner.
    (arrowParent, ax, ay, az, ar) = self.ARROW_TO_EVENTS.get(self.mode, (None, 0, 0, 0, 135))
    if arrowParent:
        self.arrow = loader.loadModel('models/gui/arrow_with_halo')
        if self.arrow.isEmpty():
            # Fall back to the plain compass arrow if the halo model is missing.
            self.arrow = loader.loadModel('models/gui/compass_arrow')
        self.arrow.setBin('gui-popup', 0)
        arrowScale = 0.75
        self.arrow.reparentTo(arrowParent)
        self.arrow.setPos(ax, ay, az)
        self.arrow.setScale(arrowScale)
        self.arrow.setR(ar)
        self.arrow.hide()
    else:
        self.arrow = None
    self.openSfx = None
    self.showPanelIval = None
    self.createShowPanelIval()
    undefText = 'undefined text'
    if base.config.GetBool('want-easy-combos', 1) and PLocalizer.TutorialPanelDialogEasyCombo.get(mode):
        text = PLocalizer.TutorialPanelDialogEasyCombo.get(mode)
    else:
        text = PLocalizer.TutorialPanelDialog.get(mode, undefText)
    # A leading '[title]' tag embedded in the localized text becomes the
    # panel title; the body text starts after the closing bracket.
    loc = string.find(text, '[')
    start = 0
    title = None
    if loc >= 0:
        loc2 = string.find(text, ']')
        start = loc2 + 1
        title = text[loc + 1:loc2]
    listLen = len(tutorialList)
    self.yesTutorial = None
    self.noTutorial = None
    buttonPos = (0.6 + textXOffset, 0, 0.11)
    yesButtonText = PLocalizer.lOk
    if listLen > 3:
        # Long lists: append the extra stages' localized text to this panel.
        self.wreckHitButton = []
        for addText in tutorialList[1:3]:
            localText = PLocalizer.TutorialPanelDialog.get(addText, undefText)
            if localText != undefText:
                text += localText
                continue
            text += addText
        self.modifyText = text
    elif listLen >= 3:
        # Exactly three entries: show Yes/No buttons instead of OK.
        buttonPos = (0.7 + textXOffset, 0, 0.11)
        self.noTutorial = DialogButton.DialogButton(self, text = PLocalizer.lNo, buttonStyle = DialogButton.DialogButton.NO, pos = buttonPos)
        buttonPos = (0.4 + textXOffset, 0, 0.11)
        yesButtonText = PLocalizer.lYes
    if listLen >= 2:
        self.yesTutorial = DialogButton.DialogButton(self, text = yesButtonText, buttonStyle = DialogButton.DialogButton.YES, pos = buttonPos)
    self.createTextIcons()
    yOffsetFudge = 0.0
    ratio = width / height
    # Wider panels wrap text later; squarish ones get extra vertical fudge.
    if ratio >= 5:
        wordWrap = 25
    elif ratio >= 2:
        wordWrap = 12
    else:
        wordWrap = 8
        yOffsetFudge = 0.05
    if title:
        textYOffset = ((height + yOffsetFudge) / 1.5) * aspectRatio
        self.titleText = DirectLabel(parent = self, relief = None, text = title, text_scale = PiratesGuiGlobals.TextScaleLarge * aspectRatio, text_align = TextNode.ALeft, text_fg = PiratesGuiGlobals.TextFG2, text_shadow = PiratesGuiGlobals.TextShadow, pos = (textXOffset, 0, textYOffset))
    else:
        self.titleText = None
    textYOffset = ((height + yOffsetFudge) / 1.5) * aspectRatio
    self.helpText = DirectLabel(parent = self, relief = None, text = text[start:len(text)], text_scale = PiratesGuiGlobals.TextScaleLarge * aspectRatio, text_align = TextNode.ALeft, text_fg = PiratesGuiGlobals.TextFG8, text_shadow = PiratesGuiGlobals.TextShadow, text_wordwrap = wordWrap, pos = (textXOffset, 0, textYOffset))
    if ignoreEscape:
        self.ignore('escape')
    self.hide()
    base.ntp = self
def createTextIcons(self):
tpMgr = TextPropertiesManager.getGlobalPtr()
if not tpMgr.hasGraphic('cutlassEquip'):
topGui = loader.loadModel('models/gui/toplevel_gui')
triangleGui = loader.loadModel('models/gui/triangle')
skullGui = loader.loadModel('models/gui/chat_frame_skull')
kbButton = topGui.find('**/keyboard_button')
nomButton = topGui.find('**/icon_mouse')
lmButton = topGui.find('**/icon_mouse_left')
lmButton.setScale(4)
csButton = skullGui.find('**/*skull')
caButton = triangleGui.find('**/triangle')
rmButton = topGui.find('**/icon_mouse_left')
rmButton.setScale(4)
rmButton.setHpr(180, 0, 0)
jnButton = topGui.find('**/topgui_icon_journal')
skButton = topGui.find('**/topgui_icon_skills')
chestClosedButton = | |
)
# client.set_options( url=self.HIS_Central_URL, proxy=proxy )
# ERROR: Unexpected Python exception: <urlopen error [Errno 111]
# Connection refused>
#----------------------------------------------------------------------
# from suds.transport.http import HttpTransport
# tran = HttpTransport()
# import urllib2
# opener = urllib2.build_opener( proxy )
# tran.urlopener = opener
# client = suds.client.Client( self.HIS_Central_URL, transport=tran )
# ERROR: Unexpected Python exception:
# expected BaseHandler instance, got <type 'dict'>
#----------------------------------------------------------------------
# from suds.transport.http import HttpTransport
# tran = HttpTransport()
# import urllib2
# proxy_han = urllib2.ProxyHandler( proxies=proxy )
# opener = urllib2.build_opener( proxy_han )
# tran.urlopener = opener
# client = suds.client.Client( self.HIS_Central_URL, transport=tran )
# ERROR: Unexpected Python exception:
# expected BaseHandler instance, got <type 'dict'>
#----------------------------------------------------------------------
# from suds.transport.http import HttpTransport
# tran = HttpTransport()
# import urllib2
# proxy_han = urllib2.ProxyHandler( self.proxies )
# opener = urllib2.build_opener( proxy_han )
# tran.urlopener = opener
# client = suds.client.Client( self.HIS_Central_URL, transport=tran )
# ERROR: Unexpected Python exception:
# expected BaseHandler instance, got <type 'dict'>
#----------------------------------------------------------------------
# print client #########
#######################
## print 'Calling client.add_prefix() method...'
## client.add_prefix( 'hiscentral.asmx' ) #####
## print 'Created client object. ########'
response = client.service.GetSeriesCatalogForBox2(xmin, xmax, ymin, ymax, \
self.data_keyword, networkIDs, \
self.start_date, self.stop_date)
rep_len = len(response)
if (rep_len == 0):
print('-------------------------------------------')
print('SORRY, Your query returned no matches.')
print(' You may want to try:')
print(' (1) Changing the keyword.')
print(' (2) Expanding the bounding box.')
print(' (3) Expanding the range of dates.')
print('-------------------------------------------')
print(' ')
self.DONE = True
return
# print 'len(response) =', rep_len
series_array = response[0]
n_series = len( series_array )
print('Number of time series retrieved =', n_series)
print('Max allowed number of series =', self.max_n_series)
print(' ')
#------------------------------------------------
# Return if n_series > max_n_series. (10/25/11)
#------------------------------------------------
if (n_series > self.max_n_series):
print('###############################################')
print(' ABORTING: n_series > max_n_series.')
print(' n_series =', n_series)
print(' max_n_series =', self.max_n_series)
print('###############################################')
print(' ')
return
#-----------------------------------
# Check the data for nodata/NaN ??
#-----------------------------------
#------------------------------------------
# Read next infil vars from input files ?
#------------------------------------------
# if (self.time_index > 0):
# self.read_input_files()
#-------------------------------------------
# Extract data values for each time series
#-------------------------------------------
## print 'Writing time series to local files...'
series_num = 0
# print 'Processing array of time series...'
for series in series_array:
series_num += 1
print('=========================================================')
print('Working on series', series_num, 'of', n_series)
try:
client = suds.client.Client( series.ServURL )
values_obj = client.service.GetValuesObject(series.location, series.VarCode, \
self.start_date, self.stop_date)
values = values_obj.timeSeries.values
n_values = values._count
SKIP = False
if (n_values < 1):
print('Skipping series: n_values =', n_values, ' ( < 1 )')
print(' ')
SKIP = True
if (n_values > self.max_n_values):
print('Skipping series: n_values =', n_values)
print(' but max_n_values =', self.max_n_values)
print(' ')
SKIP = True
#------------------------------------------------------
# Get actual time series values and date-time strings
#------------------------------------------------------
if not(SKIP):
#-------------------------------------
# Is there a faster way than this ??
#-------------------------------------
vals = np.asarray([np.float(val.value) for val in values.value])
dts = np.asarray([val._dateTime for val in values.value])
#-----------------------------------------------
# Write the data values to a text file here ??
#-----------------------------------------------
## if (self.DEBUG):
if (True):
print('series ID =', series_num)
print('size(series) =', np.size(vals))
print('min(series) =', vals.min())
print('max(series) =', vals.max())
print('series.Sitename =', series.Sitename)
print('series.location =', series.location)
print('series.VarName =', series.VarName)
print('series.VarCode =', series.VarCode)
print('series.ValueCount =', series.ValueCount)
print('series.datatype =', series.datatype)
print('series.valuetype =', series.valuetype)
print('series.timeunits =', series.timeunits)
# print 'len(series) =', len(series) # (always 18)
# print 'type(values_obj) =', type(values_obj) # ("instance")
# print 'type(values) =', type(values) # ("instance")
print(' ')
except:
print('ERROR: Skipping time series.')
print(' ')
#----------------------------------------------
# Write user-specified data to output files ?
#----------------------------------------------
# Components use own self.time_sec by default.
#-----------------------------------------------
#self.write_output_files()
## self.write_output_files( time_seconds )
#-----------------------------
# Update internal clock
# after write_output_files()
#-----------------------------
self.update_time( dt )
self.status = 'updated' # (OpenMI 2.0 convention)
#-----------------------------------
# Only allow one call to update ??
#-----------------------------------
self.DONE = True
# update()
#-------------------------------------------------------------------
def finalize(self):
    """Shut the component down, closing any open output files."""
    # OpenMI 2.0 status convention: 'finalizing' -> 'finalized'.
    self.status = 'finalizing'
    self.close_output_files()
    self.status = 'finalized'
# finalize()
#-------------------------------------------------------------------
def set_computed_input_vars(self):
    """
    Derive query dates from GUI inputs.

    Converts start_month/stop_month from English month names to 1-based
    integers, then builds start_date and stop_date as 'YYYY-MM-DD'
    strings for the web-service query.  Raises ValueError if a month
    name is not one of the twelve English names.
    """
    month_names = ('January', 'February', 'March', 'April',
                   'May', 'June', 'July', 'August', 'September',
                   'October', 'November', 'December')
    # tuple.index is 0-based; January must map to 1.
    self.start_month = month_names.index(self.start_month) + 1
    self.stop_month = month_names.index(self.stop_month) + 1

    def _date_string(year, month, day):
        # Zero-pad month and day so dates are ISO-style (YYYY-MM-DD).
        return '%s-%s-%s' % (str(year), str(month).zfill(2), str(day).zfill(2))

    self.start_date = _date_string(self.start_year, self.start_month, self.start_day)
    self.stop_date = _date_string(self.stop_year, self.stop_month, self.stop_day)
# set_computed_input_vars()
#-------------------------------------------------------------------
def check_input_types(self):
    """
    Record whether all inputs are scalars.

    The HIS service only delivers time series, so this component always
    treats its inputs as scalar (no grid-vs-scalar dispatch is needed).
    """
    self.ALL_SCALARS = True
# check_input_types()
#-------------------------------------------------------------------
def initialize_computed_vars(self):
    """No-op: this component has no derived variables to initialize."""
    pass
# initialize_computed_vars()
#-------------------------------------------------------------------
def open_input_files(self):
    """No-op: this component does not read local input files."""
    pass
# open_input_files()
#-------------------------------------------------------------------
def read_input_files(self):
    """No-op beyond a debug trace: data arrives via web services, not files."""
    if self.DEBUG:
        print('Calling read_input_files()...')
# read_input_files()
#-------------------------------------------------------------------
def close_input_files(self):
    """No-op: no local input files are opened by this component."""
    pass
# close_input_files()
#-------------------------------------------------------------------
def update_outfile_names(self):
    """No-op placeholder; would prepend out_directory to output file names."""
    pass
# update_outfile_names()
#-------------------------------------------------------------------
def open_output_files(self):
    """
    Prepare for output writing.

    Checks that netCDF support is available, then refreshes output file
    names.  No grid-stack or time-series writers are opened here.
    """
    # Verify netCDF support before anything tries to write.
    model_output.check_netcdf(SILENT=self.SILENT)
    self.update_outfile_names()
# open_output_files()
#-------------------------------------------------------------------
def write_output_files(self, time_seconds=None):
    """
    Save sampled output when the model time hits a sampling interval.

    time_seconds -- optional clock supplied by a caller; defaults to
        this component's own self.time_sec.  The configured save_grid_dt
        and save_pixels_dt intervals decide what gets written.
    """
    if self.DEBUG:
        print('Calling write_output_files()...')
    # Callers may pass their own clock; otherwise use the component's.
    model_time = int(self.time_sec if time_seconds is None else time_seconds)
    # Write only at whole multiples of each sampling interval.
    if model_time % int(self.save_grid_dt) == 0:
        self.save_grids()
    if model_time % int(self.save_pixels_dt) == 0:
        self.save_pixel_values()
# write_output_files()
#-------------------------------------------------------------------
def close_output_files(self):
    """No-op: no output file handles are held open by this component."""
    pass
# close_output_files()
#-------------------------------------------------------------------
def save_grids(self):
#-----------------------------------
# Save grid stack to a netCDF file
#---------------------------------------------
# Note that add_grid() methods will | |
def id(self) -> str:
    """
    The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the backup being referenced.
    """
    # Generated accessor: value comes from the resolved output bag via pulumi.get.
    return pulumi.get(self, "id")
@property
@pulumi.getter(name="isAutomatic")
def is_automatic(self) -> bool:
    """
    True if this object is automatically created
    """
    # Generated accessor: value comes from the resolved output bag via pulumi.get.
    return pulumi.get(self, "is_automatic")
@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> str:
    """
    Describes the object's current state in detail. For example, it can be used to provide actionable information for a resource in a Failed state.
    """
    # Generated accessor: value comes from the resolved output bag via pulumi.get.
    return pulumi.get(self, "lifecycle_details")
@property
@pulumi.getter
def namespace(self) -> str:
    """
    Name of namespace that serves as a container for all of your buckets
    """
    # Generated accessor: value comes from the resolved output bag via pulumi.get.
    return pulumi.get(self, "namespace")
@property
@pulumi.getter
def object(self) -> str:
    """
    Name of the object to be uploaded to object storage
    """
    # Generated accessor: value comes from the resolved output bag via pulumi.get.
    return pulumi.get(self, "object")
@property
@pulumi.getter(name="oggVersion")
def ogg_version(self) -> str:
    """
    Version of OGG
    """
    # Generated accessor: value comes from the resolved output bag via pulumi.get.
    return pulumi.get(self, "ogg_version")
@property
@pulumi.getter
def state(self) -> str:
    """
    A filter to return only the resources that match the 'lifecycleState' given.
    """
    # NOTE(review): the docstring reads like the request-side filter doc, but
    # this is an output property (the resource's lifecycle state) -- confirm
    # against the upstream API spec before relying on the wording.
    return pulumi.get(self, "state")
@property
@pulumi.getter(name="systemTags")
def system_tags(self) -> Mapping[str, Any]:
    """
    The system tags associated with this resource, if any. The system tags are set by Oracle Cloud Infrastructure services. Each key is predefined and scoped to namespaces. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{orcl-cloud: {free-tier-retain: true}}`
    """
    # Generated accessor: value comes from the resolved output bag via pulumi.get.
    return pulumi.get(self, "system_tags")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> str:
    """
    The time the resource was created. The format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339), such as `2016-08-25T21:10:29.600Z`.
    """
    # Generated accessor: value comes from the resolved output bag via pulumi.get.
    return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="timeOfBackup")
def time_of_backup(self) -> str:
    """
    The time of the resource backup. The format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339), such as `2016-08-25T21:10:29.600Z`.
    """
    # Generated accessor: value comes from the resolved output bag via pulumi.get.
    return pulumi.get(self, "time_of_backup")
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> str:
    """
    The time the resource was last updated. The format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339), such as `2016-08-25T21:10:29.600Z`.
    """
    # Generated accessor: value comes from the resolved output bag via pulumi.get.
    return pulumi.get(self, "time_updated")
@pulumi.output_type
class GetDeploymentBackupsFilterResult(dict):
    """Filter entry holding a name, its accepted values, and an optional regex flag."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)
        # 'regex' is optional; only store it when the caller supplied one.
        if regex is not None:
            pulumi.set(__self__, "regex", regex)

    @property
    @pulumi.getter
    def name(self) -> str:
        # Field name the filter matches against.
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        # Accepted values for the named field.
        return pulumi.get(self, "values")

    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        # None when the caller did not set the regex flag (see __init__).
        return pulumi.get(self, "regex")
@pulumi.output_type
class GetDeploymentOggDataResult(dict):
    """Deployment data for an OGG (Oracle GoldenGate) deployment."""
    def __init__(__self__, *,
                 admin_password: str,
                 admin_username: str,
                 certificate: str,
                 deployment_name: str,
                 key: str):
        """
        :param str admin_password: The GoldenGate deployment console password.
        :param str admin_username: The GoldenGate deployment console username.
        :param str certificate: A PEM-encoded SSL certificate.
        :param str deployment_name: The name given to the GoldenGate service deployment. The name must be 1 to 32 characters long, must contain only alphanumeric characters and must start with a letter.
        """
        # BUG FIX: this line previously read
        #   pulumi.set(__self__, "admin_password", <PASSWORD>)
        # where ``<PASSWORD>`` was a literal redaction placeholder — invalid
        # Python. Store the actual constructor argument instead.
        pulumi.set(__self__, "admin_password", admin_password)
        pulumi.set(__self__, "admin_username", admin_username)
        pulumi.set(__self__, "certificate", certificate)
        pulumi.set(__self__, "deployment_name", deployment_name)
        pulumi.set(__self__, "key", key)
    @property
    @pulumi.getter(name="adminPassword")
    def admin_password(self) -> str:
        """The GoldenGate deployment console password."""
        return pulumi.get(self, "admin_password")
    @property
    @pulumi.getter(name="adminUsername")
    def admin_username(self) -> str:
        """
        The GoldenGate deployment console username.
        """
        return pulumi.get(self, "admin_username")
    @property
    @pulumi.getter
    def certificate(self) -> str:
        """
        A PEM-encoded SSL certificate.
        """
        return pulumi.get(self, "certificate")
    @property
    @pulumi.getter(name="deploymentName")
    def deployment_name(self) -> str:
        """
        The name given to the GoldenGate service deployment. The name must be 1 to 32 characters long, must contain only alphanumeric characters and must start with a letter.
        """
        return pulumi.get(self, "deployment_name")
    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")
@pulumi.output_type
class GetDeploymentsDeploymentCollectionResult(dict):
    """Container for the list of deployments returned by the data source."""
    def __init__(__self__, *,
                 items: Sequence['outputs.GetDeploymentsDeploymentCollectionItemResult']):
        """
        :param Sequence['GetDeploymentsDeploymentCollectionItemResult'] items:
            The deployment records in this collection.
        """
        pulumi.set(__self__, "items", items)
    @property
    @pulumi.getter
    def items(self) -> Sequence['outputs.GetDeploymentsDeploymentCollectionItemResult']:
        """The deployment records in this collection."""
        return pulumi.get(self, "items")
@pulumi.output_type
class GetDeploymentsDeploymentCollectionItemResult(dict):
def __init__(__self__, *,
             compartment_id: str,
             cpu_core_count: int,
             defined_tags: Mapping[str, Any],
             deployment_backup_id: str,
             deployment_type: str,
             deployment_url: str,
             description: str,
             display_name: str,
             fqdn: str,
             freeform_tags: Mapping[str, Any],
             id: str,
             is_auto_scaling_enabled: bool,
             is_healthy: bool,
             is_latest_version: bool,
             is_public: bool,
             license_model: str,
             lifecycle_details: str,
             nsg_ids: Sequence[str],
             ogg_data: 'outputs.GetDeploymentsDeploymentCollectionItemOggDataResult',
             private_ip_address: str,
             public_ip_address: str,
             state: str,
             subnet_id: str,
             system_tags: Mapping[str, Any],
             time_created: str,
             time_updated: str):
    """
    :param str compartment_id: The ID of the compartment in which to list resources.
    :param int cpu_core_count: The Minimum number of OCPUs to be made available for this Deployment.
    :param Mapping[str, Any] defined_tags: Tags defined for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
    :param str deployment_backup_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the backup being referenced.
    :param str deployment_type: The deployment type.
    :param str deployment_url: The URL of a resource.
    :param str description: Metadata about this specific object.
    :param str display_name: A filter to return only the resources that match the entire 'displayName' given.
    :param str fqdn: A three-label Fully Qualified Domain Name (FQDN) for a resource.
    :param Mapping[str, Any] freeform_tags: A simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
    :param str id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the deployment being referenced.
    :param bool is_auto_scaling_enabled: Indicates if auto scaling is enabled for the Deployment's CPU core count.
    :param bool is_healthy: True if all of the aggregate resources are working correctly.
    :param bool is_latest_version: Indicates if the resource is the the latest available version.
    :param bool is_public: True if this object is publicly available.
    :param str license_model: The Oracle license model that applies to a Deployment.
    :param str lifecycle_details: Describes the object's current state in detail. For example, it can be used to provide actionable information for a resource in a Failed state.
    :param Sequence[str] nsg_ids: An array of [Network Security Group](https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/networksecuritygroups.htm) OCIDs used to define network access for a deployment.
    :param 'GetDeploymentsDeploymentCollectionItemOggDataArgs' ogg_data: Deployment Data for an OggDeployment
    :param str private_ip_address: The private IP address in the customer's VCN representing the access point for the associated endpoint service in the GoldenGate service VCN.
    :param str public_ip_address: The public IP address representing the access point for the Deployment.
    :param str state: A filter to return only the resources that match the 'lifecycleState' given.
    :param str subnet_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the subnet being referenced.
    :param Mapping[str, Any] system_tags: The system tags associated with this resource, if any. The system tags are set by Oracle Cloud Infrastructure services. Each key is predefined and scoped to namespaces. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{orcl-cloud: {free-tier-retain: true}}`
    :param str time_created: The time the resource was created. The format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339), such as `2016-08-25T21:10:29.600Z`.
    :param str time_updated: The time the resource was last updated. The format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339), such as `2016-08-25T21:10:29.600Z`.
    """
    # Mirror every constructor argument into the Pulumi output object's
    # backing store; the @property getters below read them back verbatim.
    pulumi.set(__self__, "compartment_id", compartment_id)
    pulumi.set(__self__, "cpu_core_count", cpu_core_count)
    pulumi.set(__self__, "defined_tags", defined_tags)
    pulumi.set(__self__, "deployment_backup_id", deployment_backup_id)
    pulumi.set(__self__, "deployment_type", deployment_type)
    pulumi.set(__self__, "deployment_url", deployment_url)
    pulumi.set(__self__, "description", description)
    pulumi.set(__self__, "display_name", display_name)
    pulumi.set(__self__, "fqdn", fqdn)
    pulumi.set(__self__, "freeform_tags", freeform_tags)
    pulumi.set(__self__, "id", id)
    pulumi.set(__self__, "is_auto_scaling_enabled", is_auto_scaling_enabled)
    pulumi.set(__self__, "is_healthy", is_healthy)
    pulumi.set(__self__, "is_latest_version", is_latest_version)
    pulumi.set(__self__, "is_public", is_public)
    pulumi.set(__self__, "license_model", license_model)
    pulumi.set(__self__, "lifecycle_details", lifecycle_details)
    pulumi.set(__self__, "nsg_ids", nsg_ids)
    pulumi.set(__self__, "ogg_data", ogg_data)
    pulumi.set(__self__, "private_ip_address", private_ip_address)
    pulumi.set(__self__, "public_ip_address", public_ip_address)
    pulumi.set(__self__, "state", state)
    pulumi.set(__self__, "subnet_id", subnet_id)
    pulumi.set(__self__, "system_tags", system_tags)
    pulumi.set(__self__, "time_created", time_created)
    pulumi.set(__self__, "time_updated", time_updated)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> str:
    """The ID of the compartment in which to list resources."""
    return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="cpuCoreCount")
def cpu_core_count(self) -> int:
    """The minimum number of OCPUs to be made available for this Deployment."""
    return pulumi.get(self, "cpu_core_count")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Mapping[str, Any]:
    """
    Tags defined for this resource. Each key is predefined and scoped to a
    namespace. Example: `{"foo-namespace.bar-key": "value"}`
    """
    return pulumi.get(self, "defined_tags")
@property
@pulumi.getter(name="deploymentBackupId")
def deployment_backup_id(self) -> str:
    """
    The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm)
    of the backup being referenced.
    """
    return pulumi.get(self, "deployment_backup_id")
@property
@pulumi.getter(name="deploymentType")
def deployment_type(self) -> str:
    """The deployment type."""
    return pulumi.get(self, "deployment_type")
@property
@pulumi.getter(name="deploymentUrl")
def deployment_url(self) -> str:
    """The URL of a resource."""
    return pulumi.get(self, "deployment_url")
@property
@pulumi.getter
def description(self) -> str:
    """Metadata about this specific object."""
    return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
    """A filter to return only the resources that match the entire 'displayName' given."""
    return pulumi.get(self, "display_name")
@property
@pulumi.getter
def fqdn(self) -> str:
    """A three-label Fully Qualified Domain Name (FQDN) for a resource."""
    return pulumi.get(self, "fqdn")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Mapping[str, Any]:
    """
    A simple key-value pair that is applied without any predefined name, type,
    or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
    """
    return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter
def id(self) -> str:
"""
The | |
# yellowbrick.features.radviz
# Implements radviz for feature analysis.
#
# Author: <NAME>
# Created: Fri Oct 07 13:18:00 2016 -0400
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: radviz.py [0f4b236] <EMAIL> $
"""
Implements radviz for feature analysis.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
import matplotlib.patches as patches
from yellowbrick.draw import manual_legend
from yellowbrick.utils import is_dataframe
from yellowbrick.utils import nan_warnings
from yellowbrick.features.base import DataVisualizer
##########################################################################
## Quick Methods
##########################################################################
def radviz(
    X,
    y=None,
    ax=None,
    features=None,
    classes=None,
    colors=None,
    colormap=None,
    alpha=1.0,
    **kwargs
):
    """
    Quick-method wrapper around :class:`RadialVisualizer` for one-off analysis.

    Displays each feature as an axis around a circle surrounding a scatter
    plot whose points are each individual instance.

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features
    y : ndarray or Series of length n
        An array or series of target or class values
    ax : matplotlib Axes, default: None
        The axes to plot the figure on.
    features : list of strings, default: None
        The names of the features or columns
    classes : list of strings, default: None
        The names of the classes in the target
    colors : list or tuple of colors, default: None
        Specify the colors for each individual class
    colormap : string or matplotlib cmap, default: None
        Sequential colormap for continuous target
    alpha : float, default: 1.0
        Specify a transparency where 1 is completely opaque and 0 is completely
        transparent. This property makes densely clustered points more visible.

    Returns
    -------
    viz : RadViz
        Returns the fitted, finalized visualizer
    """
    viz = RadialVisualizer(
        ax=ax, features=features, classes=classes, colors=colors,
        colormap=colormap, alpha=alpha, **kwargs
    )
    # fit() performs the draw (see RadialVisualizer.fit); transform follows
    # to mirror the transformer API before handing the visualizer back.
    viz.fit(X, y, **kwargs)
    viz.transform(X)
    return viz
##########################################################################
## Static RadViz Visualizer
##########################################################################
class RadialVisualizer(DataVisualizer):
"""
RadViz is a multivariate data visualization algorithm that plots each
axis uniformely around the circumference of a circle then plots points on
the interior of the circle such that the point normalizes its values on
the axes from the center to each arc.
Parameters
----------
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
features : list, default: None
a list of feature names to use
The names of the features specified by the columns of the input dataset.
This length of this list must match the number of columns in X, otherwise
an exception will be raised on ``fit()``.
classes : list, default: None
a list of class names for the legend
The class labels for each class in y, ordered by sorted class index. These
names act as a label encoder for the legend, identifying integer classes
or renaming string labels. If omitted, the class labels will be taken from
the unique values in y.
Note that the length of this list must match the number of unique values in
y, otherwise an exception is raised. This parameter is only used in the
discrete target type case and is ignored otherwise.
colors : list or tuple, default: None
optional list or tuple of colors to colorize lines
A single color to plot all instances as or a list of colors to color each
instance according to its class. If not enough colors per class are
specified then the colors are treated as a cycle.
colormap : string or cmap, default: None
optional string or matplotlib cmap to colorize lines
The colormap used to create the individual colors. If classes are
specified the colormap is used to evenly space colors across each class.
alpha : float, default: 1.0
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Examples
--------
>>> visualizer = RadViz()
>>> visualizer.fit(X, y)
>>> visualizer.transform(X)
>>> visualizer.show()
Attributes
----------
features_ : ndarray, shape (n_features,)
The names of the features discovered or used in the visualizer that
can be used as an index to access or modify data in X. If a user passes
feature names in, those features are used. Otherwise the columns of a
DataFrame are used or just simply the indices of the data array.
classes_ : ndarray, shape (n_classes,)
The class labels that define the discrete values in the target. Only
available if the target type is discrete. This is guaranteed to be
strings even if the classes are a different type.
"""
def __init__(self, ax=None, features=None, classes=None, colors=None,
             colormap=None, alpha=1.0, **kwargs):
    """
    Initialize the RadViz visualizer.

    Defaults the base-class ``target_type`` to ``"discrete"`` unless the
    caller supplied one, forwards the drawing options to the base
    ``DataVisualizer``, and stores the scatter-point transparency.
    """
    kwargs.setdefault("target_type", "discrete")
    super(RadialVisualizer, self).__init__(
        ax=ax,
        features=features,
        classes=classes,
        colors=colors,
        colormap=colormap,
        **kwargs
    )
    self.alpha = alpha
@staticmethod
def normalize(X):
    """
    Min-max scale each column of ``X`` onto the interval [0, 1].

    NOTE(review): a constant column (max == min) divides by zero here and
    yields NaN/inf under NumPy semantics — behavior intentionally preserved
    from the original implementation.
    """
    col_min = X.min(axis=0)
    col_max = X.max(axis=0)
    spread = (col_max - col_min)[np.newaxis, :]
    return (X - col_min[np.newaxis, :]) / spread
def fit(self, X, y=None, **kwargs):
    """
    Fit the visualizer and render the RadViz canvas.

    The fit method is the primary drawing input for the
    visualization since it has both the X and y data required for the
    viz and the transform method does not.

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features
    y : ndarray or Series of length n
        An array or series of target or class values
    kwargs : dict
        Pass generic arguments to the drawing method
    Returns
    -------
    self : instance
        Returns the instance of the transformer/visualizer
    """
    # Base-class fit presumably resolves features_/classes_ (see the class
    # Attributes docs) before draw() reads them — confirm in DataVisualizer.
    super(RadialVisualizer, self).fit(X, y)
    self.draw(X, y, **kwargs)
    return self
def draw(self, X, y, **kwargs):
"""
Called from the fit method, this method creates the radviz canvas and
draws each instance as a class or target colored point, whose location
is determined by the feature data set.
"""
# Convert from dataframe
if is_dataframe(X):
X = X.values
# Clean out nans and warn that the user they aren't plotted
nan_warnings.warn_if_nans_exist(X)
X, y = nan_warnings.filter_missing(X, y)
# Get the shape of the data
nrows, ncols = X.shape
# Set the axes limits
self.ax.set_xlim([-1, 1])
self.ax.set_ylim([-1, 1])
# Create a data structure to hold scatter plot representations
to_plot = {label: [[], []] for label in self.classes_}
# Compute the arcs around the circumference for each feature axis
# TODO: make this an independent function for override
s = np.array(
[
(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(ncols)) for i in range(ncols)]
]
)
# Compute the locations of the scatter plot for each class
# Normalize the data first to plot along the 0, 1 axis
for i, row in enumerate(self.normalize(X)):
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
xy = (s * row_).sum(axis=0) / row.sum()
label = self._label_encoder[y[i]]
to_plot[label][0].append(xy[0])
to_plot[label][1].append(xy[1])
# Add the scatter plots from the to_plot function
# TODO: store these plots to add more instances to later
# TODO: make this a separate function
for label in self.classes_:
color = self.get_colors([label])[0]
self.ax.scatter(
to_plot[label][0],
to_plot[label][1],
color=color,
label=label,
alpha=self.alpha,
**kwargs
)
# Add the circular axis path
# TODO: Make this a seperate function (along with labeling)
self.ax.add_patch(
patches.Circle(
(0.0, 0.0),
radius=1.0,
facecolor="none",
edgecolor="grey",
linewidth=0.5,
)
)
# Add the feature names
for xy, name in zip(s, self.features_):
# Add the patch indicating the location of the axis
self.ax.add_patch(patches.Circle(xy, radius=0.025, facecolor="#777777"))
# Add the feature names offset around the axis marker
if xy[0] < 0.0 and xy[1] < 0.0:
self.ax.text(
xy[0] - 0.025,
xy[1] - 0.025,
name,
ha="right",
va="top",
size="small",
)
elif xy[0] < 0.0 and xy[1] >= 0.0:
self.ax.text(
xy[0] - 0.025,
xy[1] + 0.025,
name,
ha="right",
va="bottom",
size="small",
)
elif xy[0] >= 0.0 and xy[1] < 0.0:
self.ax.text(
xy[0] + 0.025,
xy[1] - 0.025,
name,
ha="left",
va="top",
size="small",
)
elif xy[0] >= 0.0 and xy[1] >= 0.0:
self.ax.text(
xy[0] | |
= []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index55
if address54 is FAILURE:
address54 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address54 = FAILURE
if address54 is not FAILURE:
elements26.append(address54)
else:
elements26 = None
self._offset = index54
else:
elements26 = None
self._offset = index54
if elements26 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index54:self._offset], index54, elements26)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index56, elements27 = self._offset, []
address55 = FAILURE
chunk54, max54 = None, self._offset + 9
if max54 <= self._input_size:
chunk54 = self._input[self._offset:max54]
if chunk54 == 'otherwise':
address55 = TreeNode(self._input[self._offset:self._offset + 9], self._offset, [])
self._offset = self._offset + 9
else:
address55 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'otherwise\'')
if address55 is not FAILURE:
elements27.append(address55)
address56 = FAILURE
index57 = self._offset
chunk55, max55 = None, self._offset + 1
if max55 <= self._input_size:
chunk55 = self._input[self._offset:max55]
if chunk55 is not None and Grammar.REGEX_32.search(chunk55):
address56 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address56 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index57
if address56 is FAILURE:
address56 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address56 = FAILURE
if address56 is not FAILURE:
elements27.append(address56)
else:
elements27 = None
self._offset = index56
else:
elements27 = None
self._offset = index56
if elements27 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index56:self._offset], index56, elements27)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index58, elements28 = self._offset, []
address57 = FAILURE
chunk56, max56 = None, self._offset + 6
if max56 <= self._input_size:
chunk56 = self._input[self._offset:max56]
if chunk56 == 'unless':
address57 = TreeNode(self._input[self._offset:self._offset + 6], self._offset, [])
self._offset = self._offset + 6
else:
address57 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'unless\'')
if address57 is not FAILURE:
elements28.append(address57)
address58 = FAILURE
index59 = self._offset
chunk57, max57 = None, self._offset + 1
if max57 <= self._input_size:
chunk57 = self._input[self._offset:max57]
if chunk57 is not None and Grammar.REGEX_33.search(chunk57):
address58 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address58 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index59
if address58 is FAILURE:
address58 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address58 = FAILURE
if address58 is not FAILURE:
elements28.append(address58)
else:
elements28 = None
self._offset = index58
else:
elements28 = None
self._offset = index58
if elements28 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index58:self._offset], index58, elements28)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index60, elements29 = self._offset, []
address59 = FAILURE
chunk58, max58 = None, self._offset + 3
if max58 <= self._input_size:
chunk58 = self._input[self._offset:max58]
if chunk58 == 'and':
address59 = TreeNode(self._input[self._offset:self._offset + 3], self._offset, [])
self._offset = self._offset + 3
else:
address59 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'and\'')
if address59 is not FAILURE:
elements29.append(address59)
address60 = FAILURE
index61 = self._offset
chunk59, max59 = None, self._offset + 1
if max59 <= self._input_size:
chunk59 = self._input[self._offset:max59]
if chunk59 is not None and Grammar.REGEX_34.search(chunk59):
address60 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address60 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index61
if address60 is FAILURE:
address60 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address60 = FAILURE
if address60 is not FAILURE:
elements29.append(address60)
else:
elements29 = None
self._offset = index60
else:
elements29 = None
self._offset = index60
if elements29 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index60:self._offset], index60, elements29)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index62, elements30 = self._offset, []
address61 = FAILURE
chunk60, max60 = None, self._offset + 3
if max60 <= self._input_size:
chunk60 = self._input[self._offset:max60]
if chunk60 == 'has':
address61 = TreeNode(self._input[self._offset:self._offset + 3], self._offset, [])
self._offset = self._offset + 3
else:
address61 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'has\'')
if address61 is not FAILURE:
elements30.append(address61)
address62 = FAILURE
index63 = self._offset
chunk61, max61 = None, self._offset + 1
if max61 <= self._input_size:
chunk61 = self._input[self._offset:max61]
if chunk61 is not None and Grammar.REGEX_35.search(chunk61):
address62 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address62 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index63
if address62 is FAILURE:
address62 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address62 = FAILURE
if address62 is not FAILURE:
elements30.append(address62)
else:
elements30 = None
self._offset = index62
else:
elements30 = None
self._offset = index62
if elements30 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index62:self._offset], index62, elements30)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index64, elements31 = self._offset, []
address63 = FAILURE
chunk62, max62 = None, self._offset + 3
if max62 <= self._input_size:
chunk62 = self._input[self._offset:max62]
if chunk62 == 'not':
address63 = TreeNode(self._input[self._offset:self._offset + 3], self._offset, [])
self._offset = self._offset + 3
else:
address63 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'not\'')
if address63 is not FAILURE:
elements31.append(address63)
address64 = FAILURE
index65 = self._offset
chunk63, max63 = None, self._offset + 1
if max63 <= self._input_size:
chunk63 = self._input[self._offset:max63]
if chunk63 is not None and Grammar.REGEX_36.search(chunk63):
address64 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address64 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index65
if address64 is FAILURE:
address64 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address64 = FAILURE
if address64 is not FAILURE:
elements31.append(address64)
else:
elements31 = None
self._offset = index64
else:
elements31 = None
self._offset = index64
if elements31 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index64:self._offset], index64, elements31)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index66, elements32 = self._offset, []
address65 = FAILURE
chunk64, max64 = None, self._offset + 2
if max64 <= self._input_size:
chunk64 = self._input[self._offset:max64]
if chunk64 == 'or':
address65 = TreeNode(self._input[self._offset:self._offset + 2], self._offset, [])
self._offset = self._offset + 2
else:
address65 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'or\'')
if address65 is not FAILURE:
elements32.append(address65)
address66 = FAILURE
index67 = self._offset
chunk65, max65 = None, self._offset + 1
if max65 <= self._input_size:
chunk65 = self._input[self._offset:max65]
if chunk65 is not None and Grammar.REGEX_37.search(chunk65):
address66 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address66 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index67
if address66 is FAILURE:
address66 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address66 = FAILURE
if address66 is not FAILURE:
elements32.append(address66)
else:
elements32 = None
self._offset = index66
else:
elements32 = None
self._offset = index66
if elements32 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index66:self._offset], index66, elements32)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index68, elements33 = self._offset, []
address67 = FAILURE
chunk66, max66 = None, self._offset + 3
if max66 <= self._input_size:
chunk66 = self._input[self._offset:max66]
if chunk66 == 'isa':
address67 = TreeNode(self._input[self._offset:self._offset + 3], self._offset, [])
self._offset = self._offset + 3
else:
address67 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'isa\'')
if address67 is not FAILURE:
elements33.append(address67)
address68 = FAILURE
index69 = self._offset
chunk67, max67 = None, self._offset + 1
if max67 <= self._input_size:
chunk67 = self._input[self._offset:max67]
if chunk67 is not None and Grammar.REGEX_38.search(chunk67):
address68 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address68 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
| |
'^n'),
'|', '^d'),
'~')
FROM db_1.schema_1.[Animal] AS [Animal_4]
JOIN db_1.schema_1.[Animal] AS [Animal_3]
ON [Animal_4].uuid = [Animal_3].parent
WHERE [Animal_2].parent = [Animal_4].uuid
FOR XML PATH ('')
), '') AS fold_output_name
FROM db_1.schema_1.[Animal] AS [Animal_2]) AS folded_subquery_1
ON [Animal_1].uuid = folded_subquery_1.uuid
"""
expected_cypher = """
MATCH (Animal___1:Animal)
OPTIONAL MATCH (Animal___1)<-[:Animal_ParentOf]-(Animal__in_Animal_ParentOf___1:Animal)
OPTIONAL MATCH
(Animal__in_Animal_ParentOf___1)-[:Animal_ParentOf]->
(Animal__in_Animal_ParentOf__out_Animal_ParentOf___1:Animal)
WITH
Animal___1 AS Animal___1,
collect(Animal__in_Animal_ParentOf___1) AS
collected_Animal__in_Animal_ParentOf___1,
collect(Animal__in_Animal_ParentOf__out_Animal_ParentOf___1) AS
collected_Animal__in_Animal_ParentOf__out_Animal_ParentOf___1
RETURN
Animal___1.name AS `animal_name`,
[x IN collected_Animal__in_Animal_ParentOf__out_Animal_ParentOf___1 | x.name] AS
`sibling_and_self_names_list`
"""
expected_postgresql = """
SELECT
"Animal_1".name AS animal_name,
coalesce(folded_subquery_1.fold_output_name, ARRAY[]::VARCHAR[])
AS sibling_and_self_names_list
FROM
schema_1."Animal" AS "Animal_1"
JOIN (
SELECT
"Animal_2".uuid AS uuid,
array_agg("Animal_3".name) AS fold_output_name
FROM schema_1."Animal" AS "Animal_2"
JOIN schema_1."Animal" AS "Animal_4"
ON "Animal_2".parent = "Animal_4".uuid
JOIN schema_1."Animal" AS "Animal_3"
ON "Animal_4".uuid = "Animal_3".parent
GROUP BY "Animal_2".uuid
) AS folded_subquery_1
ON "Animal_1".uuid = folded_subquery_1.uuid
"""
check_test_data(
self,
test_data,
expected_match,
expected_gremlin,
expected_mssql,
expected_cypher,
expected_postgresql,
)
def test_fold_and_deep_traverse(self) -> None:
    """Compile a fold whose folded scope traverses two edges deep.

    The query folds Animal's in_Animal_ParentOf edge and, inside the fold,
    traverses out_Animal_ParentOf then out_Animal_OfSpecies, outputting the
    species names. Verifies the compiled MATCH, Gremlin, MSSQL, Cypher, and
    PostgreSQL queries against golden strings via check_test_data.
    """
    test_data = test_input_data.fold_and_deep_traverse()

    expected_match = """
        SELECT
            Animal___1.name AS `animal_name`,
            $Animal___1___in_Animal_ParentOf.name AS `sibling_and_self_species_list`
        FROM (
            MATCH {{
                class: Animal,
                as: Animal___1
            }}
            RETURN $matches
        ) LET
            $Animal___1___in_Animal_ParentOf =
                Animal___1.in("Animal_ParentOf")
                .out("Animal_ParentOf")
                .out("Animal_OfSpecies")
                .asList()
    """
    expected_gremlin = """
        g.V('@class', 'Animal')
        .as('Animal___1')
        .transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
            animal_name: m.Animal___1.name,
            sibling_and_self_species_list: (
                (m.Animal___1.in_Animal_ParentOf == null) ? [] : (
                    m.Animal___1.in_Animal_ParentOf
                    .collect{entry -> entry.outV.next()}
                    .collectMany{
                        entry -> entry.out_Animal_ParentOf
                        .collect{edge -> edge.inV.next()}
                    }
                    .collectMany{
                        entry -> entry.out_Animal_OfSpecies
                        .collect{edge -> edge.inV.next()}
                    }
                    .collect{entry -> entry.name}
                )
            )
        ])}
    """
    # MSSQL folds are emitted as a correlated XML PATH subquery; '^', '~' and
    # '|' in values are escaped so '|' can act as the list separator.
    expected_mssql = """
        SELECT
            [Animal_1].name AS animal_name,
            folded_subquery_1.fold_output_name AS sibling_and_self_species_list
        FROM db_1.schema_1.[Animal] AS [Animal_1]
        JOIN (
            SELECT
                [Animal_2].uuid AS uuid,
                coalesce((
                    SELECT
                        '|' + coalesce(
                            REPLACE(
                                REPLACE(
                                    REPLACE([Species_1].name, '^', '^e'),
                                '~', '^n'),
                            '|', '^d'),
                        '~')
                    FROM db_1.schema_1.[Animal] AS [Animal_3]
                    JOIN db_1.schema_1.[Animal] AS [Animal_4]
                    ON [Animal_3].uuid = [Animal_4].parent
                    JOIN db_1.schema_1.[Species] AS [Species_1]
                    ON [Animal_4].species = [Species_1].uuid
                    WHERE [Animal_2].parent = [Animal_3].uuid
                    FOR XML PATH ('')
                ), '') AS fold_output_name
            FROM db_1.schema_1.[Animal] AS [Animal_2]
        ) AS folded_subquery_1
        ON [Animal_1].uuid = folded_subquery_1.uuid
    """
    expected_cypher = """
        MATCH (Animal___1:Animal)
        OPTIONAL MATCH (Animal___1)<-[:Animal_ParentOf]-(Animal__in_Animal_ParentOf___1:Animal)
        OPTIONAL MATCH
            (Animal__in_Animal_ParentOf___1)-[:Animal_ParentOf]->
            (Animal__in_Animal_ParentOf__out_Animal_ParentOf___1:Animal)
        OPTIONAL MATCH
            (Animal__in_Animal_ParentOf__out_Animal_ParentOf___1)-[:Animal_OfSpecies]->
            (Animal__in_Animal_ParentOf__out_Animal_ParentOf__out_Animal_OfSpecies___1:Species)
        WITH
            Animal___1 AS Animal___1,
            collect(Animal__in_Animal_ParentOf___1) AS
                collected_Animal__in_Animal_ParentOf___1,
            collect(Animal__in_Animal_ParentOf__out_Animal_ParentOf___1) AS
                collected_Animal__in_Animal_ParentOf__out_Animal_ParentOf___1,
            collect(Animal__in_Animal_ParentOf__out_Animal_ParentOf__out_Animal_OfSpecies___1) AS
                collected_Animal__in_Animal_ParentOf__out_Animal_ParentOf__out_Animal_OfSpecies___1
        RETURN
            Animal___1.name AS `animal_name`,
            [x IN
                collected_Animal__in_Animal_ParentOf__out_Animal_ParentOf__out_Animal_OfSpecies___1
                | x.name] AS `sibling_and_self_species_list`
    """
    expected_postgresql = """
        SELECT
            "Animal_1".name AS animal_name,
            coalesce(folded_subquery_1.fold_output_name, ARRAY[]::VARCHAR[])
                AS sibling_and_self_species_list
        FROM schema_1."Animal" AS "Animal_1"
        JOIN (
            SELECT
                "Animal_2".uuid AS uuid,
                array_agg("Species_1".name) AS fold_output_name
            FROM schema_1."Animal" AS "Animal_2"
            JOIN schema_1."Animal" AS "Animal_3"
            ON "Animal_2".parent = "Animal_3".uuid
            JOIN schema_1."Animal" AS "Animal_4"
            ON "Animal_3".uuid = "Animal_4".parent
            JOIN schema_1."Species" AS "Species_1"
            ON "Animal_4".species = "Species_1".uuid
            GROUP BY "Animal_2".uuid
        ) AS folded_subquery_1
        ON "Animal_1".uuid = folded_subquery_1.uuid
    """

    check_test_data(
        self,
        test_data,
        expected_match,
        expected_gremlin,
        expected_mssql,
        expected_cypher,
        expected_postgresql,
    )
def test_traverse_and_fold_and_traverse(self) -> None:
    """Compile a traversal into a fold whose scope itself traverses further.

    The query traverses in_Animal_ParentOf, then folds out_Animal_ParentOf
    and traverses out_Animal_OfSpecies inside the fold. Verifies MATCH,
    Gremlin, MSSQL, Cypher, and PostgreSQL compilation against golden
    strings via check_test_data.
    """
    test_data = test_input_data.traverse_and_fold_and_traverse()

    expected_match = """
        SELECT
            Animal___1.name AS `animal_name`,
            $Animal__in_Animal_ParentOf___1___out_Animal_ParentOf.name
                AS `sibling_and_self_species_list`
        FROM (
            MATCH {{
                class: Animal,
                as: Animal___1
            }}.in('Animal_ParentOf') {{
                class: Animal,
                as: Animal__in_Animal_ParentOf___1
            }}
            RETURN $matches
        ) LET
            $Animal__in_Animal_ParentOf___1___out_Animal_ParentOf =
                Animal__in_Animal_ParentOf___1
                .out("Animal_ParentOf")
                .out("Animal_OfSpecies").asList()
    """
    expected_gremlin = """
        g.V('@class', 'Animal')
        .as('Animal___1')
        .in('Animal_ParentOf')
        .as('Animal__in_Animal_ParentOf___1')
        .back('Animal___1')
        .transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
            animal_name: m.Animal___1.name,
            sibling_and_self_species_list: (
                (m.Animal__in_Animal_ParentOf___1.out_Animal_ParentOf == null) ? [] : (
                    m.Animal__in_Animal_ParentOf___1.out_Animal_ParentOf
                    .collect{
                        entry -> entry.inV.next()
                    }
                    .collectMany{
                        entry -> entry.out_Animal_OfSpecies
                        .collect{
                            edge -> edge.inV.next()
                        }
                    }
                    .collect{entry -> entry.name}
                ))
        ])}
    """
    # The pre-fold traversal becomes a plain JOIN; only the folded scope is
    # compiled into the correlated XML PATH subquery.
    expected_mssql = """
        SELECT
            [Animal_1].name AS animal_name,
            folded_subquery_1.fold_output_name AS sibling_and_self_species_list
        FROM db_1.schema_1.[Animal] AS [Animal_1]
        JOIN db_1.schema_1.[Animal] AS [Animal_2]
        ON [Animal_1].parent = [Animal_2].uuid
        JOIN (
            SELECT
                [Animal_3].uuid AS uuid,
                coalesce((
                    SELECT
                        '|' + coalesce(
                            REPLACE(
                                REPLACE(
                                    REPLACE([Species_1].name, '^', '^e'),
                                '~', '^n'),
                            '|', '^d'),
                        '~')
                    FROM db_1.schema_1.[Animal] AS [Animal_4]
                    JOIN db_1.schema_1.[Species] AS [Species_1]
                    ON [Animal_4].species = [Species_1].uuid
                    WHERE [Animal_3].uuid = [Animal_4].parent
                    FOR XML PATH ('')
                ), '') AS fold_output_name
            FROM db_1.schema_1.[Animal] AS [Animal_3]
        ) AS folded_subquery_1 ON [Animal_2].uuid = folded_subquery_1.uuid
    """
    expected_cypher = """
        MATCH (Animal___1:Animal)
        MATCH (Animal___1)<-[:Animal_ParentOf]-(Animal__in_Animal_ParentOf___1:Animal)
        OPTIONAL MATCH
            (Animal__in_Animal_ParentOf___1)-[:Animal_ParentOf]->
            (Animal__in_Animal_ParentOf__out_Animal_ParentOf___1:Animal)
        OPTIONAL MATCH
            (Animal__in_Animal_ParentOf__out_Animal_ParentOf___1)-[:Animal_OfSpecies]->
            (Animal__in_Animal_ParentOf__out_Animal_ParentOf__out_Animal_OfSpecies___1:Species)
        WITH
            Animal___1 AS Animal___1,
            Animal__in_Animal_ParentOf___1 AS Animal__in_Animal_ParentOf___1,
            collect(Animal__in_Animal_ParentOf__out_Animal_ParentOf___1) AS
                collected_Animal__in_Animal_ParentOf__out_Animal_ParentOf___1,
            collect(Animal__in_Animal_ParentOf__out_Animal_ParentOf__out_Animal_OfSpecies___1) AS
                collected_Animal__in_Animal_ParentOf__out_Animal_ParentOf__out_Animal_OfSpecies___1
        RETURN
            Animal___1.name AS `animal_name`,
            [x IN
                collected_Animal__in_Animal_ParentOf__out_Animal_ParentOf__out_Animal_OfSpecies___1
                | x.name] AS `sibling_and_self_species_list`
    """
    expected_postgresql = """
        SELECT
            "Animal_1".name AS animal_name,
            coalesce(folded_subquery_1.fold_output_name, ARRAY[]::VARCHAR[])
                AS sibling_and_self_species_list
        FROM schema_1."Animal" AS "Animal_1"
        JOIN schema_1."Animal" AS "Animal_2"
        ON "Animal_1".parent = "Animal_2".uuid
        JOIN (
            SELECT
                "Animal_3".uuid AS uuid,
                array_agg("Species_1".name) AS fold_output_name
            FROM schema_1."Animal" AS "Animal_3"
            JOIN schema_1."Animal" AS "Animal_4"
            ON "Animal_3".uuid = "Animal_4".parent
            JOIN schema_1."Species" AS "Species_1"
            ON "Animal_4".species = "Species_1".uuid
            GROUP BY "Animal_3".uuid
        ) AS folded_subquery_1
        ON "Animal_2".uuid = folded_subquery_1.uuid
    """

    check_test_data(
        self,
        test_data,
        expected_match,
        expected_gremlin,
        expected_mssql,
        expected_cypher,
        expected_postgresql,
    )
def test_multiple_outputs_in_same_fold(self) -> None:
    """Compile a fold that emits two outputs (name and uuid) from one scope.

    MSSQL is expected to raise NotImplementedError, since its XML PATH fold
    encoding supports only a single output column per fold; the other
    backends are checked against golden query strings via check_test_data.
    """
    test_data = test_input_data.multiple_outputs_in_same_fold()

    expected_match = """
        SELECT
            Animal___1.name AS `animal_name`,
            $Animal___1___out_Animal_ParentOf.name AS `child_names_list`,
            $Animal___1___out_Animal_ParentOf.uuid AS `child_uuids_list`
        FROM (
            MATCH {{
                class: Animal,
                as: Animal___1
            }}
            RETURN $matches
        ) LET
            $Animal___1___out_Animal_ParentOf = Animal___1.out("Animal_ParentOf").asList()
    """
    expected_gremlin = """
        g.V('@class', 'Animal')
        .as('Animal___1')
        .transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
            animal_name: m.Animal___1.name,
            child_names_list: (
                (m.Animal___1.out_Animal_ParentOf == null) ? [] : (
                    m.Animal___1.out_Animal_ParentOf
                    .collect{entry -> entry.inV.next().name}
                )
            ),
            child_uuids_list: (
                (m.Animal___1.out_Animal_ParentOf == null) ? [] : (
                    m.Animal___1.out_Animal_ParentOf
                    .collect{entry -> entry.inV.next().uuid}
                )
            )
        ])}
    """
    # Multiple outputs per fold are unsupported in the MSSQL backend.
    expected_mssql = NotImplementedError
    expected_cypher = """
        MATCH (Animal___1:Animal)
        OPTIONAL MATCH (Animal___1)-[:Animal_ParentOf]->(Animal__out_Animal_ParentOf___1:Animal)
        WITH
            Animal___1 AS Animal___1,
            collect(Animal__out_Animal_ParentOf___1) AS collected_Animal__out_Animal_ParentOf___1
        RETURN
            Animal___1.name AS `animal_name`,
            [x IN collected_Animal__out_Animal_ParentOf___1 | x.name] AS `child_names_list`,
            [x IN collected_Animal__out_Animal_ParentOf___1 | x.uuid] AS `child_uuids_list`
    """
    expected_postgresql = """
        SELECT
            "Animal_1".name AS animal_name,
            coalesce(folded_subquery_1.fold_output_name, ARRAY[]::VARCHAR[])
                AS child_names_list,
            coalesce(folded_subquery_1.fold_output_uuid, ARRAY[]::VARCHAR[]) AS child_uuids_list
        FROM schema_1."Animal" AS "Animal_1"
        JOIN (
            SELECT
                "Animal_2".uuid AS uuid,
                array_agg("Animal_3".uuid) AS fold_output_uuid,
                array_agg("Animal_3".name) AS fold_output_name
            FROM schema_1."Animal" AS "Animal_2"
            JOIN schema_1."Animal" AS "Animal_3"
            ON "Animal_2".uuid = "Animal_3".parent
            GROUP BY "Animal_2".uuid
        ) AS folded_subquery_1
        ON "Animal_1".uuid = folded_subquery_1.uuid
    """

    check_test_data(
        self,
        test_data,
        expected_match,
        expected_gremlin,
        expected_mssql,
        expected_cypher,
        expected_postgresql,
    )
def test_multiple_outputs_in_same_fold_and_traverse(self) -> None:
    """Compile a fold with an inner traversal that emits two outputs.

    The query folds in_Animal_ParentOf, traverses out_Animal_ParentOf inside
    the fold, and outputs both name and uuid. MSSQL is expected to raise
    NotImplementedError (single fold output only); the other backends are
    checked against golden query strings via check_test_data.
    """
    test_data = test_input_data.multiple_outputs_in_same_fold_and_traverse()

    expected_match = """
        SELECT
            Animal___1.name AS `animal_name`,
            $Animal___1___in_Animal_ParentOf.name AS `sibling_and_self_names_list`,
            $Animal___1___in_Animal_ParentOf.uuid AS `sibling_and_self_uuids_list`
        FROM (
            MATCH {{
                class: Animal,
                as: Animal___1
            }}
            RETURN $matches
        ) LET
            $Animal___1___in_Animal_ParentOf =
                Animal___1.in("Animal_ParentOf").out("Animal_ParentOf").asList()
    """
    expected_gremlin = """
        g.V('@class', 'Animal')
        .as('Animal___1')
        .transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
            animal_name: m.Animal___1.name,
            sibling_and_self_names_list:
                ((m.Animal___1.in_Animal_ParentOf == null) ? [] : (
                    m.Animal___1.in_Animal_ParentOf
                    .collect{entry -> entry.outV.next()}
                    .collectMany{
                        entry -> entry.out_Animal_ParentOf
                        .collect{
                            edge -> edge.inV.next()
                        }
                    }
                    .collect{entry -> entry.name}
                )),
            sibling_and_self_uuids_list:
                ((m.Animal___1.in_Animal_ParentOf == null) ? [] : (
                    m.Animal___1.in_Animal_ParentOf
                    .collect{entry -> entry.outV.next()}
                    .collectMany{
                        entry -> entry.out_Animal_ParentOf
                        .collect{
                            edge -> edge.inV.next()
                        }
                    }
                    .collect{entry -> entry.uuid}
                ))
        ])}
    """
    # Multiple outputs per fold are unsupported in the MSSQL backend.
    expected_mssql = NotImplementedError
    expected_cypher = """
        MATCH (Animal___1:Animal)
        OPTIONAL MATCH (Animal___1)<-[:Animal_ParentOf]-(Animal__in_Animal_ParentOf___1:Animal)
        OPTIONAL MATCH
            (Animal__in_Animal_ParentOf___1)-[:Animal_ParentOf]->
            (Animal__in_Animal_ParentOf__out_Animal_ParentOf___1:Animal)
        WITH
            Animal___1 AS Animal___1,
            collect(Animal__in_Animal_ParentOf___1) AS collected_Animal__in_Animal_ParentOf___1,
            collect(Animal__in_Animal_ParentOf__out_Animal_ParentOf___1) AS
                collected_Animal__in_Animal_ParentOf__out_Animal_ParentOf___1
        RETURN
            Animal___1.name AS `animal_name`,
            [x IN collected_Animal__in_Animal_ParentOf__out_Animal_ParentOf___1 | x.name] AS
                `sibling_and_self_names_list`,
            [x IN collected_Animal__in_Animal_ParentOf__out_Animal_ParentOf___1 | x.uuid] AS
                `sibling_and_self_uuids_list`
    """
    expected_postgresql = """
        SELECT
            "Animal_1".name AS animal_name,
            coalesce(folded_subquery_1.fold_output_name, ARRAY[]::VARCHAR[])
                AS sibling_and_self_names_list,
            coalesce(folded_subquery_1.fold_output_uuid, ARRAY[]::VARCHAR[])
                AS sibling_and_self_uuids_list
        FROM schema_1."Animal" AS "Animal_1"
        JOIN (
            SELECT
                "Animal_2".uuid AS uuid,
                array_agg("Animal_3".uuid) AS fold_output_uuid,
                array_agg("Animal_3".name) AS fold_output_name
            FROM schema_1."Animal" AS "Animal_2"
            JOIN schema_1."Animal" AS "Animal_4"
            ON "Animal_2".parent = "Animal_4".uuid
            JOIN schema_1."Animal" AS "Animal_3"
            ON "Animal_4".uuid = "Animal_3".parent
            GROUP BY "Animal_2".uuid
        ) AS folded_subquery_1
        ON "Animal_1".uuid = folded_subquery_1.uuid
    """

    check_test_data(
        self,
        test_data,
        expected_match,
        expected_gremlin,
        expected_mssql,
        expected_cypher,
        expected_postgresql,
    )
def test_multiple_folds(self) -> None:
test_data = test_input_data.multiple_folds()
expected_match = """
SELECT
Animal___1.name AS `animal_name`,
$Animal___1___out_Animal_ParentOf.name AS `child_names_list`,
$Animal___1___out_Animal_ParentOf.uuid AS `child_uuids_list`,
$Animal___1___in_Animal_ParentOf.name AS `parent_names_list`,
$Animal___1___in_Animal_ParentOf.uuid AS `parent_uuids_list`
FROM (
MATCH {{
class: Animal,
as: Animal___1
}}
RETURN $matches
) LET
$Animal___1___in_Animal_ParentOf = Animal___1.in("Animal_ParentOf").asList(),
$Animal___1___out_Animal_ParentOf = Animal___1.out("Animal_ParentOf").asList()
"""
expected_gremlin = """
g.V('@class', 'Animal')
.as('Animal___1')
.transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
animal_name: m.Animal___1.name,
child_names_list: (
(m.Animal___1.out_Animal_ParentOf == null) ? [] : (
m.Animal___1.out_Animal_ParentOf.collect{entry -> entry.inV.next().name}
)
),
child_uuids_list: (
(m.Animal___1.out_Animal_ParentOf == null) ? [] : (
m.Animal___1.out_Animal_ParentOf.collect{entry -> entry.inV.next().uuid}
)
),
parent_names_list: (
(m.Animal___1.in_Animal_ParentOf == null) ? [] : (
m.Animal___1.in_Animal_ParentOf.collect{entry -> entry.outV.next().name}
)
),
parent_uuids_list: (
(m.Animal___1.in_Animal_ParentOf == null) ? [] : (
m.Animal___1.in_Animal_ParentOf.collect{entry -> entry.outV.next().uuid}
)
)
])}
"""
expected_mssql = NotImplementedError
expected_cypher = """
MATCH (Animal___1:Animal)
OPTIONAL MATCH (Animal___1)<-[:Animal_ParentOf]-(Animal__in_Animal_ParentOf___1:Animal)
WITH
Animal___1 AS Animal___1,
collect(Animal__in_Animal_ParentOf___1) AS collected_Animal__in_Animal_ParentOf___1
OPTIONAL MATCH (Animal___1)-[:Animal_ParentOf]->(Animal__out_Animal_ParentOf___1:Animal)
WITH
Animal___1 AS Animal___1,
collected_Animal__in_Animal_ParentOf___1 AS collected_Animal__in_Animal_ParentOf___1,
collect(Animal__out_Animal_ParentOf___1) AS collected_Animal__out_Animal_ParentOf___1
RETURN
Animal___1.name AS `animal_name`,
[x IN collected_Animal__out_Animal_ParentOf___1 | x.name] AS `child_names_list`,
[x IN collected_Animal__out_Animal_ParentOf___1 | x.uuid] AS `child_uuids_list`,
[x IN collected_Animal__in_Animal_ParentOf___1 | x.name] AS `parent_names_list`,
[x IN collected_Animal__in_Animal_ParentOf___1 | x.uuid] AS `parent_uuids_list`
"""
expected_postgresql = """
| |
not in self.table_featurizer:
print("table: {} not found in featurizer".format(table))
# assert False
continue
# Note: same table might be set to 1.0 twice, in case of aliases
tfeats[self.table_featurizer[table]] = 1.00
if self.sample_bitmap:
assert bitmaps is not None
startidx = len(self.table_featurizer)
sb = bitmaps[(alias,)]
if self.sample_bitmap_key in sb:
bitmap = sb[self.sample_bitmap_key]
if self.feat_onlyseen_preds:
if table not in self.seen_bitmaps:
print(table, " not in seen bitmaps")
# pdb.set_trace()
continue
train_seenvals = self.seen_bitmaps[table]
for val in bitmap:
if self.feat_onlyseen_preds:
if val not in train_seenvals:
# print(" {} not in seen bitmaps for {}".format(val, table))
continue
# else:
# print(" {} in seen bitmaps for {}".format(val, table))
bitmapidx = val % self.sample_bitmap_buckets
tfeats[startidx+bitmapidx] = 1.0
else:
bitmapidx = val % self.sample_bitmap_buckets
tfeats[startidx+bitmapidx] = 1.0
alltablefeats.append(tfeats)
featdict["table"] = alltablefeats
alljoinfeats = []
if self.join_features:
## this would imply the bitmap is the only feature
if not self.join_bitmap:
seenjoins = set()
for alias1 in subplan:
for alias2 in subplan:
ekey = (alias1, alias2)
if ekey in joingraph.edges():
join_str = joingraph.edges()[ekey]["join_condition"]
join_str = self.join_str_to_real_join(join_str)
if join_str in seenjoins:
continue
if join_str not in self.seen_joins:
continue
seenjoins.add(join_str)
jfeats = self._handle_join_features(join_str)
alljoinfeats.append(jfeats)
if self.join_bitmap:
jfeats = self._handle_join_bitmaps(subplan,
join_bitmaps, bitmaps, joingraph)
alljoinfeats += jfeats
if len(alljoinfeats) == 0:
alljoinfeats.append(np.zeros(self.join_features_len))
featdict["join"] = alljoinfeats
allpredfeats = []
for alias in subplan:
if not self.pred_features:
continue
aliasinfo = joingraph.nodes()[alias]
if len(aliasinfo["pred_cols"]) == 0:
continue
node_key = tuple([alias])
if self.true_base_cards:
alias_est = self._get_true_est(subsetgraph.nodes()[node_key])
else:
alias_est = self._get_pg_est(subsetgraph.nodes()[node_key])
subp_est = self._get_pg_est(subsetgraph.nodes()[subplan])
if self.card_type == "joinkey":
# TODO: find appropriate ones for this
alias_est = alias_est
assert alias_est <= 1.0
subp_est = 0.0
seencols = set()
for ci, col in enumerate(aliasinfo["pred_cols"]):
# we should have updated self.column_stats etc. to be appropriately
# updated
if not self.feat_separate_alias:
col = ''.join([ck for ck in col if not ck.isdigit()])
if col not in self.column_stats:
# print("col: {} not found in column stats".format(col))
# assert False
continue
allvals = aliasinfo["pred_vals"][ci]
if isinstance(allvals, dict):
allvals = allvals["literal"]
cmp_op = aliasinfo["pred_types"][ci]
# if jobquery and "like" in cmp_op.lower():
# # print("skipping featurizing likes for JOB")
# continue
continuous = self.column_stats[col]["continuous"]
if continuous and not isinstance(allvals, list):
# FIXME: hack for jobm queries like = '1997'
# print("Hacking around jobM: ", allvals)
allvals = [allvals, allvals]
pfeats = self._handle_single_col(col,allvals,
alias_est, subp_est,
cmp_op,
continuous, jobquery=jobquery)
if continuous:
pass
# print(aliasinfo)
# print(allvals)
# print(pfeats)
allpredfeats += pfeats
## FIXME: need to test this
# using mcvs for implied preds
for alias in subplan:
if not self.implied_pred_features:
continue
aliasinfo = joingraph.nodes()[alias]
if not ("implied_pred_cols" in aliasinfo and \
len(aliasinfo["implied_pred_cols"]) > 0):
continue
node_key = tuple([alias])
alias_est = 0.0
subp_est = self._get_pg_est(subsetgraph.nodes()[subplan])
for ci, col in enumerate(aliasinfo["implied_pred_cols"]):
implied_pred_from = aliasinfo["implied_pred_from"][ci]
implied_pred_alias = implied_pred_from[0:implied_pred_from.find(".")]
# implied_pred only matters if this table also in the join
if implied_pred_alias not in subplan:
continue
cmp_op = "in"
continuous = False
allvals = aliasinfo["implied_pred_vals"][ci]
if isinstance(allvals, dict):
allvals = allvals["literal"]
pfeats = self._handle_single_col(col,allvals,
alias_est, subp_est,
cmp_op,
continuous)
allpredfeats += pfeats
if len(allpredfeats) == 0:
allpredfeats.append(np.zeros(self.max_pred_len))
assert len(allpredfeats) <= self.max_pred_vals
featdict["pred"] = allpredfeats
flow_features = []
if self.flow_features:
flow_features = self.get_flow_features(subplan,
qrep["subset_graph"], qrep["template_name"],
qrep["join_graph"], subset_edge=subset_edge)
featdict["flow"] = flow_features
return featdict
def get_subplan_features_combined(self, qrep, subplan, bitmaps=None):
    """Build a single flat 1-D feature vector for one subplan of a query.

    Concatenates (in order, each gated by a self.*_features flag):
    table one-hot, join one-hot, per-column predicate features, and flow
    features.  `subplan` is a tuple of table aliases; `qrep` is the query
    representation holding "join_graph" and "subset_graph".
    `bitmaps` is accepted for interface parity but unused here.
    Returns a 1-D np.ndarray (np.concatenate of the enabled sections).
    """
    assert isinstance(subplan, tuple)
    featvectors = []

    # we need the joingraph here because all the information about
    # predicate filters etc. on each of the individual tables is stored in
    # the joingraph; subsetgraph stores just the names of the
    # tables/aliases involved in a join
    subsetgraph = qrep["subset_graph"]
    joingraph = qrep["join_graph"]

    if self.table_features:
        tfeats = np.zeros(self.table_features_len)
        ## table features
        # loop over each node, update the tfeats bitvector
        for alias in subplan:
            # need to find its real table name from the join_graph
            table = joingraph.nodes()[alias]["real_name"]
            # skip tables never seen during training
            if table not in self.seen_tabs:
                # print("Skipping table featurization")
                continue
            if table not in self.table_featurizer:
                print("table: {} not found in featurizer".format(table))
                continue
            # Note: same table might be set to 1.0 twice, in case of aliases
            tfeats[self.table_featurizer[table]] = 1.00
        featvectors.append(tfeats)

    if self.join_features:
        ## join features
        jfeats = np.zeros(len(self.joins))
        # check every ordered alias pair for a join edge in the join graph
        for alias1 in subplan:
            for alias2 in subplan:
                ekey = (alias1, alias2)
                if ekey in joingraph.edges():
                    join_str = joingraph.edges()[ekey]["join_condition"]
                    # canonicalize "a=b" by sorting the two sides
                    keys = join_str.split("=")
                    keys.sort()
                    keys = ",".join(keys)
                    if keys not in self.join_featurizer:
                        # print("join_str: {} not found in featurizer".format(join_str))
                        continue
                    jfeats[self.join_featurizer[keys]] = 1.00
        featvectors.append(jfeats)

    ## predicate filter features
    if self.pred_features:
        pfeats = np.zeros(self.pred_features_len)
        for alias in subplan:
            aliasinfo = joingraph.nodes()[alias]
            if len(aliasinfo["pred_cols"]) == 0:
                continue
            # FIXME: only supporting 1 predicate per column right now ---
            # that's all we had in CEB. Supporting an arbitrary number of
            # predicates can be messy with a fixed featurization scheme to
            # flatten into a 1d array; Presumably, this assumes a known
            # workload, and so we could `reserve` additional spaces for each
            # known predicate on a column
            col = aliasinfo["pred_cols"][0]
            val = aliasinfo["pred_vals"][0]
            # FIXME: should handle this at the level of parsing
            if isinstance(val, dict):
                val = val["literal"]
            cmp_op = aliasinfo["pred_types"][0]

            if col not in self.featurizer:
                # print("col: {} not found in featurizer".format(col))
                continue

            # layout per column: [cmp-op one-hot | value features | pg est]
            cmp_op_idx, num_vals, continuous = self.featurizer[col]
            cmp_idx = self.cmp_ops_onehot[cmp_op]
            pfeats[cmp_op_idx+cmp_idx] = 1.00

            pred_idx_start = cmp_op_idx + len(self.cmp_ops)

            if continuous:
                self._handle_continuous_feature(pfeats, pred_idx_start,
                        col, val)
            else:
                if "like" in cmp_op:
                    self._handle_ilike_feature(pfeats, pred_idx_start,
                            col, val)
                else:
                    self._handle_categorical_feature(pfeats, pred_idx_start,
                            col, val)

            # remaining values after the cmp_op feature
            num_pred_vals = num_vals - len(self.cmp_ops)
            # add the appropriate postgresql estimate for this table in the
            # subplan
            if self.heuristic_features:
                # last slot reserved for the single-table pg estimate
                assert pfeats[pred_idx_start + num_pred_vals-1] == 0.0
                node_key = tuple([alias])
                subp_est = self._get_pg_est(subsetgraph.nodes()[node_key])
                pfeats[pred_idx_start + num_pred_vals-1] = subp_est

        # Add the postgres heuristic estimate for the whole subplan as a
        # feature to the predicate feature vector.
        if self.heuristic_features:
            subp_est = self._get_pg_est(subsetgraph.nodes()[subplan])
            assert pfeats[-1] == 0.0
            pfeats[-1] = subp_est
        featvectors.append(pfeats)

    if self.flow_features:
        flow_features = self.get_flow_features(subplan,
                qrep["subset_graph"], qrep["template_name"],
                qrep["join_graph"])
        featvectors.append(flow_features)

    feat = np.concatenate(featvectors)
    return feat
def get_subplan_features_joinkey(self, qrep, subset_node, subset_edge,
        bitmaps=None,
        join_bitmaps=None):
    """Featurize a subplan for join-key cardinality estimation.

    Returns (x, y): x is the "set" featurization of `subset_node`
    (with `subset_edge` context), and y is the normalized true
    cardinality of the edge's join key.

    Raises AssertionError if the edge has no "join_key_cardinality"
    annotation or if self.featurization_type is not "set".
    """
    einfo = qrep["subset_graph"].edges()[subset_edge]
    if "join_key_cardinality" not in einfo:
        # Fix: this used to print debug output and drop into
        # pdb.set_trace() before a bare `assert False` -- which hangs
        # non-interactive runs and is stripped under `python -O`.
        # Raise an explicit, informative error instead (still an
        # AssertionError, so existing callers see the same type).
        raise AssertionError(
                "missing 'join_key_cardinality' on subset edge {}: {}"\
                        .format(subset_edge, einfo))

    joincols = list(einfo["join_key_cardinality"].keys())
    # assumption: all joins on same column
    joincol = joincols[0]

    # joinkey labels are only defined for the "set" featurization
    assert self.featurization_type == "set"
    x = self.get_subplan_features_set(qrep,
            subset_node, bitmaps=bitmaps,
            join_bitmaps=join_bitmaps,
            subset_edge=subset_edge)

    # y-stuff: normalize the true join-key cardinality for the label
    true_val = einfo["join_key_cardinality"][joincol]["actual"]
    y = self.normalize_val(true_val, None)

    return x,y
def get_subplan_features(self, qrep, node, bitmaps=None,
        join_bitmaps=None):
    """Return the (features, label) pair for one subplan node.

    Dispatches on self.featurization_type: "combined" yields a single
    flat vector, "set" yields per-entity feature lists. The label is
    the subplan's true cardinality (defaulting to 1.0 when withheld),
    normalized via self.normalize_val.
    """
    # the x-vector shape depends on combined v/s set featurization
    if self.featurization_type == "combined":
        x = self.get_subplan_features_combined(qrep, node,
                bitmaps=bitmaps)
    else:
        # only "combined" and "set" are supported
        assert self.featurization_type == "set"
        x = self.get_subplan_features_set(qrep, node,
                bitmaps=bitmaps,
                join_bitmaps=join_bitmaps)

    ## choosing the y values
    cardinfo = qrep["subset_graph"].nodes()[node][self.ckey]
    # true values may be withheld (e.g., MLSys competition data);
    # fall back to a dummy label of 1.0 in that case
    true_val = cardinfo["actual"] if "actual" in cardinfo else 1.0
    total = cardinfo["total"] if "total" in cardinfo else None
    y = self.normalize_val(true_val, total)

    return x, y
def get_onehot_bucket(self, num_buckets, base, val):
    """Map `val` (>= 1.0) to a logarithmic bucket index.

    Bucket i covers the half-open interval [base**i, base**(i+1));
    values at or above base**num_buckets fall into the overflow bucket
    `num_buckets`.

    Fix: the original used strict inequalities on both ends
    (`val > base**i and val < base**(i+1)`), so any value exactly equal
    to a power of `base` -- including the asserted minimum of 1.0 --
    matched no bucket and incorrectly landed in the overflow bucket.
    The lower bound is now inclusive.
    """
    assert val >= 1.0
    for i in range(num_buckets):
        if base ** i <= val < base ** (i + 1):
            return i
    # overflow bucket for values >= base**num_buckets
    return num_buckets
def get_flow_features(self, node, subsetg,
template_name, join_graph, subset_edge=None):
assert node != SOURCE_NODE
ckey = "cardinality"
flow_features = np.zeros(self.num_flow_features, dtype=np.float32)
cur_idx = 0
if self.card_type == "joinkey":
assert subset_edge is not None
einfo = subsetg.edges()[subset_edge]
joincols = list(einfo["join_key_cardinality"].keys())
# assumption: all joins on same column
joincol = joincols[0]
pg_join_est = einfo["join_key_cardinality"][joincol]["expected"]
pg_join_est = self.normalize_val(pg_join_est, None)
joincol = "".join([jc for jc in joincol if not jc.isdigit()])
realcol = JOIN_COL_MAP[joincol]
jcol_idx = self.real_join_col_mapping[realcol]
flow_features[cur_idx + jcol_idx] = 1.0
cur_idx += len(self.real_join_col_mapping)
# incoming edges
if self.flow_feat_degrees:
in_degree = subsetg.in_degree(node)
in_degree = min(in_degree, self.max_in_degree)
flow_features[cur_idx + in_degree] = 1.0
cur_idx += self.max_in_degree+1
# outgoing edges
out_degree = subsetg.out_degree(node)
out_degree = min(out_degree, self.max_out_degree)
flow_features[cur_idx + out_degree] = 1.0
cur_idx += self.max_out_degree+1
if self.flow_feat_tables:
# # num tables
max_table_idx = len(self.aliases)-1
nt = len(node)
# assert nt <= max_tables
nt = min(nt, max_table_idx)
flow_features[cur_idx + nt] = 1.0
cur_idx | |
"""
uncond_dcgan1 made with 64x64 images from https://s3.amazonaws.com/udipublic/acro.images.tgz for train.tar.gz
"""
import argparse
parser = argparse.ArgumentParser(description='train uncoditional dcgan')
parser.add_argument('--desc',
default='uncond_dcgan',
help='name to uniquely describe this run')
parser.add_argument('--path',
default='data/jpg.hdf5',
help='where to read fuel hdf5 data file with training')
parser.add_argument('--val', type=float,
default=0.,
help="what part of the training data to use for validation")
parser.add_argument('--model',
help='start from a pre-existing model.'
' The suffixes _gen_params.jl'
' and _discrim_params.jl'
' are added to the path you supply')
parser.add_argument('--batch', type=int,
default=128,
help='batch size')
parser.add_argument('-k', type=int,
default=0,
help='# of discrim updates for each gen update.'
' 0 - alternate > 0 more d, < 0 more g')
parser.add_argument('--maxk', type=int,
default=1,
help='max value for k')
parser.add_argument('--mink', type=int,
default=-1,
help='min value for k')
parser.add_argument('--l2d', type=float,
default=1.e-5,
help="discriminator l2")
parser.add_argument('--l2decay', type=float,
default=0.,
help="reduce l2d by 1-l2decay")
parser.add_argument('--l2step', type=float,
default=0.,
help="increase(decrease) discriminator's l2"
" when generator cost is above 1.3(below 0.9)")
parser.add_argument('--dropout', type=float,
default=0.,
help="discriminator dropout")
parser.add_argument('--lr', type=float,
default=0.0002,
help="initial learning rate for adam")
parser.add_argument('--lrstep', type=float,
default=1.,
help="increa/decrease g/d learning rate")
parser.add_argument('--dbn', action='store_false',
help='dont perfrom batch normalization on discriminator')
parser.add_argument('--db1', action='store_true',
help='add bias to first layer of discriminator')
parser.add_argument('--ngf', type=int,
default=128,
help='# of gen filters')
parser.add_argument('--ndf', type=int,
default=128,
help='# of discriminator filters')
parser.add_argument('--updates', type=int,
default=100,
help='compute score every n_updates')
parser.add_argument('-z', type=int,
default=100,
help='number of hidden variables')
parser.add_argument('--znorm', action='store_true',
help='normalize z values to unit sphere')
parser.add_argument('--generate', action='store_true',
help='generate sample png and gif')
parser.add_argument('--ngif', type=int, default=1,
help='# of png images to generate. If 1 then no gif')
parser.add_argument('--nvis2', type=int,
default=14,
help='number of rows/cols of sub-images to generate')
parser.add_argument('--generate_d', type=float, default=0.,
help="minimal discrimation score when generating samples")
parser.add_argument('--generate_c', type=float, default=0.,
help="minimal classification score when generating samples")
parser.add_argument('--generate_v', type=float,
help='generate sample along a random direction with this step size')
parser.add_argument('--classify', action='store_true',
help='classify target')
parser.add_argument('--onlyclassify', action='store_true',
help='just do classify target')
parser.add_argument('--seed', type=int,
default=123,
help='seed all random generators')
parser.add_argument('--filter_label', type=int,
help='take only training data with this label (does not work with classify')
parser.add_argument('--nepochs', type=int,
default=25,
help='total number of epochs')
parser.add_argument('--niter', type=int,
default=25,
help='# of iter at starting learning rate')
parser.add_argument('--start', type=int,
default=0,
help='If not 0 then start from this epoch after loading the last model')
args = parser.parse_args()
if args.onlyclassify:
args.classify = True
if args.classify:
assert args.filter_label is None, "you can't classify and limit your data to one lable"
if args.model is None and args.start > 0:
args.model = 'models/%s/%d'%(args.desc, args.start)
import random
random.seed(args.seed)
import numpy as np
np.random.seed(args.seed)
import sys
sys.path.append('..')
import os
import json
from time import sleep
from time import time
from tqdm import tqdm, trange
from matplotlib import pyplot as plt
from sklearn.externals import joblib
import theano
import theano.tensor as T
from theano.sandbox.cuda.dnn import dnn_conv
from lib import activations
from lib import updates
from lib import inits
from lib.vis import color_grid_vis
from lib.rng import py_rng, np_rng
from lib.ops import batchnorm, conv_cond_concat, deconv, dropout, l2normalize
from lib.metrics import nnc_score, nnd_score
from lib.theano_utils import floatX, sharedX
from lib.data_utils import OneHot, shuffle, iter_data, center_crop, patch
from load import streams
def transform(X):
    """Scale a batch of uint8 images from [0, 255] to floats in [-1, 1].

    Accepts channels-last (npx, npx, 3) or channels-first (3, npx, npx)
    batches; channels-last input is transposed to NCHW before scaling.
    """
    shape = X[0].shape
    assert shape in ((npx, npx, 3), (3, npx, npx))
    if shape == (npx, npx, 3):
        # move channels to the front: (N, H, W, C) -> (N, C, H, W)
        X = X.transpose(0, 3, 1, 2)
    return floatX(X / 127.5 - 1.)
def inverse_transform(X):
    """Map generator output in [-1, 1] back to [0, 1] images in NHWC layout."""
    images = X.reshape(-1, nc, npx, npx)
    images = images.transpose(0, 2, 3, 1)
    return (images + 1.) / 2.
# ---------------------------------------------------------------------------
# Training hyperparameters and run configuration (mostly derived from CLI args).
# ---------------------------------------------------------------------------
k = 0 # # of discrim updates for each gen update. 0 - alternate > 0 more d, < 0 more g
l2 = 1e-5 # l2 weight decay
l2d = args.l2d # discriminator l2
l2step = args.l2step # increase(decrease) discriminator l2 when generator cost is above 1.3(below 0.9)
margin = 0.3 # Dont optimize discriminator(generator) when classification error below margin(above 1-margin)
nvis2 = args.nvis2
nvis = nvis2*nvis2 # # of samples to visualize during training (a nvis2 x nvis2 grid)
b1 = 0.5 # momentum term of adam
nc = 3 # # of channels in image
nbatch = args.batch # # of examples in batch
npx = 64 # # of pixels width/height of images
nz = args.z # # of dim for Z
ngf = args.ngf # # of gen filters in first conv layer
ndf = args.ndf # # of discrim filters in first conv layer
nx = npx*npx*nc # # of dimensions in X
niter = args.niter # # of iter at starting learning rate
niter_decay = args.nepochs - niter # # of iter to linearly decay learning rate to zero
lr = args.lr # initial learning rate for adam
ntrain = None # # of examples to train on. None take all
ngif = args.ngif # # of images in a gif
desc = args.desc
# Per-run output directories keyed by the experiment description string.
model_dir = 'models/%s'%desc
samples_dir = 'samples/%s'%desc
if not os.path.exists('logs/'):
    os.makedirs('logs/')
if not os.path.exists(model_dir):
    os.makedirs(model_dir)
if not os.path.exists(samples_dir):
    os.makedirs(samples_dir)
###########################################
# data
if not args.generate:
    # Open the training/validation streams; filter_label optionally restricts
    # training to one class (incompatible with --classify, checked above).
    tr_data, tr_stream, val_stream, ntrain_s, nval_s = streams(ntrain=ntrain,
                                                               batch_size=args.batch,
                                                               path=args.path,
                                                               val = args.val,
                                                               filter_label=args.filter_label)
    if ntrain is None:
        ntrain = tr_data.num_examples
    print '# examples', tr_data.num_examples
    print '# training examples', ntrain_s
    print '# validation examples', nval_s
    tr_handle = tr_data.open()
    # First 10k examples: reference batch vaX for nearest-neighbour evaluation.
    vaX,labels = tr_data.get_data(tr_handle, slice(0, 10000))
    vaX = transform(vaX)
    means = labels.mean(axis=0)
    print('labels ',labels.shape,means,means[0]/means[1])
    # Next examples: vaY, scored against vaX with nearest-neighbour distance.
    # NOTE(review): assumes ntrain > 10000 -- confirm for small datasets.
    vaY,labels = tr_data.get_data(tr_handle, slice(10000, min(ntrain, 20000)))
    vaY = transform(vaY)
    va_nnd_1k = nnd_score(vaY.reshape((len(vaY),-1)), vaX.reshape((len(vaX),-1)), metric='euclidean')
    print 'va_nnd_1k = %.2f'%(va_nnd_1k)
    means = labels.mean(axis=0)
    print('labels ',labels.shape,means,means[0]/means[1])
#####################################
# shared variables
# Initializers follow DCGAN conventions: N(0, 0.02) for weights,
# N(1, 0.02) for batchnorm gains, zeros for biases.
gifn = inits.Normal(scale=0.02)
difn = inits.Normal(scale=0.02)
gain_ifn = inits.Normal(loc=1., scale=0.02)
bias_ifn = inits.Constant(c=0.)
# Generator parameters: project z to a 4x4 x (ngf*8) volume, then three
# deconv blocks (each with batchnorm gain g*/bias b*) and an output conv gwx.
gw = gifn((nz, ngf*8*4*4), 'gw')
gg = gain_ifn((ngf*8*4*4), 'gg')
gb = bias_ifn((ngf*8*4*4), 'gb')
gw2 = gifn((ngf*8, ngf*4, 5, 5), 'gw2')
gg2 = gain_ifn((ngf*4), 'gg2')
gb2 = bias_ifn((ngf*4), 'gb2')
gw3 = gifn((ngf*4, ngf*2, 5, 5), 'gw3')
gg3 = gain_ifn((ngf*2), 'gg3')
gb3 = bias_ifn((ngf*2), 'gb3')
gw4 = gifn((ngf*2, ngf, 5, 5), 'gw4')
gg4 = gain_ifn((ngf), 'gg4')
gb4 = bias_ifn((ngf), 'gb4')
gwx = gifn((ngf, nc, 5, 5), 'gwx')
# Discriminator parameters: four conv blocks and two linear output heads.
dw = difn((ndf, nc, 5, 5), 'dw')
db = bias_ifn((ndf), 'db')
dw2 = difn((ndf*2, ndf, 5, 5), 'dw2')
dg2 = gain_ifn((ndf*2), 'dg2')
db2 = bias_ifn((ndf*2), 'db2')
dw3 = difn((ndf*4, ndf*2, 5, 5), 'dw3')
dg3 = gain_ifn((ndf*4), 'dg3')
db3 = bias_ifn((ndf*4), 'db3')
dw4 = difn((ndf*8, ndf*4, 5, 5), 'dw4')
dg4 = gain_ifn((ndf*8), 'dg4')
db4 = bias_ifn((ndf*8), 'db4')
dwy = difn((ndf*8*4*4, 1), 'dwy')
# BUG FIX: the second output head was registered under the duplicate Theano
# name 'dwy'; give it its own name so saved/inspected parameter sets are
# unambiguous.
dwy1 = difn((ndf*8*4*4, 1), 'dwy1')
# models
relu = activations.Rectify()
sigmoid = activations.Sigmoid()
lrelu = activations.LeakyRectify()
tanh = activations.Tanh()
bce = T.nnet.binary_crossentropy
# generator model
gen_params = [gw, gg, gb, gw2, gg2, gb2, gw3, gg3, gb3, gw4, gg4, gb4, gwx]
def gen(Z, w, g, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx):
    """Map a batch of latent vectors Z to images in [-1, 1].

    Dense projection to a 4x4 x (ngf*8) volume, then three stride-2 deconv
    blocks (batchnorm + ReLU each), and a final stride-2 deconv with tanh.
    """
    out = relu(batchnorm(T.dot(Z, w), g=g, b=b))
    out = out.reshape((out.shape[0], ngf*8, 4, 4))
    out = relu(batchnorm(deconv(out, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    out = relu(batchnorm(deconv(out, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    out = relu(batchnorm(deconv(out, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
    return tanh(deconv(out, wx, subsample=(2, 2), border_mode=(2, 2)))
# discriminator model
"""
#old model
if args.dbn:
if args.db1:
print "Bias on layer 1 + batch normalization"
discrim_params = [dw, db, dw2, dg2, db2, dw3, dg3, db3, dw4, dg4, db4, dwy, dwy1]
def discrim(X, w, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy, wy1):
h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))+b.dimshuffle('x', 0, 'x', 'x'))
h = dropout(h, args.dropout)
h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
h2 = dropout(h2, args.dropout)
h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
h3 = dropout(h3, args.dropout)
h4 = lrelu(batchnorm(dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
h4 = dropout(h4, args.dropout)
h4 = T.flatten(h4, 2)
y = sigmoid(T.dot(h4, wy))
y1 = sigmoid(T.dot(h4, wy1))
return y, y1
else:
print "Batch normalization"
discrim_params = [dw, dw2, dg2, db2, dw3, dg3, db3, dw4, dg4, db4, dwy, dwy1]
def discrim(X, w, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy, wy1):
h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
h = dropout(h, args.dropout)
h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
h2 = dropout(h2, args.dropout)
h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
h3 = dropout(h3, args.dropout)
h4 = lrelu(batchnorm(dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
h4 = dropout(h4, args.dropout)
h4 = T.flatten(h4, 2)
y = sigmoid(T.dot(h4, wy))
y1 = sigmoid(T.dot(h4, wy1))
return y, y1
else:
if args.db1:
print "Bias on layer 1"
discrim_params = [dw, db, dw2, db2, dw3, db3, dw4, db4, dwy, dwy1]
def discrim(X, w, b, w2, b2, w3, b3, w4, b4, wy, wy1):
h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))+b.dimshuffle('x', 0, 'x', 'x'))
h = dropout(h, args.dropout)
h2 = lrelu(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2))+b2.dimshuffle('x', 0, 'x', 'x'))
h2 = dropout(h2, args.dropout)
h3 = lrelu(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2))+b3.dimshuffle('x', 0, 'x', 'x'))
h3 = dropout(h3, args.dropout)
h4 = lrelu(dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2))+b4.dimshuffle('x', 0, 'x', 'x'))
h4 = dropout(h4, args.dropout)
h4 = T.flatten(h4, 2)
y = sigmoid(T.dot(h4, wy))
y1 = sigmoid(T.dot(h4, wy1))
return y, y1
else:
discrim_params = [dw, dw2, db2, dw3, db3, dw4, db4, | |
import time
import cv2
import numpy as np
from queue import Empty
import os
from improv.actor import Actor, Spike, RunManager
from improv.store import ObjectNotFoundError
import logging; logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class MeanAnalysis(Actor):
#TODO: Add additional error handling
    def __init__(self, *args):
        # Pass all positional arguments straight through to the Actor base class.
        super().__init__(*args)
def setup(self, param_file=None):
''' Set custom parameters here
Can also be done by e.g. loading them in from
a configuration file. #TODO
'''
np.seterr(divide='ignore')
self.num_stim = 21
self.frame = 0
self.stim = {}
self.stimStart = {}
self.window = 500 #TODO: make user input, choose scrolling window for Visual
self.C = None
self.S = None
self.Call = None
self.Cx = None
self.Cpop = None
self.coords = None
self.color = None
self.runMean = None
self.runMeanOn = None
self.runMeanOff = None
self.lastOnOff = None
self.recentStim = [0]*self.window
def run(self):
self.total_times = []
self.puttime = []
self.colortime = []
self.stimtime = []
self.timestamp = []
with RunManager(self.name, self.runAvg, self.setup, self.q_sig, self.q_comm) as rm:
logger.info(rm)
print('Analysis broke, avg time per frame: ', np.mean(self.total_times, axis=0))
print('Analysis broke, avg time per put analysis: ', np.mean(self.puttime))
print('Analysis broke, avg time per color frame: ', np.mean(self.colortime))
print('Analysis broke, avg time per stim avg: ', np.mean(self.stimtime))
print('Analysis got through ', self.frame, ' frames')
if not os._exists('output'):
try:
os.makedirs('output')
except:
pass
if not os._exists('output/timing'):
try:
os.makedirs('output/timing')
except:
pass
np.savetxt('output/timing/analysis_frame_time.txt', np.array(self.total_times))
np.savetxt('output/timing/analysisput_frame_time.txt', np.array(self.puttime))
np.savetxt('output/timing/analysiscolor_frame_time.txt', np.array(self.colortime))
np.savetxt('output/timing/analysis_timestamp.txt', np.array(self.timestamp))
    def runAvg(self):
        ''' Per-frame work function (called repeatedly by RunManager).

        Pulls any pending stimulus signal, fetches the newest estimates from
        the store, recomputes tuning curves and the colored component frame,
        and pushes the results to the visualization queue.
        '''
        t = time.time()
        ids = None
        try:
            # Non-blocking check for a stimulus update from the Acquirer.
            sig = self.links['input_stim_queue'].get(timeout=0.0001)
            self.updateStim_start(sig)
        except Empty as e:
            pass #no change in input stimulus
        try:
            ids = self.q_in.get(timeout=0.0001)
            ids = [id[0] for id in ids]
            if ids is not None and ids[0]==1:
                # Upstream signalled a dropped frame: propagate the marker and
                # bail out of this iteration via the Empty handler below.
                print('analysis: missing frame')
                self.total_times.append(time.time()-t)
                self.q_out.put([1])
                raise Empty
            # t = time.time()
            # Last element is the frame number; the rest are store object ids.
            self.frame = ids[-1]
            (self.coordDict, self.image, self.S) = self.client.getList(ids[:-1])
            self.C = self.S
            self.coords = [o['coordinates'] for o in self.coordDict]
            # Compute tuning curves based on input stimulus
            # Just do overall average activity for now
            self.stimAvg_start()
            # First 8 columns of estsAvg are the polar display positions;
            # average over neurons for the population tuning curve.
            self.globalAvg = np.mean(self.estsAvg[:,:8], axis=0)
            self.tune = [self.estsAvg[:,:8], self.globalAvg]
            # Compute coloring of neurons for processed frame
            # Also rotate and stack as needed for plotting
            # TODO: move to viz, but we don't need to compute this 30 times/sec
            self.color = self.plotColorFrame()
            # Clamp the plotting window to the frames seen so far.
            if self.frame >= self.window:
                window = self.window
            else:
                window = self.frame
            if self.C.shape[1]>0:
                self.Cpop = np.nanmean(self.C, axis=0)
                self.Cx = np.arange(0,self.Cpop.size)+(self.frame-window)
                self.Call = self.C #already a windowed version #[:,self.frame-window:self.frame]
            self.putAnalysis()
            self.timestamp.append([time.time(), self.frame])
            self.total_times.append(time.time()-t)
        except ObjectNotFoundError:
            logger.error('Estimates unavailable from store, droppping')
        except Empty as e:
            pass
        except Exception as e:
            logger.exception('Error in analysis: {}'.format(e))
def updateStim(self, stim):
''' Recevied new signal from some Acquirer to change input stimulus
[possibly other action items here...? Validation?]
'''
# stim in format dict frame_num:[n, on/off]
frame = list(stim.keys())[0]
whichStim = stim[frame][0]
# stim is dict with stimID as key and lists for indexing on/off into that stim
# length is number of frames
if whichStim not in self.stim.keys():
self.stim.update({whichStim:{}})
if abs(stim[frame][1])>1 :
if 'on' not in self.stim[whichStim].keys():
self.stim[whichStim].update({'on':[]})
self.stim[whichStim]['on'].append(frame)
else:
if 'off' not in self.stim[whichStim].keys():
self.stim[whichStim].update({'off':[]})
self.stim[whichStim]['off'].append(frame)
# also store which stim is active for each frame, up to a recent window
self.recentStim[frame%self.window] = whichStim
def updateStim_start(self, stim):
frame = list(stim.keys())[0]
whichStim = stim[frame][0]
if whichStim not in self.stimStart.keys():
self.stimStart.update({whichStim:[]})
if abs(stim[frame][1])>1 :
curStim = 1 #on
else:
curStim = 0 #off
if self.lastOnOff is None:
self.lastOnOff = curStim
elif self.lastOnOff == 0 and curStim == 1: #was off, now on
self.stimStart[whichStim].append(frame)
print('Stim ', whichStim, ' started at ', frame)
self.lastOnOff = curStim
def putAnalysis(self):
''' Throw things to DS and put IDs in queue for Visual
'''
t = time.time()
ids = []
ids.append([self.client.put(self.Cx, 'Cx'+str(self.frame)), 'Cx'+str(self.frame)])
ids.append([self.client.put(self.Call, 'Call'+str(self.frame)), 'Call'+str(self.frame)])
ids.append([self.client.put(self.Cpop, 'Cpop'+str(self.frame)), 'Cpop'+str(self.frame)])
ids.append([self.client.put(self.tune, 'tune'+str(self.frame)), 'tune'+str(self.frame)])
ids.append([self.client.put(self.color, 'color'+str(self.frame)), 'color'+str(self.frame)])
ids.append([self.client.put(self.coordDict, 'analys_coords'+str(self.frame)), 'analys_coords'+str(self.frame)])
ids.append([self.frame, str(self.frame)])
self.put(ids, save= [False, False, False, False, False, False, False])
self.puttime.append(time.time()-t)
    def stimAvg_start(self):
        ''' Average neural activity around each recorded stimulus onset.

        For every stimulus id in self.stimStart, averages the estimates over a
        post-onset window (frames +5..+14) and a pre-onset baseline window
        (frames -10..-2), then reorders the per-stimulus averages into the
        polar layout used by the visual display (self.estsAvg, neurons x 12).
        '''
        ests = self.S #ests = self.C
        ests_num = ests.shape[1]
        t = time.time()
        # 8 polar display positions plus 4 extra channels for color summation.
        polarAvg = [np.zeros(ests.shape[0])]*12
        estsAvg = [np.zeros(ests.shape[0])]*self.num_stim
        for s,l in self.stimStart.items():
            l = np.array(l)
            if l.size>0:
                # Post-onset response window: frames onset+5 .. onset+14.
                onInd = np.array([np.arange(o+5,o+15) for o in np.nditer(l)]).flatten()
                onInd = onInd[onInd<ests_num]
                # Pre-onset baseline window: frames onset-10 .. onset-2.
                offInd = np.array([np.arange(o-10,o-1) for o in np.nditer(l)]).flatten() #TODO replace
                offInd = offInd[offInd>=0]
                offInd = offInd[offInd<ests_num]
                try:
                    if onInd.size>0:
                        onEst = np.mean(ests[:,onInd], axis=1)
                    else:
                        onEst = np.zeros(ests.shape[0])
                    if offInd.size>0:
                        offEst = np.mean(ests[:,offInd], axis=1)
                    else:
                        offEst = np.zeros(ests.shape[0])
                    try:
                        # Currently uses the raw on-response; the commented
                        # expression is the on/off contrast alternative.
                        estsAvg[int(s)] = onEst #(onEst / offEst) - 1
                    except FloatingPointError:
                        print('Could not compute on/off: ', onEst, offEst)
                        estsAvg[int(s)] = onEst
                    except ZeroDivisionError:
                        estsAvg[int(s)] = np.zeros(ests.shape[0])
                except FloatingPointError: #IndexError:
                    logger.error('Index error ')
                    print('int s is ', int(s))
            # else:
            #     estsAvg[int(s)] = np.zeros(ests.shape[0])
        estsAvg = np.array(estsAvg)
        # Map stimulus ids onto the 8 polar display positions.
        # NOTE(review): this id -> position mapping is hard-coded for one
        # particular stimulus set -- confirm against the Acquirer's encoding.
        polarAvg[2] = estsAvg[9,:] #np.sum(estsAvg[[9,11,15],:], axis=0)
        polarAvg[1] = estsAvg[10, :]
        polarAvg[0] = estsAvg[3, :] #np.sum(estsAvg[[3,5,8],:], axis=0)
        polarAvg[7] = estsAvg[12, :]
        polarAvg[6] = estsAvg[13, :] #np.sum(estsAvg[[13,17,18],:], axis=0)
        polarAvg[5] = estsAvg[14, :]
        polarAvg[4] = estsAvg[4, :] #np.sum(estsAvg[[4,6,7],:], axis=0)
        polarAvg[3] = estsAvg[16, :]
        # for color summation
        polarAvg[8] = estsAvg[5, :]
        polarAvg[9] = estsAvg[6, :]
        polarAvg[10] = estsAvg[7, :]
        polarAvg[11] = estsAvg[8, :]
        # Neurons x position matrix; sanitize NaN/inf values to zero.
        self.estsAvg = np.abs(np.transpose(np.array(polarAvg)))
        self.estsAvg = np.where(np.isnan(self.estsAvg), 0, self.estsAvg)
        self.estsAvg[self.estsAvg == np.inf] = 0
        self.stimtime.append(time.time()-t)
def stimAvg(self):
ests = self.S #ests = self.C
ests_num = ests.shape[1]
# S = self.S
t = time.time()
polarAvg = [np.zeros(ests.shape[0])]*8
estsAvg = [np.zeros(ests.shape[0])]*self.num_stim
if self.runMeanOn is None:
self.runMeanOn = [np.zeros(ests.shape[0])]*self.num_stim
if self.runMeanOff is None:
self.runMeanOff = [np.zeros(ests.shape[0])]*self.num_stim
if self.runMean is None:
self.runMean = [np.zeros(ests.shape[0])]*self.num_stim
if self.frame > 0: #self.window: #recompute entire mean
for s,l in self.stim.items():
if 'on' in l.keys() and 'off' in l.keys():
onInd = np.array(l['on'])
onInd = onInd[onInd<ests_num]
offInd = np.array(l['off'])
offInd = offInd[offInd<ests_num]
try:
on = np.mean(ests[:,onInd], axis=1)
off = np.mean(ests[:,offInd], axis=1)
try:
estsAvg[int(s)] = (on / off) - 1
except FloatingPointError:
print('Could not compute on/off: ', on, off)
estsAvg[int(s)] = on
except IndexError:
logger.error('Index error ')
print('int s is ', int(s))
else:
estsAvg[int(s)] = np.zeros(ests.shape[0])
else:
# keep running mean as well as recalc mean for possible updates
# ests only contains self.window number of most recent frames
# running mean of last newest frame, recalc mean of all more recent frames
for s,l in self.stim.items():
print(s, l)
if 'on' in l.keys() and 'off' in l.keys():
onInd = np.array(l['on'])
offInd = np.array(l['off'])
# print('onInd ', onInd)
# print('offInd ', offInd)
try:
if self.frame == onInd[-1]:
self.runMeanOn[int(s)] += np.mean(ests[:, onInd[-1]], axis=1)
elif self.frame == offInd[-1]:
self.runMeanOff[int(s)] += np.mean(ests[:, offInd[-1]], axis=1)
on = np.mean(ests[:,onInd[:-1]], axis=1)
off = np.mean(ests[:,offInd[:-1]], axis=1)
try:
estsAvg[int(s)] = (on / off) - 1
except FloatingPointError:
estsAvg[int(s)] = on
except IndexError:
pass
else:
estsAvg[int(s)] = np.zeros(ests.shape[0])
estsAvg = np.array(estsAvg)
polarAvg[2] = estsAvg[9,:] #np.sum(estsAvg[[9,11,15],:], axis=0)
polarAvg[1] = estsAvg[10, :]
polarAvg[0] = estsAvg[3, :] #np.sum(estsAvg[[3,5,8],:], axis=0)
polarAvg[7] = estsAvg[12, :]
polarAvg[6] = estsAvg[13, :] #np.sum(estsAvg[[13,17,18],:], axis=0)
polarAvg[5] = estsAvg[14, :]
polarAvg[4] = estsAvg[4, :] #np.sum(estsAvg[[4,6,7],:], axis=0)
polarAvg[3] = estsAvg[16, :]
# for color summation
polarAvg[8] = estsAvg[5, :]
polarAvg[9] = estsAvg[6, :]
polarAvg[10] = estsAvg[7, :]
polarAvg[11] = estsAvg[8, :]
self.estsAvg = np.abs(np.transpose(np.array(polarAvg)))
self.estsAvg = np.where(np.isnan(self.estsAvg), 0, self.estsAvg)
self.estsAvg[self.estsAvg == np.inf] = 0
#self.estsAvg = np.clip(self.estsAvg*4, 0, 4)
self.stimtime.append(time.time()-t)
def plotColorFrame(self):
''' Computes colored nicer background+components frame
'''
t = time.time()
image = self.image
color = np.stack([image, image, image, image], axis=-1).astype(np.uint8).copy()
color[...,3] = 255
# color = self.color.copy() #TODO: don't stack image each time?
if self.coords is not None:
for i,c in enumerate(self.coords):
#c = np.array(c)
ind = c[~np.isnan(c).any(axis=1)].astype(int)
cv2.fillConvexPoly(color, ind, self._tuningColor(i, color[ind[:,1], ind[:,0]]))
# TODO: keep list of neural colors. Compute tuning colors and IF NEW, fill ConvexPoly.
# if self.image.shape[0] < self.image.shape[1]:
# self.flip = True
# raw = raw.T
# else:
# np.swapaxes(color,0,1)
#TODO: user input for rotating frame? See Visual class
#print('time plotColorFrame ', time.time()-t)
self.colortime.append(time.time()-t)
return color
def _tuningColor(self, ind, inten):
''' ind identifies the neuron by number
'''
if self.estsAvg[ind] is not None: # and np.sum(np.abs(self.estsAvg[ind]))>2:
try:
# trying sort and compare
# rel = self.estsAvg[ind]/np.max(self.estsAvg[ind])
# order = np.argsort(rel)
# if rel[order[-1]] - rel[order[-2]] > 0.2: #ensure strongish tuning
# r, g, b = | |
%s buildstep for %s builder: %s\n" % (buildStepName, builderName, e))
return doTest
expected_build_steps = {
'Apple Win 10 Debug (Tests)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile', 'download-built-product', 'extract-built-product', 'jscore-test', 'layout-test', 'run-api-tests', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple Win 10 Release (Tests)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile', 'download-built-product', 'extract-built-product', 'jscore-test', 'layout-test', 'run-api-tests', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple Win 10 Debug (Build)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile', 'compile-webkit', 'archive-built-product', 'upload', 'transfer-to-s3', 'trigger'],
'Apple Win 10 Release (Build)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile', 'compile-webkit', 'archive-built-product', 'upload', 'transfer-to-s3', 'trigger'],
'Apple-BigSur-Release-Build': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit'],
'Apple-Catalina-Debug-Build': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit', 'archive-built-product', 'upload', 'archive-built-product', 'upload', 'transfer-to-s3', 'trigger'],
'Apple-Catalina-Debug-JSC-Tests': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'jscore-test'],
'Apple-Catalina-Debug-Test262-Tests': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'test262-test'],
'Apple-Catalina-Debug-WK1-Tests': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'wait-for-crash-collection', 'layout-test', 'run-api-tests', 'lldb-webkit-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'trigger-crash-log-submission', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple-Catalina-Debug-WK2-Tests': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'wait-for-crash-collection', 'layout-test', 'run-api-tests', 'lldb-webkit-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'trigger-crash-log-submission', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple-Catalina-Debug-WK2-WebGL-Tests': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'wait-for-crash-collection', 'layout-test', 'run-api-tests', 'lldb-webkit-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'trigger-crash-log-submission', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple-Catalina-Debug-WK2-GPUProcess-Tests': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'wait-for-crash-collection', 'layout-test', 'run-api-tests', 'lldb-webkit-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'trigger-crash-log-submission', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple-Catalina-LLINT-CLoop-BuildAndTest': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit', 'webkit-jsc-cloop-test'],
'Apple-Catalina-Release-Build': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit', 'archive-built-product', 'upload', 'archive-built-product', 'upload', 'transfer-to-s3', 'trigger'],
'Apple-Catalina-Release-JSC-Tests': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'jscore-test'],
'Apple-Catalina-Release-Test262-Tests': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'test262-test'],
'Apple-Catalina-Release-WK1-Tests': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'wait-for-crash-collection', 'layout-test', 'run-api-tests', 'lldb-webkit-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'trigger-crash-log-submission', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple-Catalina-Release-WK2-Tests': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'wait-for-crash-collection', 'layout-test', 'run-api-tests', 'lldb-webkit-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'trigger-crash-log-submission', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple-Catalina-Release-WK2-Perf': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'perf-test'],
'Apple Mojave Debug (Build)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit', 'archive-built-product', 'upload', 'archive-built-product', 'upload', 'transfer-to-s3', 'trigger'],
'Apple Mojave Debug WK1 (Tests)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'wait-for-crash-collection', 'layout-test', 'run-api-tests', 'lldb-webkit-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'trigger-crash-log-submission', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple Mojave Debug WK2 (Tests)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'wait-for-crash-collection', 'layout-test', 'run-api-tests', 'lldb-webkit-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'trigger-crash-log-submission', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple Mojave Release (Build)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit', 'archive-built-product', 'upload', 'archive-built-product', 'upload', 'transfer-to-s3', 'trigger'],
'Apple Mojave Release WK1 (Tests)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'wait-for-crash-collection', 'layout-test', 'run-api-tests', 'lldb-webkit-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'trigger-crash-log-submission', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple Mojave Release WK2 (Tests)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'wait-for-crash-collection', 'layout-test', 'run-api-tests', 'lldb-webkit-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'trigger-crash-log-submission', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple Mojave Release (Build)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit', 'archive-built-product', 'upload', 'archive-built-product', 'upload', 'transfer-to-s3', 'trigger'],
'Apple iOS 13 Release (Build)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit'],
'Apple iOS 13 Simulator Release (Build)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit', 'archive-built-product', 'upload', 'archive-built-product', 'upload', 'transfer-to-s3', 'trigger'],
'Apple iOS 13 Simulator Release WK2 (Tests)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'wait-for-crash-collection', 'layout-test', 'run-api-tests', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'trigger-crash-log-submission', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple iOS 13 Simulator Debug (Build)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit', 'archive-built-product', 'upload', 'archive-built-product', 'upload', 'transfer-to-s3', 'trigger'],
'Apple iOS 13 Simulator Debug WK2 (Tests)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'wait-for-crash-collection', 'layout-test', 'run-api-tests', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'trigger-crash-log-submission', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple iPadOS 13 Simulator Release WK2 (Tests)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'wait-for-crash-collection', 'layout-test', 'run-api-tests', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'trigger-crash-log-submission', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple iPadOS 13 Simulator Debug WK2 (Tests)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'download-built-product', 'extract-built-product', 'wait-for-crash-collection', 'layout-test', 'run-api-tests', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'trigger-crash-log-submission', 'archive-test-results', 'upload', 'MasterShellCommand'],
'Apple-tvOS-13-Release-Build': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit'],
'Apple-tvOS-Simulator-13-Release-Build': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit'],
'Apple-watchOS-6-Release-Build': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit'],
'Apple-watchOS-Simulator-6-Release-Build': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit'],
'JSCOnly Linux ARMv7 Thumb2 Release': ['configure build', 'svn', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit', 'jscore-test'],
'JSCOnly Linux ARMv7 Thumb2 SoftFP Release': ['configure build', 'svn', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit', 'jscore-test'],
'JSCOnly Linux AArch64 Release': ['configure build', 'svn', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit', 'jscore-test'],
'JSCOnly Linux MIPS32el Release': ['configure build', 'svn', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit', 'jscore-test'],
'GTK Linux 64-bit Debug (Build)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'compile-webkit', 'archive-built-product', 'upload', 'transfer-to-s3', 'trigger'],
'GTK Linux 64-bit Debug (Tests)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'download-built-product', 'extract-built-product', 'layout-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'archive-test-results', 'upload', 'MasterShellCommand', 'API tests'],
'GTK Linux 64-bit Debug (Tests WebDriver)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'download-built-product', 'extract-built-product', 'webdriver-test'],
'GTK Linux 64-bit Debug (Tests JS)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'download-built-product', 'extract-built-product', 'jscore-test', 'test262-test'],
'GTK Linux 64-bit Release (Build)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'compile-webkit', 'archive-built-product', 'upload', 'generate-jsc-bundle', 'transfer-to-s3', 'trigger'],
'GTK Linux 64-bit Release (Perf)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'download-built-product', 'extract-built-product', 'perf-test', 'benchmark-test'],
'GTK Linux 64-bit Release (Tests)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'download-built-product', 'extract-built-product', 'layout-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'archive-test-results', 'upload', 'MasterShellCommand', 'API tests'],
'GTK Linux 64-bit Release (Tests WebDriver)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'download-built-product', 'extract-built-product', 'webdriver-test'],
'GTK Linux 64-bit Release (Tests JS)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'download-built-product', 'extract-built-product', 'jscore-test', 'test262-test'],
'GTK Linux 64-bit Release Wayland (Tests)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'jhbuild', 'download-built-product', 'extract-built-product', 'layout-test', 'webkitpy-test', 'webkitperl-test', 'bindings-generation-tests', 'builtins-generator-tests', 'dashboard-tests', 'archive-test-results', 'upload', 'MasterShellCommand', 'API tests', 'webdriver-test'],
'GTK Linux 64-bit Release Ubuntu LTS (Build)': ['configure build', 'svn', 'kill old processes', 'delete WebKitBuild directory', 'delete stale build files', 'compile-webkit'],
'GTK | |
import spyral
import types
import sys
import functools
import math
import string
import pygame
from bisect import bisect_right
class BaseWidget(spyral.View):
    """
    Simplest possible widget; every other widget subclasses it. A BaseWidget
    knows which form owns it, carries the style key used to look up its
    styling, and maintains a collision mask.
    """
    def __init__(self, form, name):
        # Style key must exist before View.__init__ so styling can be applied.
        self.__style__ = '.'.join((form.__class__.__name__, name))
        self.form = form
        self.name = name
        spyral.View.__init__(self, form)
        self.mask = spyral.Rect(self.pos, self.size)
    def _changed(self):
        """
        Called when the Widget is changed; a widget's mask is derived from its
        component widgets, so it must be recomputed before propagating.
        """
        self._recalculate_mask()
        spyral.View._changed(self)
    def _recalculate_mask(self):
        """
        Rebuild this widget's mask from its position, size, and padding.
        """
        self.mask = spyral.Rect(self.pos, self.size + self.padding)
# Widget Implementations
class MultiStateWidget(BaseWidget):
    """
    The MultiStateWidget is an abstract widget with multiple states. It should
    be subclassed and implemented to have different behavior based on its
    states.

    In addition, it supports having a Nine Slice image; it will cut a given
    image into a 3x3 grid of images that can be stretched into a button. This
    is a boolean property.

    :param form: The parent form that this Widget belongs to.
    :type form: :class:`Form <spyral.Form>`
    :param str name: The name of this widget.
    :param states: A list of the possible states that the widget can be in.
    :type states: A ``list`` of ``str``.
    """
    def __init__(self, form, name, states):
        self._states = states
        # Start out in the first listed state.
        self._state = self._states[0]
        # Hack for now; TODO need to be able to set properties on the button
        # even though it doesn't exist yet.
        self.button = None
        BaseWidget.__init__(self, form, name)
        self.layers = ["base", "content"]
        # Cache of one rendered Image per state; filled by _render_images().
        self._images = {}
        self._content_size = (0, 0)
        self.button = spyral.Sprite(self)
        self.button.layer = "base"
    def _render_images(self):
        """
        Recreates the cached images of this widget (based on the
        **self._image_locations** internal variable) and sets the widget's
        image based on its current state.
        """
        for state in self._states:
            if self._nine_slice:
                # Stretch the nine-sliced background to padding + content.
                size = self._padding + self._content_size
                nine_slice_image = spyral.Image(self._image_locations[state])
                self._images[state] = spyral.image.render_nine_slice(nine_slice_image, size)
            else:
                self._images[state] = spyral.Image(self._image_locations[state])
        self.button.image = self._images[self._state]
        self.mask = spyral.Rect(self.pos, self.button.size)
        self._on_state_change()
    def _set_state(self, state):
        old_value = self.value
        self._state = state
        # Only queue a "changed" event when the externally visible value
        # actually changed (subclasses may map several states to one value).
        if self.value != old_value:
            e = spyral.Event(name="changed", widget=self, form=self.form, value=self._get_value())
            self.scene._queue_event("form.%(form_name)s.%(widget)s.changed" %
                                        {"form_name": self.form.__class__.__name__,
                                         "widget": self.name},
                                    e)
        self.button.image = self._images[state]
        self.mask = spyral.Rect(self.pos, self.button.size)
        self._on_state_change()
    def _get_value(self):
        """
        Returns the current value of this widget; defaults to the ``state`` of
        the widget.
        """
        return self._state
    def _get_state(self):
        """
        This widget's state; when changed, a form.<name>.<widget>.changed
        event will be triggered. Represented as a ``str``.
        """
        return self._state
    def _set_nine_slice(self, nine_slice):
        self._nine_slice = nine_slice
        self._render_images()
    def _get_nine_slice(self):
        """
        The :class:`Image <spyral.Image>` that will be nine-sliced into this
        widget's background.
        """
        return self._nine_slice
    def _set_padding(self, padding):
        # Accept either a Vec2D or a single int applied to both axes.
        if isinstance(padding, spyral.Vec2D):
            self._padding = padding
        else:
            self._padding = spyral.Vec2D(padding, padding)
        self._render_images()
    def _get_padding(self):
        """
        A :class:`Vec2D <spyral.Vec2D>` that represents the horizontal and
        vertical padding associated with this button. Can also be set with a
        ``int`` for equal amounts of padding, although it will always return a
        :class:`Vec2D <spyral.Vec2D>`.
        """
        return self._padding
    def _set_content_size(self, size):
        """
        The size of the content within this button, used to calculate the mask.
        A :class:`Vec2D <spyral.Vec2D>`
        ..todo:: It's most likely the case that this needs to be refactored into
        the mask property, since they're probably redundant with each other.
        """
        self._content_size = size
        self._render_images()
    def _get_content_size(self):
        # BUG FIX: previously returned the bound method itself
        # (``self._get_content_size``) rather than the stored size, so
        # ``widget.content_size`` yielded a method object, never a Vec2D/tuple.
        return self._content_size
    def _on_size_change(self):
        """
        A function triggered whenever this widget changes size.
        """
        pass
    def _get_anchor(self):
        """
        Defines an `anchor point <anchors>` where coordinates are relative to
        on the widget. ``str``.
        """
        return self._anchor
    def _set_anchor(self, anchor):
        # NOTE(review): ``self._text_sprite`` is created by subclasses such as
        # ButtonWidget, not by this class — setting the anchor on a bare
        # MultiStateWidget would raise AttributeError; confirm intended.
        if self.button is not None:
            self.button.anchor = anchor
            self._text_sprite.anchor = anchor
        # BaseWidget does not define _set_anchor itself; presumably inherited
        # from spyral.View — TODO confirm.
        BaseWidget._set_anchor(self, anchor)
    anchor = property(_get_anchor, _set_anchor)
    value = property(_get_value)
    padding = property(_get_padding, _set_padding)
    nine_slice = property(_get_nine_slice, _set_nine_slice)
    state = property(_get_state, _set_state)
    content_size = property(_get_content_size, _set_content_size)
    def __stylize__(self, properties):
        """
        Applies the *properties* to this scene. This is called when a style
        is applied.

        :param properties: a mapping of property names (strings) to values.
        :type properties: ``dict``
        """
        self._padding = properties.pop('padding', 4)
        if not isinstance(self._padding, spyral.Vec2D):
            self._padding = spyral.Vec2D(self._padding, self._padding)
        self._nine_slice = properties.pop('nine_slice', False)
        self._image_locations = {}
        for state in self._states:
            # TODO: try/catch to ensure that the property is set?
            self._image_locations[state] = properties.pop('image_%s' % (state,))
        spyral.View.__stylize__(self, properties)
class ButtonWidget(MultiStateWidget):
    """
    A ButtonWidget is a simple button that can be pressed. It can have some
    text. If you don't specify an explicit width, then it will be sized
    according to its text.

    :param form: The parent form that this Widget belongs to.
    :type form: :class:`Form <spyral.Form>`
    :param str name: The name of this widget.
    :param str text: The text that will be rendered on this button.
    """
    def __init__(self, form, name, text = "Okay"):
        MultiStateWidget.__init__(self, form, name,
                                  ['up', 'down', 'down_focused', 'down_hovered',
                                   'up_focused', 'up_hovered'])
        # Sprite that renders the button's label on the "content" layer.
        self._text_sprite = spyral.Sprite(self)
        self._text_sprite.layer = "content"
        self.text = text
    def _get_value(self):
        """
        Whether this widget is currently ``"up"`` or ``"down"``; the
        hover/focus suffixes of the raw state are collapsed away.
        """
        if "up" in self._state:
            return "up"
        else:
            return "down"
    def _get_text(self):
        """
        The text rendered on this button (``str``).
        """
        return self._text
    def _set_text(self, text):
        # Re-render the label and resize the button's background around it.
        self._text = text
        self._text_sprite.image = self.font.render(self._text)
        self._content_size = self._text_sprite.image.size
        self._render_images()
    def _on_state_change(self):
        """
        Called whenever this widget's state changes; repositions the text
        sprite inside the padded background according to the anchor.
        """
        self._text_sprite.pos = spyral.util._anchor_offset(self._anchor,
                                                           self._padding[0] / 2,
                                                           self._padding[1] / 2)
    value = property(_get_value)
    text = property(_get_text, _set_text)
    def _handle_mouse_up(self, event):
        """
        The function called when the mouse is released while on this widget.
        Fires a ``clicked`` event if the button was down.
        """
        if self.state.startswith('down'):
            self.state = self.state.replace('down', 'up')
            e = spyral.Event(name="clicked", widget=self, form=self.form, value=self._get_value())
            self.scene._queue_event("form.%(form_name)s.%(widget)s.clicked" %
                                        {"form_name": self.form.__class__.__name__,
                                         "widget": self.name},
                                    e)
    def _handle_mouse_down(self, event):
        """
        The function called when the mouse is pressed while on this widget;
        moves the button into its ``down`` state.
        """
        if self.state.startswith('up'):
            self.state = self.state.replace('up', 'down')
    def _handle_mouse_out(self, event):
        """
        The function called when this button is no longer being hovered over.
        """
        if "_hovered" in self.state:
            self.state = self.state.replace('_hovered', '')
    def _handle_mouse_over(self, event):
        """
        The function called when the mouse starts hovering over this button.
        Hovering replaces the focused suffix, if any.
        """
        if not "_hovered" in self.state:
            self.state = self.state.replace('_focused', '') + "_hovered"
    def _handle_mouse_motion(self, event):
        """
        The function called when the mouse moves while over this button.
        """
        pass
    def _handle_focus(self, event):
        """
        Applies the focus state to this widget.
        """
        if self.state in ('up', 'down'):
            self.state += '_focused'
    def _handle_blur(self, event):
        """
        Removes the focused state from this widget.
        """
        if self.state in ('up_focused', 'down_focused'):
            self.state = self.state.replace('_focused', '')
    def _handle_key_down(self, event):
        """
        When the enter or space key is pressed, triggers this button being
        pressed.
        """
        if event.key in (spyral.keys.enter, spyral.keys.space):
            self._handle_mouse_down(event)
    def _handle_key_up(self, event):
        """
        When the enter or space key is released, triggers this button being
        released.
        """
        if event.key in (spyral.keys.enter, spyral.keys.space):
            self._handle_mouse_up(event)
    def __stylize__(self, properties):
        """
        Applies the *properties* to this scene. This is called when a style
        is applied.

        :param properties: a mapping of property names (strings) to values.
        :type properties: ``dict``
        """
        self.font = spyral.Font(*properties.pop('font'))
        self._text = properties.pop('text', "Button")
        MultiStateWidget.__stylize__(self, properties)
class ToggleButtonWidget(ButtonWidget):
    """
    A ToggleButtonWidget behaves like a ButtonWidget, except that it latches:
    one click pushes it down, and it stays down until clicked again.

    :param form: The parent form that this Widget belongs to.
    :type form: :class:`Form <spyral.Form>`
    :param str name: The name of this widget.
    :param str text: The text that will be rendered on this button.
    """
    def __init__(self, form, name, text = "Okay"):
        ButtonWidget.__init__(self, form, name, text)
    def _handle_mouse_up(self, event):
        """
        Releasing the mouse does nothing; toggling happens on press.
        """
        pass
    def _handle_mouse_down(self, event):
        """
        Flip the widget between its up and down families of states.
        """
        for pressed, released in (('down', 'up'), ('up', 'down')):
            if self.state.startswith(pressed):
                self.state = self.state.replace(pressed, released)
                break
class CheckboxWidget(ToggleButtonWidget):
    """
    A CheckboxWidget is simply a ToggleButtonWidget rendered without any
    label text.
    """
    def __init__(self, form, name):
        super(CheckboxWidget, self).__init__(form, name, "")
class RadioButtonWidget(ToggleButtonWidget):
    """
    A RadioButtonWidget is similar to a CheckboxWidget, except it is meant to
    be placed into a radio group, which ensures that only one RadioButton in
    its group is selected at a time.

    .. warning:: This widget is incomplete; no grouping behavior is
       implemented in this class itself.
    """
| |
<filename>calvin/tests/test_calvinscript.py
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.csparser.parser import calvin_parser
from calvin.csparser.analyzer import generate_app_info
from calvin.csparser.checker import check
import unittest
import json
import difflib
import pytest
def absolute_filename(filename):
    """Return *filename* resolved relative to this module's directory."""
    import os.path
    module_dir = os.path.dirname(__file__)
    return os.path.join(module_dir, filename)
class CalvinTestBase(unittest.TestCase):
def setUp(self):
self.test_script_dir = absolute_filename('scripts/')
def tearDown(self):
pass
def _read_file(self, file):
try:
with open(file, 'r') as source:
text = source.read()
except Exception as e:
print "Error: Could not read file: '%s'" % file
raise e
return text
def _format_unexpected_error_message(self, errors):
msg_list = ["Expected empty error, not {0}".format(err) for err in errors]
return '\n'.join(msg_list)
def invoke_parser(self, test, source_text=None):
if not source_text:
test = self.test_script_dir + test + '.calvin'
source_text = self._read_file(test)
return calvin_parser(source_text, test)
def invoke_parser_assert_syntax(self, test, source_text=None):
"""Verify that the source is free from syntax errors and return parser output"""
result, errors, warnings = self.invoke_parser(test, source_text)
self.assertFalse(errors, self._format_unexpected_error_message(errors))
return result
def assert_script(self, test):
"""Check parsing of script against a reference result"""
result = self.invoke_parser_assert_syntax(test)
ref_file = self.test_script_dir + test + '.ref'
reference = self._read_file(ref_file)
# Canonical form
sorted_result = json.dumps(result, indent=4, sort_keys=True)
sorted_result = "\n".join([line for line in sorted_result.splitlines() if "sourcefile" not in line])
reference = "\n".join([line for line in reference.splitlines() if "sourcefile" not in line])
diff_lines = difflib.unified_diff(sorted_result.splitlines(), reference.splitlines())
diff = '\n'.join(diff_lines)
self.assertFalse(diff, diff)
class CalvinScriptParserTest(CalvinTestBase):
    """Exercise the CalvinScript parser against stored references."""

    def testSimpleStructure(self):
        """Smoke test: the simplest script parses to its reference."""
        self.assert_script('test1')

    def testComplexScript(self):
        """A larger, realistic script parses to its reference."""
        self.assert_script('test9')

    def testComponentDefinitions(self):
        """Component definitions parse to their reference."""
        self.assert_script('test8')

    def testSyntaxError(self):
        """A malformed script reports position information for the error."""
        parsed, errors, warnings = self.invoke_parser('test10')
        self.assertEqual(errors[0], {'reason': 'Syntax error.', 'line': 6, 'col': 2})
class CalvinScriptAnalyzerTest(CalvinTestBase):
    """Exercise the CalvinScript analyzer."""

    def assert_app_info(self, test, app_info):
        """Compare *app_info* against the stored reference for *test*."""
        reference = self._read_file(self.test_script_dir + test + '.app_info')
        # Canonicalize (sorted keys) so dict ordering cannot cause noise.
        canonical = json.dumps(app_info, indent=4, sort_keys=True)
        delta = '\n'.join(difflib.unified_diff(canonical.splitlines(),
                                               reference.splitlines()))
        self.assertFalse(delta, delta)

    def testSimpleScript(self):
        """Analyzer output for a known-good script matches the reference."""
        script_name = 'test9'
        # First make sure the parse result below is error-free.
        parsed = self.invoke_parser_assert_syntax(script_name)
        self.assert_app_info(script_name, generate_app_info(parsed))

    def testMissingActor(self):
        """An unknown actor type must produce an invalid app_info."""
        parsed = self.invoke_parser_assert_syntax('inline', """a:std.NotLikely()""")
        self.assertFalse(generate_app_info(parsed)['valid'])
class CalvinScriptCheckerTest(CalvinTestBase):
"""Test the CalvinsScript checker"""
def testCheckSimpleScript(self):
script = """
a:Foo()
b:Bar()
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertTrue(errors)
def testCheckLocalComponent(self):
script = """
component Foo() -> out {
f:std.CountTimer()
f.integer > .out
}
a:Foo()
b:io.StandardOut()
a.out > b.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertFalse(errors, '\n'.join([str(error) for error in errors]))
self.assertFalse(warnings, '\n'.join([str(warning) for warning in warnings]))
def testCheckOutportConnections(self):
script = """
a:std.CountTimer()
b:std.CountTimer()
c:io.StandardOut()
a.integer > c.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(errors[0]['reason'], "Actor b (std.CountTimer) is missing connection to outport 'integer'")
self.assertFalse(warnings)
def testCheckInportConnections(self):
script = """
c:io.StandardOut()
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(errors[0]['reason'], "Missing connection to inport 'c.token'")
self.assertFalse(warnings)
def testCheckInportConnections(self):
script = """
a:std.CountTimer()
b:std.CountTimer()
c:io.StandardOut()
a.integer > c.token
b.integer > c.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0]['reason'], "Actor c (io.StandardOut) has multiple connections to inport 'token'")
self.assertFalse(warnings)
def testBadComponent1(self):
script = """
component Foo() -> out {
a:std.CountTimer()
b:std.CountTimer()
a.integer > .out
}
a:Foo()
b:io.StandardOut()
a.out > b.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]['reason'], "Actor b (std.CountTimer) is missing connection to outport 'integer'")
self.assertFalse(warnings)
def testBadComponent2(self):
script = """
component Foo() -> out {
a:std.CountTimer()
b:io.StandardOut()
a.integer > b.token
}
a:Foo()
b:io.StandardOut()
a.out > b.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]['reason'], "Component Foo is missing connection to outport 'out'")
self.assertFalse(warnings)
def testBadComponent3(self):
script = """
component Foo() -> out {
a:std.CountTimer()
a.integer > .out
a.integer > .out
}
a:Foo()
b:io.StandardOut()
a.out > b.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0]['reason'], "Component Foo has multiple connections to outport 'out'")
self.assertEqual(errors[1]['reason'], "Component Foo has multiple connections to outport 'out'")
self.assertFalse(warnings)
def testBadComponent4(self):
script = """
component Foo() in -> {
a:io.StandardOut()
}
b:Foo()
a:std.CountTimer()
a.integer > b.in
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0]['reason'], "Component Foo is missing connection to inport 'in'")
self.assertEqual(errors[1]['reason'], "Actor a (io.StandardOut) is missing connection to inport 'token'")
self.assertFalse(warnings)
def testBadComponent5(self):
script = """
component Foo() in -> {
a:io.StandardOut()
.foo > a.token
}
b:Foo()
a:std.CountTimer()
a.integer > b.in
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0]['reason'], "Component Foo has no inport 'foo'")
self.assertEqual(errors[1]['reason'], "Component Foo is missing connection to inport 'in'")
self.assertEqual(len(warnings), 0)
def testBadComponent6(self):
script = """
component Foo() -> out {
a:std.CountTimer()
a.integer > .foo
}
b:Foo()
a:io.StandardOut()
b.out > a.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0]['reason'], "Component Foo has no outport 'foo'")
self.assertEqual(errors[1]['reason'], "Component Foo is missing connection to outport 'out'")
self.assertEqual(len(warnings), 0)
def testBadComponent7(self):
script = """
component Foo() in -> out {
.in > .out
}
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]['reason'], "Component Foo passes port 'in' directly to port 'out'")
self.assertEqual(len(warnings), 0)
def testUndefinedActors(self):
script = """
a.token > b.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0]['reason'], "Undefined actor: 'a'")
self.assertEqual(errors[1]['reason'], "Undefined actor: 'b'")
def testUndefinedArguments(self):
script = """
a:std.Constant()
b:io.StandardOut()
a.token > b.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]['reason'], "Missing argument: 'data'")
def testComponentUndefinedArgument(self):
script = """
component Foo(file) in -> {
a:io.StandardOut()
.in > a.token
}
b:Foo()
a:std.CountTimer()
a.integer > b.in
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0]['reason'], "Unused argument: 'file'")
self.assertEqual(errors[1]['reason'], "Missing argument: 'file'")
def testComponentUnusedArgument(self):
script = """
component Foo(file) in -> {
a:io.StandardOut()
.in > a.token
}
b:Foo(file="Foo.txt")
a:std.CountTimer()
a.integer > b.in
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 1)
self.assertEqual(len(warnings), 0)
self.assertEqual(errors[0]['reason'], "Unused argument: 'file'")
def testLocalComponentRecurse(self):
script = """
component E() in -> out {
f:std.Identity()
.in > f.token
f.token > .out
}
component B() in -> out {
e:E()
.in > e.in
e.out > .out
}
a:std.Counter()
b:B()
c:io.StandardOut()
a.integer > b.in
b.out > c.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 0)
self.assertEqual(len(warnings), 0)
@pytest.mark.xfail(reason="Since component def is now a dict, order is not preserved. Needs fix.")
def testLocalComponentBad(self):
script = """
component B() in -> out {
e:E()
.in > e.in
e.out > .out
}
component E() in -> out {
f:std.Identity()
.in > f.token
f.token > .out
}
a:std.Counter()
b:B()
c:io.StandardOut()
a.integer > b.in
b.out > c.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]['reason'], "Unknown actor type: 'E'")
self.assertEqual(len(warnings), 0)
def testNoSuchPort(self):
script = """
i:std.Identity()
src:std.CountTimer()
dst:io.StandardOut()
src.integer > i.foo
i.bar > dst.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
self.assertEqual(len(errors), 4)
self.assertEqual(errors[0]['reason'], "Actor i (std.Identity) has no inport 'foo'")
self.assertEqual(errors[1]['reason'], "Actor i (std.Identity) has no outport 'bar'")
self.assertEqual(errors[2]['reason'], "Actor i (std.Identity) is missing connection to inport 'token'")
self.assertEqual(errors[3]['reason'], "Actor i (std.Identity) is missing connection to outport 'token'")
self.assertEqual(len(warnings), 0)
@pytest.mark.xfail()
def testRedfineInstance(self):
script = """
i:std.Identity()
src:std.CountTimer()
dst:io.StandardOut()
i:std.RecTimer()
src.integer > i.token
i.token > dst.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
print errors
self.assertEqual(len(errors), 1)
self.assertEqual(len(warnings), 0)
def testUndefinedActorInComponent(self):
script = """
component Bug() -> out {
b.out > .out
}
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
print errors
self.assertEqual(len(errors), 1)
self.assertEqual(len(warnings), 0)
class CalvinScriptDefinesTest(CalvinTestBase):
"""Test CalvinsScript defines"""
def testUndefinedConstant(self):
script = """
src : std.Constant(data=FOO)
snk : io.StandardOut()
src.token > snk.token
"""
result = self.invoke_parser_assert_syntax('inline', script)
errors, warnings = check(result)
print errors
self.assertEqual(len(errors), 1)
self.assertEqual(len(warnings), 0)
self.assertEqual(errors[0]['reason'], "Undefined identifier: 'FOO'")
def testDefinedConstant(self):
| |
not use the GC calculation from Biopython
# Because it does not deal with 'N' nucleotides
# gc_content = GC(sequence)/100
else:
gc_content = np.NaN
return gc_content
# TODO GC exact computation to account for ambiguous nucleotides S(G or C)
# TODO Test gc_cds for GC1, GC2, GC3 contents
def gc_codon(fasta, gff, chromosome, start, end, min_bp=6):
    """
    Estimate the fraction of G+C bases within CDS at codon positions 1, 2 and 3.
    Use a list of CDS features (start, end, frame, phase) to subset a list of DNA sequences
    and estimate GC content at each position.

    :param fasta: A fasta object with the same coordinates as the gff
    :param gff: DataFrame, A gff data frame
    :param chromosome: str, Chromosome name
    :param start: int, Start position of the sequence
    :param end: int, End position of the sequence
    :param min_bp: int, the minimal number of nucleotides to consider a sequence
    :return: tuple (gc123, gc1, gc2, gc3, cds_proportion) of the global GC
             proportion, the GC proportion at each codon position, and the
             proportion of the window covered by CDS
    """
    # Subset features
    # exons contain UTR that can alter the frame shift
    # It is preferable to estimate GC content on CDS
    feat = gff[(gff['seqname'] == str(chromosome)) &
               (gff['start'] >= int(start)) &
               (gff['end'] <= int(end)) &
               (gff['feature'] == "CDS")]
    if (feat.shape[0] > 0):
        # Sample all sequences from chromosomes and start-end positions
        # Subset a list of DNA sequences according to features positions
        list_seq = list(feat.apply(lambda x: fasta.sample_sequence(x["seqname"], x["start"], x["end"]), axis=1))
        # Take care of short sequences (typically < 6bp) that introduce errors below
        # Remove sequences shorter than the required number of nucleotides
        # list_seq = list(map(lambda x: x.upper(), list_seq))
        # Count only unambiguous nucleotides when measuring sequence length.
        length_seq = list(map(lambda x: len(re.findall("[ATCGatcg]", x)), list_seq))
        # Reduce the dataset
        feat = feat.loc[list(map(lambda x: int(x) > min_bp, length_seq))]
        list_seq = list(feat.apply(lambda x: fasta.sample_sequence(x["seqname"], x["start"], x["end"]), axis=1))
        if (feat.shape[0] > 0):
            # Merge overlapping coordinates to estimate CDS proportion properly
            # in case of splicing variants and overlapping sequences
            # Refactoring: use sample_sequence_masked to get CDS regions masked then p = 1 - q/l
            # where p = proportion of CDS and q = length of sequence after masking CDS and l = total length of sequence
            # Masking regions
            mask = [(x, y) for x, y in zip(list(feat.start), list(feat.end))]
            # Sample sequences
            seq = fasta.sample_sequence_masked(chromosome, start, end, mask)
            try:
                # seq is the window with CDS masked out, so 1 - len(seq)/window
                # is the CDS fraction.
                # NOTE(review): denominator here is (end - start + 1) while the
                # sibling gc_noncoding/gc_intergenic use (end - start) — confirm
                # which convention is intended.
                cds_proportion = 1 - abs(len(seq) / (end - start + 1))
            except ZeroDivisionError:
                cds_proportion = np.NaN
            # Strand of the feature
            # Reverse the DNA sequence if strand == "-"
            # NOTE(review): this only reverses, it does not complement (the
            # complemented variant is commented out below) — GC content is
            # unchanged by complementation, presumably why reversal suffices.
            strand = list(feat.apply(lambda x: x['strand'], axis=1))
            for i, seq in enumerate(list_seq):
                if strand[i] == "-":
                    list_seq[i] = seq[::-1]
                    # list_seq[i] = str(Seq(seq).reverse_complement())
            # Phase of CDS features
            # Remove 0, 1 or 2 bp at the beginning
            frame = list(feat.apply(lambda x: x['frame'], axis=1))
            for i, seq in enumerate(list_seq):
                list_seq[i] = seq[int(frame[i])::]
            # Split in three vectors of codon position
            codons = "".join(map(lambda x: x[::], list_seq))
            codon1 = "".join(map(lambda x: x[0::3], list_seq))
            codon2 = "".join(map(lambda x: x[1::3], list_seq))
            codon3 = "".join(map(lambda x: x[2::3], list_seq))
            # Estimate GC content at each codon position
            gc123 = gc(codons, min_bp=min_bp)
            gc1 = gc(codon1, min_bp=min_bp)
            gc2 = gc(codon2, min_bp=min_bp)
            gc3 = gc(codon3, min_bp=min_bp)
        else:
            # All CDS sequences were shorter than min_bp.
            gc123 = np.NaN
            gc1 = np.NaN
            gc2 = np.NaN
            gc3 = np.NaN
            cds_proportion = np.NaN
    else:
        # No CDS features in the window.
        gc123 = np.NaN
        gc1 = np.NaN
        gc2 = np.NaN
        gc3 = np.NaN
        cds_proportion = np.NaN
    gc_content = (gc123, gc1, gc2, gc3, cds_proportion)
    return gc_content
def gc_noncoding(fasta, gff, chromosome, start, end, min_bp=6):
    """
    Estimate the fraction of G+C bases within non-coding sequences.
    Use a list of CDS features (start, end) to mask out coding DNA and compute
    GC content on what remains.

    :param fasta: A fasta object with the same coordinates as the gff
    :param gff: DataFrame, A gff data frame
    :param chromosome: str, Chromosome name
    :param start: int, Start position of the sequence
    :param end: int, End position of the sequence
    :param min_bp: int, the minimal number of nucleotides to consider a sequence
    :return: tuple, (GC content in non-coding sequences, proportion of
             non-coding sequence in the window)
    """
    # CDS features fully contained in the window.
    feat = gff[(gff['seqname'] == str(chromosome)) &
               (gff['start'] >= int(start)) &
               (gff['end'] <= int(end)) &
               (gff['feature'] == "CDS")]
    if feat.shape[0] == 0:
        # No CDS in the window: the entire window is non-coding.
        noncoding_seq = fasta.sample_sequence(chromosome, start, end)
        noncoding_prop = 1
        gc_content = gc(noncoding_seq, min_bp=min_bp)
    else:
        # Mask the CDS intervals, then measure GC on the residual sequence.
        # (feat.shape[0] cannot be negative, so the former unreachable
        # NaN fallback branch was removed.)
        mask = [(x, y) for x, y in zip(list(feat.start), list(feat.end))]
        seq = fasta.sample_sequence_masked(chromosome, start, end, mask)
        # BUG FIX: min_bp was not forwarded here (gc(seq) silently used the
        # default), inconsistent with the no-CDS branch above.
        gc_content = gc(seq, min_bp=min_bp)
        try:
            # NOTE(review): sibling gc_codon divides by (end - start + 1);
            # confirm which convention is intended.
            noncoding_prop = len(seq) / (end - start)
        except ZeroDivisionError:
            noncoding_prop = np.NaN
    return (gc_content, noncoding_prop)
def gc_intergenic(fasta, gff, chromosome, start, end, min_bp=6):
    """
    Estimate the fraction of G+C bases within intergenic sequences.
    Use a list of gene features (start, end) to mask out genic DNA and compute
    GC content on what remains.

    :param fasta: A fasta object with the same coordinates as the gff
    :param gff: DataFrame, A gff data frame
    :param chromosome: str, Chromosome name
    :param start: int, Start position of the sequence
    :param end: int, End position of the sequence
    :param min_bp: int, the minimal number of nucleotides to consider a sequence
    :return: tuple, (GC content in intergenic sequences, proportion of
             intergenic sequence in the window)
    """
    # Gene features fully contained in the window; intergenic = window - genes.
    feat = gff[(gff['seqname'] == str(chromosome)) &
               (gff['start'] >= int(start)) &
               (gff['end'] <= int(end)) &
               (gff['feature'] == "gene")]
    if feat.shape[0] == 0:
        # No genes in the window: the entire window is intergenic.
        intergenic_seq = fasta.sample_sequence(chromosome, start, end)
        intergenic_prop = 1
        gc_content = gc(intergenic_seq, min_bp=min_bp)
    else:
        # Mask the gene intervals, then measure GC on the residual sequence.
        # (feat.shape[0] cannot be negative, so the former unreachable
        # NaN fallback branch was removed.)
        mask = [(x, y) for x, y in zip(list(feat.start), list(feat.end))]
        seq = fasta.sample_sequence_masked(chromosome, start, end, mask)
        # BUG FIX: min_bp was not forwarded here (gc(seq) silently used the
        # default), inconsistent with the no-gene branch above.
        gc_content = gc(seq, min_bp=min_bp)
        try:
            # NOTE(review): sibling gc_codon divides by (end - start + 1);
            # confirm which convention is intended.
            intergenic_prop = len(seq) / (end - start)
        except ZeroDivisionError:
            intergenic_prop = np.NaN
    return (gc_content, intergenic_prop)
def gc_intron(fasta, gff, chromosome, start, end, min_bp=6, splicing_strategy="merge"):
    """
    Estimate the fraction of G+C bases within intron sequences.
    Use a list of intron features (start, end) to subset a list of intron DNA sequences.

    :param fasta: A fasta object with the same coordinates as the gff
    :param gff: DataFrame, A gff data frame
    :param chromosome: str, Chromosome name
    :param start: int, Start position of the sequence
    :param end: int, End position of the sequence
    :param min_bp: int, the minimal number of nucleotides to consider a sequence
    :param splicing_strategy: str, how to combine splicing variants; "merge"
        merges overlapping intron intervals before sampling. NOTE(review): no
        other strategy is handled below — any other value leaves the interval
        lists undefined (NameError); confirm only "merge" is supported.
    :return: tuple, (GC content in intron sequences, proportion of intron
             sequence in the window)
    """
    # Intron features fully contained in the window.
    feat = gff[(gff['seqname'] == str(chromosome)) &
               (gff['start'] >= int(start)) &
               (gff['end'] <= int(end)) &
               (gff['feature'] == "intron")]
    if (feat.shape[0] == 0):
        # No introns in the window.
        gc_intron = np.NaN
        intron_prop = np.NaN
    # If only one feature, Dataframe is transformed in Series
    # NOTE(review): boolean-mask indexing of a DataFrame returns a DataFrame
    # even for a single row, so this Series branch looks unreachable here —
    # confirm whether some caller passes a pre-squeezed gff.
    elif (isinstance(feat, pd.Series)):
        # Normalize the single interval so start <= end.
        list_start = [min(feat["start"], feat["end"])]
        list_end = [max(feat["start"], feat["end"])]
        list_seq = [fasta.sample_sequence(chromosome, x, y) for x, y in zip(list_start, list_end)]
        # Sample sequences
        seq = "".join(list_seq)
        gc_intron = gc(seq, min_bp)
        try:
            # NOTE(review): sibling gc_codon divides by (end - start + 1);
            # confirm which convention is intended.
            intron_prop = len(seq) / (end - start)
        except ZeroDivisionError:
            intron_prop = np.NaN
    elif (feat.shape[0] > 0):
        if (splicing_strategy == "merge"):
            list_start = [x[1] for x in feat["start"].items()]
            list_end = [x[1] for x in feat["end"].items()]
            # Normalize each interval and drop zero-length ones, then merge
            # overlapping intervals so shared splice variants are not counted
            # twice.
            intervals = [(min(x, y), max(x, y)) for x, y in zip(list_start, list_end) if x != y]
            merge_splicing = intervaltree.IntervalTree.from_tuples(intervals)
            list_start = [x.begin for x in merge_splicing]
            list_end = [x.end for x in merge_splicing]
            # if ((len(list_start) > 1) & (len(list_end) > 1)):
            #     intervals = [(x,y) for x,y in zip([list_start], [list_end])]
            #     merge_splicing = intervaltree.IntervalTree.from_tuples(intervals)
            #     list_start = [x.begin for x in merge_splicing]
            #     list_end = [x.end for x in merge_splicing]
            # else:
            #     # Inverse coordinates if sequence is on "-" strand (i.e. start > end)
            #     list_start = min(list_start, list_end)
            #     list_end = min(list_start, list_end)
        list_seq = [fasta.sample_sequence(chromosome, x, y) for x, y in zip(list_start, list_end)]
        # Sample sequences
        seq = "".join(list_seq)
        gc_intron = gc(seq, min_bp)
        try:
            intron_prop = len(seq) / (end - start)
        except ZeroDivisionError:
            intron_prop = np.NaN
    else:
        gc_intron = np.NaN
        intron_prop = np.NaN
    return (gc_intron, intron_prop)
def gc1(fasta, gff, chromosome, start, end):
gc1 = gc_codon(fasta, gff, | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 PanXu, Inc. All Rights Reserved
#
"""
基于 <<Chinese NER Using Lattice LSTM>> 论文
论文地址: https://www.aclweb.org/anthology/P18-1144/
实现 lattice lstm 模型
相关说明文档参考:
docs/ner/Chinese NER Using Lattice LSTM.md
Authors: PanXu
Date: 2021/01/20 19:48:00
"""
import logging
from typing import Tuple, List, Dict
import numpy as np
import torch
from torch import nn
from torch.nn import init
from torch.nn import Module, Parameter
class WordLSTMCell(Module):
    """
    Word-level LSTM cell (see docs/ner/Chinese NER Using Lattice LSTM.md).

    Implements "Part 1" of the lattice LSTM: given a word embedding
    x^w_{b,e} and the character hidden state h^c_b at position b, it
    computes the word cell state c^w_{b,e}.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        """
        Initialize the cell.

        :param input_size: dimension of the word embedding w_{b,e}
        :param hidden_size: output hidden dimension (internally the packed
                            weights are sized 3 * hidden_size)
        :param bias: whether to use a bias term
        """
        super(WordLSTMCell, self).__init__()
        # input embedding size
        self.input_size = input_size
        # output hidden size
        self.hidden_size = hidden_size
        # whether a bias parameter is used
        self.use_bias = bias
        # W*[x^w_{b,e}; h^c_b] + b is computed as
        #   weight_ih * x^w_{b,e}  +  weight_hh * h^c_b  +  b
        # The factor 3 packs the i, f, g gates into a single matmul; the
        # result is later separated with torch.split.
        self.weight_ih = nn.Parameter(
            torch.FloatTensor(input_size, 3 * hidden_size))
        self.weight_hh = nn.Parameter(
            torch.FloatTensor(hidden_size, 3 * hidden_size))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(3 * hidden_size))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """
        Reset all learnable parameters.
        """
        # NOTE: init.orthogonal / init.constant are deprecated aliases;
        # the in-place variants are the supported API.
        init.orthogonal_(self.weight_ih)
        # hidden-to-hidden weights start as identity for each packed gate
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 3)
        with torch.no_grad():
            self.weight_hh.copy_(weight_hh_data)
        if self.bias is not None:
            init.constant_(self.bias, val=0)

    def forward(self, input_, hx) -> torch.Tensor:
        """
        Args:
            input_: word embedding x^w_{b,e}, size (B, input_size)
            hx: tuple (h_0, c_0), the character state at position b,
                each of size (B, hidden_size)
        Returns:
            c_1: the word cell state c^w_{b,e}, size (B, hidden_size)
        """
        # h_0 is h^c_b, the hidden state at the word's start position b
        h_0, c_0 = hx
        batch_size = h_0.size(0)
        # weight_hh * h^c_b (+ bias); single conditional instead of two
        if self.bias is not None:
            bias_batch = self.bias.unsqueeze(0).expand(batch_size, *self.bias.size())
            wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
        else:
            wh_b = torch.mm(h_0, self.weight_hh)
        # weight_ih * x^w_{b,e}
        wi = torch.mm(input_, self.weight_ih)
        # split the packed projection into the f, i, g gates
        f, i, g = torch.split(wh_b + wi, split_size_or_sections=self.hidden_size, dim=1)
        # c^w_{b,e} = sigmoid(f) * c_0 + sigmoid(i) * tanh(g)
        c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
        return c_1

    def __repr__(self):
        s = '{name}({input_size}, {hidden_size})'
        return s.format(name=self.__class__.__name__, **self.__dict__)
class MultiInputLSTMCell(nn.Module):
    """
    LSTM cell combining the character path with the word cells produced by
    WordLSTMCell ("Part 2" and "Part 3" of the lattice LSTM): one input
    gate is computed per matched word, and every word cell state c^w_{b,e}
    is folded into the character cell state c^c_j.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        """
        Initialize the cell.

        :param input_size: dimension of the character embedding x^c_j
        :param hidden_size: output hidden dimension
        :param bias: True to use bias terms, False otherwise
        """
        super().__init__()
        # character embedding dimension
        self.input_size = input_size
        # output hidden dimension
        self.hidden_size = hidden_size
        # projection of x^c_j (the i, o, g gates are packed, hence * 3)
        self.weight_ih = nn.Parameter(
            torch.FloatTensor(input_size, 3 * hidden_size))
        # projection of the previous hidden state h^c_{j-1}
        self.weight_hh = nn.Parameter(
            torch.FloatTensor(hidden_size, 3 * hidden_size))
        # word gate i^c_{b,e}: part multiplied with x^c_j ...
        self.alpha_weight_ih = nn.Parameter(
            torch.FloatTensor(input_size, hidden_size))
        # ... and part multiplied with c^w_{b,e} (WordLSTMCell output)
        self.alpha_weight_hh = nn.Parameter(
            torch.FloatTensor(hidden_size, hidden_size))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(3 * hidden_size))
            self.alpha_bias = nn.Parameter(torch.FloatTensor(hidden_size))
        else:
            self.register_parameter('bias', None)
            self.register_parameter('alpha_bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """
        Reset all learnable parameters.
        """
        # NOTE: init.orthogonal / init.constant are deprecated aliases;
        # use the in-place variants.
        init.orthogonal_(self.weight_ih)
        init.orthogonal_(self.alpha_weight_ih)
        # hidden-to-hidden weights start as identity per packed gate
        weight_hh_data = torch.eye(self.hidden_size).repeat(1, 3)
        with torch.no_grad():
            self.weight_hh.copy_(weight_hh_data)
        # identity init (the original ``repeat(1, 1)`` here was a no-op)
        alpha_weight_hh_data = torch.eye(self.hidden_size)
        with torch.no_grad():
            self.alpha_weight_hh.copy_(alpha_weight_hh_data)
        # The biases are just set to zero vectors.
        if self.bias is not None:
            init.constant_(self.bias, val=0)
            init.constant_(self.alpha_bias, val=0)

    def forward(self, input_, c_input, hx) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        NOTE: only batch_size == 1 is supported.

        :param input_: character embedding x^c_j, size (1, input_size)
        :param c_input: list of word cell states c^w_{b,e} ending at j,
                        each produced by WordLSTMCell; may be empty
        :param hx: previous step's output (h^c_{j-1}, c^c_{j-1})
        :return: (h_j, c_j), the hidden and cell state of this step
        """
        h_0, c_0 = hx
        batch_size = h_0.size(0)
        # this cell only handles batch_size == 1
        assert (batch_size == 1)
        # W*[x^c_j; h^c_{j-1}] + b, computed as two matmuls;
        # single conditional instead of the original duplicated check
        if self.bias is not None:
            bias_batch = self.bias.unsqueeze(0).expand(batch_size, *self.bias.size())
            wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
        else:
            wh_b = torch.mm(h_0, self.weight_hh)
        # weight_ih * x^c_j
        wi = torch.mm(input_, self.weight_ih)
        # unpack the gates; g is the candidate cell state \tilde{c_j}
        i, o, g = torch.split(wh_b + wi, split_size_or_sections=self.hidden_size, dim=1)
        i = torch.sigmoid(i)
        g = torch.tanh(g)
        o = torch.sigmoid(o)
        # number of words ending at position j
        c_num = len(c_input)
        if c_num == 0:
            # no word matched: plain coupled-gate LSTM update (f = 1 - i)
            f = 1 - i
            c_1 = f*c_0 + i*g
        else:
            # stack all matched word cell states along dim 0
            c_input_var = torch.cat(c_input, 0).squeeze(1)
            # word gates (part 3):
            # i^c_{b,e} = sigma(alpha_weight_ih*x^c_j + alpha_weight_hh*c^w_{b,e} + b)
            if self.alpha_bias is not None:
                alpha_wi = torch.addmm(self.alpha_bias, input_, self.alpha_weight_ih).expand(c_num, self.hidden_size)
            else:
                alpha_wi = torch.mm(input_, self.alpha_weight_ih).expand(c_num, self.hidden_size)
            alpha_wh = torch.mm(c_input_var, self.alpha_weight_hh)
            # alpha is i^c_{b,e}
            alpha = torch.sigmoid(alpha_wi + alpha_wh)
            # softmax over the char gate i and every word gate
            alpha = torch.exp(torch.cat([i, alpha], 0))
            alpha = torch.div(alpha, alpha.sum(0))
            # weighted sum of g=\tilde{c_j} and every c^w_{b,e}
            merge_i_c = torch.cat([g, c_input_var], 0)
            c_1 = (merge_i_c * alpha).sum(0).unsqueeze(0)
        # h computed as in a regular LSTM
        h_1 = o * torch.tanh(c_1)
        return h_1, c_1

    def __repr__(self):
        s = '{name}({input_size}, {hidden_size})'
        return s.format(name=self.__class__.__name__, **self.__dict__)
class LatticeLSTM(nn.Module):
    """
    Lattice LSTM layer built on MultiInputLSTMCell (character path) and
    WordLSTMCell (gazetteer/lexicon word path).
    """
    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 gaz_word_embedding_dim: int,
                 gaz_word_embedding: torch.Tensor,
                 gaz_word_embedding_dropout: float,
                 left2right: bool):
        """
        Initialize the lattice LSTM.
        :param input_dim: dimension of the character input embeddings
        :param hidden_dim: hidden output dimension
        :param gaz_word_embedding_dim: dimension of the gazetteer word embeddings
        :param gaz_word_embedding: gazetteer word embedding (invoked like a module in forward)
        :param gaz_word_embedding_dropout: dropout rate applied to gaz word embeddings
        :param left2right: direction of the pass — forward (True) or backward (False),
                           i.e. the two directions of a language model
        """
        super().__init__()
        self.hidden_dim = hidden_dim
        self.word_emb = gaz_word_embedding
        self.word_dropout = nn.Dropout(gaz_word_embedding_dropout)
        # character-level multi-input cell
        self.rnn = MultiInputLSTMCell(input_dim, hidden_dim)
        # word-level cell producing c^w_{b,e}
        self.word_rnn = WordLSTMCell(gaz_word_embedding_dim, hidden_dim)
        self.left2right = left2right
        self.reset_parameters()
    def reset_parameters(self):
        # Nothing to do: the sub-cells reset themselves in their constructors.
        pass
    def forward(self,
                input: torch.Tensor,
                skip_input: List[List[List[List]]],
                hidden: Tuple[torch.Tensor, torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Run the lattice LSTM over one sentence.
        :param input: character sequence embeddings, shape (B, seq_len, embedding_size);
            only batch_size B == 1 is supported.
        :param skip_input: a 4-dim list; since B == 1 its single element is a
            3-dim list of length seq_len (same length as the char sequence).
            Each entry holds the matched lexicon word ids and their lengths.
            Example: [[], [[25,13],[2,4]], [], [[33], [2]], []] means the 2nd
            character starts words with ids 25 and 13, of lengths 2 and 4.
            E.g. for the sequence "到 长 江 大 桥" (len 5), skip_input also has
            length 5; at "长" (index 1) the words "长江" (id 25, length 2) and
            "长江大桥" (id 13, length 4) are matched; at "大" the word "大桥"
            (id 33, length 2) is matched.
        :param hidden: optional predefined (h, c) initial state
        :return: the hidden sequence (h1, c1), ..., (hn, cn); each output has
            shape (B, seq_len, hidden_dim) with B == 1
        """
        # iterate over time steps, so make the sequence the leading dim
        input = input.transpose(1, 0)
        seq_len = input.size(0)
        batch_size = input.size(1)
        # only batch_size == 1 is supported
        assert (batch_size == 1)
        # since batch_size == 1, take the single batch element
        skip_input = skip_input[0]
        if not self.left2right:  # for right-to-left, reverse the matched-word lattice too
            skip_input = convert_forward_gaz_to_backward(skip_input)
        hidden_out = []  # h per step
        memory_out = []  # c per step
        if hidden:  # use hidden_{t-1} if provided, otherwise zeros
            (hx, cx) = hidden
        else:
            hx = torch.zeros(batch_size, self.hidden_dim, device=input.device)
            cx = torch.zeros(batch_size, self.hidden_dim, device=input.device)
        id_list = range(seq_len)
        if not self.left2right:
            id_list = list(reversed(id_list))
        # Per-position store for the word cell states c^w_t computed by
        # WordLSTMCell. Starts empty and is filled while iterating, driven by
        # skip_input. Note: at t=0, input_c_list[0] is necessarily empty —
        # the first character has no preceding characters to form a word with.
        input_c_list = init_list_of_objects(seq_len)
        for t in id_list:
            (hx, cx) = self.rnn(input[t], input_c_list[t], (hx, cx))
            hidden_out.append(hx)
            memory_out.append(cx)
            if skip_input[t]:  # words start at position t: produce their c^w_t via WordLSTMCell
                # number of words matched at this position
                matched_num = len(skip_input[t][0])
                # tensor of all matched word ids (possibly several, not one)
                word_var = torch.tensor(skip_input[t][0], device=input.device, dtype=torch.long)
                # look up the word embeddings
                word_emb = self.word_emb(word_var)
                word_emb = self.word_dropout(word_emb)
                # compute c^w_t for every matched word id in one call
                ct = self.word_rnn(word_emb, (hx, cx))
                assert (ct.size(0) == len(skip_input[t][1]))
                # stash each c^w_t at the character position where its word ends
                for idx in range(matched_num):
                    length = skip_input[t][1][idx]
                    if self.left2right:
                        # if t+length <= seq_len -1:
                        input_c_list[t + length - 1].append(ct[idx, :].unsqueeze(0))
                    else:
                        # if t-length >=0:
                        input_c_list[t - length + 1].append(ct[idx, :].unsqueeze(0))
        if not self.left2right:
            hidden_out = list(reversed(hidden_out))
            memory_out = list(reversed(memory_out))
        output_hidden, output_memory = torch.cat(hidden_out, 0), torch.cat(memory_out, 0)
        # output shape: (batch, seq_len, hidden_dim)
        return output_hidden.unsqueeze(0), output_memory.unsqueeze(0)
def init_list_of_objects(size) -> List[List]:
"""
构建一个 word ct 的 | |
<reponame>evolv-ai/experiment-management-cli
from .util import EvolvError
from .request import EvolvRequest
from .collections import METAMODELS, ACCOUNTS, EXPERIMENTS, ENVIRONMENTS, CANDIDATES
class EvolvClient:
def __init__(self, config):
    """Constructs a new Experiment Client.

    :param EvolvConfig config: configuration for the Evolv account
    """
    # Keep the config so the auth setters below can rebuild the requester.
    self._config = config
    self._requester = EvolvRequest(config)
def set_api_key(self, api_key):
    """Store a new api key and rebuild the requester with it.

    :param string api_key: unique key
    """
    config = self._config
    config.api_key = api_key
    self._requester = EvolvRequest(config)
def set_bearer_token(self, bearer_token):
    """Sets the bearer token used for requests to the experiments api.

    (The previous docstring said "api key" — copy-paste; this sets the JWT.)

    :param string bearer_token: jwt
    """
    self._config.bearer_token = bearer_token
    self._requester = EvolvRequest(self._config)
def get_account(self, account_id):
    """Fetch a single account by id. Defaults to the root account.

    :param string account_id: id of the account
    :return: the root account for the sdk key supplied
    :rtype: json
    """
    return self._requester.get(ACCOUNTS, entity_id=account_id)
def get_environment(self, environment_id, account_id, version=None, content_only=False):
    """Fetch one environment, optionally a specific version or contents only.

    :param string environment_id: the id of the environment
    :param string account_id: id of the account the environment exists in
    :param string version: get a specific version
    :param bool content_only: return only the environment's contents
    :return: the environment
    :rtype: json
    """
    return self._requester.get(
        ENVIRONMENTS,
        entity_id=environment_id,
        account_id=account_id,
        version=version,
        content_only=content_only,
    )
def get_metamodel(self, metamodel_id, account_id, version=None, content_only=False):
    """Fetch one metamodel, optionally a specific version or contents only.

    :param string metamodel_id: a guid which identifies a specific metamodel
    :param string account_id: id of the account the metamodel exists in
    :param string version: version of the metamodel being requested
    :param bool content_only: return only the metamodel's contents
    :return: a metamodel
    :rtype: json
    """
    return self._requester.get(
        METAMODELS,
        entity_id=metamodel_id,
        account_id=account_id,
        version=version,
        content_only=content_only,
    )
def get_experiment(self, experiment_id, account_id, metamodel_id, version=None, content_only=False):
    """Fetch one experiment nested under an account and metamodel.

    :param string experiment_id: unique identifier of the experiment requested
    :param string account_id: id of the account the experiment exists in
    :param string metamodel_id: id of the metamodel the experiment exists in
    :param string version: version of the experiment being requested
    :param bool content_only: return only the experiment's contents
    :return: an experiment
    :rtype: json
    """
    return self._requester.get(
        EXPERIMENTS,
        entity_id=experiment_id,
        account_id=account_id,
        metamodel_id=metamodel_id,
        version=version,
        content_only=content_only,
    )
def get_candidate(self, candidate_id, account_id, metamodel_id, experiment_id):
    """Fetch one candidate nested under an account, metamodel and experiment.

    :param string candidate_id: unique identifier of the candidate requested
    :param string account_id: id of the account the candidate exists in
    :param string metamodel_id: id of the metamodel the candidate exists in
    :param string experiment_id: id of the experiment the candidate exists in
    :return: a candidate
    :rtype: json
    """
    return self._requester.get(
        CANDIDATES,
        entity_id=candidate_id,
        account_id=account_id,
        metamodel_id=metamodel_id,
        experiment_id=experiment_id,
    )
def list_accounts(self):
    """List every account the caller has access to.

    :return: a list of accounts
    :rtype: json
    """
    return self._requester.query(ACCOUNTS)
def list_environments(self, account_id, query=None):
    """List the environments available under the user's account.

    :param string account_id: id of the account the environments exist in
    :param string query: experiment api query
    :return: a list of environments
    :rtype: json
    """
    # only forward the filter if the caller supplied one
    querystring = {'query': query} if query else {}
    return self._requester.query(ENVIRONMENTS, query=querystring, account_id=account_id)
def list_metamodels(self, account_id, query=None):
    """List the metamodels for the configured account.

    :param string account_id: id of the account the metamodels exist in
    :param string query: experiment api query
    :return: a list of metamodels
    :rtype: json
    """
    # only forward the filter if the caller supplied one
    querystring = {'query': query} if query else {}
    return self._requester.query(METAMODELS, query=querystring, account_id=account_id)
def list_experiments(self, account_id, metamodel_id, query=None, statuses=None):
    """List the experiments under a metamodel, optionally filtered.

    :param string account_id: id of the account the experiments exist in
    :param string metamodel_id: id of the metamodel the experiments exist in
    :param string query: experiment api query
    :param string statuses: comma separated list of statuses to filter upon
    :return: a list of experiments
    :rtype: json
    """
    # include only the filters the caller actually supplied
    querystring = {key: value
                   for key, value in (('query', query), ('statuses', statuses))
                   if value}
    return self._requester.query(EXPERIMENTS, query=querystring, account_id=account_id,
                                 metamodel_id=metamodel_id)
def list_candidates(self, account_id, metamodel_id, experiment_id, query=None):
    """List the candidates available under the user's account.

    :param string account_id: id of the account the candidates exist in
    :param string metamodel_id: id of the metamodel the candidates exist in
    :param string experiment_id: id of the experiment the candidates exist in
    :param string query: experiment api query
    :return: list of candidates
    :rtype: json
    """
    # only forward the filter if the caller supplied one
    querystring = {'query': query} if query else {}
    return self._requester.query(CANDIDATES, query=querystring, account_id=account_id,
                                 metamodel_id=metamodel_id, experiment_id=experiment_id)
def create_account(self, name):
    """Create a new account.

    :param string name: account name
    :return: the created account
    :rtype: json
    """
    return self._requester.post(ACCOUNTS, json={'name': name})
def create_environment(self, name, account_id, default=False, content=None, content_type=None, protected=False):
    """Creates an environment.

    :param string name: the environment name
    :param string account_id: id of the account the environment will exist in
    :param bool default: whether or not to make the environment the default
    :param string content: extra content defining the environment
    :param string content_type: content type formatting being used
        ['application/json', 'application/yaml']
    :param bool protected: whether or not the environment is protected
    :raises ValueError: if *content* is supplied without *content_type*
    :return: the created environment
    :rtype: json
    """
    payload = {
        'name': name,
        'default': default,
        'protected': protected
    }
    if content:
        # The original ``assert content_type`` is stripped under ``python -O``,
        # silently posting content without a type; validate explicitly instead.
        if not content_type:
            raise ValueError("content_type is required when content is provided")
        payload['content'] = content
        payload['content_type'] = content_type
    return self._requester.post(ENVIRONMENTS, json=payload, account_id=account_id)
def create_metamodel(self, name, yaml, account_id):
    """Create a metamodel from a yaml definition.

    :param string name: metamodel name
    :param string yaml: yaml representing the metamodel
    :param string account_id: id of the account the metamodel will exist in
    :return: the created metamodel
    :rtype: json
    """
    payload = {
        'name': name,
        'content_type': 'application/yaml',
        'content': yaml,
    }
    return self._requester.post(METAMODELS, json=payload, account_id=account_id)
def create_experiment(self, name, metamodel_id, metamodel_version, optimization_targets, account_id,
                      environment_id, experiment_type='CONTROL', population_size=10, budget=None,
                      precursor=None, algorithm_version=None, estimated_cr=5.0, long_tail_correction=True,
                      sample_rate=100, target_confidence='STANDARD', audience_query=None, content=None,
                      content_type=None):
    """Creates an experiment.

    :param string name: name of the experiment
    :param string metamodel_id: id of metamodel to create experiment from
    :param string metamodel_version: version of the metamodel to create the experiment from
    :param list optimization_targets: events to optimize the experiment upon
    :param string account_id: id of the account the experiment will exist in
    :param string environment_id: the id of the environment in which to create the experiment
    :param string experiment_type: type of experiment to build ['CONTROL', 'ABN', 'EVO']
    :param int population_size: size of the initial experiment
    :param int budget: number of participants to test before terminating
    :param string precursor: id of the experiment that precedes this one
    :param string algorithm_version: version of the algorithm to use for the experiment
    :param float estimated_cr: the estimated conversion rate of the experiment, as a percentage
    :param bool long_tail_correction: whether to use long tail correction or not
    :param int sample_rate: the percentage traffic allocation
    :param string target_confidence: the level of confidence needed before progressing
    :param string audience_query: dict defining the experiment's target audience
    :param string content: extra content defining the experiment
    :param string content_type: content type formatting being used ['application/json', 'application/yaml']
    :return: the created experiment
    :rtype: json
    """
    # estimated_cr and sample_rate are taken as percentages and converted
    # to fractions (0..1) for the API payload below.
    experiment = self._requester.post(EXPERIMENTS, json={
        'name': name,
        'type': experiment_type,
        'population_size': population_size,
        'environment_id': environment_id,
        'metamodel_version': metamodel_version,
        'optimization_targets': optimization_targets,
        'precursor': precursor,
        'algorithm_version': algorithm_version,
        'estimated_cr': estimated_cr / 100,
        'budget': budget,
        'sample_rate': sample_rate / 100,
        'long_tail_correction': long_tail_correction,
        'target_confidence': target_confidence,
        'audience_query': audience_query,
        'content': content,
        'content_type': content_type
    }, account_id=account_id, metamodel_id=metamodel_id)
    return experiment
def create_candidate(self, metamodel_id, metamodel_version, genome, account_id, environment_id, experiment_id,
                     allocation_probability=1.0):
    """Create a hand-crafted candidate within an experiment.

    :param string metamodel_id: id of the metamodel
    :param string metamodel_version: metamodel version to associate with the candidate
    :param string genome: representation of the candidate's genome
    :param string account_id: id of the account the candidate will exist in
    :param string environment_id: the id of the environment in which to create the candidate
    :param string experiment_id: id of the experiment for the candidate to exist in
    :param float allocation_probability: probability that the candidate gets allocated
    :return: a candidate
    :rtype: json
    """
    payload = {
        'environment_id': environment_id,
        'metamodel_version': metamodel_version,
        'genome': genome,
        'allocation_probability': allocation_probability,
        'strategy': 'crafted',
    }
    return self._requester.post(CANDIDATES, json=payload, account_id=account_id,
                                metamodel_id=metamodel_id, experiment_id=experiment_id)
def update_environment(self, environment_id, name, content, content_type, account_id):
"""Updates an environment
:param string environment_id: the environment id
:param string name: name of the environment
:param string content: content to be updated
:param string content_type: content type ['application/json', 'application/yaml']
:param string account_id: id of the account the environment exists in
:return: the updated environment
:rtype: json
"""
environment = self._requester.put(ENVIRONMENTS, environment_id, json={
'id': environment_id,
'name': | |
#
# Copyright (c) SAS Institute Inc.
# SPDX-License-Identifier: Apache-2.0
# This has been significantly modified from the original.
#
# Visit https://aboutcode.org and https://github.com/nexB/univers for support and download.
import unittest
from univers.maven import Version
from univers.maven import VersionRange
from univers.maven import RestrictionParseError
from univers.maven import VersionRangeParseError
from univers.maven import Restriction
class TestRestriction(unittest.TestCase):
    """Tests for univers.maven.Restriction parsing, bounds and membership."""

    def _check(self, r, lower, lower_inc, upper, upper_inc, inside=(), outside=()):
        """Assert the parsed bounds of *r* and a set of member/non-member versions."""
        if lower is None:
            assert r.lower_bound is None
        else:
            assert str(r.lower_bound) == lower
        assert bool(r.lower_bound_inclusive) == lower_inc
        if upper is None:
            assert r.upper_bound is None
        else:
            assert str(r.upper_bound) == upper
        assert bool(r.upper_bound_inclusive) == upper_inc
        for version in inside:
            assert version in r
        for version in outside:
            assert version not in r

    def test_everyting_spec(self):
        self._check(Restriction("[,)"), None, True, None, False,
                    inside=("0.1.0", "1.0", "2.0"))

    def test_exclusive_lower_bound(self):
        self._check(Restriction("(1.0,2.0]"), "1.0", False, "2.0", True,
                    inside=("1.1",), outside=("0.1", "1.0"))

    def test_no_lower_limit(self):
        self._check(Restriction("(,1.0]"), None, False, "1.0", True,
                    inside=("0.1", "1.0"), outside=("1.1", "2.0"))

    def test_inclusive_range(self):
        self._check(Restriction("[1.0]"), "1.0", True, "1.0", True,
                    inside=("1.0",), outside=("0.8", "1.1", "2.0"))
        self._check(Restriction("[1.2,1.3]"), "1.2", True, "1.3", True,
                    inside=("1.2", "1.2.1", "1.3"),
                    outside=("0.8", "1.1", "1.3.1", "2.0"))

    def test_exclusive_upper_bound(self):
        self._check(Restriction("[1.0,2.0)"), "1.0", True, "2.0", False,
                    inside=("1.0", "1.9"), outside=("0.8", "2.0", "3.0"))
        self._check(Restriction("[1.5,)"), "1.5", True, None, False,
                    inside=("1.5", "100.3"), outside=("0.8", "1.4"))

    def test_invalid_restrictions(self):
        bad_specs = (
            "(1.0)",
            "[1.0)",
            "(1.0]",
            "(1.0,1.0]",
            "[1.0,1.0)",
            "(1.0,1.0)",
            "[1.1,1.0]",
        )
        for spec in bad_specs:
            with self.assertRaises(RestrictionParseError):
                Restriction(spec)

    def test_compare(self):
        restriction = Restriction("(1.0,2.0]")
        assert restriction == restriction
        assert restriction == "(1.0,2.0]"
        assert 1 < restriction

    def test_string_repr(self):
        for spec in (
            "[1.0]",
            "[1.0,)",
            "[1.0,2.0]",
            "[1.0,2.0)",
            "(1.0,2.0)",
            "[,2.0]",
            "[,2.0)",
            "(,2.0)",
        ):
            assert str(Restriction(spec)) == spec
class TestVersion(unittest.TestCase):
"""Tests the Version object"""
def _assert_version_equal(self, v1, v2):
    """Assert v1 == v2 symmetrically, for every str/Version combination."""
    parsed1 = Version(v1)
    parsed2 = Version(v2)
    comparisons = (
        (parsed1, parsed2), (parsed2, parsed1),  # Version vs Version
        (v1, parsed2), (v2, parsed1),            # str vs Version
        (parsed1, v2), (parsed2, v1),            # Version vs str
    )
    for left, right in comparisons:
        assert left == right, "%s != %s" % (left, right)
def _assert_version_order(self, v1, v2):
    """Assert v1 < v2 (and v2 > v1) across str/Version combinations."""
    V1 = Version(v1)
    V2 = Version(v2)
    # (lower, higher, printable lower, printable higher)
    ordered_pairs = (
        (V1, V2, V1._parsed, V2._parsed),  # Version <-> Version
        (V1, v2, V1._parsed, v2),          # Version <-> str
        (v1, V2, v1, V2._parsed),          # str <-> Version
    )
    for low, high, low_repr, high_repr in ordered_pairs:
        assert low < high, "%s >= %s" % (low_repr, high_repr)
        assert high > low, "%s >= %s" % (high_repr, low_repr)
def test_from_string(self):
    """Check that Version parses raw strings into the expected normalized tuples."""
    # each pair is (raw input string, expected value of Version(...)._parsed)
    test_pairs = (
        # weird versions
        (".1", (0, 1)),
        ("-1", ((1,),)),
        # test some major.minor.tiny parsing
        ("1", (1,)),
        ("1.0", (1,)),
        ("1.0.0", (1,)),
        ("1.0.0.0", (1,)),
        ("11", (11,)),
        ("11.0", (11,)),
        ("1-1", (1, (1,))),
        ("1-1-1", (1, (1, (1,)))),
        (" 1 ", (1,)),
        # test qualifiers
        ("1.0-ALPHA", (1, ("alpha",))),
        ("1-alpha", (1, ("alpha",))),
        ("1.0ALPHA", (1, ("alpha",))),
        ("1-alpha", (1, ("alpha",))),
        ("1.0-A", (1, ("a",))),
        ("1-a", (1, ("a",))),
        ("1.0A", (1, ("a",))),
        ("1a", (1, ("a",))),
        ("1.0-BETA", (1, ("beta",))),
        ("1-beta", (1, ("beta",))),
        ("1.0-B", (1, ("b",))),
        ("1-b", (1, ("b",))),
        ("1.0B", (1, ("b",))),
        ("1b", (1, ("b",))),
        ("1.0-MILESTONE", (1, ("milestone",))),
        ("1.0-milestone", (1, ("milestone",))),
        ("1-M", (1, ("m",))),
        ("1.0-m", (1, ("m",))),
        ("1M", (1, ("m",))),
        ("1m", (1, ("m",))),
        ("1.0-RC", (1, ("rc",))),
        ("1-rc", (1, ("rc",))),
        ("1.0-SNAPSHOT", (1, ("snapshot",))),
        ("1.0-snapshot", (1, ("snapshot",))),
        ("1-SP", (1, ("sp",))),
        ("1.0-sp", (1, ("sp",))),
        ("1-GA", (1,)),
        ("1-ga", (1,)),
        ("1.0-FINAL", (1,)),
        ("1-final", (1,)),
        ("1.0-CR", (1, ("rc",))),
        ("1-cr", (1, ("rc",))),
        # test some transition
        ("1.0-alpha1", (1, ("alpha", (1,)))),
        ("1.0-alpha2", (1, ("alpha", (2,)))),
        ("1.0.0alpha1", (1, ("alpha", (1,)))),
        ("1.0-beta1", (1, ("beta", (1,)))),
        ("1-beta2", (1, ("beta", (2,)))),
        ("1.0.0beta1", (1, ("beta", (1,)))),
        ("1.0-BETA1", (1, ("beta", (1,)))),
        ("1-BETA2", (1, ("beta", (2,)))),
        ("1.0.0BETA1", (1, ("beta", (1,)))),
        ("1.0-milestone1", (1, ("milestone", (1,)))),
        ("1.0-milestone2", (1, ("milestone", (2,)))),
        ("1.0.0milestone1", (1, ("milestone", (1,)))),
        ("1.0-MILESTONE1", (1, ("milestone", (1,)))),
        ("1.0-milestone2", (1, ("milestone", (2,)))),
        ("1.0.0MILESTONE1", (1, ("milestone", (1,)))),
        ("1.0-alpha2snapshot", (1, ("alpha", (2, ("snapshot",))))),
    )
    for test, expected in test_pairs:
        v = Version(test)
        assert v._parsed == expected, "Version(%s) == %s, want %s" % (test, v._parsed, expected)
def test_version_qualifiers(self):
    """Qualifier ordering: each entry must sort strictly before every later entry."""
    ordered_qualifiers = (
        "1-alpha2snapshot",
        "1-alpha2",
        "1-alpha-123",
        "1-beta-2",
        "1-beta123",
        "1-m2",
        "1-m11",
        "1-rc",
        "1-cr2",
        "1-rc123",
        "1-SNAPSHOT",
        "1",
        "1-sp",
        "1-sp2",
        "1-sp123",
        "1-abc",
        "1-def",
        "1-pom-1",
        "1-1-snapshot",
        "1-1",
        "1-2",
        "1-123",
    )
    for pos, smaller in enumerate(ordered_qualifiers):
        for larger in ordered_qualifiers[pos + 1:]:
            self._assert_version_order(smaller, larger)
def test_version_numbers(self):
    """Version-number ordering, exercised with both str and unicode literals."""
    ascii_numbers = (
        "2.0",
        "2-1",
        "2.0.a",
        "2.0.0.a",
        "2.0.2",
        "2.0.123",
        "2.1.0",
        "2.1-a",
        "2.1b",
        "2.1-x",
        "2.1-1",
        "2.1.0.1",
        "2.2",
        "2.123",
        "11.a2",
        "11.a11",
        "11.b2",
        "11.b11",
        "11.m2",
        "11.m11",
        "11",
        "11.a",
        "11b",
        "11c",
        "11m",
    )
    unicode_numbers = (
        u"2.0",
        u"2-1",
        u"2.0.a",
        u"2.0.0.a",
        u"2.0.2",
        u"2.0.123",
        u"2.1.0",
        u"2.1-a",
        u"2.1b",
        u"2.1-x",
        u"2.1-1",
        u"2.1.0.1",
        u"2.2",
        u"2.123",
        u"11.a2",
        u"11.a11",
        u"11.b2",
        u"11.b11",
        u"11.m2",
        u"11.m11",
        u"11",
        u"11.a",
        u"11b",
        u"11c",
        u"11m",
    )
    # every entry must sort strictly before all later entries
    for ordered in (ascii_numbers, unicode_numbers):
        for pos, smaller in enumerate(ordered[:-1]):
            for larger in ordered[pos + 1:]:
                self._assert_version_order(smaller, larger)
def test_version_equality(self):
    """Pairs of version strings that must compare equal in every direction."""
    equal_pairs = (
        ("1", "1"), ("1", "1.0"), ("1", "1.0.0"), ("1.0", "1.0.0"),
        ("1", "1-0"), ("1", "1.0-0"), ("1.0", "1.0-0"),
        # no separator between number and character
        ("1a", "1-a"), ("1a", "1.0-a"), ("1a", "1.0.0-a"),
        ("1.0a", "1-a"), ("1.0.0a", "1-a"),
        ("1x", "1-x"), ("1x", "1.0-x"), ("1x", "1.0.0-x"),
        ("1.0x", "1-x"), ("1.0.0x", "1-x"),
        # aliases
        ("1ga", "1"), ("1final", "1"), ("1cr", "1rc"),
        # special "aliases" a, b and m for alpha, beta and milestone
        ("1a1", "1-alpha-1"), ("1b2", "1-beta-2"), ("1m3", "1-milestone-3"),
        # case insensitive
        ("1X", "1x"), ("1A", "1a"), ("1B", "1b"), ("1M", "1m"),
        ("1Ga", "1"), ("1GA", "1"), ("1Final", "1"), ("1FinaL", "1"),
        ("1FINAL", "1"), ("1Cr", "1Rc"), ("1cR", "1rC"),
        ("1m3", "1Milestone3"), ("1m3", "1MileStone3"), ("1m3", "1MILESTONE3"),
        # unicode
        (u"1", "1"), (u"1", "1.0"), (u"1", "1.0.0"), (u"1.0", "1.0.0"),
        (u"1", "1-0"), (u"1", "1.0-0"), (u"1.0", "1.0-0"),
        ("1", u"1"), ("1", u"1.0"), ("1", u"1.0.0"), ("1.0", u"1.0.0"),
        ("1", u"1-0"), ("1", u"1.0-0"), ("1.0", u"1.0-0"),
        (u"1", u"1"), (u"1", u"1.0"), (u"1", u"1.0.0"), (u"1.0", u"1.0.0"),
        (u"1", u"1-0"), (u"1", u"1.0-0"), (u"1.0", u"1.0-0"),
    )
    for left, right in equal_pairs:
        self._assert_version_equal(left, right)
def test_version_compare(self):
self._assert_version_order("1", "2")
self._assert_version_order("1.5", "2")
self._assert_version_order("1", "2.5")
self._assert_version_order("1.0", "1.1")
self._assert_version_order("1.1", "1.2")
self._assert_version_order("1.0.0", "1.1")
self._assert_version_order("1.0.1", "1.1")
self._assert_version_order("1.1", "1.2.0")
self._assert_version_order("1.0-alpha-1", "1.0")
self._assert_version_order("1.0-alpha-1", "1.0-alpha-2")
self._assert_version_order("1.0-alpha-1", "1.0-beta-1")
self._assert_version_order("1.0-beta-1", "1.0-SNAPSHOT")
self._assert_version_order("1.0-SNAPSHOT", "1.0")
self._assert_version_order("1.0-alpha-1-SNAPSHOT", "1.0-alpha-1")
self._assert_version_order("1.0", "1.0-1")
self._assert_version_order("1.0-1", "1.0-2")
self._assert_version_order("1.0.0", "1.0-1")
self._assert_version_order("2.0-1", "2.0.1")
self._assert_version_order("2.0.1-klm", "2.0.1-lmn")
self._assert_version_order("2.0.1", "2.0.1-xyz")
self._assert_version_order("2.0.1", "2.0.1-123")
self._assert_version_order("2.0.1-xyz", "2.0.1-123")
# unicode input
self._assert_version_order(u"1", "2")
self._assert_version_order(u"1.5", "2")
self._assert_version_order(u"1", "2.5")
self._assert_version_order(u"1.0", "1.1")
self._assert_version_order(u"1.1", "1.2")
self._assert_version_order(u"1.0.0", "1.1")
self._assert_version_order(u"1.0.1", "1.1")
self._assert_version_order(u"1.1", "1.2.0")
self._assert_version_order(u"1.0-alpha-1", "1.0")
self._assert_version_order(u"1.0-alpha-1", "1.0-alpha-2")
self._assert_version_order(u"1.0-alpha-1", "1.0-beta-1")
self._assert_version_order(u"1.0-beta-1", "1.0-SNAPSHOT")
self._assert_version_order(u"1.0-SNAPSHOT", "1.0")
self._assert_version_order(u"1.0-alpha-1-SNAPSHOT", "1.0-alpha-1")
self._assert_version_order(u"1.0", "1.0-1")
self._assert_version_order(u"1.0-1", "1.0-2")
self._assert_version_order(u"1.0.0", "1.0-1")
self._assert_version_order(u"2.0-1", "2.0.1")
self._assert_version_order(u"2.0.1-klm", "2.0.1-lmn")
self._assert_version_order(u"2.0.1", "2.0.1-xyz")
self._assert_version_order(u"2.0.1", "2.0.1-123")
self._assert_version_order(u"2.0.1-xyz", "2.0.1-123")
self._assert_version_order("1", u"2")
self._assert_version_order("1.5", u"2")
self._assert_version_order("1", u"2.5")
self._assert_version_order("1.0", u"1.1")
self._assert_version_order("1.1", u"1.2")
self._assert_version_order("1.0.0", u"1.1")
self._assert_version_order("1.0.1", u"1.1")
self._assert_version_order("1.1", u"1.2.0")
self._assert_version_order("1.0-alpha-1", u"1.0")
self._assert_version_order("1.0-alpha-1", u"1.0-alpha-2")
self._assert_version_order("1.0-alpha-1", u"1.0-beta-1")
self._assert_version_order("1.0-beta-1", u"1.0-SNAPSHOT")
self._assert_version_order("1.0-SNAPSHOT", u"1.0")
self._assert_version_order("1.0-alpha-1-SNAPSHOT", u"1.0-alpha-1")
self._assert_version_order("1.0", u"1.0-1")
self._assert_version_order("1.0-1", u"1.0-2")
self._assert_version_order("1.0.0", u"1.0-1")
self._assert_version_order("2.0-1", u"2.0.1")
self._assert_version_order("2.0.1-klm", u"2.0.1-lmn")
self._assert_version_order("2.0.1", u"2.0.1-xyz")
self._assert_version_order("2.0.1", u"2.0.1-123")
self._assert_version_order("2.0.1-xyz", u"2.0.1-123")
self._assert_version_order(u"1", u"2")
self._assert_version_order(u"1.5", u"2")
self._assert_version_order(u"1", u"2.5")
self._assert_version_order(u"1.0", u"1.1")
self._assert_version_order(u"1.1", u"1.2")
self._assert_version_order(u"1.0.0", u"1.1")
self._assert_version_order(u"1.0.1", u"1.1")
self._assert_version_order(u"1.1", u"1.2.0")
self._assert_version_order(u"1.0-alpha-1", u"1.0")
self._assert_version_order(u"1.0-alpha-1", u"1.0-alpha-2")
self._assert_version_order(u"1.0-alpha-1", u"1.0-beta-1")
self._assert_version_order(u"1.0-beta-1", u"1.0-SNAPSHOT")
self._assert_version_order(u"1.0-SNAPSHOT", u"1.0")
self._assert_version_order(u"1.0-alpha-1-SNAPSHOT", u"1.0-alpha-1")
self._assert_version_order(u"1.0", u"1.0-1")
self._assert_version_order(u"1.0-1", u"1.0-2")
self._assert_version_order(u"1.0.0", u"1.0-1")
self._assert_version_order(u"2.0-1", u"2.0.1")
self._assert_version_order(u"2.0.1-klm", u"2.0.1-lmn")
self._assert_version_order(u"2.0.1", u"2.0.1-xyz")
self._assert_version_order(u"2.0.1", u"2.0.1-123")
self._assert_version_order(u"2.0.1-xyz", u"2.0.1-123")
def test_compare(self):
assert 1 < Version("1.0")
assert VersionRange("1.0") == Version("1.0")
def test_compare_errors(self):
v = Version("1.0")
self.assertRaises(RuntimeError, v._compare, v, 1.0)
self.assertRaises(RuntimeError, v._int_compare, v, 1.0)
self.assertRaises(RuntimeError, v._list_compare, v, 1.0)
self.assertRaises(RuntimeError, v._string_compare, v, 1.0)
class TestVersionRange(unittest.TestCase):
def test_no_lower_limit(self):
vr = VersionRange("(,1.0]")
assert len(vr.restrictions) == 1
assert vr.version is None
def test_single_spec(self):
vr = VersionRange("1.0")
assert len(vr.restrictions) == 1
assert str(vr.version) == "1.0"
def test_inclusive_range(self):
vr = | |
<filename>kaloom_kvs_agent/kvs_net.py
# Copyright 2019 Kaloom, Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import os
import socket,struct,netaddr
from oslo_log import log as logging
from neutron_lib import constants as n_const
from neutron.agent.l3 import namespaces
import grpc
# import the generated classes
from kaloom_kvs_agent.stub import service_pb2, service_pb2_grpc
from kaloom_kvs_agent.stub import kvs_msg_pb2, ports_pb2, error_pb2
from kaloom_kvs_agent.common \
import constants as a_const
LOG = logging.getLogger(__name__)
def listPorts(socket_dir, filePrefix):
    """Return a {port_name: port_id} map of all known KVS ports.

    vHost ports are matched by socket-path prefix (socket_dir/filePrefix) and
    reported with that prefix stripped; vDev ports are matched by the neutron
    tap/router interface-name prefixes and reported verbatim.

    :param socket_dir: directory holding the vhost-user sockets
    :param filePrefix: filename prefix of the vhost-user sockets
    :return: dict of port name -> KVS PortID; empty dict on gRPC error
        (bug fix: previously the error path fell through and returned None,
        which crashed callers doing ``ports.keys()``)
    """
    # open a gRPC channel and query the port list
    channel = grpc.insecure_channel(a_const.KVS_SERVER)
    try:
        stub = service_pb2_grpc.kvsStub(channel)
        response = stub.GetPorts(kvs_msg_pb2.GetPortsRequest())
    finally:
        # close the channel even if the RPC raises
        channel.close()
    devices = {}
    if response.error.Code != a_const.noError:
        LOG.error("Error on grpc GetPorts call: Code %(code)s, Message %(msg)s",
                  {'code': response.error.Code, 'msg': response.error.Errormsg})
        return devices
    pathPrefix = os.path.join(socket_dir, filePrefix)
    for Port in response.Ports:
        if Port.Type.WhichOneof("PortConfig") == "vHostPortConfig" and \
                Port.Type.vHostPortConfig.Path.startswith(pathPrefix):
            # vhost port: key is the socket path minus the common prefix
            PortName = Port.Type.vHostPortConfig.Path.replace(pathPrefix, '')
            devices[PortName] = Port.PortID
        elif Port.Type.WhichOneof("PortConfig") == "vDevPortConfig" and \
                Port.Type.vDevPortConfig.IfaceName.startswith(
                    (n_const.TAP_DEVICE_PREFIX, namespaces.INTERNAL_DEV_PREFIX,
                     namespaces.EXTERNAL_DEV_PREFIX)):
            # vdev port: key is the interface name itself
            devices[Port.Type.vDevPortConfig.IfaceName] = Port.PortID
    return devices
def getPort(kvs_device_name, socket_dir, file_prefix):
    """Resolve a KVS device (socket path or interface name) to its PortID.

    :return: the port index, or None when the device is unknown
    """
    path_prefix = os.path.join(socket_dir, file_prefix)
    port_name = kvs_device_name.replace(path_prefix, '')
    # listPorts keys vhost ports by prefix-stripped name, vdev ports verbatim
    return listPorts(socket_dir, file_prefix).get(port_name)
def getPort_partialmatch(part_device_name, socket_dir, file_prefix):
    """Resolve a router device suffix to a full port name and index.

    Tries each known namespace prefix (qg-, qr-) in front of
    ``part_device_name`` and returns (full_name, port_index) for the first
    match. Bug fix: the original loop kept iterating after a hit, so a match
    on the first prefix was returned paired with the *last* candidate name;
    we now stop at the first match.

    :return: (candidate_name, port_index); port_index is None when no
        prefixed name is a known port (candidate_name is then the last tried)
    """
    ports = listPorts(socket_dir, file_prefix)
    port_index = None
    port_name = part_device_name
    for prefix in (namespaces.EXTERNAL_DEV_PREFIX, namespaces.INTERNAL_DEV_PREFIX):
        port_name = prefix + part_device_name
        if port_name in ports:
            port_index = ports[port_name]
            break  # first match wins; keep name and index consistent
    return port_name, port_index
def get_interface_ifindex(socket_dir, socket_file):
    """Return the KVS PortID of the vhost port backed by the given socket.

    :param socket_dir: directory of the vhost-user socket
    :param socket_file: socket filename
    :return: PortID on success, None on gRPC error
    """
    channel = grpc.insecure_channel(a_const.KVS_SERVER)
    try:
        stub = service_pb2_grpc.kvsStub(channel)
        req = kvs_msg_pb2.GetPortIDRequest()
        req.Type.vHostPortConfig.Path = os.path.join(socket_dir, socket_file)
        response = stub.GetPortID(req)
    finally:
        # close the channel even if the RPC raises
        channel.close()
    if response.error.Code != a_const.noError:
        # bug fix: the message previously blamed "GetPorts"; this is GetPortID
        LOG.error("Error on grpc GetPortID call: Code %(code)s, Message %(msg)s",
                  {'code': response.error.Code, 'msg': response.error.Errormsg})
        return None
    return response.PortID
def add_mac_entry(knid, mac, port_index, vlan=0):
    """Install a static local-interface MAC entry in the KVS.

    :param knid: Kaloom network identifier the entry belongs to
    :param mac: colon-separated MAC address
        NOTE(review): the bytes-pattern ``replace(b':', b'')`` assumes ``mac``
        is a bytes object; a str mac would raise TypeError on Python 3 --
        confirm the caller's type.
    :param port_index: KVS PortID the MAC lives behind
    :param vlan: VLAN tag, 0 for untagged
    :return: True on success, False on gRPC error
    """
    LOG.info("Adding MAC entry for VHOST %s", mac)
    macbytes = binascii.unhexlify(mac.replace(b':', b''))
    # open a gRPC channel
    channel = grpc.insecure_channel(a_const.KVS_SERVER)
    # create a stub (client)
    stub = service_pb2_grpc.kvsStub(channel)
    req=kvs_msg_pb2.AddStaticLocalIfaceMacEntryRequest()
    req.Knid = knid
    req.MACAddress = macbytes
    req.PortID = port_index
    req.VlanID = vlan
    # make the call
    response = stub.AddStaticLocalIfaceMacEntry(req)
    #close
    channel.close()
    if response.error.Code == a_const.noError:
        LOG.info("AddStaticLocalIfaceMacEntry successful: KNID: %(knid)s MAC: %(mac)s port: %(port_index)s VLAN:%(vlan)s ",
                 {'knid': knid, 'mac': mac, 'port_index': port_index, 'vlan':vlan})
        return True
    else:
        LOG.error("gRPC Error during add_mac_entry: Error Code: %(code)s Errormsg: %(msg)s ",
                  {'code': response.error.Code, 'msg': response.error.Errormsg})
        return False
def delete_mac_entry(knid, mac):
    """Remove a static MAC entry from the KVS.

    :param knid: Kaloom network identifier the entry belongs to
    :param mac: colon-separated MAC address
        NOTE(review): as in add_mac_entry, the bytes-pattern replace assumes
        ``mac`` is bytes -- confirm the caller's type.
    :return: True on success, False on gRPC error
    """
    LOG.info("Deleting MAC entry for VHOST %s", mac)
    macbytes = binascii.unhexlify(mac.replace(b':', b''))
    # open a gRPC channel
    channel = grpc.insecure_channel(a_const.KVS_SERVER)
    # create a stub (client)
    stub = service_pb2_grpc.kvsStub(channel)
    req=kvs_msg_pb2.DeleteStaticMacEntryRequest()
    req.Knid = knid
    req.MACAddress = macbytes
    # make the call
    response = stub.DeleteStaticMacEntry(req)
    #close
    channel.close()
    if response.error.Code == a_const.noError:
        LOG.info("DeleteStaticMacEntryRequest successful: KNID: %(knid)s MAC: %(mac)s",
                 {'knid': knid, 'mac': mac})
        return True
    else:
        LOG.error("gRPC Error during delete_mac_entry: Error Code: %(code)s Errormsg: %(msg)s ",
                  {'code': response.error.Code, 'msg': response.error.Errormsg})
        return False
def _attach_interface(kvs_device_name, port_index, knid, vlan=0):
    """Attach an existing KVS port to the L2 network identified by knid.

    An already-attached port is treated as success.

    :return: (success, port_index)
    """
    channel = grpc.insecure_channel(a_const.KVS_SERVER)
    stub = service_pb2_grpc.kvsStub(channel)
    request = kvs_msg_pb2.AttachPortToL2NetworkRequest()
    request.PortID = port_index
    request.VlanID = vlan
    request.Knid = knid
    response = stub.AttachPortToL2Network(request)
    channel.close()
    code = response.error.Code
    if code == a_const.noError:
        LOG.info("AttachPortToL2Network successful: kvs_device_name %(device)s, knid %(knid)s, vlan %(vlan)s",
                 {'device': kvs_device_name, 'knid': knid, 'vlan': vlan})
        return True, port_index
    if code == a_const.AlreadyExists:  # already attached to network
        LOG.info("AttachPortToL2Network call: Code %(code)s, Message %(msg)s",
                 {'code': code, 'msg': response.error.Errormsg})
        return True, port_index  # say successful
    LOG.error("Error on grpc AttachPortToL2Network call: Code %(code)s, Message %(msg)s",
              {'code': code, 'msg': response.error.Errormsg})
    return False, port_index
def attach_interface(network_id, network_type,
                     physical_network, knid,
                     kvs_device_name, device_owner, mtu, socket_dir, filePrefix):
    """Look up the KVS port for a device and attach it to network ``knid``.

    ``network_id``, ``network_type``, ``physical_network``, ``device_owner``
    and ``mtu`` are currently unused; they are kept for caller compatibility.

    :return: (success, port_index); port_index is None when the device is
        not a known KVS port
    """
    LOG.info("inside attach_interface")
    # vhost devices are keyed by socket path minus the common prefix;
    # vdev (tap/qr/qg) names contain no prefix so replace() is a no-op
    pathPrefix = os.path.join(socket_dir, filePrefix)
    PortName = kvs_device_name.replace(pathPrefix, '')
    port_index = listPorts(socket_dir, filePrefix).get(PortName)
    if port_index is None:
        LOG.error("port_index could not found for vhost/vdev %s", kvs_device_name)
        return False, port_index
    return _attach_interface(kvs_device_name, port_index, knid)
def configurePort(vhost_path, admin_state_up):
    """Set the administrative state of a KVS port via gRPC.

    :param vhost_path: vhost socket path of the port
        NOTE(review): currently unused -- ``req.PortID`` is never populated
        (see the TODO below), so the request does not identify any port;
        confirm whether this function is dead code or awaiting completion.
    :param admin_state_up: True -> "AdminStateUp", False -> "AdminStateDown"
    :return: None; errors are only logged
    """
    # open a gRPC channel
    channel = grpc.insecure_channel(a_const.KVS_SERVER)
    # create a stub (client)
    stub = service_pb2_grpc.kvsStub(channel)
    # create a valid request message: ConfigurePortRequest
    req=kvs_msg_pb2.ConfigurePortRequest()
    #req.PortID= ##TODO
    # zero values mean "leave unchanged" for MAC and MTU
    req.Conf.MACAddress="00:00:00:00:00:00"
    req.Conf.MTU=0
    if admin_state_up:
        req.Conf.AdminState="AdminStateUp"
    else:
        req.Conf.AdminState="AdminStateDown"
    # make the call
    response = stub.ConfigurePort(req)
    #close
    channel.close()
    #print(response)
    if response.error.Code != a_const.noError:
        LOG.error("Error on grpc ConfigurePortRequest call: Code %(code)s, Message %(msg)s",{'code':response.error.Code, 'msg':response.error.Errormsg})
def create_kvs_vdev_port(device_name, mac, mtu=None):
    """Create a KVS vdev port backed by the named kernel interface.

    :param device_name: interface name for the new vdev port
    :param mac: optional colon-separated MAC address
        NOTE(review): the bytes-pattern replace assumes ``mac`` is bytes --
        confirm the caller's type (see add_mac_entry).
    :param mtu: optional MTU for the new port
    :return: True on success, False on gRPC error
    """
    #create vif
    LOG.info("creating kvs vdev port")
    # open a gRPC channel
    channel = grpc.insecure_channel(a_const.KVS_SERVER)
    # create a stub (client)
    stub = service_pb2_grpc.kvsStub(channel)
    req=kvs_msg_pb2.AddPortRequest()
    req.Type.vDevPortConfig.IfaceName = device_name
    if mac:
        macbytes = binascii.unhexlify(mac.replace(b':', b''))
        LOG.debug("%s", mac)
        req.Type.vDevPortConfig.MACAddress = macbytes
    if mtu:
        req.Type.vDevPortConfig.MTU = mtu
    # make the call
    response=stub.AddPort(req)
    #close channel
    channel.close()
    if response.error.Code == a_const.noError:
        return True
    else:
        LOG.error("gRPC Error during create_kvs_vdev_port: Error Code: %(code)s Errormsg: %(msg)s ",
                  {'code': response.error.Code, 'msg': response.error.Errormsg})
        return False
def _detach_interface(kvs_device_name, port_index, vlan=0):
    """Detach a KVS port from its L2 network.

    A port that was never attached is treated as a no-op; only genuine
    failures are logged. Returns nothing.
    """
    channel = grpc.insecure_channel(a_const.KVS_SERVER)
    stub = service_pb2_grpc.kvsStub(channel)
    request = kvs_msg_pb2.DetachPortFromL2NetworkRequest()
    request.PortID = port_index
    request.VlanID = vlan
    result = stub.DetachPortFromL2Network(request)
    code = result.error.Code
    if code not in (a_const.noError, a_const.PortNotAttached):
        LOG.error("gRPC Error during detach_network on vhost/vdev %(kvs_device_name)s: Error Code: %(code)s"
                  " Errormsg: %(msg)s ", {'kvs_device_name': kvs_device_name, 'code': code,
                                          'msg': result.error.Errormsg})
    channel.close()
def delete_kvs_port(kvs_device_name, socket_dir=None, file_prefix=None):
    """Detach and delete a KVS vhost/vdev port.

    :param kvs_device_name: socket path (vhost) or interface name (vdev)
    :param socket_dir: socket directory; derived from the device path if None
    :param file_prefix: socket filename prefix; defaults to the agent's
        vhost-user prefix if None
    :return: True on success, False when the port is unknown or the RPC fails
    """
    LOG.info("deleting kvs vdev port")
    if socket_dir is None:
        socket_dir = os.path.dirname(kvs_device_name)
    if file_prefix is None:
        file_prefix = a_const.KVS_VHOSTUSER_PREFIX
    # consistency: reuse the shared lookup instead of re-implementing it
    port_index = getPort(kvs_device_name, socket_dir, file_prefix)
    if port_index is None:
        LOG.error("port_index could not found for vhost/vdev %s", kvs_device_name)
        return False
    # detach from any attached L2 network before deleting the port
    _detach_interface(kvs_device_name, port_index)
    channel = grpc.insecure_channel(a_const.KVS_SERVER)
    try:
        stub = service_pb2_grpc.kvsStub(channel)
        req = kvs_msg_pb2.DeletePortRequest()
        req.PortID = port_index
        response = stub.DeletePort(req)
    finally:
        channel.close()
    if response.error.Code == a_const.noError:
        return True
    LOG.error("gRPC Error during delete vhost/vdev %(kvs_device_name)s: Error Code: %(code)s Errormsg: %(msg)s ",
              {'kvs_device_name': kvs_device_name, 'code': response.error.Code, 'msg': response.error.Errormsg})
    return False
def add_anti_spoofing_rule(port_index, mac, ip, vlan = 0):
    """Install an anti-spoofing (allowed MAC/IP) rule on a KVS port.

    :param port_index: KVS PortID the rule applies to
    :param mac: optional colon-separated MAC address
        NOTE(review): bytes-pattern replace assumes ``mac`` is bytes --
        confirm caller type (see add_mac_entry).
    :param ip: optional IPv4/IPv6 address string
        NOTE(review): if netaddr reported a version other than 4 or 6,
        ``ipbytes`` below would be unbound -- in practice version is
        always 4 or 6, but invalid input raises from netaddr itself.
    :param vlan: VLAN tag the rule is scoped to (0 = untagged)
    :return: None; errors are only logged
    """
    # open a gRPC channel
    channel = grpc.insecure_channel(a_const.KVS_SERVER)
    # create a stub (client)
    stub = service_pb2_grpc.kvsStub(channel)
    # create a valid request message: AddAntiSpoofingRuleRequest
    req=kvs_msg_pb2.AddAntiSpoofingRuleRequest()
    req.PortID = port_index
    req.VlanID = vlan
    req.VlanValid = True #VlanID is valid
    if mac:
        LOG.debug("%s", mac)
        macbytes = binascii.unhexlify(mac.replace(b':', b''))
        req.Rule.MACAddress = macbytes
    if ip:
        # pack the address into network-order bytes for the rule
        if netaddr.IPNetwork(ip).version == 4 :
            ipbytes = socket.inet_aton(ip)
        elif netaddr.IPNetwork(ip).version == 6 :
            ipbytes = socket.inet_pton(socket.AF_INET6, ip)
        req.Rule.IP = ipbytes
    # make the call
    response = stub.AddAntiSpoofingRule(req)
    #close
    channel.close()
    #print(response)
    if response.error.Code != a_const.noError:
        LOG.error("Error on grpc AddAntiSpoofingRuleRequest call: Code %(code)s, Message %(msg)s",
                  {'code':response.error.Code, 'msg':response.error.Errormsg})
    else:
        LOG.info("add_anti_spoofing_rule on port_index %s vlan:%s for mac %s and ip %s ",
                 port_index, vlan, mac, ip)
def delete_anti_spoofing_rule(port_index, mac, ip, vlan = 0):
    """Remove an anti-spoofing (allowed MAC/IP) rule from a KVS port.

    Mirrors add_anti_spoofing_rule; see the parameter notes there
    (same bytes/str assumption for ``mac``, same IP packing).

    :return: None; errors are only logged
    """
    # open a gRPC channel
    channel = grpc.insecure_channel(a_const.KVS_SERVER)
    # create a stub (client)
    stub = service_pb2_grpc.kvsStub(channel)
    # create a valid request message: DeleteAntiSpoofingRuleRequest
    req = kvs_msg_pb2.DeleteAntiSpoofingRuleRequest()
    req.PortID = port_index
    req.VlanID = vlan
    req.VlanValid = True #VlanID is valid
    if mac:
        LOG.debug("%s", mac)
        macbytes = binascii.unhexlify(mac.replace(b':', b''))
        req.Rule.MACAddress = macbytes
    if ip:
        # pack the address into network-order bytes for the rule
        if netaddr.IPNetwork(ip).version == 4 :
            ipbytes = socket.inet_aton(ip)
        elif netaddr.IPNetwork(ip).version == 6 :
            ipbytes = socket.inet_pton(socket.AF_INET6, ip)
        LOG.debug("%s", ip)
        req.Rule.IP = ipbytes
    # make the call
    response = stub.DeleteAntiSpoofingRule(req)
    #close
    channel.close()
    #print(response)
    if response.error.Code != a_const.noError:
        LOG.error("Error on grpc DeleteAntiSpoofingRule call: Code %(code)s, Message %(msg)s",
                  {'code':response.error.Code, 'msg':response.error.Errormsg})
    else:
        LOG.info("delete_anti_spoofing_rule on port_index %s vlan:%s for mac %s and ip %s ",
                 port_index, vlan, mac, ip)
def _get_string_pair(bytes_ip, bytes_mac):
len_ip = len(bytes_ip)
if len_ip == 4: ##4 bytes of IPv4
ip = socket.inet_ntoa(bytes_ip)
elif len_ip == 16: ##16 bytes of IPv6
ip = socket.inet_ntop(socket.AF_INET6, bytes_ip)
mac = "%02x:%02x:%02x:%02x:%02x:%02x" % struct.unpack("BBBBBB", bytes_mac)
pair = {"ip": ip , "mac": mac}
return pair
def list_anti_spoofing_rules(port_index, vlan = 0):
# open a gRPC channel
channel = grpc.insecure_channel(a_const.KVS_SERVER)
| |
from collections import defaultdict
from itertools import count
from operator import itemgetter
from pathlib import Path
from typing import Dict, Optional
from typing import List, Tuple, Union
import htbuilder
import streamlit as st
from htbuilder import span, div, script, style, link, styles, HtmlElement, br
from htbuilder.units import px
from spacy.tokens import Doc
# Qualitative color palette for highlighting summary sentences; colors are
# assigned cyclically by sentence index (see get_color below).
palette = [
    "#66c2a5",
    "#fc8d62",
    "#8da0cb",
    "#e78ac3",
    "#a6d854",
    "#ffd92f",
    "#e5c494",
    "#b3b3b3",
]
# Gray used for de-emphasized / inactive annotations
inactive_color = "#BBB"
def local_stylesheet(path):
    """Read a CSS file from disk and wrap its contents in an inline <style> element."""
    with open(path) as css_file:
        return style()(css_file.read())
def remote_stylesheet(url):
    """Build a <link> element that loads a remote stylesheet.

    Bug fix: without rel="stylesheet" the browser does not treat the link
    as a stylesheet, so the emitted tag was inert.
    """
    return link(
        rel="stylesheet",
        href=url
    )
def local_script(path):
    """Read a JavaScript file from disk and wrap it in an inline <script> element."""
    with open(path) as js_file:
        return script()(js_file.read())
def remote_script(url):
    """Build a <script src=...> element referencing a remote script."""
    return script(src=url)
def get_color(sent_idx):
    """Pick a palette color for a sentence, cycling when sentences outnumber colors."""
    color_idx = sent_idx % len(palette)
    return palette[color_idx]
def hex_to_rgb(hex):
    """Convert "#RRGGBB" (leading '#' optional) to an (r, g, b) tuple of ints."""
    digits = hex.replace("#", '')
    return tuple(int(digits[offset:offset + 2], 16) for offset in (0, 2, 4))
def color_with_opacity(hex_color, opacity):
    """Combine a hex color and an opacity into a CSS rgba(...) string."""
    red, green, blue = hex_to_rgb(hex_color)
    return f"rgba({red},{green},{blue},{opacity:.2f})"
class Component:
    """Base class for HTML fragments rendered into Streamlit."""

    def show(self, width=None, height=None, scrolling=True, **kwargs):
        """Render this component inside the Streamlit app.

        Extra keyword arguments are forwarded as CSS styles on the wrapping div.
        """
        out = div(style=styles(
            **kwargs
        ))(self.html())
        html = str(out)
        st.components.v1.html(html, width=width, height=height, scrolling=scrolling)

    def html(self):
        """Return the component's HTML content; subclasses must override.

        Bug fix: ``raise NotImplemented`` raised a TypeError (NotImplemented
        is a constant, not an exception class); raise NotImplementedError.
        """
        raise NotImplementedError
class MainView(Component):
    """Side-by-side document/summary visualization with alignment highlights.

    Renders the source document next to a list of candidate summaries and
    annotates semantic (token-similarity) highlights and lexical (n-gram)
    underlines between them.
    """

    def __init__(
        self,
        document: Doc,
        summaries: List[Doc],
        semantic_alignments: Optional[List[Dict]],
        lexical_alignments: Optional[List[Dict]],
        layout: str,
        scroll: bool,
        gray_out_stopwords: bool
    ):
        # document: source document as a spaCy Doc (doc._.name used as title)
        # summaries: one spaCy Doc per candidate summary
        # semantic_alignments: per-summary {summary_token_idx: [(doc_token_idx, sim), ...]},
        #   or None to disable semantic highlighting
        # lexical_alignments: per-summary {(start, end): [(doc_start, doc_end), ...]},
        #   or None to disable lexical underlines
        # layout: used as the CSS class suffix "{layout}-layout" on the container
        # scroll: give the doc/summary panes their own scrollbars
        # gray_out_stopwords: visually de-emphasize stopwords/punctuation
        self.document = document
        self.summaries = summaries
        self.semantic_alignments = semantic_alignments
        self.lexical_alignments = lexical_alignments
        self.layout = layout
        self.scroll = scroll
        self.gray_out_stopwords = gray_out_stopwords

    def html(self):
        """Build the full HTML payload (stylesheets, body, scripts)."""
        # Add document elements
        if self.document._.name == 'Document':
            document_name = 'Source Document'
        else:
            document_name = self.document._.name + ' summary'
        doc_header = div(
            id_="document-header"
        )(
            document_name
        )
        doc_elements = []
        # Add document content, which comprises multiple elements, one for each summary. Only the element corresponding to
        # selected summary will be visible.
        mu = MultiUnderline()
        for summary_idx, summary in enumerate(self.summaries):
            # Map each summary token index to the index of its sentence
            token_idx_to_sent_idx = {}
            for sent_idx, sent in enumerate(summary.sents):
                for token in sent:
                    token_idx_to_sent_idx[token.i] = sent_idx
            is_selected_summary = (summary_idx == 0)  # By default, first summary is selected
            # Invert the semantic alignment: doc token -> [(summary token, sim), ...]
            if self.semantic_alignments is not None:
                doc_token_idx_to_matches = defaultdict(list)
                semantic_alignment = self.semantic_alignments[summary_idx]
                for summary_token_idx, matches in semantic_alignment.items():
                    for doc_token_idx, sim in matches:
                        doc_token_idx_to_matches[doc_token_idx].append((summary_token_idx, sim))
            else:
                doc_token_idx_to_matches = {}
            token_elements = []
            for doc_token_idx, doc_token in enumerate(self.document):
                if doc_token.is_stop or doc_token.is_punct:
                    classes = ["stopword"]
                    if self.gray_out_stopwords:
                        classes.append("grayed-out")
                    el = span(
                        _class=" ".join(classes)
                    )(
                        doc_token.text
                    )
                else:
                    matches = doc_token_idx_to_matches.get(doc_token_idx)
                    if matches:
                        # Color the doc token by its best-matching summary token
                        summary_token_idx, sim = max(matches, key=itemgetter(1))
                        sent_idx = token_idx_to_sent_idx[summary_token_idx]
                        color_primary = get_color(sent_idx)
                        highlight_color_primary = color_with_opacity(color_primary, sim)
                        props = {
                            'data-highlight-id': str(doc_token_idx),
                            'data-primary-color': highlight_color_primary
                        }
                        match_classes = []
                        for summary_token_idx, sim in matches:
                            sent_idx = token_idx_to_sent_idx[summary_token_idx]
                            match_classes.append(f"summary-highlight-{summary_idx}-{summary_token_idx}")
                            color = color_with_opacity(get_color(sent_idx), sim)
                            props[f"data-color-{summary_idx}-{summary_token_idx}"] = color
                        props["data-match-classes"] = " ".join(match_classes)
                        el = self._highlight(
                            doc_token.text,
                            highlight_color_primary,
                            color_primary,
                            match_classes + ["annotation-hidden"],
                            **props
                        )
                    else:
                        el = doc_token.text
                token_elements.append(el)
            # Lexical (n-gram) underline spans in the document pane
            spans = []
            if self.lexical_alignments is not None:
                lexical_alignment = self.lexical_alignments[summary_idx]
                for summary_span, doc_spans in lexical_alignment.items():
                    summary_span_start, summary_span_end = summary_span
                    span_id = f"{summary_idx}-{summary_span_start}-{summary_span_end}"
                    sent_idx = token_idx_to_sent_idx[summary_span_start]
                    for doc_span_start, doc_span_end in doc_spans:
                        spans.append((
                            doc_span_start,
                            doc_span_end,
                            sent_idx,
                            get_color(sent_idx),
                            span_id
                        ))
            token_elements = mu.markup(token_elements, spans)
            classes = ["main-doc", "bordered"]
            if self.scroll:
                classes.append("scroll")
            # NOTE(review): the trailing comma makes main_doc a 1-tuple;
            # htbuilder appears to tolerate tuple children -- confirm intentional.
            main_doc = div(
                _class=" ".join(classes)
            )(
                token_elements
            ),
            classes = ["doc"]
            if is_selected_summary:
                classes.append("display")
            else:
                classes.append("nodisplay")
            doc_elements.append(
                div(
                    **{
                        "class": " ".join(classes),
                        "data-index": summary_idx
                    }
                )(
                    main_doc,
                    div(_class="proxy-doc"),
                    div(_class="proxy-scroll")
                )
            )
        summary_title = "Summary"
        summary_header = div(
            id_="summary-header"
        )(
            summary_title,
            div(id="summary-header-gap"),
        )
        # Build the selectable summary list (mirrors the document pane logic)
        summary_items = []
        for summary_idx, summary in enumerate(self.summaries):
            token_idx_to_sent_idx = {}
            for sent_idx, sent in enumerate(summary.sents):
                for token in sent:
                    token_idx_to_sent_idx[token.i] = sent_idx
            spans = []
            matches_ngram = [False] * len(list(summary))
            if self.lexical_alignments is not None:
                lexical_alignment = self.lexical_alignments[summary_idx]
                for summary_span in lexical_alignment.keys():
                    start, end = summary_span
                    matches_ngram[slice(start, end)] = [True] * (end - start)
                    span_id = f"{summary_idx}-{start}-{end}"
                    sent_idx = token_idx_to_sent_idx[start]
                    spans.append((
                        start,
                        end,
                        sent_idx,
                        get_color(sent_idx),
                        span_id
                    ))
            if self.semantic_alignments is not None:
                semantic_alignment = self.semantic_alignments[summary_idx]
            else:
                semantic_alignment = {}
            token_elements = []
            for token_idx, token in enumerate(summary):
                if token.is_stop or token.is_punct:
                    classes = ["stopword"]
                    if self.gray_out_stopwords:
                        classes.append("grayed-out")
                    el = span(
                        _class=" ".join(classes)
                    )(
                        token.text
                    )
                else:
                    classes = []
                    if token.ent_iob_ in ('I', 'B'):
                        classes.append("entity")
                    if matches_ngram[token_idx]:
                        classes.append("matches-ngram")
                    matches = semantic_alignment.get(token_idx)
                    if matches:
                        # Annotate with the single best-matching doc token
                        top_match = max(matches, key=itemgetter(1))
                        top_sim = max(top_match[1], 0)
                        top_doc_token_idx = top_match[0]
                        props = {
                            "data-highlight-id": f"{summary_idx}-{token_idx}",
                            "data-top-doc-highlight-id": str(top_doc_token_idx),
                            "data-top-doc-sim": f"{top_sim:.2f}",
                        }
                        classes.extend([
                            "annotation-hidden",
                            f"summary-highlight-{summary_idx}-{token_idx}"
                        ])
                        sent_idx = token_idx_to_sent_idx[token_idx]
                        el = self._highlight(
                            token.text,
                            color_with_opacity(get_color(sent_idx), top_sim),
                            color_with_opacity(get_color(sent_idx), 1),
                            classes,
                            **props
                        )
                    else:
                        if classes:
                            el = span(_class=" ".join(classes))(token.text)
                        else:
                            el = token.text
                token_elements.append(el)
            token_elements = mu.markup(token_elements, spans)
            classes = ["summary-item"]
            if summary_idx == 0:  # Default is for first summary to be selected
                classes.append("selected")
            summary_items.append(
                div(
                    **{"class": ' '.join(classes), "data-index": summary_idx}
                )(
                    div(_class="name")(summary._.name),
                    div(_class="content")(token_elements)
                )
            )
        classes = ["summary-list", "bordered"]
        if self.scroll:
            classes.append("scroll")
        if self.lexical_alignments is not None:
            classes.append("has-lexical-alignment")
        if self.semantic_alignments is not None:
            classes.append("has-semantic-alignment")
        summary_list = div(
            _class=" ".join(classes)
        )(
            summary_items
        )
        # Legend / toggle controls for the annotation types
        annotation_key = \
            """
            <ul class="annotation-key">
            <li class="annotation-key-label">Annotations:</li>
            <li id="option-lexical" class="option selected">
            <span class="annotation-key-ngram">N-Gram overlap</span>
            </li>
            <li id="option-semantic" class="option selected">
            <span class="annotation-key-semantic">Semantic overlap</span>
            </li>
            <li id="option-novel" class="option selected">
            <span class="annotation-key-novel">Novel words</span>
            </li>
            <li id="option-entity" class="option selected">
            <span class="annotation-key-entity">Novel entities</span>
            </li>
            </ul>
            """
        body = div(
            annotation_key,
            div(
                _class=f"vis-container {self.layout}-layout"
            )(
                div(
                    _class="doc-container"
                )(
                    doc_header,
                    *doc_elements
                ),
                div(
                    _class="summary-container"
                )(
                    summary_header,
                    summary_list
                )
            ),
        )
        # Assemble the final payload: CDN CSS, local resources, body, CDN JS
        return [
            """<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/css/bootstrap.min.css" rel="stylesheet" integrity="<KEY>" crossorigin="anonymous">""",
            local_stylesheet(Path(__file__).parent / "resources" / "summvis.css"),
            """<link rel="preconnect" href="https://fonts.gstatic.com">
            <link href="https://fonts.googleapis.com/css2?family=Roboto:wght@400;500&display=swap" rel="stylesheet">""",
            body,
            """<script
            src="https://code.jquery.com/jquery-3.5.1.min.js"
            integrity="sha256-9/aliU8dGd2tb6OSsuzixeV4y/faTqgFtohetphbbj0="
            crossorigin="anonymous"></script>
            <script src="https://cdn.jsdelivr.net/npm/bootstrap@4.6.0/dist/js/bootstrap.bundle.min.js"
            integrity="<KEY>"
            crossorigin="anonymous"></script>""",
            local_script(Path(__file__).parent / "resources" / "jquery.color-2.1.2.min.js"),
            local_script(Path(__file__).parent / "resources" / "summvis.js"),
            """<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/js/bootstrap.bundle.min.js" integrity="<KEY>" crossorigin="anonymous"></script>"""
        ]

    def _highlight(
        self,
        token: Union[str, HtmlElement],
        background_color,
        dotted_underline_color,
        classes: List[str],
        **props
    ):
        # Wrap a token in a highlight span: background fill plus a dotted
        # underline in the (full-opacity) sentence color.
        return span(
            _class=" ".join(classes + ["highlight"]),
            style=styles(
                background_color=background_color,
                border_bottom=f"4px dotted {dotted_underline_color}",
            ),
            **props
        )(token)
# Separator emitted between tokens when joining marked-up elements
SPACE = " "
class MultiUnderline:
    def __init__(
        self,
        underline_thickness=3,
        underline_spacing=1
    ):
        # Pixel thickness of each underline and the vertical gap between
        # stacked underlines; stored for use when rendering underline slots.
        self.underline_thickness = underline_thickness
        self.underline_spacing = underline_spacing
    def markup(
        self,
        tokens: List[Union[str, HtmlElement]],
        spans: List[Tuple[int, int, int, str, str]]
    ):
        """Style text with multiple layers of colored underlines.
        Args:
            tokens: list of tokens, either string or html element
            spans: list of (start_pos, end_pos, rank, color, id) tuples defined as:
                start_pos: start position of underline span
                end_pos: end position of underline span
                rank: rank for stacking order of underlines, all else being equal
                color: color of underline
                id: id of underline (encoded as a class label in resulting html element)
        Returns:
            List of HTML elements
        """
        # NOTE(review): local loop variables deliberately reuse the names
        # `span`/`spans`, shadowing the htbuilder `span` import inside this
        # method (it is never called here, so this is safe but confusing).
        # Map from span start position to span
        start_to_spans = defaultdict(list)
        for span in spans:
            start = span[0]
            start_to_spans[start].append(span)
        # Map from each underline slot position to list of active spans
        slot_to_spans = {}
        # Collection of html elements
        elements = []
        first_token_in_line = True
        for pos, token in enumerate(tokens):
            # Remove spans that are no longer active (end < pos)
            slot_to_spans = defaultdict(
                list,
                {
                    slot: [span for span in spans if span[1] > pos]  # span[1] contains end of spans
                    for slot, spans in slot_to_spans.items() if spans
                }
            )
            # Add underlines to space between tokens for any continuing underlines
            if first_token_in_line:
                first_token_in_line = False
            else:
                elements.append(self._get_underline_element(SPACE, slot_to_spans))
            # Find slot for any new spans
            new_spans = start_to_spans.pop(pos, None)
            if new_spans:
                new_spans.sort(
                    key=lambda span: (-(span[1] - span[0]), span[2]))  # Sort by span length (reversed), rank
                for new_span in new_spans:
                    # Find an existing slot or add a new one
                    for slot, spans in sorted(slot_to_spans.items(), key=itemgetter(0)):  # Sort by slot index
                        if spans:
                            containing_span = spans[
                                0]  # The first span in the slot strictly contains all other spans
                            containing_start, containing_end = containing_span[0:2]
                            containing_color = containing_span[3]
                            start, end = new_span[0:2]
                            color = new_span[3]
                            # If the new span (1) is strictly contained in this span, or (2) exactly matches this span
                            # and is the same color, then add span to this slot
                            if end <= containing_end and (
                                    (start > containing_start or end < containing_end) or
                                    (start == containing_start and end == containing_end and color == containing_color)
                            ):
                                spans.append(new_span)
                                break
                    else:
                        # for-else: no existing slot accepted the span.
                        # Find a new slot index to add the span
                        for slot_index in count():
                            spans = slot_to_spans[slot_index]
                            if not spans:  # If slot is free, take it
                                spans.append(new_span)
                                break
            if token in ("\n", "\r", "\r\n"):
                # Hard line break: emit <br> and suppress the next inter-token
                # underline (no continuation across lines)
                elements.append(br())
                first_token_in_line = True
            else:
                # Add underlines to token for all active spans
                elements.append(self._get_underline_element(token, slot_to_spans))
        return elements
def _get_underline_element(self, token, slot_to_spans):
if not slot_to_spans:
return token
max_slot_index = max(slot_to_spans.keys())
element = token
for slot_index in range(max_slot_index + 1):
spans = | |
with the specified name exists in the database.
:param name: name of the mission
:return: True if such mission exists
"""
record = self._run_query("MATCH (m:Mission) \
WHERE m.name = $name \
RETURN m.name",
**{'name': name})
return record.single() is not None
    def delete_mission(self, name):
        """
        Delete mission with specified name, detaching all its relationships.

        :param name: name of the mission
        :return: result object of the DETACH DELETE query
            (doc fix: previously documented as "True if such mission exists",
            copied from the existence-check method above)
        """
        return self._run_query("MATCH (m:Mission) \
                                WHERE m.name = $name \
                                DETACH DELETE m",
                               **{'name': name})
    def get_mission_configurations(self, name):
        """
        Returns representation of all configurations related to the mission.

        :param name: name of mission
        :return: list of dicts (one per Configuration node) with keys
            config_id, confidentiality, integrity, availability and time
        """
        return self._run_query(
            'MATCH (m:Mission {name: $name})-[:HAS]->(c:Configuration) RETURN {config_id: c.config_id, \
            confidentiality: c.confidentiality, integrity: c.integrity, availability: c.availability, \
            time: c.time} AS configuration', **{'name': name}).data()
    def get_mission_hosts(self, name):
        """
        Returns representation of all hosts related to the mission.

        :param name: name of mission
        :return: list of host entries parsed from the mission's stored
                 JSON structure (m.structure -> nodes -> hosts)
        """
        # The mission structure is persisted as a JSON blob on the Mission node.
        mission_data = self._run_query('MATCH (m:Mission {name: $name}) RETURN m.structure AS structure',
                                       **{'name': name}).single().data()['structure']
        mission_json = json.loads(mission_data)
        hosts = mission_json['nodes']['hosts']
        return hosts
    def get_configuration(self, mission, config_id):
        """
        Returns representation of the specified configuration.

        :param mission: name of the mission
        :param config_id: ID of the configuration for the specified mission
        :return: list of dicts keyed 'host' with per-host CIA weights,
                 IP address and hostname
        """
        return self._run_query(
            'MATCH (:Mission {name: $name})-[:HAS]->(c:Configuration {config_id: $config_id})-[r:CONTAINS]->(h:Host)<-[:IS_A]-(:Node)-[:HAS_ASSIGNED]->(ip:IP) \
            RETURN {confidentiality: r.confidentiality, integrity: r.integrity, availability: r.availability, \
            ip_address: ip.address, hostname: h.hostname} as host', **{'name': mission, 'config_id': config_id}).data()
    def get_missions_hosts_evaluation(self):
        """
        Returns all hosts which belong to the missions together with their worst-case
        evaluation over all mission configurations (max per CIA component).

        :return: list of dicts keyed 'host' with conf/integ/avail maxima,
                 hostname and ip_address
        """
        return self._run_query(
            "MATCH (n:Mission)-[:HAS]->(:Configuration)-[c:CONTAINS]->(h:Host)<-[:IS_A]-(:Node)-[:HAS_ASSIGNED]->(ip:IP) \
            RETURN {conf: max(c.confidentiality), integ: max(c.integrity), avail: max(c.availability), \
            hostname: h.hostname, ip_address: ip.address} as host").data()
"______________________________________LIGHT BLUE LAYER_____________________________________________________"
def get_all_ips(self, limit):
"""
Get all IPs from database up to limit {limit}.
:param limit: self explanatory
:return: IPs
"""
return self._run_query("MATCH (ip:IP) RETURN ip LIMIT $limit", **{'limit': limit})
def ip_exists(self, ip):
"""
Checks whether ip with specified address exists in the database.
:param ip: ip address
:return: True if such ip exists
"""
record = self._run_query("MATCH (ip:IP) WHERE ip.address = $ip RETURN ip.address", **{'ip': ip})
return record.single() is not None
    def get_ip_details(self, ip):
        """
        Get details about given IP from the database (domain_name, subnet, org_unit, contact).

        Domain name and organization unit are optional matches, so their
        collections may be empty.

        :param ip: checked ip
        :return: single record keyed 'details' with subnet, contact,
                 domain_name and organization collections
        """
        return self._run_query("MATCH (ip:IP) "
                               "WHERE ip.address = $ip "
                               "WITH ip "
                               "MATCH (ip)-[:PART_OF]->(subnet:Subnet)-[:HAS]->(contact:Contact) "
                               "OPTIONAL MATCH (ip)-[:RESOLVES_TO]->(domain:DomainName) "
                               "WITH ip, subnet, domain, contact "
                               "OPTIONAL MATCH (subnet)-[:PART_OF]->(org:OrganizationUnit)"
                               "RETURN {subnet: collect(DISTINCT(subnet))"
                               ", contact: collect(DISTINCT(contact))"
                               ", domain_name: collect(DISTINCT(domain))"
                               ", organization: collect(DISTINCT(org))} AS details", **{'ip': ip})
    def get_ip_sec_events(self, ip, limit):
        """
        Get security events whose source is the given IP, newest first.

        :param ip: checked ip
        :param limit: maximum number of events to return
        :return: records keyed 'event' with time, type, description, confirmed
        """
        return self._run_query("MATCH (ip:IP {address: $ip})-[:SOURCE_OF]->(sec:SecurityEvent) "
                               "RETURN {time: toString(sec.detection_time)"
                               ", type: sec.type"
                               ", description: sec.description"
                               ", confirmed: sec.confirmed} AS event "
                               "ORDER BY event.time desc LIMIT $limit", **{'ip': ip, 'limit': limit})
    def get_ip_active_events(self, ip, limit):
        """
        Get last detected events from every type of events related to given IP:
        for each event type, the events from the most recent day that type
        was detected.

        :param ip: checked ip
        :param limit: currently unused by the query (see NOTE below)
        :return: records keyed 'event' with time, type, description, confirmed
        """
        # NOTE(review): $limit is passed to the driver but never referenced in
        # the Cypher text, so all matching events are returned -- confirm intended.
        return self._run_query("MATCH (ip:IP {address: $ip})-[:SOURCE_OF]->(sec:SecurityEvent) "
                               "WITH date({year:sec.detection_time.year"
                               ", month:sec.detection_time.month"
                               ", day:sec.detection_time.day}) as last, sec.type as threat "
                               "WITH threat, max(last) as last "
                               "MATCH (ip:IP {address: $ip})-[:SOURCE_OF]->(sec:SecurityEvent {type: threat}) "
                               "WHERE last = date(sec.detection_time) "
                               "RETURN {time: toString(sec.detection_time)"
                               ", type: sec.type"
                               ", description: sec.description"
                               ", confirmed: sec.confirmed} AS event", **{'ip': ip, 'limit': limit})
def get_ip_date_events(self, ip, date, limit):
"""
Get all events which happened on date <date> and are related to ip <ip> up to limit <limit>.
:param ip: checked ip
:param date: date
:param limit: limit
:return: Sec. events
"""
if len(date) == 4: # only year provided
return self._run_query("MATCH (ip:IP {address: $ip})-[:SOURCE_OF]->(sec:SecurityEvent) "
"WHERE sec.detection_time.year = date($date).year "
"RETURN {time: toString(sec.detection_time)"
", type: sec.type"
", description: sec.description"
", confirmed: sec.confirmed} AS event "
"LIMIT $limit", **{'ip': ip, 'date': date, 'limit': limit})
elif len(date) == 7: # only year and month provided
return self._run_query("MATCH (ip:IP {address: $ip})-[:SOURCE_OF]->(sec:SecurityEvent) "
"WHERE sec.detection_time.year = date($date).year "
"AND sec.detection_time.month = date($date).month "
"RETURN {time: toString(sec.detection_time)"
", type: sec.type"
", description: sec.description"
", confirmed: sec.confirmed} AS event "
"LIMIT $limit", **{'ip': ip, 'date': date, 'limit': limit})
return self._run_query("MATCH (ip:IP {address: $ip})-[:SOURCE_OF]->(sec:SecurityEvent) "
"WHERE sec.detection_time.year = date($date).year "
"AND sec.detection_time.month = date($date).month "
"AND sec.detection_time.day = date($date).day "
"RETURN {time: toString(sec.detection_time)"
", type: sec.type"
", description: sec.description"
", confirmed: sec.confirmed} AS event "
"LIMIT $limit", **{'ip': ip, 'date': date, 'limit': limit})
    def get_ip_services(self, ip, limit):
        """
        Get all network services running on the host assigned to ip <ip>,
        up to limit <limit>. Uses OPTIONAL MATCH, so a host with no services
        yields a null row.

        :param ip: checked ip
        :param limit: maximum number of services to return
        :return: NetworkService nodes
        """
        return self._run_query("MATCH (ip:IP {address: $ip})<-[:HAS_ASSIGNED]-(nod:Node)-[:IS_A]->(host:Host) "
                               "WITH host "
                               "OPTIONAL MATCH (host)<-[:ON]-(net:NetworkService) "
                               "RETURN net LIMIT $limit", **{'ip': ip, 'limit': limit})
    def get_ip_software(self, ip, limit):
        """
        Get all software versions installed on the host assigned to ip <ip>,
        up to limit <limit>. (Docstring previously copy-pasted from the
        services query.)

        :param ip: checked ip
        :param limit: maximum number of software entries to return
        :return: distinct SoftwareVersion nodes
        """
        return self._run_query("MATCH (ip:IP {address: $ip})<-[:HAS_ASSIGNED]-(nod:Node)-[:IS_A]->(host:Host) "
                               "WITH host "
                               "OPTIONAL MATCH (host)<-[:ON]-(soft:SoftwareVersion) "
                               "RETURN DISTINCT(soft) "
                               "LIMIT $limit", **{'ip': ip, 'limit': limit})
    def get_ip_cve(self, ip, limit):
        """
        Returns all CVE related to ip <ip> up to limit <limit>, reached via
        the host's installed software versions and their vulnerabilities.

        :param ip: ip
        :param limit: maximum number of CVEs to return
        :return: CVE nodes
        """
        # NOTE(review): the [:IS_A] pattern here is undirected, unlike the
        # directed form used by the sibling queries -- confirm intended.
        return self._run_query("MATCH (ip:IP {address: $ip})<-[:HAS_ASSIGNED]-(nod:Node)-[:IS_A]-(host:Host) "
                               "WITH host "
                               "MATCH (host)<-[:ON]-(soft:SoftwareVersion)<-[:IN]-(vul:Vulnerability)-[:REFERS_TO]->(cve:CVE) "
                               "RETURN cve LIMIT $limit", **{'ip': ip, 'limit': limit})
def subnet_exists(self, subnet):
"""
Checks whether subnet exists in database.
:param subnet: self explanatory
:return: Details
"""
record = self._run_query("MATCH (subnet:Subnet) "
"WHERE subnet.range = $subnet RETURN subnet", **{'subnet': subnet})
return record.single() is not None
def get_subnets(self, limit):
"""
Returns all subnets up to limit <limit>.
:param limit: self explanatory
:return: Subnets
"""
return self._run_query("MATCH (subnet:Subnet) RETURN subnet LIMIT $limit", **{'limit': limit})
    def get_subnets_details(self, subnet):
        """
        Returns detailed info about subnet (its contact and organization unit).
        Both relations are optional, so either collection may be empty.

        :param subnet: subnet range
        :return: single record keyed 'details' with contact and organization
        """
        return self._run_query("MATCH (subnet:Subnet {range: $subnet}) "
                               "WITH subnet "
                               "OPTIONAL MATCH (subnet)-[:PART_OF]->(org:OrganizationUnit) "
                               "OPTIONAL MATCH (subnet)-[:HAS]-(contact:Contact) "
                               "RETURN {contact: collect(DISTINCT(contact))"
                               ", organization: collect(DISTINCT(org))} AS details ", **{'subnet': subnet})
    def get_subnet_ips(self, subnet, limit):
        """
        Returns IPs that are part of subnet <subnet>, up to limit <limit>.

        :param subnet: subnet range
        :param limit: maximum number of IPs to return
        :return: distinct IP nodes
        """
        return self._run_query("MATCH (subnet:Subnet {range: $subnet})<-[:PART_OF]-(ip:IP) "
                               "RETURN DISTINCT(ip) LIMIT $limit", **{'subnet': subnet, 'limit': limit})
"____________________________________________GREEN LAYER________________________________________________________"
def get_software_resources(self, limit):
"""
Get software resources up to limit <limit>.
:param limit: self explanatory
:return: Software resources
"""
return self._run_query("MATCH (soft:SoftwareVersion) RETURN soft LIMIT $limit", **{'limit': limit})
def software_exists(self, software):
"""
Checks whether given software exists in database.
:param software: to be checked
:return: True if software is in database, false otherwise
"""
record = self._run_query("MATCH (soft:SoftwareVersion) "
"WHERE soft.version = $software RETURN soft.version", **{'software': software})
return record.single() is not None
    def get_software_ips(self, software, limit):
        """
        Get all IPs whose host runs software <software>, up to limit <limit>.

        :param software: software version string
        :param limit: maximum number of IPs to return
        :return: IP nodes
        """
        return self._run_query("MATCH(soft:SoftwareVersion {version: $software})-[:ON]->(host:Host)<-[:IS_A]-(node:Node)-[:HAS_ASSIGNED]->(ip:IP) "
                               "RETURN ip LIMIT $limit", **{'software': software, 'limit': limit})
def get_network_services(self, limit):
"""
Get network services up to limit <limit>.
:param limit: self explanatory
:return: Network Services
"""
return self._run_query("MATCH (service:NetworkService) RETURN service LIMIT $limit", **{'limit': limit})
def network_service_exists(self, service):
"""
Checks whether service <service> exists.
:param service: checked entity
:return: True if service is in database, false otherwise
"""
record = self._run_query("MATCH (serv:NetworkService) "
"WHERE serv.service = $service RETURN serv.service", **{'service': service})
return record.single() is not None
    def get_network_service_details(self, service):
        """
        Get details of network service <service>: name, tag, port, protocol.

        :param service: service name
        :return: query result with one map per matching service
        """
        return self._run_query("MATCH(serv:NetworkService { service: $service}) "
                               "RETURN {name: serv.service, tag: serv.tag, port: serv.port, protocol: serv.protocol}",
                               **{'service': service})
    def get_network_service_ips(self, service, limit):
        """
        Get all IPs whose host runs network service <service>, up to limit <limit>.

        :param service: service name
        :param limit: maximum number of IPs to return
        :return: IP nodes
        """
        return self._run_query("MATCH(serv:NetworkService {service: $service})-[:ON]->(host:Host)<-[:IS_A]-(node:Node)-[:HAS_ASSIGNED]->(ip:IP) "
                               "RETURN ip LIMIT $limit", **{'service': service, 'limit': limit})
"____________________________________________ORANGE LAYER________________________________________________________"
def get_all_events(self, limit):
| |
    def connect(self, connector):
        # Attach this node to a parent Node joined by `connector` ('AND'/'OR').
        # NOTE(review): if a parent already exists, the connector argument is
        # ignored and this node is appended to that parent again -- confirm
        # repeated calls are intended to behave this way.
        if self.parent is None:
            self.parent = Node(connector)
        self.parent.children.append(self)
    def __or__(self, rhs):
        # q1 | q2: group self under an OR parent, then let the parent absorb rhs.
        self.connect('OR')
        return self.parent | rhs
    def __and__(self, rhs):
        # q1 & q2: group self under an AND parent, then let the parent absorb rhs.
        self.connect('AND')
        return self.parent & rhs
    def __invert__(self):
        # ~q: toggle negation in place and return self for chaining.
        self.negated = not self.negated
        return self
def __unicode__(self):
bits = ['%s = %s' % (k, v) for k, v in self.query.items()]
if len(self.query.items()) > 1:
connector = ' AND '
expr = '(%s)' % connector.join(bits)
else:
expr = bits[0]
if self.negated:
expr = 'NOT %s' % expr
return expr
def parseq(*args, **kwargs):
    """
    Collect positional Q/Node pieces and keyword lookups into a single Node.

    :param args: Q or Node instances
    :param kwargs: field lookups, wrapped into one Q
    :return: a Node whose children are all supplied pieces
    :raises TypeError: if a positional argument is not a Q or Node
    """
    node = Node()
    for piece in args:
        if isinstance(piece, (Q, Node)):
            node.children.append(piece)
        else:
            # Bug fix: the message was previously passed as a second TypeError
            # argument ('%s', piece), so the placeholder was never interpolated.
            raise TypeError('Unknown object: %s' % (piece,))
    if kwargs:
        node.children.append(Q(**kwargs))
    return node
class EmptyResultException(Exception):
    """Raised for __in lookups against an empty sequence: the query can match nothing."""
    pass
class BaseQuery(object):
    """
    Base class for SQL query builders over a peewee-style Model.

    Tracks per-model WHERE trees and JOINs between models, and compiles them
    into SQL text plus an ordered parameter list.

    NOTE(review): this fragment uses dict.iteritems (Python 2); confirm the
    intended runtime before porting.
    """
    # Separator between field name and lookup operator in kwargs, e.g. "age__gte".
    query_separator = '__'
    # Whether executing this query should commit the transaction.
    requires_commit = True
    # Force table aliasing even without joins (used when embedded as a subquery).
    force_alias = False
    def __init__(self, model):
        self.model = model
        # Model that subsequent where()/join() calls apply to.
        self.query_context = model
        self.database = self.model._meta.database
        # Adapter-supplied lookup templates (e.g. 'eq' -> '= %s') and placeholder.
        self.operations = self.database.adapter.operations
        self.interpolation = self.database.adapter.interpolation
        self._dirty = True
        self._where = {}  # model -> list of parsed query Nodes
        self._joins = []  # (model, join_type, on) tuples in join order
    def clone(self):
        # Subclasses must implement copying; relied on by @returns_clone.
        raise NotImplementedError
    def lookup_cast(self, lookup, value):
        # Let the adapter coerce the value for this lookup type (e.g. LIKE wrapping).
        return self.database.adapter.lookup_cast(lookup, value)
    def parse_query_args(self, model, **query):
        """
        Convert Django-style keyword lookups (field__op=value) into a mapping
        of field name -> (SQL operation fragment, db-ready lookup value).

        :raises EmptyResultException: for an __in lookup with an empty sequence
        :raises ValueError: for an __is lookup with a value other than None
        """
        parsed = {}
        for lhs, rhs in query.iteritems():
            # Split "field__op"; a bare field name means equality.
            if self.query_separator in lhs:
                lhs, op = lhs.rsplit(self.query_separator, 1)
            else:
                op = 'eq'
            try:
                field = model._meta.get_field_by_name(lhs)
            except AttributeError:
                # Fall back to a related (foreign-key style) field name.
                field = model._meta.get_related_field_by_name(lhs)
                if field is None:
                    raise
            if isinstance(rhs, Model):
                # Compare against a model instance by its primary key.
                rhs = rhs.get_pk()
            if op == 'in':
                if isinstance(rhs, SelectQuery):
                    # Subquery: its SQL is interpolated later in parse_q().
                    lookup_value = rhs
                    operation = 'IN (%s)'
                else:
                    if not rhs:
                        raise EmptyResultException
                    lookup_value = [field.db_value(o) for o in rhs]
                    # One placeholder per element of the IN list.
                    operation = self.operations[op] % \
                        (','.join([self.interpolation for v in lookup_value]))
            elif op == 'is':
                if rhs is not None:
                    raise ValueError('__is lookups only accept None')
                operation = 'IS NULL'
                lookup_value = []
            else:
                lookup_value = field.db_value(rhs)
                operation = self.operations[op]
            parsed[field.name] = (operation, self.lookup_cast(op, lookup_value))
        return parsed
    @returns_clone
    def where(self, *args, **kwargs):
        # Attach a parsed Q/Node tree to the current query context's WHERE list.
        self._where.setdefault(self.query_context, [])
        self._where[self.query_context].append(parseq(*args, **kwargs))
    @returns_clone
    def join(self, model, join_type=None, on=None):
        # Join to a related model and make it the new query context.
        if self.query_context._meta.rel_exists(model):
            self._joins.append((model, join_type, on))
            self.query_context = model
        else:
            raise AttributeError('No foreign key found between %s and %s' % \
                (self.query_context.__name__, model.__name__))
    @returns_clone
    def switch(self, model):
        # Reset the query context to a model already participating in the query.
        if model == self.model:
            self.query_context = model
            return
        for klass, join_type, on in self._joins:
            if model == klass:
                self.query_context = model
                return
        raise AttributeError('You must JOIN on %s' % model.__name__)
    def use_aliases(self):
        # Aliases are needed once more than one table participates (or forced).
        return len(self._joins) > 0 or self.force_alias
    def combine_field(self, alias, field_name):
        # Qualify a column with its table alias when aliasing is active.
        if alias:
            return '%s.%s' % (alias, field_name)
        return field_name
    def compile_where(self):
        """
        Compile joins and WHERE trees.

        :return: (join clauses, WHERE fragments, parameter data, model->alias map)
        """
        alias_count = 0
        alias_map = {}
        alias_required = self.use_aliases()
        joins = list(self._joins)
        if self._where or len(joins):
            # The base model is the implicit first entry of the join chain.
            joins.insert(0, (self.model, None, None))
        where_with_alias = []
        where_data = []
        computed_joins = []
        for i, (model, join_type, on) in enumerate(joins):
            # Assign aliases t1, t2, ... in join order (or '' when unaliased).
            if alias_required:
                alias_count += 1
                alias_map[model] = 't%d' % alias_count
            else:
                alias_map[model] = ''
            if i > 0:
                from_model = joins[i-1][0]
                # Find the FK linking the previous model to this one, trying
                # both directions of the relationship.
                field = from_model._meta.get_related_field_for_model(model, on)
                if field:
                    left_field = field.name
                    right_field = model._meta.pk_name
                else:
                    field = from_model._meta.get_reverse_related_field_for_model(model, on)
                    left_field = from_model._meta.pk_name
                    right_field = field.name
                if join_type is None:
                    # Nullable FK with no filter on the joined model: keep
                    # unmatched rows via LEFT OUTER join.
                    if field.null and model not in self._where:
                        join_type = 'LEFT OUTER'
                    else:
                        join_type = 'INNER'
                computed_joins.append(
                    '%s JOIN %s AS %s ON %s = %s' % (
                        join_type,
                        model._meta.db_table,
                        alias_map[model],
                        self.combine_field(alias_map[from_model], left_field),
                        self.combine_field(alias_map[model], right_field),
                    )
                )
        for (model, join_type, on) in joins:
            if model in self._where:
                for node in self._where[model]:
                    query, data = self.parse_node(node, model, alias_map)
                    where_with_alias.append(query)
                    where_data.extend(data)
        return computed_joins, where_with_alias, where_data, alias_map
    def convert_where_to_params(self, where_data):
        # Flatten nested parameter lists (from IN lookups) into one flat list.
        flattened = []
        for clause in where_data:
            if isinstance(clause, (tuple, list)):
                flattened.extend(clause)
            else:
                flattened.append(clause)
        return flattened
    def parse_node(self, node, model, alias_map):
        """Recursively render a Node tree to SQL text plus parameter data."""
        query = []
        query_data = []
        nodes = []
        for child in node.children:
            if isinstance(child, Q):
                parsed, data = self.parse_q(child, model, alias_map)
                query.append(parsed)
                query_data.extend(data)
            elif isinstance(child, Node):
                parsed, data = self.parse_node(child, model, alias_map)
                query.append('(%s)' % parsed)
                query_data.extend(data)
        # NOTE(review): `nodes` is never populated, so this extend is a no-op.
        query.extend(nodes)
        connector = ' %s ' % node.connector
        query = connector.join(query)
        if node.negated:
            query = 'NOT (%s)' % query
        return query, query_data
    def parse_q(self, q, model, alias_map):
        """Render one Q's lookups to SQL, AND-ing multiple lookups together."""
        query = []
        query_data = []
        parsed = self.parse_query_args(model, **q.query)
        for (name, lookup) in parsed.iteritems():
            operation, value = lookup
            if isinstance(value, SelectQuery):
                # Inline the subquery's SQL into the 'IN (%s)' template.
                sql, value = self.convert_subquery(value)
                operation = operation % sql
            query_data.append(value)
            combined = self.combine_field(alias_map[model], name)
            query.append('%s %s' % (combined, operation))
        if len(query) > 1:
            query = '(%s)' % (' AND '.join(query))
        else:
            query = query[0]
        if q.negated:
            query = 'NOT %s' % query
        return query, query_data
    def convert_subquery(self, subquery):
        # Temporarily select only the pk and force aliasing, then restore
        # the subquery's original settings.
        subquery.query, orig_query = subquery.model._meta.pk_name, subquery.query
        subquery.force_alias, orig_alias = True, subquery.force_alias
        sql, data = subquery.sql()
        subquery.query = orig_query
        subquery.force_alias = orig_alias
        return sql, data
    def raw_execute(self):
        query, params = self.sql()
        return self.database.execute(query, params, self.requires_commit)
class RawQuery(BaseQuery):
    """
    Query built from a hand-written SQL string plus positional parameters.
    The programmatic query-building APIs inherited from BaseQuery are disabled.
    """
    def __init__(self, model, query, *params):
        self._sql = query
        self._params = list(params)
        super(RawQuery, self).__init__(model)
    def sql(self):
        # Nothing to compile: the SQL was supplied verbatim.
        return self._sql, self._params
    def execute(self):
        return QueryResultWrapper(self.model, self.raw_execute())
    def __iter__(self):
        return self.execute()
    def join(self):
        raise AttributeError('Raw queries do not support joining programmatically')
    def where(self):
        raise AttributeError('Raw queries do not support querying programmatically')
    def switch(self):
        raise AttributeError('Raw queries do not support switching contexts')
class SelectQuery(BaseQuery):
requires_commit = False
def __init__(self, model, query=None):
self.query = query or '*'
self._group_by = []
self._having = []
self._order_by = []
self._pagination = None # return all by default
self._distinct = False
self._qr = None
super(SelectQuery, self).__init__(model)
def clone(self):
query = SelectQuery(self.model, self.query)
query.query_context = self.query_context
query._group_by = list(self._group_by)
query._having = list(self._having)
query._order_by = list(self._order_by)
query._pagination = self._pagination and tuple(self._pagination) or None
query._distinct = self._distinct
query._qr = self._qr
query._where = dict(self._where)
query._joins = list(self._joins)
return query
@returns_clone
def paginate(self, page_num, paginate_by=20):
self._pagination = (page_num, paginate_by)
def count(self):
tmp_pagination = self._pagination
self._pagination = None
tmp_query = self.query
if self.use_aliases():
self.query = 'COUNT(t1.%s)' % (self.model._meta.pk_name)
else:
self.query = 'COUNT(%s)' % (self.model._meta.pk_name)
res = self.database.execute(*self.sql())
self.query = tmp_query
self._pagination = tmp_pagination
return res.fetchone()[0]
@returns_clone
def group_by(self, clause):
model = self.query_context
if isinstance(clause, string_types):
fields = (clause,)
elif isinstance(clause, (list, tuple)):
fields = clause
elif issubclass(clause, Model):
model = clause
fields = clause._meta.get_field_names()
self._group_by.append((model, fields))
@returns_clone
def having(self, clause):
self._having.append(clause)
@returns_clone
def distinct(self):
self._distinct = True
@returns_clone
def order_by(self, field_or_string):
if isinstance(field_or_string, tuple):
field_or_string, ordering = field_or_string
else:
ordering = 'ASC'
self._order_by.append(
(self.query_context, field_or_string, ordering)
)
def parse_select_query(self, alias_map):
if isinstance(self.query, string_types):
if self.query in ('*', self.model._meta.pk_name) and self.use_aliases():
return '%s.%s' % (alias_map[self.model], self.query)
return self.query
elif isinstance(self.query, dict):
qparts = []
aggregates = []
for model, cols in self.query.iteritems():
alias = alias_map.get(model, '')
for col in cols:
if isinstance(col, tuple):
func, col, col_alias = col
aggregates.append('%s(%s) AS %s' % \
(func, self.combine_field(alias, col), col_alias)
)
else:
qparts.append(self.combine_field(alias, col))
return ', '.join(qparts + aggregates)
else:
raise TypeError('Unknown type encountered parsing select query')
def sql(self):
joins, where, where_data, alias_map = self.compile_where()
table = self.model._meta.db_table
params = []
group_by = []
if self.use_aliases():
table = '%s AS %s' % (table, alias_map[self.model])
for model, clause in self._group_by:
alias = alias_map[model]
for field in clause:
group_by.append(self.combine_field(alias, field))
else:
group_by = [c[1] for c in self._group_by]
parsed_query = self.parse_select_query(alias_map)
if self._distinct:
sel = 'SELECT DISTINCT'
else:
sel = 'SELECT'
select = '%s %s FROM %s' % (sel, parsed_query, table)
joins = '\n'.join(joins)
where = ' AND '.join(where)
group_by = ', '.join(group_by)
having = ' AND '.join(self._having)
order_by = []
for piece in self._order_by:
model, field, ordering = piece
if self.use_aliases() and field in model._meta.fields:
field = '%s.%s' % (alias_map[model], field)
order_by.append('%s %s' % (field, ordering))
pieces = [select]
if joins:
pieces.append(joins)
if where:
pieces.append('WHERE %s' % where)
params.extend(self.convert_where_to_params(where_data))
if group_by:
pieces.append('GROUP BY %s' % group_by)
if having:
pieces.append('HAVING %s' % having)
if order_by:
pieces.append('ORDER BY %s' % ', '.join(order_by))
if self._pagination:
page, paginate_by = self._pagination
if page > 0:
page -= 1
pieces.append('LIMIT %d OFFSET %d' % (paginate_by, page * paginate_by))
return ' '.join(pieces), params
def execute(self):
if self._dirty or not self._qr:
try:
self._qr = QueryResultWrapper(self.model, self.raw_execute())
self._dirty = False
return self._qr
except | |
<gh_stars>1-10
#!/usr/bin/env python3
##############################################################
# Copyright 2019 <NAME> <<EMAIL>>
# (c.f. COPYING)
#
# This file is part of BREWCOP, a coffee pot monitor.
# For details, see https://github.com/garlick/brewcop.
#
# SPDX-License-Identifier: BSD-3-Clause
##############################################################
import urwid
import serial
from collections import deque
import time
class Scale:
    """
    Manage the Avery-Berkel 6702-16658 bench scale in ECR mode.
    """
    # Serial device the scale is attached to.
    path_serial = "/dev/ttyAMA0"
    def __init__(self):
        self._weight = 0.0
        self._weight_is_valid = False
        # Two-byte ECR status code from the last response (e.g. b"20" = at zero).
        self.ecr_status = None
        self.tare_offset = 0.0
        # ECR line settings: 9600 baud, 7 data bits, even parity, 1 stop bit,
        # no flow control. Opens the port immediately.
        self.ser = serial.Serial()
        self.ser.port = self.path_serial
        self.ser.baudrate = 9600
        self.ser.timeout = 0.25
        self.ser.parity = serial.PARITY_EVEN
        self.ser.bytesize = serial.SEVENBITS
        self.ser.stopbits = serial.STOPBITS_ONE
        self.ser.xonxoff = False
        self.ser.rtscts = False
        self.ser.dsrdtr = False
        self.ser.open()
    def ecr_set_status(self, response):
        """Parse response and set internal ECR status"""
        # Status frame layout: LF 'S' <2 status bytes> CR EOT.
        assert len(response) == 6
        assert response[0:2] == b"\nS"
        assert response[4:5] == b"\r"
        self.ecr_status = response[2:4]
    def ecr_read(self):
        """Read to ECR EOT (3)"""
        message = bytearray()
        while len(message) == 0 or message[-1] != 3:
            ch = self.ser.read(size=1)
            assert len(ch) == 1  # fail on timeout
            message.append(ch[0])
        return message
    def zero(self):
        """Send ECR Zero command to the scale and read back status"""
        self.ser.reset_input_buffer()
        self.ser.write(b"Z\r")
        response = self.ecr_read()
        self.ecr_set_status(response)
    def poll(self):
        """
        Send ECR Weigh command to the scale and read back either
        weight + status, or just status. If a valid weight is returned,
        set _weight_is_valid True and convert pounds to grams.
        """
        self.ser.reset_input_buffer()
        self.ser.write(b"W\r")
        response = self.ecr_read()
        if len(response) == 16:
            # Weight frame: LF <6-char weight> "LB" CR, then a 6-byte status frame.
            assert response[0:1] == b"\n"
            assert response[7:10] == b"LB\r"
            self._weight = float(response[1:7]) * 453.592  # pounds -> grams
            self.ecr_set_status(response[10:16])
            self._weight_is_valid = True
        else:
            self.ecr_set_status(response)
            self._weight_is_valid = False
    def tare(self):
        """Incorporate weight of container on scale into future measurements"""
        self.tare_offset = self._weight
    @property
    def at_zero(self):
        """
        Test if scale status indicates scale is at zero. The zero LED
        on the scale will be lit in this case.
        """
        if self.ecr_status == b"20":
            return True
        return False
    @property
    def display(self):
        """
        Get formatted text for scale readout.
        Gray out previous value if scale is in motion.
        Show red over/under on scale range error.
        """
        if self._weight_is_valid:
            return ("green", "{:.0f}g".format(self._weight - self.tare_offset))
        elif self.ecr_status == b"10" or self.ecr_status == b"30":  # moving
            return ("deselect", "{:.0f}g".format(self._weight - self.tare_offset))
        elif self.ecr_status == b"01" or self.ecr_status == b"11":
            # Status codes treated here as below scale range ("under").
            return ("red", "under")
        elif self.ecr_status == b"02":
            # Status code treated here as above scale range ("over").
            return ("red", "over")
        else:
            return ("red", "status:" + self.ecr_status.decode("utf-8"))
    @property
    def weight_is_valid(self):
        """Return True if most recent poll() returned a valid weight."""
        return self._weight_is_valid
    @property
    def weight(self):
        """Return most recently measured weight, less tare offset if any."""
        return self._weight - self.tare_offset
# For testing UI without scale present
class NoScale(Scale):
    """
    Stand-in for Scale when no hardware is attached; reports a fixed,
    always-valid reading of zero so the UI can be exercised.
    """
    def __init__(self):
        # Mimic Scale's attribute surface without touching the serial port.
        self._weight = 0.0
        self._weight_is_valid = True
        self.ecr_status = None
        self.tare_offset = 0.0
    def poll(self):
        """No hardware to poll; the reading never changes."""
        return
    def zero(self):
        """Nothing to zero without hardware."""
        return
    @property
    def display(self):
        """Readout text indicating the scale is absent."""
        return ("deselect", "no scale")
class Progress_mL(urwid.ProgressBar):
    """
    Progress bar whose label shows the current value in mL rather than a
    percentage. Assumes the bar's range was set to (0, capacity in mL).
    """
    def get_text(self):
        # urwid calls this to render the bar's label.
        label = "{:.0f} mL".format(self.current)
        return label
class DisplayHelper:
    """
    Build and drive the urwid UI: three-region header, coffee-cup background,
    big-text meter overlay (offline mode), and a progress-bar footer.
    """
    # Urwid color palette: (key, foreground, background).
    palette = [
        ("background", "dark blue", ""),
        ("deselect", "dark gray", ""),
        ("select", "dark green", ""),
        ("green", "dark green", ""),
        ("red", "dark red", ""),
        ("pb_todo", "black", "dark red"),
        ("pb_done", "black", "dark green"),
    ]
    """
    Source: https://www.asciiart.eu/food-and-drinks/coffee-and-tea
    N.B. this one had no attribution on that site except author's initials,
    and it seems to be widely disseminated. Public domain?
    """
    coffee_cup = u'''\
                        (
                          )     (
                   ___...(-------)-....___
               .-""       )    (          ""-.
         .-'``'|-._             )         _.-|
        /  .--.|   `""---...........---""`   |
       /  /    |                             |
       |  |    |                             |
        \  \   |                             |
         `\ `\ |                             |
           `\ `|                             |
           _/ /\                             /
          (__/  \                           /
       _..---""` \                         /`""---.._
    .-'           \                       /          '-.
   :               `-.__             __.-'              :
   :                  ) ""---...---"" (                 :
    '._               `"--...___...--"`              _.'
  jgs \""--..__                              __..--""/
       '._     """----.....______.....----"""     _.'
          `""--..,,_____            _____,,..--""`
                        `"""----"""`
    '''
    def __init__(self, pot_capacity_mL=100):
        # header
        headL = urwid.Text(("green", "B R E W C O P"), align="left")
        self._headC = urwid.Text("", align="center")
        # NOTE(review): the intermediate `indicator` binding is unused.
        self._headR = indicator = urwid.Text("", align="right")
        self.header = urwid.Columns([headL, self._headC, self._headR], 3)
        # body: coffee-cup art centered on the blue background
        bg = urwid.Text(self.coffee_cup)
        bg = urwid.AttrMap(bg, "background")
        bg = urwid.Padding(bg, align="center", width=56)
        self.background = urwid.Filler(bg)
        # body + meter pop-up (offline mode)
        self._meter = urwid.BigText("", urwid.Thin6x6Font())
        m = urwid.AttrMap(self._meter, "green")
        m = urwid.Padding(m, align="center", width="clip")
        m = urwid.Filler(m, "bottom", None, 7)
        m = urwid.LineBox(m)
        self.meterbody = urwid.Overlay(m, self.background, "center", 50, "middle", 8)
        # footer: progress bar (online) or offline message
        self.pbar = Progress_mL("pb_todo", "pb_done", 0, pot_capacity_mL)
        self.footmsg = urwid.Text(
            ("red", "Brewcop is offline. Replace pot to continue monitoring."),
            align="center",
        )
        self.layout = urwid.Frame(
            header=self.header, body=self.meterbody, footer=self.footmsg
        )
        self.main_loop = urwid.MainLoop(
            self.layout, self.palette, unhandled_input=self.handle_input
        )
    def handle_input(self, key):
        """
        urwid's event loop calls this function on keyboard events
        not handled by widgets.
        """
        if key == "Q" or key == "q":
            raise urwid.ExitMainLoop()
    def tick_wrap(self, _loop, _data):
        """
        urwid timer callback to run registered "tick" function periodically.
        """
        self.ticker()
        # Re-arm the alarm so the ticker keeps firing.
        _loop.set_alarm_in(self.tick_period, self.tick_wrap)
    def run(self, ticker, tick_period):
        """
        Register ticker callable, to run every tick_period seconds.
        Start urwid's main loop.
        This method does not return until loop exits (press q).
        """
        self.ticker = ticker
        self.tick_period = tick_period
        self.main_loop.set_alarm_in(0, self.tick_wrap)
        self.main_loop.run()
    def redraw(self):
        """
        Force screen redraw.
        It normally redraws when control returns to event loop.
        """
        self.main_loop.draw_screen()
    @property
    def headC(self):
        """Get text from header, center region"""
        return self._headC.get_text()
    @headC.setter
    def headC(self, value):
        """Set text in header, center region"""
        self._headC.set_text(value)
    @property
    def headR(self):
        """Get text from header, right region"""
        return self._headR.get_text()
    @headR.setter
    def headR(self, value):
        """Set text in header, right region"""
        self._headR.set_text(value)
    @property
    def meter(self):
        """Get the meter text (scale reading)"""
        return self._meter.get_text()
    @meter.setter
    def meter(self, value):
        """Set the meter text (scale reading)"""
        self._meter.set_text(value)
    def online(self):
        """Set online display mode (show background + footer progress bar)"""
        self.layout.body = self.background
        self.layout.footer = self.pbar
    def offline(self):
        """Set offline display mode (show meter + footer message)"""
        self.layout.body = self.meterbody
        self.layout.footer = self.footmsg
    def progress(self, value):
        """Update progress bar value (pot contents in mL)"""
        self.pbar.set_completion(value)
class Brains:
    """
    Interpret a time series of scale readings as human coffee activity.

    State machine:
      unknown - no scale readings stored yet
      brewing - weight rose at some point within the retained history
      ready   - weight is stable/decreasing and the pot still has content
      empty   - weight is stable/decreasing and content is at/below threshold
    """
    # Retain scale samples covering this many seconds.
    history_length = 30
    def __init__(self, tick_period=1, empty_thresh=0, stale_thresh=60 * 60 * 8):
        # Newest sample sits at history[0]; capacity spans history_length seconds.
        self.history = deque(maxlen=int(self.history_length / tick_period))
        self.pot_empty_thresh_g = empty_thresh
        self.stale_thresh = stale_thresh
        self.state = "unknown"
        self.timestamp = 0
    def notify(self):
        """Stub for slack notification"""
        return
    def increasing(self):
        """
        Return True if any retained sample exceeds the sample recorded just
        before it, i.e. the weight rose at some point in the window.
        """
        samples = list(self.history)
        for newer, older in zip(samples, samples[1:]):
            if newer > older:
                return True
        return False
    def brewcheck(self):
        """
        Derive the next state from history, stamp the transition time when the
        state changes, and fire notify() on the brewing -> ready transition.
        """
        if self.increasing():
            nxt = "brewing"
        elif self.history[0] <= self.pot_empty_thresh_g:
            nxt = "empty"
        else:
            nxt = "ready"
            if self.state == "brewing":  # only notify on brewing->ready
                self.notify()
        if self.state != nxt:
            self.state = nxt
            self.timestamp = time.time()
    def store(self, w):
        """Record a scale measurement and advance the state machine."""
        self.history.appendleft(w)
        self.brewcheck()
    def timestr(self, t):
        """Return a human-friendly string representing elapsed time t"""
        daysecs = 60 * 60 * 24
        if t < daysecs:
            return time.strftime("%H:%M:%S", time.gmtime(t))
        if t < daysecs * 2:
            return "1 day"
        return "{} days".format(int(t / daysecs))
    @property
    def display(self):
        """Get message text describing the state, with time since entered"""
        elapsed = time.time() - self.timestamp
        label = self.timestr(elapsed)
        if self.state == "brewing":
            return ("red", "Brewing, elapsed: {}".format(label))
        if self.state == "ready":
            if elapsed < self.stale_thresh:
                return ("green", "Ready, elapsed: {}".format(label))
            return ("red", "Ready, elapsed: {} (stale)".format(label))
        if self.state == "empty":
            return ("red", "Emptyish, elapsed: {}".format(label))
        return ""
class Brewcop:
"""
Main Brewcop class.
"""
tick_period = 0.5
"""Values for Technivorm Moccamaster insulated carafe"""
pot_tare_g = 796
pot_capacity_g = 1250 # 1g | |
'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('t_1', 'v_1'), ('v_3', 't_1'), ('c_1', 'v_5'), ('t_1', 'c_1'), ('c_1', 't_2'), ('t_2', 'v_4'),
('v_2', 't_2'), ('c_1', 't_1'), ('v_5', 'c_1'), ('t_2', 'c_1'), ('v_4', 't_2'), ('v_1', 't_1'),
('t_1', 'v_3'), ('t_2', 'v_2')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('v_3', 't_2'), ('t_1', 'v_1'), ('t_1', 'v_4'), ('c_1', 'v_5'), ('t_2', 'v_3'), ('t_1', 'c_1'),
('v_4', 't_1'), ('c_1', 't_2'), ('v_2', 't_2'), ('c_1', 't_1'), ('v_5', 'c_1'), ('t_2', 'c_1'),
('v_1', 't_1'), ('t_2', 'v_2')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('v_3', 't_1'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('t_1', 'c_1'), ('c_1', 't_2'), ('t_2', 'v_1'),
('t_2', 'v_4'), ('c_1', 't_1'), ('v_5', 'c_1'), ('v_2', 't_1'), ('t_2', 'c_1'), ('v_1', 't_2'),
('v_4', 't_2'), ('t_1', 'v_3')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('v_3', 't_2'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('t_2', 'v_3'), ('t_1', 'c_1'),
('c_1', 't_2'), ('t_2', 'v_1'), ('c_1', 't_1'), ('v_5', 'c_1'), ('v_2', 't_1'), ('t_2', 'c_1'),
('v_1', 't_2'), ('v_4', 't_1')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('t_1', 'v_4'), ('v_3', 't_1'), ('c_1', 'v_5'), ('t_1', 'c_1'), ('c_1', 't_2'), ('t_2', 'v_1'),
('v_2', 't_2'), ('c_1', 't_1'), ('v_5', 'c_1'), ('t_2', 'c_1'), ('v_1', 't_2'), ('v_4', 't_1'),
('t_1', 'v_3'), ('t_2', 'v_2')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('t_1', 'v_1'), ('v_3', 't_1'), ('t_1', 'v_2'), ('v_5', 't_2'), ('t_1', 'c_1'), ('c_1', 't_2'),
('t_2', 'v_4'), ('c_1', 't_1'), ('t_2', 'v_5'), ('t_2', 'c_1'), ('v_2', 't_1'), ('v_4', 't_2'),
('v_1', 't_1'), ('t_1', 'v_3')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('v_3', 't_2'), ('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('t_2', 'v_3'), ('v_5', 't_2'),
('t_1', 'c_1'), ('v_4', 't_1'), ('c_1', 't_2'), ('c_1', 't_1'), ('t_2', 'v_5'), ('t_2', 'c_1'),
('v_2', 't_1'), ('v_1', 't_1')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('v_3', 't_2'), ('t_1', 'v_1'), ('t_1', 'v_2'), ('t_2', 'v_3'), ('v_5', 't_1'), ('t_1', 'c_1'),
('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_4'), ('c_1', 't_1'), ('v_2', 't_1'), ('t_2', 'c_1'),
('v_4', 't_2'), ('v_1', 't_1')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('v_3', 't_2'), ('t_1', 'v_1'), ('t_1', 'v_2'), ('t_2', 'v_3'), ('v_5', 't_2'), ('t_1', 'c_1'),
('c_1', 't_2'), ('t_2', 'v_4'), ('c_1', 't_1'), ('t_2', 'v_5'), ('t_2', 'c_1'), ('v_2', 't_1'),
('v_4', 't_2'), ('v_1', 't_1')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('t_1', 'v_1'), ('t_1', 'v_4'), ('v_3', 't_1'), ('v_5', 't_2'), ('t_1', 'c_1'), ('v_4', 't_1'),
('c_1', 't_2'), ('v_2', 't_2'), ('c_1', 't_1'), ('t_2', 'v_5'), ('t_2', 'c_1'), ('v_1', 't_1'),
('t_1', 'v_3'), ('t_2', 'v_2')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('t_1', 'v_1'), ('v_3', 't_1'), ('v_5', 't_1'), ('t_1', 'c_1'), ('t_1', 'v_5'), ('c_1', 't_2'),
('t_2', 'v_4'), ('v_2', 't_2'), ('c_1', 't_1'), ('t_2', 'c_1'), ('v_4', 't_2'), ('v_1', 't_1'),
('t_1', 'v_3'), ('t_2', 'v_2')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('t_1', 'v_1'), ('v_3', 't_1'), ('v_5', 't_2'), ('t_1', 'c_1'), ('c_1', 't_2'), ('t_2', 'v_4'),
('v_2', 't_2'), ('c_1', 't_1'), ('t_2', 'v_5'), ('t_2', 'c_1'), ('v_4', 't_2'), ('v_1', 't_1'),
('t_1', 'v_3'), ('t_2', 'v_2')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('v_3', 't_2'), ('t_1', 'v_1'), ('t_1', 'v_4'), ('t_2', 'v_3'), ('v_5', 't_1'), ('t_1', 'c_1'),
('t_1', 'v_5'), ('v_4', 't_1'), ('c_1', 't_2'), ('v_2', 't_2'), ('c_1', 't_1'), ('t_2', 'c_1'),
('v_1', 't_1'), ('t_2', 'v_2')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('v_3', 't_2'), ('t_1', 'v_1'), ('t_1', 'v_4'), ('t_2', 'v_3'), ('v_5', 't_2'), ('t_1', 'c_1'),
('v_4', 't_1'), ('c_1', 't_2'), ('v_2', 't_2'), ('c_1', 't_1'), ('t_2', 'v_5'), ('t_2', 'c_1'),
('v_1', 't_1'), ('t_2', 'v_2')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': {'v_4', 'v_1', 'v_2', 'v_5', 'v_3'},
'Road': {('v_3', 't_2'), ('t_1', 'v_1'), ('t_2', 'v_3'), ('v_5', 't_1'), ('t_1', 'c_1'), ('t_1', 'v_5'),
('c_1', 't_2'), ('t_2', 'v_4'), ('v_2', 't_2'), ('c_1', 't_1'), ('t_2', 'c_1'), ('v_4', 't_2'),
('v_1', 't_1'), ('t_2', 'v_2')},
'>': {('t_1', 'v_1'), ('t_1', 'v_4'), ('t_1', 'v_2'), ('c_1', 'v_5'), ('c_1', 'v_1'), ('c_1', 'v_4'),
('t_2', 'v_3'), ('c_1', 'v_2'), ('t_1', 'v_5'), ('c_1', 't_2'), ('t_2', 'v_1'), ('t_2', 'v_4'),
('c_1', 't_1'), ('c_1', 'v_3'), ('t_2', 'v_5'), ('t_1', 'v_3'), ('t_2', 'v_2')}},
{'DOMAIN': {'v_1', 'v_4', 't_2', 't_1', 'c_1', 'v_2', 'v_5', 'v_3'}, 'City': {'c_1'}, 'Town': {'t_2', 't_1'},
'Village': | |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(o) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
# Version of this bundled simplejson copy.
__version__ = '2.1.3'
# Public API re-exported via `from simplejson import *`.
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
    'OrderedDict',
]
# Author contact (placeholder text in this copy).
__author__ = '<NAME> <<EMAIL>>'
from decimal import Decimal
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
def _import_OrderedDict():
    """Return an OrderedDict class, preferring the stdlib implementation.

    Falls back to the bundled ordered_dict module when collections has no
    OrderedDict (Python < 2.7).
    """
    import collections
    cls = getattr(collections, 'OrderedDict', None)
    if cls is not None:
        return cls
    import ordered_dict
    return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
    """Return the C-accelerated make_encoder, or None when the _speedups
    extension is not available."""
    try:
        from simplejson._speedups import make_encoder
    except ImportError:
        return None
    return make_encoder
# Shared encoder instance used by dump()/dumps() on their all-defaults
# fast path, so repeated default-option calls avoid constructing a new
# JSONEncoder every time.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
    use_decimal=False,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    If ``skipkeys`` is true, ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    are skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, some chunks written to ``fp`` may be
    ``unicode`` instances, subject to normal Python ``str`` to ``unicode``
    coercion rules. Unless ``fp.write()`` explicitly understands
    ``unicode`` (as in ``codecs.getwriter()``) this is likely to error.

    If ``check_circular`` is false, the circular reference check for
    container types is skipped and a circular reference will result in an
    ``OverflowError`` (or worse).

    If ``allow_nan`` is false, it is a ``ValueError`` to serialize out of
    range ``float`` values (``nan``, ``inf``, ``-inf``) in strict
    compliance of the JSON specification, instead of using the JavaScript
    equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If *indent* is a string, JSON array elements and object members are
    pretty-printed with a newline followed by that string repeated per
    nesting level. ``None`` (the default) selects the most compact
    representation without newlines. For backwards compatibility with
    simplejson earlier than 2.1.0, an integer is also accepted and is
    converted to that many spaces.

    If ``separators`` is an ``(item_separator, dict_separator)`` tuple,
    it is used instead of the default ``(', ', ': ')`` separators;
    ``(',', ':')`` is the most compact JSON representation.

    ``encoding`` is the character encoding for str instances, default
    UTF-8. ``default(obj)`` is a function that should return a
    serializable version of obj or raise TypeError (the default simply
    raises TypeError). If *use_decimal* is true (default ``False``),
    decimal.Decimal is natively serialized with full precision.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.
    """
    all_defaults = (not skipkeys and ensure_ascii and check_circular
                    and allow_nan and cls is None and indent is None
                    and separators is None and encoding == 'utf-8'
                    and default is None and not use_decimal and not kw)
    if all_defaults:
        # fast path: reuse the shared, pre-built module-level encoder
        chunks = _default_encoder.iterencode(obj)
    else:
        encoder_cls = JSONEncoder if cls is None else cls
        chunks = encoder_cls(
            skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan,
            indent=indent, separators=separators, encoding=encoding,
            default=default, use_decimal=use_decimal, **kw).iterencode(obj)
    # write chunk-by-chunk; writelines could be faster on some versions
    # of Python, at a debuggability cost
    for chunk in chunks:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=False, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.
    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is false, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.
    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).
    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
    If ``indent`` is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.
    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
    then it will be used instead of the default ``(', ', ': ')`` separators.
    ``(',', ':')`` is the most compact JSON representation.
    ``encoding`` is the character encoding for str instances, default is UTF-8.
    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.
    If *use_decimal* is true (default: ``False``) then decimal.Decimal
    will be natively serialized to JSON with full precision.
    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg.
    """
    # cached encoder: all-default calls reuse the shared module-level encoder
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and not use_decimal
        and not kw):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        use_decimal=use_decimal, **kw).encode(obj)
# Shared decoder instance built with all-default options (the decoding
# counterpart to _default_encoder above).
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
    object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide | |
import ast
import re
import pymel.core as pm
import maya.cmds as cmds
import maya.OpenMaya as api
import utils
# Public API of this meta data module.
__all__ = [
    'decodeMetaData',
    'decodeMetaDataValue',
    'encodeMetaData',
    'encodeMetaDataValue',
    'findMetaNodes',
    'getMetaClasses',
    'getMetaData',
    'hasMetaClass',
    'isMetaNode',
    'removeMetaData',
    'setAllMetaData',
    'setMetaData',
    'updateMetaData',
]
# Prefix for the per-class marker attributes added to nodes.
METACLASS_ATTR_PREFIX = 'pyMetaClass_'
# Name of the string attribute that stores the serialized meta data blob.
METADATA_ATTR = 'pyMetaData'
# Meta class names may contain only letters, digits, and underscores.
VALID_CLASSATTR = re.compile(r'^[_a-z0-9]*$', re.IGNORECASE)
def _getMetaDataPlug(mfnnode):
    """
    Return the MPlug for the meta data attribute on a node, or None when
    the node has no meta data attribute.

    Args:
        mfnnode: A MFnDependencyNode referencing the target node.
    """
    try:
        return mfnnode.findPlug(METADATA_ATTR)
    except RuntimeError:
        # findPlug raises RuntimeError when the attribute does not exist
        return None
def _getMetaClassPlug(mfnnode, className):
    """
    Return the MPlug for a meta class attribute on a node, or None when
    the node does not carry that class attribute.

    Args:
        mfnnode: A MFnDependencyNode referencing the target node.
        className: A string name of the meta class type.
    """
    try:
        return mfnnode.findPlug(METACLASS_ATTR_PREFIX + className)
    except RuntimeError:
        # findPlug raises RuntimeError when the attribute does not exist
        return None
def _getOrCreateMetaDataPlug(mfnnode, undoable=True):
    """
    Return the MPlug for the meta data attribute on a node,
    adding the attribute if it does not already exist.

    Args:
        mfnnode (MFnDependencyNode): The MFnDependencyNode of a node
        undoable (bool): When True, the operation will be undoable

    Returns:
        The MPlug for the meta data string attribute.
    """
    try:
        plug = mfnnode.findPlug(METADATA_ATTR)
    except RuntimeError:
        # narrowed from a bare `except:`: findPlug signals a missing
        # attribute with RuntimeError (as _getMetaDataPlug already relies
        # on); any other exception should propagate instead of being
        # silently treated as "attribute missing"
        if undoable:
            cmds.addAttr(mfnnode.name(), ln=METADATA_ATTR, dt='string')
        else:
            mfnattr = api.MFnTypedAttribute()
            attr = mfnattr.create(
                METADATA_ATTR, METADATA_ATTR, api.MFnData.kString)
            mfnnode.addAttribute(attr)
        plug = mfnnode.findPlug(METADATA_ATTR)
    return plug
def _addMetaClassAttr(mfnnode, className, undoable=True):
    """
    Add a meta class attribute to a node.
    Does nothing if the attribute already exists.

    Args:
        mfnnode (MFnDependencyNode): The MFnDependencyNode of a node
        className (str): The meta data class name
        undoable (bool): When True, the operation will be undoable

    Raises:
        ValueError: If className contains characters other than letters,
            digits, and underscores.
    """
    if not VALID_CLASSATTR.match(className):
        raise ValueError('Invalid meta class name: ' + className)
    attrName = METACLASS_ATTR_PREFIX + className
    try:
        mfnnode.attribute(attrName)
    except RuntimeError:
        # attribute() raised, so the attribute is missing; create it
        if undoable:
            cmds.addAttr(mfnnode.name(), ln=attrName, at='short')
        else:
            numericFn = api.MFnNumericAttribute()
            newAttr = numericFn.create(
                attrName, attrName, api.MFnNumericData.kShort)
            mfnnode.addAttribute(newAttr)
def _removeMetaClassAttr(mfnnode, className, undoable=True):
    """
    Remove a meta class attribute from a node.
    Does nothing if the attribute does not exist.

    Args:
        mfnnode (MFnDependencyNode): The api MFnDependencyNode of a node
        className (str): The meta data class name
        undoable (bool): When True, the operation will be undoable

    Returns:
        True if the attr was removed or didn't exist,
        False if it couldn't be removed (the plug is locked).
    """
    plug = _getMetaClassPlug(mfnnode, className)
    if not plug:
        # nothing to remove
        return True
    if plug.isLocked():
        return False
    if undoable:
        cmds.deleteAttr(plug.name())
    else:
        mfnnode.removeAttribute(plug.attribute())
    return True
def encodeMetaData(data):
    """
    Serialize meta data to a string: special values are first converted
    by encodeMetaDataValue, then the result is repr()-ed.

    Args:
        data: A python dictionary-like object representing
            the data to serialize.
    """
    encoded = encodeMetaDataValue(data)
    return repr(encoded)
def encodeMetaDataValue(value):
    """
    Encode and return a meta data value. Handles special
    data types like Maya nodes, which are stored as their UUID
    string (via utils.getUUID).

    Args:
        value: Any python value to be encoded
    """
    if isinstance(value, dict):
        # items() instead of iteritems() keeps this working on both
        # Python 2 and Python 3 with identical results
        return {k: encodeMetaDataValue(v) for k, v in value.items()}
    elif isinstance(value, (list, tuple)):
        # preserve the input sequence type (list stays list, tuple stays tuple)
        return value.__class__(encodeMetaDataValue(v) for v in value)
    elif isinstance(value, pm.nt.DependNode):
        return utils.getUUID(value)
    else:
        return value
def decodeMetaData(data, refNode=None):
    """
    Parse an encoded meta data string and return the equivalent
    python object. Empty or missing data decodes to {}.

    Args:
        data: A string representing encoded meta data.
        refNode: Passed through to decodeMetaDataValue when resolving
            node UUIDs.

    Raises:
        ValueError: If the string cannot be parsed as a python literal.
    """
    if not data:
        return {}
    # strip carriage returns before safely evaluating the literal
    try:
        parsed = ast.literal_eval(data.replace('\r', ''))
    except Exception as e:
        raise ValueError("Failed to decode meta data: {0}".format(e))
    return decodeMetaDataValue(parsed, refNode)
def decodeMetaDataValue(value, refNode):
    """
    Recursively decode a parsed meta data value, converting UUID strings
    back into Maya nodes.

    Args:
        value: A decoded python value (dict, list, tuple, or scalar).
        refNode: A reference node handed to utils.findNodeByUUID when
            resolving a UUID into a node.
    """
    if isinstance(value, dict):
        # items() instead of iteritems() keeps this working on both
        # Python 2 and Python 3 with identical results
        return {k: decodeMetaDataValue(v, refNode) for k, v in value.items()}
    elif isinstance(value, (list, tuple)):
        # preserve the input sequence type (list stays list, tuple stays tuple)
        return value.__class__(decodeMetaDataValue(v, refNode) for v in value)
    elif utils.isUUID(value):
        return utils.findNodeByUUID(value, refNode)
    else:
        return value
def isMetaNode(node):
    """
    Return True if the given node has any meta data
    (i.e. carries the serialized meta data attribute).

    Args:
        node: A PyMel node or string node name
    """
    return utils.hasAttr(node, METADATA_ATTR)
def hasMetaClass(node, className):
    """
    Return True if the given node has data for the given meta class type.
    This checks the per-class marker attribute only; it does not parse
    the meta data blob.

    Args:
        node: A PyMel node or string node name
        className: A string name of the meta class type.
            If given, the node must be of this class type.
    """
    return utils.hasAttr(node, METACLASS_ATTR_PREFIX + className)
def findMetaNodes(className=None, asPyNodes=True):
    """
    Return all nodes carrying meta data, optionally restricted to a
    single meta class type.

    Args:
        className: A string name of the meta class type. When None,
            every node with any meta data is returned.
        asPyNodes: A bool, when True, returns a list of PyNodes,
            otherwise returns a list of MObjects
    """
    if className is None:
        searchPlug = METADATA_ATTR
    else:
        searchPlug = METACLASS_ATTR_PREFIX + className
    found = utils.getMObjectsByPlug(searchPlug)
    if not asPyNodes:
        return found
    return [pm.PyNode(obj) for obj in found]
def setMetaData(node, className, data, undoable=True, replace=False):
    """
    Store meta data for a meta class type on a node.
    The className must be a valid attribute name.

    Args:
        node (PyNode or str): The node on which to set data
        className (str): The data's meta class type name
        data (dict): The data to serialize and store on the node
        undoable (bool): When True, the operation will be undoable
        replace (bool): When True, will replace all data on the node
            with the new meta data. This uses setAllMetaData and can
            be much faster with large data sets.
    """
    if replace:
        # full replacement skips the read-modify-write cycle below
        setAllMetaData(node, {className: data}, undoable)
        return
    mfnnode = utils.getMFnDependencyNode(node)
    plug = _getOrCreateMetaDataPlug(mfnnode, undoable)
    _addMetaClassAttr(mfnnode, className, undoable)
    # read-modify-write: decode the existing blob, overwrite this class's
    # entry, and re-encode everything
    nodeName = str(node)
    refNode = None
    if cmds.referenceQuery(nodeName, isNodeReferenced=True):
        refNode = cmds.referenceQuery(nodeName, rfn=True)
    allData = decodeMetaData(plug.asString(), refNode)
    allData[className] = data
    encoded = encodeMetaData(allData)
    if undoable:
        cmds.setAttr(plug.name(), encoded, type='string')
    else:
        plug.setString(encoded)
def setAllMetaData(node, data, undoable=True):
    """
    Overwrite all meta data on a node in one operation.

    Faster than setMetaData because the node's existing data is never
    read back and merged. The data must be keyed by strings that are
    valid meta class names, otherwise errors may occur when retrieving
    it later.

    New meta class attributes are added automatically, but existing
    meta class attributes are never removed here; if old class
    attributes no longer apply, remove them first (e.g. removeMetaData).

    Args:
        node (PyNode or str): The node on which to set data
        data (dict): The data to serialize and store on the node
        undoable (bool): When True, the operation will be undoable
    """
    mfnnode = utils.getMFnDependencyNode(node)
    plug = _getOrCreateMetaDataPlug(mfnnode, undoable)
    # ensure a marker attribute exists for every top-level class key
    for className in (data or {}):
        _addMetaClassAttr(mfnnode, className, undoable)
    encoded = encodeMetaData(data)
    if undoable:
        cmds.setAttr(plug.name(), encoded, type='string')
    else:
        plug.setString(encoded)
def getMetaData(node, className=None):
    """
    Return decoded meta data from a node.

    Args:
        node: A PyMel node or string node name
        className: A string name of the meta class type. When given,
            only that class's data is returned (None if absent).

    Returns:
        A dict or python object representing the stored meta data, or
        None when the node has no meta data attribute.
    """
    mfnnode = utils.getMFnDependencyNode(node)
    try:
        datastr = mfnnode.findPlug(METADATA_ATTR).asString()
    except RuntimeError:
        # no meta data attribute on this node
        return None
    nodeName = str(node)
    refNode = None
    if cmds.referenceQuery(nodeName, isNodeReferenced=True):
        refNode = cmds.referenceQuery(nodeName, rfn=True)
    data = decodeMetaData(datastr, refNode)
    if className is None:
        return data
    return data.get(className, None)
def updateMetaData(node, className, data):
    """
    Merge new entries into existing meta data on a node for a meta
    class type. Only supports dict-type meta data.

    Args:
        node: A PyMel node or string node name
        className: A string name of the meta class type
        data: A dict object containing meta data to add to the node

    Raises:
        ValueError: If the node's existing data for this class is not
            a dict.
    """
    existing = getMetaData(node, className)
    if not isinstance(existing, dict):
        raise ValueError(
            "meta data for node '{0}' is not "
            "a dict and cannot be updated".format(node))
    existing.update(data)
    setMetaData(node, className, existing)
def removeMetaData(node, className=None, undoable=True):
"""
Remove meta data from a node. If no `className` is given
then all meta data is removed.
Args:
node: A PyMel node or string node name
className: A string name of the meta class type.
undoable: A bool, when True the change will be undoable
Returns:
True if node is fully clean of relevant meta data.
"""
if not isMetaNode(node):
return True
mfnnode = utils.getMFnDependencyNode(node)
# this may become true if we find there are no
# classes left after removing one
removeAllData = False
if className is not None:
# remove meta data for the given class only
# make sure data attribute is unlocked
dataPlug = _getMetaDataPlug(mfnnode)
if dataPlug and dataPlug.isLocked():
return False
# attempt to remove class attribute
| |
7, 8], num_classes=n_classes,
anchors=[12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401],
num_anchors=9, stride=32)
def forward(self, input1, input2, input3):
x1 = self.conv1(input1)
x2 = self.conv2(x1)
x3 = self.conv3(input1)
# R -1 -16
x3 = torch.cat([x3, input2], dim=1)
x4 = self.conv4(x3)
x5 = self.conv5(x4)
x6 = self.conv6(x5)
x7 = self.conv7(x6)
x8 = self.conv8(x7)
x9 = self.conv9(x8)
x10 = self.conv10(x9)
# R -4
x11 = self.conv11(x8)
# R -1 -37
x11 = torch.cat([x11, input3], dim=1)
x12 = self.conv12(x11)
x13 = self.conv13(x12)
x14 = self.conv14(x13)
x15 = self.conv15(x14)
x16 = self.conv16(x15)
x17 = self.conv17(x16)
x18 = self.conv18(x17)
if self.inference:
y1 = self.yolo1(x2)
y2 = self.yolo2(x10)
y3 = self.yolo3(x18)
return get_region_boxes([y1, y2, y3])
else:
return [x2, x10, x18]
class Yolov4(nn.Module):
    """
    Full YOLOv4 network: five downsampling backbone stages, a neck, and
    a three-scale detection head.

    Args:
        yolov4conv137weight: optional path to a pretrained checkpoint for
            the backbone+neck portion, loaded by positional key matching.
        n_classes: number of object classes (default 80).
        inference: forwarded to the neck and head to toggle inference-time
            box decoding.
    """

    def __init__(self, yolov4conv137weight=None, n_classes=80, inference=False):
        super().__init__()
        # per scale: 3 anchors x (4 box coords + 1 objectness + n_classes)
        output_ch = (4 + 1 + n_classes) * 3

        # backbone
        self.down1 = DownSample1()
        self.down2 = DownSample2()
        self.down3 = DownSample3()
        self.down4 = DownSample4()
        self.down5 = DownSample5()
        # neck (original attribute name kept for checkpoint compatibility)
        self.neek = Neck(inference)

        if yolov4conv137weight:
            pretrainable = nn.Sequential(
                self.down1, self.down2, self.down3,
                self.down4, self.down5, self.neek)
            loaded = torch.load(yolov4conv137weight)
            state = pretrainable.state_dict()
            # NOTE(review): checkpoint keys are remapped purely by position
            # (zipping two ordered dicts); this assumes both enumerate
            # parameters in exactly the same order — confirm before reuse.
            remapped = {dst: tensor
                        for (src, tensor), dst in zip(loaded.items(), state)}
            state.update(remapped)
            pretrainable.load_state_dict(state)

        # head
        self.head = Yolov4Head(output_ch, n_classes, inference)

    def forward(self, input):
        stage1 = self.down1(input)
        stage2 = self.down2(stage1)
        stage3 = self.down3(stage2)
        stage4 = self.down4(stage3)
        stage5 = self.down5(stage4)
        feat20, feat13, feat6 = self.neek(stage5, stage4, stage3)
        return self.head(feat20, feat13, feat6)
def custom_bbox(gt_coords, img, imgname, box_w=50, box_h=80):
    """Draw fixed-size ground-truth boxes for one image and collect them.

    Parameters
    ----------
    gt_coords : sequence of records
        Each record holds an image name at index 0 and the box centre
        (x, y) at indices 2 and 3 (as str/float).
    img : ndarray
        Image to draw on (modified via cv2.rectangle).
    imgname : str
        Only records whose name matches are drawn.
    box_w, box_h : int, optional
        Box width/height; previously hard-coded to 50x80, kept as the
        defaults for backward compatibility.

    Returns
    -------
    (img, cbbox_coords)
        The annotated image and a list of [x1, y1, x2, y2] corner boxes.
    """
    cbbox_coords = []
    for rec in gt_coords:  # iterate records directly instead of range(len())
        if rec[0] != imgname:
            continue
        # rec[2], rec[3] are the box centre; size is fixed per call.
        box = torch.tensor([float(rec[2]), float(rec[3]), box_w, box_h])
        bbox = box_center_to_corner(box)
        x1 = int(bbox[0].item())
        y1 = int(bbox[1].item())
        x2 = int(bbox[2].item())
        y2 = int(bbox[3].item())
        cbbox_coords.append([x1, y1, x2, y2])
        # Red rectangle, 2 px thick (BGR colour order).
        img = cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
    return img, cbbox_coords
# def get_iou(a, b, epsilon=1e-5):
# """ Given two boxes `a` and `b` defined as a list of four numbers:
# [x1,y1,x2,y2]
# where:
# x1,y1 represent the upper left corner
# x2,y2 represent the lower right corner
# It returns the Intersect of Union score for these two boxes.
# Args:
# a: (list of 4 numbers) [x1,y1,x2,y2]
# b: (list of 4 numbers) [x1,y1,x2,y2]
# epsilon: (float) Small value to prevent division by zero
# Returns:
# (float) The Intersect of Union score.
# """
# iou_list = []
# # iou = 0.0
# bj = b[0]
# n_iou = []
# for i in range(len(b)):
# iou_l=[]
# for j in range(len(a)):
# # bj = b[j]
# # COORDINATES OF THE INTERSECTION BOX
# x1 = max(a[j][0], b[i][0])
# y1 = max(a[j][1], b[i][1])
# x2 = min(a[j][2], b[i][2])
# y2 = min(a[j][3], b[i][3])
# # AREA OF OVERLAP - Area where the boxes intersect
# width = (x2 - x1)
# height = (y2 - y1)
# # print(width)
# # print(height)
# # handle case where there is NO overlap
# if (width<0) or (height <0):
# iou = 0.0
# iou_l.append(iou)
# break
# area_overlap = width * height
# # COMBINED AREA
# area_a = (a[j][2] - a[j][0]) * (a[j][3] - a[j][1])
# area_b = (b[i][2] - b[i][0]) * (b[i][3] - b[i][1])
# area_combined = area_a + area_b - area_overlap
# # RATIO OF AREA OF OVERLAP OVER COMBINED AREA
# iou_l.append(area_overlap / (area_combined+epsilon))
# max_iou = max(iou_l)
# # print(max_iou)
# iou_list.append([b[i], round(max_iou, 3)])
# return iou_list
# def batch_iou(a, b, epsilon=1e-5):
# """ Given two arrays `a` and `b` where each row contains a bounding
# box defined as a list of four numbers:
# [x1,y1,x2,y2]
# where:
# x1,y1 represent the upper left corner
# x2,y2 represent the lower right corner
# It returns the Intersect of Union scores for each corresponding
# pair of boxes.
# Args:
# a: (numpy array) each row containing [x1,y1,x2,y2] coordinates
# b: (numpy array) each row containing [x1,y1,x2,y2] coordinates
# epsilon: (float) Small value to prevent division by zero
# Returns:
# (numpy array) The Intersect of Union scores for each pair of bounding
# boxes.
# """
# # print(a[:, 0])
# # # print("b")
# # # print(b)
# # # COORDINATES OF THE INTERSECTION BOXES
# x1 = np.array([a[:, 0], b[:, 0]]).max(axis=0)
# y1 = np.array([a[:, 1], b[:, 1]]).max(axis=0)
# x2 = np.array([a[:, 2], b[:, 2]]).min(axis=0)
# y2 = np.array([a[:, 3], b[:, 3]]).min(axis=0)
# # AREAS OF OVERLAP - Area where the boxes intersect
# width = (x2 - x1)
# height = (y2 - y1)
# # handle case where there is NO overlap
# width[width < 0] = 0
# height[height < 0] = 0
# area_overlap = width * height
# # COMBINED AREAS
# area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
# area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
# area_combined = area_a + area_b - area_overlap
# # RATIO OF AREA OF OVERLAP OVER COMBINED AREA
# iou = area_overlap / (area_combined + epsilon)
# return iou
def bbox_iou(boxA, boxB):
    """Intersection-over-union of two [x1, y1, x2, y2] boxes.

    Coordinates are treated as inclusive pixel indices, hence the +1 on
    every side length.  Returns -1.0 for disjoint boxes so callers can
    tell "no overlap" apart from a genuine IoU of 0.
    (Based on the pyimagesearch IoU article, with the non-overlap fix.)
    """
    # Corners of the intersection rectangle.
    left, top = max(boxA[0], boxB[0]), max(boxA[1], boxB[1])
    right, bottom = min(boxA[2], boxB[2]), min(boxA[3], boxB[3])

    inter_w = right - left + 1
    inter_h = bottom - top + 1
    if inter_w <= 0 or inter_h <= 0:
        # Boxes do not overlap: sentinel value instead of 0.
        return -1.0

    inter_area = inter_w * inter_h
    area_a = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    area_b = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    return inter_area / float(area_a + area_b - inter_area)
def match_bboxes(bbox_gt, bbox_pred, IOU_THRESH=0.0):
    '''
    Given sets of true and predicted bounding-boxes,
    determine the best possible match (maximum total IoU) via the
    Hungarian algorithm.

    Parameters
    ----------
    bbox_gt, bbox_pred : N1x4 and N2x4 np array of bboxes [x1,y1,x2,y2].
      The number of bboxes, N1 and N2, need not be the same.
    IOU_THRESH : float, optional
      Matches with IoU <= this value are dropped from the returned
      index arrays and marked 0 in `labels`.

    Returns
    -------
    (idxs_true, idxs_pred, ious, labels)
        idxs_true, idxs_pred : indices into gt and pred for matches
        ious : corresponding IOU value of each match
        labels: vector of 0/1 values for the list of detections
    '''
    n_true = bbox_gt.shape[0]
    n_pred = bbox_pred.shape[0]
    # IoU assigned to dummy (padding) rows/columns.  Real disjoint pairs
    # score -1 from bbox_iou, so they rank below any dummy entry.
    MIN_IOU = 0.0

    # Pairwise IoU matrix, shape (NUM_GT, NUM_PRED).
    iou_matrix = np.zeros((n_true, n_pred))
    for i in range(n_true):
        for j in range(n_pred):
            iou_matrix[i, j] = bbox_iou(bbox_gt[i, :], bbox_pred[j, :])

    if n_pred > n_true:
        # More predictions than ground-truth: add dummy GT rows so the
        # assignment problem is square.
        diff = n_pred - n_true
        iou_matrix = np.concatenate((iou_matrix,
                                     np.full((diff, n_pred), MIN_IOU)),
                                    axis=0)

    if n_true > n_pred:
        # More ground-truth than predictions: add dummy prediction columns.
        diff = n_true - n_pred
        iou_matrix = np.concatenate((iou_matrix,
                                     np.full((n_true, diff), MIN_IOU)),
                                    axis=1)

    # Hungarian matching: maximizing IoU == minimizing (1 - IoU).
    idxs_true, idxs_pred = scipy.optimize.linear_sum_assignment(1 - iou_matrix)

    if (not idxs_true.size) or (not idxs_pred.size):
        ious = np.array([])
    else:
        ious = iou_matrix[idxs_true, idxs_pred]

    # Remove assignments landing on dummy prediction columns.
    # NOTE(review): dummy GT *rows* (idxs_true >= n_true, present when
    # n_pred > n_true) are not filtered here; they carry IoU == MIN_IOU
    # and are only excluded by the IOU_THRESH test below, which holds for
    # the default threshold of 0.0 but not for a negative one -- confirm
    # callers never pass IOU_THRESH < 0.
    sel_pred = idxs_pred < n_pred
    idx_pred_actual = idxs_pred[sel_pred]
    idx_gt_actual = idxs_true[sel_pred]
    ious_actual = iou_matrix[idx_gt_actual, idx_pred_actual]
    sel_valid = (ious_actual > IOU_THRESH)
    label = sel_valid.astype(int)

    return idx_gt_actual[sel_valid], idx_pred_actual[sel_valid], ious_actual[sel_valid], label
def findClosest(time, camera_time_list):
    """Return the index of the element of camera_time_list closest to time.

    Single pass over the list (the previous min()+list.index() version
    scanned it twice).  Ties resolve to the earliest index, matching the
    old behaviour.  Raises ValueError on an empty list, as before.
    """
    return min(range(len(camera_time_list)),
               key=lambda i: abs(camera_time_list[i] - time))
# def extract_frames(path,file_name, model, class_names, width, height, savename, gt, device):
# detections=0
# gt_actual=0
# #===== process the index files of camera 1 ======#
# with open('/home/dissana8/LAB/Visor/cam1/index.dmp') as f:
# content = f.readlines()
# cam_content = [x.strip() for x in content]
# c1_frames = []
# c1_times = []
# for line in cam_content:
# s = line.split(" ")
# frame = s[0]
# time = float(s[1]+'.'+s[2])
# c1_frames.append(frame)
# c1_times.append(time)
# with open('/home/dissana8/LAB/Visor/cam2/index.dmp') as f:
# content = f.readlines()
# cam_content = [x.strip() for x in content]
# c2_frames = []
# c2_times = []
# for line in cam_content:
# s = line.split(" ")
# frame = s[0]
# time = float(s[1]+'.'+s[2])
# c2_frames.append(frame)
# c2_times.append(time)
# # ===== process the index files of camera 3 ======#
# with open('/home/dissana8/LAB/Visor/cam3/index.dmp') as f:
# content = f.readlines()
# cam_content = [x.strip() for x in content]
# c3_frames = []
# c3_times = []
# for line in cam_content:
# s = line.split(" ")
# frame = s[0]
# time = float(s[1] + '.' + s[2])
# c3_frames.append(frame)
# c3_times.append(time)
# # ===== process the index files of camera 4 ======#
# with open('/home/dissana8/LAB/Visor/cam4/index.dmp') as f:
# content = f.readlines()
# cam_content = [x.strip() for x in content]
# c4_frames = []
# c4_times = []
# for | |
pixels referenced from 0
x, y = x+1, y+1
return (x, y)
def pixtosystem(self, idxs, system=None, coords='data'):
    """Convert pixel coordinates to (lon_deg, lat_deg) in *system*.

    Parameters
    ----------
    idxs : pixel coordinates, interpreted per *coords* ('data' = 0-based).
    system : target sky system name accepted by astCoords (default 'j2000').
    coords : passed through to pixtoradec().

    Raises
    ------
    WCSError
        If no usable WCS is loaded or the coordinate conversion fails.
    """
    if self.coordsys == 'raw':
        raise WCSError("No usable WCS")

    if system is None:  # fixed: was "system == None"
        system = 'j2000'

    # Get a coordinates object based on ra/dec wcs transform
    ra_deg, dec_deg = self.pixtoradec(idxs, coords=coords)
    self.logger.debug("ra, dec = %f, %f" % (ra_deg, dec_deg))

    # convert to alternate coord
    try:
        fromsys = self.coordsys.upper()
        tosys = system.upper()
        # astCoords wants an explicit equinox; only B1950 differs from
        # the J2000 default.
        equinox = 1950.0 if fromsys == 'B1950' else 2000.0
        lon_deg, lat_deg = astCoords.convertCoords(fromsys, tosys,
                                                   ra_deg, dec_deg,
                                                   equinox)
    except Exception as e:
        raise WCSError("Error converting between coordinate systems '%s' and '%s': %s" % (
            fromsys, tosys, str(e)))

    return (lon_deg, lat_deg)
class KapteynWCS(BaseWCS):
    """A WCS interface for kapteyn.wcs.Projection
    You need to install python module 'kapteyn'
    http://www.astro.rug.nl/software/kapteyn/
    if you want to use this version.

    NOTE: this class previously used Python-2-only ``except E, e`` syntax
    (the rest of the file uses ``except E as e``) and subscripted the
    result of ``map()``, which is an iterator under Python 3; both fixed.
    """

    def __init__(self, logger):
        super(KapteynWCS, self).__init__()
        if not have_kapteyn:
            raise WCSError("Please install package 'kapteyn' first!")
        self.logger = logger
        self.header = None
        self.wcs = None
        self.coordsys = 'raw'
        self.kind = 'kapteyn/WCSLIB'

        # Output sky frame used for all Projection transforms.
        self._skyout = "equatorial icrs J2000.0"

        # see: https://github.com/astropy/coordinates-benchmark/blob/master/kapteyn/convert.py
        self.conv_d = dict(fk5='fk5', fk4='fk4,J2000_OBS', icrs='icrs',
                           galactic='galactic', ecliptic='ecliptic,J2000')

    def load_header(self, header, fobj=None):
        """Copy *header* into a plain dict and build the kapteyn Projection.

        On failure the error is logged and self.wcs is left as None.
        """
        # For kapteyn, header just needs to be duck-typed like a dict
        self.header = {}
        # Seems pyfits header objects are not perfectly duck-typed as dicts
        #self.header.update(header)
        for key, value in header.items():
            self.header[key] = value

        self.fix_bad_headers()
        try:
            self.wcs = kapwcs.Projection(self.header,
                                         skyout=self._skyout)
            self.coordsys = choose_coord_system(self.header)
        except Exception as e:
            self.logger.error("Error making WCS object: %s" % (str(e)))
            self.wcs = None

    def pixtoradec(self, idxs, coords='data'):
        """Pixel -> (ra_deg, dec_deg); 'data' coords are 0-based."""
        # Kapteyn's WCS needs pixels referenced from 1
        if coords == 'data':
            idxs = tuple(map(lambda x: x + 1, idxs))
        else:
            idxs = tuple(idxs)

        try:
            res = self.wcs.toworld(idxs)
            ra_deg, dec_deg = res[0], res[1]
        except Exception as e:
            self.logger.error("Error calculating pixtoradec: %s" % (str(e)))
            raise WCSError(e)

        return ra_deg, dec_deg

    def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
        """(ra_deg, dec_deg) -> pixel (x, y); 'data' coords are 0-based."""
        args = [ra_deg, dec_deg]
        if naxispath:
            # Pad extra axes (e.g. frequency/Stokes) with zeros.
            args += [0] * len(naxispath)
        args = tuple(args)

        try:
            pix = self.wcs.topixel(args)
        except Exception as e:
            # Was a bare print(); log like the other methods for consistency.
            self.logger.error("Error calculating radectopix: %s" % (str(e)))
            raise WCSError(e)

        if coords == 'data':
            # Kapteyn's WCS returns pixels referenced from 1.
            # Fixed: was "pix = map(...)", whose result cannot be indexed
            # under Python 3 -- build a list instead.
            pix = [x - 1 for x in pix]

        x, y = pix[0], pix[1]
        return (x, y)

    def pixtosystem(self, idxs, system=None, coords='data'):
        """Pixel -> (lon_deg, lat_deg) in *system* (default 'icrs').

        Raises WCSError when no usable WCS is loaded; unknown *system*
        names raise KeyError from the conv_d lookup.
        """
        if self.coordsys == 'raw':
            raise WCSError("No usable WCS")

        if system is None:  # fixed: was "system == None"
            system = 'icrs'

        # Get a coordinates object based on ra/dec wcs transform
        ra_deg, dec_deg = self.pixtoradec(idxs, coords=coords)
        self.logger.debug("ra, dec = %f, %f" % (ra_deg, dec_deg))

        # convert to alternate coord
        spec = self.conv_d[system]
        tran = kapwcs.Transformation(self._skyout, spec)
        lon_deg, lat_deg = tran((ra_deg, dec_deg))

        return lon_deg, lat_deg
class StarlinkWCS(BaseWCS):
    """A WCS interface for Starlink.
    You need to install python module 'starlink-pyast'
    (the previous docstring URL pointed at the kapteyn site by mistake)
    if you want to use this version.

    NOTE: this class previously used Python-2-only ``except E, e`` syntax
    and passed a ``map()`` iterator to ``numpy.array`` (which does not
    produce an element array under Python 3); both fixed.
    """

    def __init__(self, logger):
        super(StarlinkWCS, self).__init__()
        if not have_starlink:
            raise WCSError("Please install package 'starlink-pyast' first!")
        self.logger = logger
        self.header = None
        self.wcs = None
        self.coordsys = 'raw'
        self.kind = 'starlink'

    def load_header(self, header, fobj=None):
        """Build an Ast FrameSet from *header* cards.

        On failure the error is logged and self.wcs is left as None.
        """
        # For starlink, header is pulled in via pyfits adapter
        ## hdu = pyfits.PrimaryHDU()
        ## self.header = hdu.header
        ## for key, value in header.items():
        ##     self.header[key] = value
        self.header = {}
        # Seems pyfits header objects are not perfectly duck-typed
        # as dicts so we can't use update()
        for key, value in header.items():
            self.header[key] = value

        self.fix_bad_headers()

        # Render each card as an 80-column FITS card string for FitsChan.
        source = []
        for key, value in header.items():
            source.append("%-8.8s= %-70.70s" % (key, repr(value)))

        # following https://gist.github.com/dsberry/4171277 to get a
        # usable WCS in Ast
        try:
            # read in the header and create the default WCS transform
            #adapter = Atl.PyFITSAdapter(hdu)
            #fitschan = Ast.FitsChan(adapter)
            fitschan = Ast.FitsChan(source)
            self.wcs = fitschan.read()
            # self.wcs is a FrameSet, with a Mapping
            #self.wcs.Report = True
            self.coordsys = choose_coord_system(self.header)

            # define a transform from this destination frame to icrs/j2000
            refframe = self.wcs.getframe(2)
            toframe = Ast.SkyFrame("System=ICRS, Equinox=J2000")
            self.icrs_trans = refframe.convert(toframe)
        except Exception as e:
            self.logger.error("Error making WCS object: %s" % (str(e)))
            self.wcs = None

    def pixtoradec(self, idxs, coords='data'):
        """Pixel -> ICRS (ra_deg, dec_deg); 'data' coords are 0-based."""
        # Starlink's WCS needs pixels referenced from 1.
        # Fixed: was numpy.array(map(...)), which under Python 3 wraps the
        # map iterator in a 0-d object array -- build a list instead.
        if coords == 'data':
            idxs = numpy.array([x + 1 for x in idxs])
        else:
            idxs = numpy.array(idxs)

        try:
            # pixel to sky coords (in the WCS specified transform)
            xs, ys = [idxs[0]], [idxs[1]]
            res = self.wcs.tran([xs, ys], 1)
            ra_rad, dec_rad = res[0][0], res[1][0]

            # whatever sky coords to icrs coords
            res = self.icrs_trans.tran([[ra_rad], [dec_rad]], 1)
            ra_rad, dec_rad = res[0][0], res[1][0]
            ra_deg, dec_deg = math.degrees(ra_rad), math.degrees(dec_rad)
            #print ra_deg, dec_deg
        except Exception as e:
            self.logger.error("Error calculating pixtoradec: %s" % (str(e)))
            raise WCSError(e)

        return ra_deg, dec_deg

    def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
        """(ra_deg, dec_deg) -> pixel (x, y); 'data' coords are 0-based."""
        try:
            # sky coords to pixel (in the WCS specified transform)
            ra_rad, dec_rad = math.radians(ra_deg), math.radians(dec_deg)
            xs, ys = [ra_rad], [dec_rad]
            # 0 as second arg -> inverse transform
            res = self.wcs.tran([xs, ys], 0)
            x, y = res[0][0], res[1][0]
        except Exception as e:
            # Was a bare print(); log like the other methods for consistency.
            self.logger.error("Error calculating radectopix: %s" % (str(e)))
            raise WCSError(e)

        if coords == 'data':
            # Starlink's WCS returns pixels referenced from 1
            x, y = x - 1, y - 1
        return (x, y)

    def pixtosystem(self, idxs, system=None, coords='data'):
        """Pixel -> (lon_deg, lat_deg) in *system* (default 'icrs').

        Raises WCSError when no usable WCS is loaded.
        """
        if self.coordsys == 'raw':
            raise WCSError("No usable WCS")

        if system is None:  # fixed: was "system == None"
            system = 'icrs'

        # define a transform from reference (icrs/j2000) to user's end choice
        refframe = self.icrs_trans.getframe(2)
        toframe = Ast.SkyFrame("System=%s, Epoch=2000.0" % (system.upper()))
        end_trans = refframe.convert(toframe)

        # Get a coordinates object based on ra/dec wcs transform
        ra_deg, dec_deg = self.pixtoradec(idxs, coords=coords)
        self.logger.debug("ra, dec = %f, %f" % (ra_deg, dec_deg))

        # convert to alternate coord
        ra_rad, dec_rad = math.radians(ra_deg), math.radians(dec_deg)
        res = end_trans.tran([[ra_rad], [dec_rad]], 1)
        lon_rad, lat_rad = res[0][0], res[1][0]
        lon_deg, lat_deg = math.degrees(lon_rad), math.degrees(lat_rad)

        return lon_deg, lat_deg
class BareBonesWCS(BaseWCS):
"""A very basic WCS. Assumes J2000, units in degrees, projection TAN.
***** NOTE *****:
We strongly recommend that you install one of the 3rd party python
WCS modules referred to at the top of this module, all of which are
much more capable than BareBonesWCS.
****************
"""
def __init__(self, logger):
    """Set up an empty barebones WCS; call load_header() before use."""
    super(BareBonesWCS, self).__init__()
    self.logger = logger
    self.header = {}
    # 'raw' means no usable coordinate system has been loaded yet.
    self.coordsys = 'raw'
    self.kind = 'barebones'
def load_header(self, header, fobj=None):
self.header = {}
for key, value in header.items():
self.header[key] = value
self.fix_bad_headers()
self.coordsys = choose_coord_system(self.header)
# WCS calculations
def get_reference_pixel(self):
    """Return the FITS reference pixel (CRPIX1, CRPIX2) as floats."""
    return (float(self.get_keyword('CRPIX1')),
            float(self.get_keyword('CRPIX2')))
def get_physical_reference_pixel(self):
    """Return the world coordinates of the reference pixel
    (CRVAL1, CRVAL2) as floats, validating their ranges.

    Raises
    ------
    WCSError
        If CRVAL1 is outside [0, 360) or CRVAL2 outside [-90, 90].

    NOTE: this previously used ``assert cond, WCSError(...)``, which
    (a) raised AssertionError with the WCSError merely as the message,
    and (b) is stripped entirely under ``python -O`` -- so the range
    checks silently vanished in optimized mode.  Raise explicitly.
    """
    xv = float(self.get_keyword('CRVAL1'))
    yv = float(self.get_keyword('CRVAL2'))
    if not (0.0 <= xv < 360.0):
        raise WCSError("CRVAL1 out of range: %f" % (xv))
    if not (-90.0 <= yv <= 90.0):
        raise WCSError("CRVAL2 out of range: %f" % (yv))
    return xv, yv
def get_pixel_coordinates(self):
    """Return the linear pixel->world matrix (cd11, cd12, cd21, cd22).

    Resolution order, following FITS WCS conventions:
      1. an explicit CD matrix (CD1_1 .. CD2_2);
      2. PCi_j rotation terms scaled by CDELT1/CDELT2;
      3. the deprecated PC00i00j keyword spelling, likewise scaled.
    """
    try:
        cd11 = float(self.get_keyword('CD1_1'))
        cd12 = float(self.get_keyword('CD1_2'))
        cd21 = float(self.get_keyword('CD2_1'))
        cd22 = float(self.get_keyword('CD2_2'))
    except Exception:
        # No CD matrix.  Kept broad (not just KeyError) because a
        # malformed value would raise from float() here as well;
        # the unused exception binding was removed.
        cdelt1 = float(self.get_keyword('CDELT1'))
        cdelt2 = float(self.get_keyword('CDELT2'))
        try:
            cd11 = float(self.get_keyword('PC1_1')) * cdelt1
            cd12 = float(self.get_keyword('PC1_2')) * cdelt1
            cd21 = float(self.get_keyword('PC2_1')) * cdelt2
            cd22 = float(self.get_keyword('PC2_2')) * cdelt2
        except KeyError:
            # Oldest PC keyword convention.
            cd11 = float(self.get_keyword('PC001001')) * cdelt1
            cd12 = float(self.get_keyword('PC001002')) * cdelt1
            cd21 = float(self.get_keyword('PC002001')) * cdelt2
            cd22 = float(self.get_keyword('PC002002')) * cdelt2

    return (cd11, cd12, cd21, cd22)
def pixtoradec(self, idxs, coords='data'):
    """Convert a (x, y) pixel coordinate on the image to a (ra, dec)
    coordinate in space.

    Parameter (coords):
    - if 'data' then x, y coordinates are interpreted as 0-based
    - otherwise coordinates are interpreted as 1-based (traditional FITS)
    """
    x, y = idxs[:2]

    # account for DATA->FITS coordinate space
    if coords == 'data':
        x, y = x + 1, y + 1

    crpix1, crpix2 = self.get_reference_pixel()
    crval1, crval2 = self.get_physical_reference_pixel()
    cd11, cd12, cd21, cd22 = self.get_pixel_coordinates()

    # Linear transform about the reference pixel, with the RA offset
    # scaled by 1/cos(dec) of the reference declination.
    # NOTE(review): this is a small-field linear approximation, not a
    # full TAN projection despite the class docstring -- acceptable only
    # near the reference pixel; confirm intended accuracy.
    ra_deg = (cd11 * (x - crpix1) + cd12 *
              (y - crpix2)) / math.cos(math.radians(crval2)) + crval1
    dec_deg = cd21 * (x - crpix1) + cd22 * (y - crpix2) + crval2

    return ra_deg, dec_deg
def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
"""Convert a (ra_deg, dec_deg) space coordinates to (x, y) pixel
coordinates on the image. ra and dec are expected as floats in
degrees.
Parameter (coords):
- if 'data' then x, y coordinates are returned as 0-based
- otherwise coordinates are returned as | |
# -*- coding: utf-8 -*-
#
# Authors: Swolf <<EMAIL>>
# Date: 2021/1/07
# License: MIT License
"""
Common Spatial Patterns and his happy little buddies!
"""
from copy import deepcopy
from typing import Union, Optional, List, Dict, Tuple
from functools import partial
import numpy as np
from numpy import ndarray
from scipy.linalg import eigh, pinv, solve
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import GridSearchCV, StratifiedKFold, ShuffleSplit
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.svm import SVC
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
from sklearn.pipeline import make_pipeline
from .base import robust_pattern, FilterBank
from ..utils.covariance import nearestPD, covariances
def csp_kernel(X: ndarray, y: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
    """The kernel in CSP algorithm based on paper [1]_.

    Parameters
    ----------
    X: ndarray
        eeg data, shape (n_trials, n_channels, n_samples).
    y: ndarray
        labels of X, shape (n_trials,).

    Returns
    -------
    W: ndarray
        Spatial filters, shape (n_channels, n_filters),
        columns sorted by descending eigenvalue.
    D: ndarray
        Eigenvalues of spatial filters, shape (n_filters,).
    A: ndarray
        Spatial patterns, shape (n_channels, n_patterns).

    Raises
    ------
    ValueError
        If y contains a number of classes other than 2.

    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME>. Optimal spatial filtering of single trial EEG during imagined hand movement[J]. IEEE transactions on rehabilitation engineering, 2000, 8(4): 441-446.
    """
    # Work on copies so the caller's arrays are never modified.
    X, y = np.copy(X), np.copy(y)
    labels = np.unique(y)
    # Remove the per-trial/channel DC offset before covariance estimation.
    X = X - np.mean(X, axis=-1, keepdims=True)
    if len(labels) != 2:
        raise ValueError("the current kernel is for 2-class problem.")

    # Per-trial covariance matrices for each class.
    C1 = covariances(X[y==labels[0]])
    C2 = covariances(X[y==labels[1]])

    # # trace normalization
    # # this operation equals to trial normalization
    # C1 = C1 / np.trace(C1, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
    # C2 = C2 / np.trace(C2, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]

    # Average covariance per class; Cc is the composite covariance.
    C1 = np.mean(C1, axis=0)
    C2 = np.mean(C2, axis=0)
    Cc = C1 + C2
    # check positive-definiteness
    Cc = nearestPD(Cc)
    # generalized eigenvalue problem
    D, W = eigh(C1, Cc)
    # Sort filters by descending eigenvalue (most class-1-discriminative first).
    ix = np.argsort(D)[::-1]
    W = W[:, ix]
    D = D[ix]
    # Recover interpretable spatial patterns from the filters.
    A = robust_pattern(W, C1, W.T@C1@W)

    return W, D, A
def csp_feature(W: ndarray, X: ndarray,
                n_components: int = 2) -> ndarray:
    """Return CSP features in paper [1]_.

    Projects each trial onto the first ``n_components`` spatial filters,
    takes the normalized per-component power, and log-transforms it.

    Parameters
    ----------
    W : ndarray
        spatial filters from csp_kernel, shape (n_channels, n_filters)
    X : ndarray
        eeg data, shape (n_trials, n_channels, n_samples)
    n_components : int, optional
        the first k components to use, usually even number, by default 2

    Returns
    -------
    ndarray
        features of shape (n_trials, n_features)

    Raises
    ------
    ValueError
        n_components should less than the number of channels

    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME>. Optimal spatial filtering of single trial EEG during imagined hand movement[J]. IEEE transactions on rehabilitation engineering, 2000, 8(4): 441-446.
    """
    # Copies keep the caller's arrays untouched.
    W, X = np.copy(W), np.copy(X)
    if n_components > W.shape[1]:
        raise ValueError("n_components should less than the number of channels")

    eps = np.finfo(X.dtype).eps
    # Center each channel's time series per trial.
    centered = X - X.mean(axis=-1, keepdims=True)
    # Project onto the leading filters; per-component signal power.
    projected = W[:, :n_components].T @ centered
    power = np.square(projected).mean(axis=-1)
    # Normalize power across components (eps guards the division) ...
    normed = power / (power.sum(axis=-1, keepdims=True) + eps)
    # ... then log-transform (eps floor guards log(0)).
    return np.log(np.clip(normed, eps, None))
def _rjd(X, eps=1e-9, n_iter_max=1000):
    """Approximate joint diagonalization based on jacobi angle.

    Parameters
    ----------
    X : ndarray
        A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
    eps : float, optional
        Tolerance for stopping criterion (default 1e-9).
    n_iter_max : int, optional
        The maximum number of iteration to reach convergence (default 1000).

    Returns
    -------
    V : ndarray
        The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
    D : ndarray
        The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).

    Notes
    -----
    This is a direct implementation of the Cardoso AJD algorithm [1]_ used in
    JADE. The code is a translation of the matlab code provided in the author
    website.

    References
    ----------
    .. [1] Cardoso, Jean-Francois, and <NAME>. Jacobi angles for simultaneous diagonalization. SIAM journal on matrix analysis and applications 17.1 (1996): 161-164.
    """
    # reshape input matrix: stack the trials side by side, A is (m, n_trials*m)
    A = np.concatenate(X, 0).T

    # init variables
    m, nm = A.shape
    V = np.eye(m)
    encore = True  # "again" -- True while any rotation exceeded eps this sweep
    k = 0

    while encore:
        encore = False
        k += 1
        if k > n_iter_max:
            break
        # Sweep over all (p, q) row/column pairs.
        for p in range(m - 1):
            for q in range(p + 1, m):
                # Columns of A belonging to rows p and q across all trials.
                Ip = np.arange(p, nm, m)
                Iq = np.arange(q, nm, m)

                # computation of Givens angle
                g = np.array([A[p, Ip] - A[q, Iq], A[p, Iq] + A[q, Ip]])
                gg = np.dot(g, g.T)
                ton = gg[0, 0] - gg[1, 1]
                toff = gg[0, 1] + gg[1, 0]
                theta = 0.5 * np.arctan2(toff, ton +
                                         np.sqrt(ton * ton + toff * toff))
                c = np.cos(theta)
                s = np.sin(theta)
                encore = encore | (np.abs(s) > eps)

                # Apply the rotation only when it is non-negligible.
                if (np.abs(s) > eps):
                    # Rotate the (p, q) column pairs of every trial matrix...
                    tmp = A[:, Ip].copy()
                    A[:, Ip] = c * A[:, Ip] + s * A[:, Iq]
                    A[:, Iq] = c * A[:, Iq] - s * tmp

                    # ...the (p, q) rows...
                    tmp = A[p, :].copy()
                    A[p, :] = c * A[p, :] + s * A[q, :]
                    A[q, :] = c * A[q, :] - s * tmp

                    # ...and accumulate the rotation into the diagonalizer V.
                    tmp = V[:, p].copy()
                    V[:, p] = c * V[:, p] + s * V[:, q]
                    V[:, q] = c * V[:, q] - s * tmp

    # Unstack back to (n_trials, m, m).
    D = np.reshape(A, (m, int(nm / m), m)).transpose(1, 0, 2)
    return V, D
def _ajd_pham(X, eps=1e-9, n_iter_max=1000):
    """Approximate joint diagonalization based on pham's algorithm.

    Parameters
    ----------
    X : ndarray
        A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
    eps : float, optional
        Tolerance for stopping criterion (default 1e-9).
    n_iter_max : int, optional
        The maximum number of iteration to reach convergence (default 1000).

    Returns
    -------
    V : ndarray
        The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
    D : ndarray
        The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).

    Notes
    -----
    This is a direct implementation of the PHAM's AJD algorithm [1]_.

    References
    ----------
    .. [1] Pham, <NAME>. "Joint approximate diagonalization of positive definite Hermitian matrices." SIAM Journal on Matrix Analysis and Applications 22, no. 4 (2001): 1136-1152.
    """
    # Adapted from http://github.com/alexandrebarachant/pyRiemann
    n_epochs = X.shape[0]

    # Reshape input matrix: trials stacked side by side, A is (n_times, n_epochs*n_times).
    A = np.concatenate(X, axis=0).T

    # Init variables
    n_times, n_m = A.shape
    V = np.eye(n_times)
    # Convergence threshold scaled by the number of (ii, jj) pairs.
    epsilon = n_times * (n_times - 1) * eps

    for it in range(n_iter_max):
        decr = 0  # decrease of the criterion accumulated over this sweep
        # Sweep the lower-triangular (ii, jj) pairs.
        for ii in range(1, n_times):
            for jj in range(ii):
                # Diagonal entries of rows ii / jj across all epochs.
                Ii = np.arange(ii, n_m, n_times)
                Ij = np.arange(jj, n_m, n_times)

                c1 = A[ii, Ii]
                c2 = A[jj, Ij]

                # Normalized off-diagonal averages and diagonal ratios.
                g12 = np.mean(A[ii, Ij] / c1)
                g21 = np.mean(A[ii, Ij] / c2)

                omega21 = np.mean(c1 / c2)
                omega12 = np.mean(c2 / c1)
                omega = np.sqrt(omega12 * omega21)

                tmp = np.sqrt(omega21 / omega12)
                tmp1 = (tmp * g12 + g21) / (omega + 1)
                # max(..., 1e-9) guards the division when omega is close to 1.
                tmp2 = (tmp * g12 - g21) / max(omega - 1, 1e-9)

                h12 = tmp1 + tmp2
                h21 = np.conj((tmp1 - tmp2) / tmp)

                decr += n_epochs * (g12 * np.conj(h12) + g21 * h21) / 2.0

                # Build the 2x2 elementary transform tau for this pair.
                tmp = 1 + 1.j * 0.5 * np.imag(h12 * h21)
                tmp = np.real(tmp + np.sqrt(tmp ** 2 - h12 * h21))
                tau = np.array([[1, -h12 / tmp], [-h21 / tmp, 1]])

                # Apply tau to rows ii/jj of A...
                A[[ii, jj], :] = np.dot(tau, A[[ii, jj], :])
                # ...and to the corresponding column pairs of every epoch.
                tmp = np.c_[A[:, Ii], A[:, Ij]]
                tmp = np.reshape(tmp, (n_times * n_epochs, 2), order='F')
                tmp = np.dot(tmp, tau.T)

                tmp = np.reshape(tmp, (n_times, n_epochs * 2), order='F')
                A[:, Ii] = tmp[:, :n_epochs]
                A[:, Ij] = tmp[:, n_epochs:]
                # Accumulate the transform into the diagonalizer.
                V[[ii, jj], :] = np.dot(tau, V[[ii, jj], :])
        if decr < epsilon:
            break

    # Unstack back to (n_trials, n_times, n_times).
    D = np.reshape(A, (n_times, -1, n_times)).transpose(1, 0, 2)
    return V.T, D
def _uwedge(X, init=None, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization algorithm UWEDGE.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
init : None | ndarray, optional
Initialization for the diagonalizer, shape (n_channels, n_channels).
eps : float, optional
Tolerance for stoping criterion (default 1e-7).
n_iter_max : int
The maximum number of iteration to reach convergence (default 1000).
Returns
-------
W_est : ndarray
The diagonalizer, shape (n_filters, n_channels), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
Uniformly Weighted Exhaustive Diagonalization using Gauss iteration
(U-WEDGE). Implementation of the AJD algorithm by Tichavsky and Yeredor [1]_ [2]_.
This is a translation from the matlab code provided by the authors.
References
----------
.. [1] <NAME>, <NAME> and <NAME>, | |
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
smicli commands based on python click for executing sweeps of selected
targets to find WBEM servers.
"""
from __future__ import print_function, absolute_import
import datetime
import click
import six
from click_datetime import Datetime
from smipyping import SimplePingList, PingsTable, ProgramsTable, UsersTable, \
datetime_display_str, compute_startend_dates
from smipyping._common import get_list_index, fold_cell
from smipyping._logging import AUDIT_LOGGER_NAME, get_logger
from .smicli import cli, CMD_OPTS_TXT
from ._click_common import print_table, validate_prompt, get_target_id, \
get_multiple_target_ids
from ._common_options import add_options
# default sort order for weekly table is the company row
DEFAULT_WEEKLY_TBL_SORT = 'Company'

# Reusable click option definitions, attached to subcommands with
# @add_options(...).  Each is a single-element list as add_options expects.

targetIds_option = [  # pylint: disable=invalid-name
    click.option('-t', '--targetIds', type=str, multiple=True,
                 required=False,
                 help='Get results only for the defined targetIDs. If the '
                      'value is "?" a select list is provided to the console '
                      'to select the WBEM server targetids from the targets '
                      'table.')]

startdate_option = [  # pylint: disable=invalid-name
    click.option('-s', '--startdate', type=Datetime(format='%d/%m/%y'),
                 default=None,
                 required=False,
                 help='Start date for ping records included. Format is '
                      'dd/mm/yy where dd and mm are zero padded (ex. 01) '
                      'and year is without century (ex. 17).'
                      '\nDefault:oldest record')]

enddate_option = [  # pylint: disable=invalid-name
    click.option('-e', '--enddate', type=Datetime(format='%d/%m/%y'),
                 default=None,
                 required=False,
                 # Typo fixed in help text: "dd and dm" -> "dd and mm".
                 help='End date for ping records included. Format is dd/mm/yy'
                      ' where dd and mm are zero padded (ex. 01) and year is'
                      ' without century (ex. 17).\nDefault:current datetime')]

# TODO make this test for positive int
numberofdays_option = [  # pylint: disable=invalid-name
    click.option('-n', '--numberofdays', type=int,
                 required=False,
                 help='Alternative to enddate. Number of days to report from'
                      ' startdate. "enddate" ignored if "numberofdays" set')]
@cli.group('history', options_metavar=CMD_OPTS_TXT)
def history_group():
    """
    Command group that manages the history (pings) table.

    The history command group processes the database pings table.

    The pings table maintains entries with the results of the ``cimping all
    -s`` subcommand. Each history entry contains the target id, the timestamp
    for the test, and the results of the test.

    It includes commands to clean the pings table and also to create various
    reports and tables of the history of tests on the WBEM servers in the
    targets table that are stored in the pings table.

    Because the pings table can be very large, there are subcommands to clean
    entries out of the table based on program id, dates, etc.

    Rather than a simple list subcommand this subcommand includes a number of
    reports to view the table for:

      - changes to status for particular targets.

      - Consolidated history over time periods

      - Snapshots of the full set of entries over periods of time.
    """
    pass  # group body is intentionally empty; subcommands attach via decorators
# @history_group.command('create', options_metavar=CMD_OPTS_TXT)
# @click.option('-i', '--ids', default=None, type=int,
# required=False,
# help="Optional list of ids. If not supplied, all id's are used")
# @click.option('-d', '--datetime', type=Datetime(format='%-M:%-H:%d/%m/%y'),
# default=datetime.datetime.now(),
# required=False,
# help='Timestamp for the ping history. format for input is'
# 'min:hour:day/month/year. The minute and hour are optional.'
# ' Default current datetime')
# @click.pass_obj
# def history_create(context, **options): # pylint: disable=redefined-builtin
# """
# TODO: Delete this or move somewhere in a test catagory.#
#
# """
# context.execute_cmd(lambda: cmd_history_create(context, options))
#######################################################################
#
# Subcommand history list
#
#######################################################################
@history_group.command('list', options_metavar=CMD_OPTS_TXT)
@add_options(targetIds_option)
@add_options(startdate_option)
@add_options(enddate_option)
@add_options(numberofdays_option)
@click.option('-r', 'result', type=click.Choice(['full', 'changes', 'status',
                                                 '%ok', 'count']),
              default='status',
              help='Display history records or status info on records. '
                   '"full" displays all records, "changes" displays records '
                   'that change status, "status"(default) displays '
                   'status summary by target. "%ok" reports '
                   'percentage pings OK by Id and total count.')
# TODO determine if there is any reason for this
# @click.option('-S', '--summary', is_flag=True, required=False, default=False,
#              help='If set only a summary is generated.')
@click.pass_obj
def history_list(context, **options):  # pylint: disable=redefined-builtin
    """
    Display history of pings in database.

    It outputs a table of data from the database pings table which may be
    filtered by targets and dates.

    The listing may be filtered to a date range with the --startdate,
    --enddate, and --numberofdays options.

    It may also be filtered to only show selected target WBEM servers from
    the targets table with the `--targetIds` option.

    The output of this subcommand is determined by the `--result` option which
    provides for:

      * `full` - all records defined by the input parameters.

      * `changes` - only records that change status.

      * `status` - listing records by status (i.e. OK, etc.) and
        count of records for that status.

      * `%ok` - listing the percentage of records that have 'OK' status and
        the total number of ping records.

      * `count` - count of records within the defined date/time range.

    ex. smicli history list --startdate 09/09/17 --enddate 09/10/17\n
    smicli history list --startdate 09/09/17 --numberofdays 9 -t 88 -t 91\n
    smicli history list --startdate 09/09/17 --numberofdays 9 - *\n
    # list pings for 9 days starting 9 sept 17 for targets\n
    # selected by user (-t *)
    """
    context.execute_cmd(lambda: cmd_history_list(context, options))
#######################################################################
#
# Subcommand history overview
#
#######################################################################
@history_group.command('overview', options_metavar=CMD_OPTS_TXT)
@click.pass_obj
def history_overview(context, **options):  # pylint: disable=redefined-builtin
    """
    Display overview of pingstable in database.
    This subcommand only shows the count of records and the oldest and
    newest record in the pings database, and the number of pings by
    program.
    """
    # Defer the real work via a lambda so it runs inside the context's
    # execute_cmd wrapper (same error-handling pattern as the sibling
    # subcommands in this module).
    context.execute_cmd(lambda: cmd_history_overview(context, options))
#######################################################################
#
# Subcommand history delete
#
#######################################################################
@history_group.command('delete', options_metavar=CMD_OPTS_TXT)
@click.option('-s', '--startdate', type=Datetime(format='%d/%m/%y'),
              required=True,
              help='Start date for pings to be deleted. Format is dd/mm/yy')
@click.option('-e', '--enddate', type=Datetime(format='%d/%m/%y'),
              required=False,
              help='End date for pings to be deleted. Format is dd/mm/yy')
@click.option('-n', '--numberofdays', type=int,
              required=False,
              help='Alternative to enddate. Number of days to report from'
                   ' startdate. "enddate" ignored if "numberofdays" set')
@click.option('-t', '--TargetID', type=int,
              required=False,
              help='Optional targetID. If included, delete ping records only '
                   'for the defined targetID and defined time period. '
                   'Otherwise all ping records in the defined time period '
                   'are deleted.')
@click.pass_obj
def history_delete(context, **options):  # pylint: disable=redefined-builtin
    """
    Delete records from pings table.
    Delete records from the history(pings) database based on start date and end
    date options and the optional list of targetids provided.
    ex. smicli history delete --startdate 09/09/17 --enddate 09/10/17
    Because this could accidentally delete all history records, this command
    requires that the user provide both the start date and either
    the enddate or number of days. It makes no assumptions about dates.
    It also requires verification before deleting any records.
    """
    # Deletion itself (and the confirmation prompt described above) is
    # implemented in cmd_history_delete; this wrapper only dispatches.
    context.execute_cmd(lambda: cmd_history_delete(context, options))
#######################################################################
#
# Subcommand history weekly
#
#######################################################################
@history_group.command('weekly', options_metavar=CMD_OPTS_TXT)
@click.option('-d', '--date', type=Datetime(format='%d/%m/%y'),
              # FIX: the default used to be datetime.datetime.today() evaluated
              # once at import time, so a long-running process would keep a
              # stale "today". A callable default is evaluated per invocation.
              default=lambda: datetime.datetime.today(),
              required=False,
              help='Optional date to be used as basis for report in form '
                   ' dd/mm/yy. Default is today. This option '
                   'allows reports to be generated for previous periods.')
@click.option('-o', '--order', required=False, type=str,
              default=DEFAULT_WEEKLY_TBL_SORT,
              help='Sort order of the columns for the report output. This '
                   'can be any of the column headers (case independent). '
                   'Default: {}'.format(DEFAULT_WEEKLY_TBL_SORT))
# FIX: '--disabled' previously also declared the short flag '-d', which
# collided with '-d/--date' above (click registers the same short option
# twice and one silently shadows the other). Only the long form is kept.
@click.option('--disabled', default=False, is_flag=True, required=False,
              help='Show disabled targets. Otherwise only targets that are '
                   'set Enabled in the database are shown.'
                   '(Default:Do not show disabled targets).')
@click.pass_obj
def history_weekly(context, **options):  # pylint: disable=redefined-builtin
    """
    Generate weekly report from ping history.
    Generates the report normally emailed for the smi lab status.
    This subcommand generates a report on the status of each target id
    in the targets table filtered by the --date parameter. It generates
    a summary of the status for the current day, for the previous week and
    for the total program.
    The --date is optional. Normally the report is generated for the week
    ending at the time the report is generated but the --date pararameter
    allows the report to be generated for previous dates.
    This report includes percentage OK for each target for today, this week,
    and the program and overall information on the target (company, product,
    SMIversion, contacts.)
    The error codes are documented in the online documentation.
    """
    context.execute_cmd(lambda: cmd_history_weekly(context, options))
#######################################################################
#
# Subcommand history timeline
#
#######################################################################
@history_group.command('timeline', options_metavar=CMD_OPTS_TXT)
@add_options(targetIds_option)
@add_options(startdate_option)
@add_options(enddate_option)
@add_options(numberofdays_option)
# @click.option('-r', '--result', type=click.Choice(['full', 'status', '%ok']),
# # default='status',
# # help='"full" displays all records, "status" displays '
# # 'status summary by id. "%ok" reports percentage '
# # 'pings OK by Id and total count. Default="status". ')
# TODO this is worthless right now
# @click.option('-S', '--summary', is_flag=True, required=False, default=False,
# help='If set only a summary is generated.')
@click.pass_obj
def history_timeline(context, **options):
# pylint: disable=redefined-builtin
"""
Show | |
<filename>hazma/vector_mediator/__init__.py<gh_stars>1-10
from typing import Union
import numpy as np
from hazma.parameters import Qd, Qe, Qu
from hazma.parameters import charged_kaon_mass as _MK
from hazma.parameters import charged_pion_mass as _MPI
from hazma.parameters import eta_mass as _META
from hazma.parameters import neutral_kaon_mass as _MK0
from hazma.parameters import neutral_pion_mass as _MPI0
from hazma.parameters import qe
from hazma.theory import TheoryAnn
from hazma.vector_mediator._vector_mediator_cross_sections import (
VectorMediatorCrossSections,
)
from hazma.vector_mediator._vector_mediator_fsr import VectorMediatorFSR
from hazma.vector_mediator._vector_mediator_positron_spectra import (
VectorMediatorPositronSpectra,
)
from hazma.vector_mediator._vector_mediator_spectra import VectorMediatorSpectra
from hazma.vector_mediator._vector_mediator_widths import VectorMediatorWidths
from hazma.vector_mediator.form_factors.kk import (
compute_kk_form_factor_parameters as __compute_ff_params_kk,
)
from hazma.vector_mediator.form_factors.pipi import (
compute_pipi_form_factor_parameters as __compute_ff_params_pipi,
)
# Note that Theory must be inherited from AFTER all the other mixin classes,
# since they furnish definitions of the abstract methods in Theory.
class VectorMediator(
    VectorMediatorCrossSections,
    VectorMediatorFSR,
    VectorMediatorPositronSpectra,
    VectorMediatorSpectra,
    VectorMediatorWidths,
    TheoryAnn,
):
    r"""
    Create a VectorMediator object with generic couplings.
    Parameters
    ----------
    mx : float
        Mass of the dark matter.
    mv : float
        Mass of the vector mediator.
    gvxx : float
        Coupling of vector mediator to dark matter.
    gvuu : float
        Coupling of vector mediator to the up quark.
    gvdd : float
        Coupling of vector mediator to the down quark.
    gvss : float
        Coupling of vector mediator to the strange quark.
    gvee : float
        Coupling of vector mediator to the electron.
    gvmumu : float
        Coupling of vector mediator to the muon.
    """

    def __init__(self, mx, mv, gvxx, gvuu, gvdd, gvss, gvee, gvmumu):
        # Masses and couplings are stored privately; the public properties
        # below recompute the mediator width whenever any of them changes.
        self._mx = mx
        self._mv = mv
        self._gvxx = gvxx
        self._gvuu = gvuu
        self._gvdd = gvdd
        self._gvss = gvss
        self._gvee = gvee
        self._gvmumu = gvmumu
        self.compute_width_v()

    def __repr__(self):
        return (
            f"VectorMediator(\n"
            f"\tmx={self.mx} MeV,\n"
            f"\tmv={self.mv} MeV,\n"
            f"\tgvxx={self.gvxx},\n"
            f"\tgvuu={self.gvuu},\n"
            f"\tgvdd={self.gvdd},\n"
            f"\tgvss={self.gvss},\n"
            f"\tgvee={self.gvee},\n"
            f"\tgvmumu={self.gvmumu}\n"
            ")"
        )

    # --- Parameters -------------------------------------------------------
    # Every setter triggers compute_width_v() so that self.width_v always
    # reflects the current masses/couplings.

    @property
    def mx(self):
        # Dark matter mass.
        return self._mx

    @mx.setter
    def mx(self, mx):
        self._mx = mx
        self.compute_width_v()

    @property
    def mv(self):
        # Vector mediator mass.
        return self._mv

    @mv.setter
    def mv(self, mv):
        self._mv = mv
        self.compute_width_v()

    @property
    def gvxx(self):
        # Coupling to dark matter.
        return self._gvxx

    @gvxx.setter
    def gvxx(self, gvxx):
        self._gvxx = gvxx
        self.compute_width_v()

    @property
    def gvuu(self):
        # Coupling to the up quark.
        return self._gvuu

    @gvuu.setter
    def gvuu(self, gvuu):
        self._gvuu = gvuu
        self.compute_width_v()

    @property
    def gvdd(self):
        # Coupling to the down quark.
        return self._gvdd

    @gvdd.setter
    def gvdd(self, gvdd):
        self._gvdd = gvdd
        self.compute_width_v()

    @property
    def gvss(self):
        # Coupling to the strange quark.
        return self._gvss

    @gvss.setter
    def gvss(self, gvss):
        self._gvss = gvss
        self.compute_width_v()

    @property
    def gvee(self):
        # Coupling to the electron.
        return self._gvee

    @gvee.setter
    def gvee(self, gvee):
        self._gvee = gvee
        self.compute_width_v()

    @property
    def gvmumu(self):
        # Coupling to the muon.
        return self._gvmumu

    @gvmumu.setter
    def gvmumu(self, gvmumu):
        self._gvmumu = gvmumu
        self.compute_width_v()

    def compute_width_v(self):
        """Recomputes the vector mediator's total width."""
        # partial_widths() is furnished by the VectorMediatorWidths mixin.
        self.width_v = self.partial_widths()["total"]

    @staticmethod
    def list_annihilation_final_states():
        """
        Return a list of the available final states.
        Returns
        -------
        fs : array-like
            Array of the available final states.
        """
        return ["mu mu", "e e", "pi pi", "pi0 g", "pi0 v", "v v"]

    def constraints(self):
        # Placeholder: no experimental constraints implemented for the
        # generic model (subclasses / TheoryAnn may override).
        pass

    def constrain(self, p1, p1_vals, p2, p2_vals, ls_or_img="image"):
        # Placeholder: see constraints().
        pass
class KineticMixing(VectorMediator):
    r"""
    Create a ``VectorMediator`` object with kinetic mixing couplings.
    The couplings are defined as::
        gvuu = Qu qe eps
        gvdd = Qd qe eps
        gvss = Qd qe eps
        gvee = Qe qe eps
        gvmumu = Qe qe eps
    where Qu, Qd and Qe are the up-type quark, down-type quark and
    lepton electric charges in units of the electric charge, qe is the
    electric charge and eps is the kinetic mixing parameter.
    Parameters
    ----------
    mx : float
        Mass of the dark matter.
    mv : float
        Mass of the vector mediator.
    gvxx : float
        Coupling of vector mediator to dark matter.
    eps : float
        Kinetic mixing parameter.
    """

    def __init__(self, mx, mv, gvxx, eps):
        self._eps = eps
        # Zero-argument super() (the file targets Python 3; f-strings are
        # used throughout). The fermion couplings are fully determined by
        # eps, so only eps is exposed as a settable parameter below.
        super().__init__(
            mx,
            mv,
            gvxx,
            -Qu * eps * qe,
            -Qd * eps * qe,
            -Qd * eps * qe,  # strange quark carries down-type charge Qd
            -Qe * eps * qe,
            -Qe * eps * qe,
        )

    def __repr__(self):
        repr_ = "KineticMixing("
        repr_ += f"mx={self.mx} [MeV], "
        repr_ += f"mv={self.mv} [MeV], "
        repr_ += f"gvxx={self.gvxx}, "
        repr_ += f"eps={self.eps}"
        repr_ += ")"
        return repr_

    @property
    def eps(self):
        """Kinetic mixing parameter."""
        return self._eps

    @eps.setter
    def eps(self, eps):
        # Changing eps rescales every fermion coupling, then the total
        # width is recomputed once.
        self._eps = eps
        self._gvuu = -Qu * eps * qe
        self._gvdd = -Qd * eps * qe
        self._gvss = -Qd * eps * qe
        self._gvee = -Qe * eps * qe
        self._gvmumu = -Qe * eps * qe
        self.compute_width_v()

    # Hide underlying properties' setters: the individual couplings are
    # derived from eps and must not be set independently.
    @VectorMediator.gvuu.setter
    def gvuu(self, _):
        raise AttributeError("Cannot set gvuu")

    @VectorMediator.gvdd.setter
    def gvdd(self, _):
        raise AttributeError("Cannot set gvdd")

    @VectorMediator.gvss.setter
    def gvss(self, _):
        raise AttributeError("Cannot set gvss")

    @VectorMediator.gvee.setter
    def gvee(self, _):
        raise AttributeError("Cannot set gvee")

    @VectorMediator.gvmumu.setter
    def gvmumu(self, _):
        raise AttributeError("Cannot set gvmumu")
class QuarksOnly(VectorMediator):
    r"""
    Create a VectorMediator object with only quark couplings.
    Parameters
    ----------
    mx : float
        Mass of the dark matter.
    mv : float
        Mass of the vector mediator.
    gvxx : float
        Coupling of vector mediator to dark matter.
    gvuu : float
        Coupling of vector mediator to the up quark.
    gvdd : float
        Coupling of vector mediator to the down quark.
    gvss : float
        Coupling of vector mediator to the strange quark.
    """

    def __init__(self, mx, mv, gvxx, gvuu, gvdd, gvss):
        # Lepton couplings are fixed to zero; zero-argument super() used
        # for consistency with the rest of the Python-3 codebase.
        super().__init__(mx, mv, gvxx, gvuu, gvdd, gvss, 0.0, 0.0)

    def __repr__(self):
        # FIX: the original omitted the ", " separators between gvuu, gvdd
        # and gvss, producing run-together output like "gvuu=1gvdd=2gvss=3)".
        # Now formatted consistently with KineticMixing.__repr__.
        repr_ = "QuarksOnly("
        repr_ += f"mx={self.mx} [MeV], "
        repr_ += f"mv={self.mv} [MeV], "
        repr_ += f"gvxx={self.gvxx}, "
        repr_ += f"gvuu={self.gvuu}, "
        repr_ += f"gvdd={self.gvdd}, "
        repr_ += f"gvss={self.gvss}"
        repr_ += ")"
        return repr_

    @staticmethod
    def list_annihilation_final_states():
        """Return the annihilation final states open without lepton couplings."""
        return ["pi pi", "pi0 g", "pi0 v", "v v"]

    # Hide underlying properties' setters: lepton couplings are identically
    # zero in this model and may not be changed.
    @VectorMediator.gvee.setter
    def gvee(self, _):
        raise AttributeError("Cannot set gvee")

    @VectorMediator.gvmumu.setter
    def gvmumu(self, _):
        raise AttributeError("Cannot set gvmumu")
class VectorMediatorGeV(VectorMediator):
"""
A generic dark matter model where interactions with the SM are mediated via
an s-channel vector mediator. This model is valid for dark-matter masses
up to 1 GeV.
"""
def __init__(self, mx, mv, gvxx, gvuu, gvdd, gvss, gvee, gvmumu):
"""
Create a `VectorMediatorGeV` object.
Parameters
----------
mx : float
Mass of the dark matter.
mv : float
Mass of the vector mediator.
gvxx : float
Coupling of vector mediator to dark matter.
gvuu : float
Coupling of vector mediator to the up quark.
gvdd : float
Coupling of vector mediator to the down quark.
gvss : float
Coupling of vector mediator to the strange quark.
gvee : float
Coupling of vector mediator to the electron.
gvmumu : float
Coupling of vector mediator to the muon.
"""
# Compute and store the parameters needed to compute form factors.
self._ff_pipi_params = __compute_ff_params_pipi(2000)
self._ff_kk_params = __compute_ff_params_kk(200)
super().__init__(mx, mv, gvxx, gvuu, gvdd, gvss, gvee, gvmumu)
# Import the form factors
from hazma.vector_mediator.form_factors import (
_form_factor_eta_gamma,
_form_factor_kk,
_form_factor_pi_gamma,
_form_factor_pipi,
)
    @property
    def gvuu(self) -> float:
        """
        Coupling of vector mediator to the up quark.
        """
        return self._gvuu

    @gvuu.setter
    def gvuu(self, val: float) -> None:
        # Changing a coupling invalidates the cached width/form factors.
        self._gvuu = val
        self._reset_state()

    @property
    def gvdd(self) -> float:
        """
        Coupling of vector mediator to the down quark.
        """
        return self._gvdd

    @gvdd.setter
    def gvdd(self, val: float) -> None:
        self._gvdd = val
        self._reset_state()

    @property
    def gvss(self) -> float:
        """
        Coupling of vector mediator to the strange quark.
        """
        # (Docstring fixed: previously said "down quark" — copy-paste error.)
        return self._gvss

    @gvss.setter
    def gvss(self, val: float) -> None:
        self._gvss = val
        self._reset_state()
    def _reset_state(self) -> None:
        """
        Function to reset the state of the derived quantities such as the
        vector width and form-factors.
        """
        # Currently a no-op placeholder: the coupling setters call this hook,
        # but no cached quantities are recomputed yet. TODO confirm whether a
        # subclass (or later revision) is expected to fill this in.
        pass
def _width_v_to_mm(self, mass: float, form_factor: complex, symmetry: float = 1.0):
"""
Compute the partial width for the decay of the vector mediator into two
mesons.
Parameters
----------
mass: float
Mass of the final state meson.
form_factor: complex
Vector form factor for the V-meson-meson vertex.
symmetry: float
Symmetry factor. If the final state mesons are identical, then this
should be 1/2. Default is 1.0
Returns
-------
gamma: float
Partial width for the vector to decay into two mesons.
"""
if self._mv < 2 * mass:
return 0.0
return (
symmetry
/ 48.0
/ np.pi
* self._mv
* (1 - 4 * mass ** 2 / self._mv ** 2) ** 1.5
* abs(form_factor) ** 2
)
def width_v_to_pipi(self):
"""
Compute the partial width for the decay of the vector mediator into two
charged pions.
"""
mass = _MPI
form_factor = self._form_factor_pipi(self._mv ** 2)
return self._width_v_to_mm(mass, form_factor)
def width_v_to_k0k0(self):
"""
Compute the partial width for the decay of the vector mediator into two
neutral kaons.
"""
mass = _MK0
form_factor = self._form_factor_kk(self._mv ** 2, imode=0)
return self._width_v_to_mm(mass, form_factor)
def width_v_to_kk(self):
| |
["po/lin", -1], ["u(po/", -1]]
}
},
{"Tester": Rule_SW_20, "ruleName": "SW.20: Forms of eu)qu/s", "Short_Name": "SW.20",
"Test_Forms": {
DIALECT.IONIC: [["i)qu/s", -1], ["i)qei=a", -1]],
DIALECT.AEOLIC: [["eu)qu/s", -1], ["eu)qei=a", -1]],
DIALECT.HOMERIC: [],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1]]
}
},
{"Tester": Rule_SW_21, "ruleName": "SW.21: Forms of mi/a", "Short_Name": "SW.21",
"Test_Forms": {
DIALECT.IONIC: [["mi/a", -1], ["mia=s", -1]],
DIALECT.AEOLIC: [["i)/a", -1], ["i)a/s", -1]],
DIALECT.HOMERIC: [["i)/a", -1], ["i)a/s", -1]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1], ["ei(=s", -1]]
}
},
{"Tester": Rule_SW_22, "ruleName": "SW.22: Homeric forms of gonu, doru, zeus, naus", "Short_Name": "SW.22",
"Test_Forms": {
DIALECT.IONIC: [],
DIALECT.AEOLIC: [],
DIALECT.HOMERIC: [["gouno/s", -1], ["gou/natos", -1], ["douro/s", -1], ["dou/ratos", -1], ["douri/", -1], ["dou/rati", -1], ["dou=re", -1], ["dou=ra", -1], ["dou/rata", -1], ["dou/rwn", -1], ["dou/ressi", -1], ["dou/rasi", -1], ["zhno/s", -1], ["zhni/", -1], ["zh=na", -1], ["nhu=s", -1], ["nho/s", -1], ["nhi/", -1], ["nh=a", -1], ["nh=es", -1], ["nhw=n", -1], ["nh/essi", -1], ["nhusi/", -1], ["nh=as", -1]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1], ["go/nu", -1], ["do/ru", -1], ["zeu/s", -1], ["dio/s", -1], ["dii/", -1], ["di/a", -1], ["zeu=", -1], ["neo/s", -1], ["ne/es", -1], ["new=n", -1], ["ne/as", -1]]
}
},
{"Tester": Rule_SW_23, "ruleName": "SW.23: Homeric forms of polus", "Short_Name": "SW.23",
"Test_Forms": {
DIALECT.IONIC: [],
DIALECT.AEOLIC: [],
DIALECT.HOMERIC: [["pollo/s", -1], ["pole/os", -1], ["pole/wn", -1], ["pole/essin", -1]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1], ["pollh=s", -1], ["polloi/", -1], ["pollw=|", -1], ["polu/s", -1]]
}
},
{"Tester": Rule_SW_24, "ruleName": "SW.24: Homeric ptolis", "Short_Name": "SW.24",
"Test_Forms": {
DIALECT.IONIC: [],
DIALECT.AEOLIC: [],
DIALECT.HOMERIC: [["pto/lis", -1], ["pto/lios", -1]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1]]
}
},
{"Tester": Rule_NE_1a, "ruleName": "NE.1a: Endings of singular feminine long alpha-stems", "Short_Name": "NE.1a",
"Test_Forms": {
DIALECT.IONIC: [["gnw/mh", -1], ["gnw/mhs", -1], ["gnw/mh|", -1], ["gnw/mhn", -1], ["paideuome/nh", -1], ["paideuome/nhs", -1], ["pepaideume/nh", -1], ["pepaideume/nhs", -1], ["h(=s", -1], ["h(/n", -1], ["a)gorh/n", -1], ["a)pria/thn", -1], ["au)dh/", -1], ["au)th/", -1], ["bi/hn", -1], ["boulh/", -1], ["canqh=s", -1], ["daimoni/h", -1], ["deciterh=|", -1], ["deinh/", -1], ["duwdeka/th", -1], ["e)i+/shs", -1], ["fa/nh", -1], ["fare/trhn", -1], ["fi/lh", -1], ["fqi/hn", -1], ["h)eri/h", -1], ["h)gaqe/h|", -1], ["i)/dh|", -1], ["kalh=|", -1], ["klaggh/", -1], ["klisi/hn", -1], ["kou/rhn", -1], ["kouridi/hs", -1], ["kradi/hn", -1], ["o)mi/xlh", -1], ["oi)/h", -1], ["au)dh/", 2], ["au)dh/", 3], ["i)/dh|", 5], ["oi)/h", 2]],
DIALECT.AEOLIC: [["gnw/ma", -1], ["gnw/mas", -1], ["gnw/ma|", -1], ["gnw/man", -1], ["pepaideume/na", -1], ["pepaideume/nas", -1], ["a(=s", -1], ["a(/n", -1], ["a)ci/a", -1], ["do/menai", 2], ["polla/", 0]],
DIALECT.HOMERIC: [],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1], ["h)rige/neia", 0]]
}
},
{"Tester": Rule_NE_1b, "ruleName": "NE.1b: Endings of singular feminine short alpha-stems", "Short_Name": "NE.1b",
"Test_Forms": {
DIALECT.IONIC: [["qala/tths", -1], ["qala/tth|", -1], ["paideuou/shs", -1], ["a)naidei/hn", -1], ["kni/shs", -1], ["kni/sh", -1], ["kni/sh|", -1], ["barei/hs", -1]],
DIALECT.AEOLIC: [["qala/ssas", -1], ["qala/ssa|", -1], ["gefu/ras", -1], ["telhe/ssas", 1]],
DIALECT.HOMERIC: [],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1], ["qa/latta", -1], ["qa/lattan", -1], ["paideuqei=sa", -1]]
}
},
{"Tester": Rule_NE_2, "ruleName": "NE.2: Singulars of masculine alpha stems", "Short_Name": "NE.2",
"Test_Forms": {
DIALECT.IONIC: [["poli/ths", -1], ["poli/tew", -1], ["poli/tw", -1], ["poli/thi", -1], ["poli/thn", -1], ["neani/hs", -1], ["ai)xmhth/n", -1], ["ba/thn", -1], ["i)/thn", -1], ["kradi/hn", 1]],
DIALECT.AEOLIC: [["poli/tas", -1], ["poli/ta=", -1], ["poli=tai", -1], ["polita=n", -1], ["neani/as", -1]],
DIALECT.HOMERIC: [["poli/tao", -1], ["neani/a=o", -1]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1]]
}
},
{"Tester": Rule_NE_3, "ruleName": "NE.3: Plurals of alpha stems", "Short_Name": "NE.3",
"Test_Forms": {
DIALECT.IONIC: [["politw=n", -1], ["a)gorw=n", -1], ["mhxanw=n", -1], ["r(htorikw=n", -1], ["qalassw=n", -1], ["moirw=n", -1], ["paideuome/nwn", -1], ["pepaideume/nwn", -1], ["w(=n", -1]],
DIALECT.AEOLIC: [["polita=n", -1], ["a)gora=n", -1], ["maxana/n", -1], ["qa/lassan", -1], ["moira=n", -1], ["lipou=san", -1], ["a(=n", -1], ["ai)xmhta/wn", 0], ["baqei=an", 1]],
DIALECT.HOMERIC: [["polita=n", -1], ["a)gora=n", -1], ["maxana/n", -1], ["qa/lassan", -1], ["moira=n", -1], ["lipou=san", -1], ["a(=n", -1], ["ai)xmhta/wn", 0], ["baqei=an", 1]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1]]
}
},
{"Tester": Rule_NE_4, "ruleName": "NE.4: Forms of digammma stems", "Short_Name": "NE.4",
"Test_Forms": {
DIALECT.IONIC: [["basile/os", -1], ["basilei=", -1], ["basile/a", -1], ["basilei=s", -1], ["basile/wn", -1], ["basile/as", -1]],
DIALECT.AEOLIC: [["basilh=os", -1], ["basilh=i", -1], ["basilh=a", -1], ["basilei=s", -1], ["basile/wn", -1], ["basile/as", -1], ["ou)rei=s", -1]],
DIALECT.HOMERIC: [["basilh=os", -1], ["basilh=i", -1], ["basilh=a", -1], ["basilh=es", -1], ["basilh/wn", -1], ["basilh=as", -1], ["ou)rh=es", -1], ["a)xillh=os", -1]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1], ["basileu=si", -1]]
}
},
{"Tester": Rule_NE_5, "ruleName": "NE.5: forms of iota stems", "Short_Name": "NE.5",
"Test_Forms": {
DIALECT.IONIC: [["po/lews", -1], ["po/lei", -1], ["po/leis", -1], ["polei=s", -1], ["po/lewn", -1], ["po/lesi", -1]],
DIALECT.AEOLIC: [["po/lios", -1], ["po/li", -1], ["po/lies", -1], ["poli/wn", -1], ["po/lisi", -1], ["poli/esi", -1], ["po/lis", -1]],
DIALECT.HOMERIC: [["po/lios", -1], ["po/lhos", -1], ["po/lies", -1], ["po/lhes", -1], ["poli/wn", -1], ["poli/essi", -1], ["poli/essin", -1], ["po/lhas", -1]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1]]
}
},
{"Tester": Rule_NE_6, "ruleName": "NE.6: Dative plural in -essi", "Short_Name": "NE.6",
"Test_Forms": {
DIALECT.IONIC: [],
DIALECT.AEOLIC: [["po/dessi", -1]],
DIALECT.HOMERIC: [["po/dessi", -1]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1]]
}
},
{"Tester": Rule_NE_7, "ruleName": "NE.7: Homeric second declension endings", "Short_Name": "NE.7",
"Test_Forms": {
DIALECT.IONIC: [["lo/gou", -1], ["o(dou=", -1], ["lo/gois", -1], ["o(doi=s", -1]],
DIALECT.AEOLIC: [["lo/gou", -1], ["o(dou=", -1], ["lo/gois", -1], ["o(doi=s", -1]],
DIALECT.HOMERIC: [["lo/goio", -1], ["o(doi=o", -1], ["lo/goisi", -1], ["a(li/oio", 4], ["derkome/noio", -1], ["a(li/oio", 8], ["toi=o", -1], ["a)ndrofo/noio", 0]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1]]
}
},
{"Tester": Rule_VE_1, "ruleName": "VE.1: Ionic mi-verbs inflected like contracts", "Short_Name": "VE.1",
"Test_Forms": {
DIALECT.IONIC: [["i(ei=si", -1], ["didoi=s", -1], ["didoi=sqa", -1], ["didoi=", -1], ["didou=si", -1], ["didou=sin", -1], ["tiqei=", -1], ["tiqei=si", -1]],
DIALECT.AEOLIC: [["i(a=si", -1], ["di/dws", -1], ["di/dwsi", -1], ["di/dwsin", -1], ["dido/asi", -1], ["dido/asin", -1], ["ti/qhsi", -1], ["tiqe/asi", -1]],
DIALECT.HOMERIC: [["i(ei=si", -1], ["didoi=s", -1], ["didoi=sqa", -1], ["didoi=", -1], ["didou=si", -1], ["didou=sin", -1], ["tiqei=", -1], ["tiqei=si", -1]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1], ["di/dwmi", -1]]
}
},
{"Tester": Rule_VE_2, "ruleName": "VE.2: Third person middle forms", "Short_Name": "VE.2",
"Test_Forms": {
DIALECT.IONIC: [["tiqe/atai", -1], ["beblh/atai", -1], ["puqoi/ato", 0], ["puqoi/ato", 1]], # , ["dune/atai", -1] (better mi verb grabbing?)
DIALECT.AEOLIC: [["ti/qentai", -1], ["be/blhntai", -1], ["paideu/ointo", -1]],
DIALECT.HOMERIC: [],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1], ["gegra/fatai", -1]]
}
},
{"Tester": Rule_VE_3, "ruleName": "VE.3: Alpha contract endings", "Short_Name": "VE.3",
"Test_Forms": {
DIALECT.IONIC: [["tima=|s", -1], ["tima=|", -1], ["tima=te", -1], ["tima=|", -1], ["tima=tai", -1], ["tima=sqe", -1], ["e)ti/mas", -1], ["e)ti/ma", -1], ["e)tima=te", -1], ["e)tima=to", -1], ["tima=n", -1], ["tima=sqai", -1], ["te/xna|", -1], ["e)texna=to", -1]],
DIALECT.AEOLIC: [["timh=|s", -1], ["timh=|", -1], ["timh=|", -1], ["timhtai/", -1], ["timh=n", -1], ["te/xnh|", -1]],
DIALECT.HOMERIC: [],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1], ["po/lis", -1], ["timw=men", -1]]
}
},
{"Tester": Rule_VE_4, "ruleName": "VE.4: Athematic 3rd plural secondary ending", "Short_Name": "VE.4",
"Test_Forms": {
DIALECT.IONIC: [["e)/dosan", -1], ["i(/stasan", -1]],
DIALECT.AEOLIC: [["e)/don", -1], ["i(/stan", -1]],
DIALECT.HOMERIC: [["e)/don", -1], ["i(/stan", -1]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1], ["po/lis", -1], ["di/dwmi", -1], ["'n", -1], ["en", -1], ["e)/peisin", -1], ["e)kpeta/sousin", -1]]
}
},
{"Tester": Rule_VE_5, "ruleName": "VE.5: Active infinitive endings (-men vs -nai vs -menai)", "Short_Name": "VE.5",
"Test_Forms": {
DIALECT.IONIC: [["dido/nai", -1], ["tiqe/nai", -1]],
DIALECT.AEOLIC: [["dido/men", -1], ["ti/qemen", -1], ["dido/menai", -1]],
DIALECT.HOMERIC: [["dido/menai", -1]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1], ["po/lis", -1], ["di/dwmi", -1]]
}
},
{"Tester": Rule_VE_6, "ruleName": "VE.6: Homeric verb endings", "Short_Name": "VE.6",
"Test_Forms": {
DIALECT.IONIC: [],
DIALECT.AEOLIC: [],
DIALECT.HOMERIC: [["e)qe/lwmi", -1], ["a)ga/gwmi", -1], ["e)qe/lh|sqa", -1], ["dw=|si", -1], ["fa/anqen", -1], ["tra/fen", -1], ["h)/|deen", -1], ["h)/|dei", -1], ["h)/|dea", -1], ["metafraso/mesqa", -1], ["i(laso/mesqa", -1]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1], ["oi)=sqa", -1], ["e)/fhsqa", -1], ["di/dwsi", -1]]
}
},
{"Tester": Rule_NM_1, "ruleName": "NM.1: Nu Movable (simple)", "Short_Name": "NM.1",
"Test_Forms": {
DIALECT.IONIC: [["paideu/ousin", -1], ["po/lisin", -1], ["e)pai/deusen", -1], ["w)/moisin", -1], ["pe/mpousin", -1], ["proqe/ousin", 0], ["proqe/ousin", 2], ["proqe/ousin", 3], ["proqe/ousin", 5], ["w)/moisin", 0], ["w)/moisin", 1], ["w)/moisin", 3]],
DIALECT.AEOLIC: [],
DIALECT.HOMERIC: [["paideu/ousin", -1], ["po/lisin", -1], ["e)pai/deusen", -1]],
DIALECT.ANY: [["xe/ras", -1], ["paideu/w", -1], ["paideu/ete", -1], ["po/lin", -1], ["u(po/", -1], ["po/lis", -1]]
}
}
]
# DEBUG OVERRIDE (disabled): flip the guard to True to replace the full
# rulesList above with this single-rule list when isolating one rule.
# NOTE(review): the fields look like debugging leftovers — Tester is
# Rule_NE_7 while ruleName says "NE.1b" and Short_Name says "NE.1a".
if (False):
    rulesList = [
        {"Tester": Rule_NE_7, "ruleName": "NE.1b: Endings of singular feminine long alpha-stems", "Short_Name": "NE.1a",
         "Test_Forms": {
             DIALECT.IONIC: [],
             DIALECT.AEOLIC: [],
             DIALECT.HOMERIC: [["pontopo/roio", 0]],
             DIALECT.ANY: []
         }
         }]
# barei/hs for NE_1b ["barei/hs", -1]
#example rule
# Template for a new rule entry; guarded by `if False` so the bare dict
# expression is never evaluated. Copy into rulesList and fill in the fields.
if False:
    #,
    {"Tester": Rule_NE_1b, "ruleName": "", "Short_Name": "",
     "Test_Forms": {
         DIALECT.IONIC: [],
         DIALECT.AEOLIC: [],
         DIALECT.HOMERIC: [],
         DIALECT.ANY: []
     }
     }
# information in | |
<reponame>AdamSchnapp/pyrocoto
#!/usr/bin/env python
from xml.etree.ElementTree import Element, tostring
from xml.dom import minidom
from .helpers import Validator, Borg
from itertools import product
import logging
logger = logging.getLogger(__name__)
class String(Validator):
    """Descriptor validating that an assigned value is a string.

    Optionally requires that the value contain a given substring
    (``contains``) or belong to a fixed set (``one_of``). ``Offset``
    instances are accepted without further checks.
    """

    def __init__(self, contains=None, one_of=None):
        self.isin = contains
        self.one_of = one_of

    def validate(self, value):
        # Offset objects are strings with an additional offset; accept as-is.
        if isinstance(value, Offset):
            return
        name = super().get_name()
        if not isinstance(value, str):
            raise TypeError(f'Expected "{name}" value {value!r} to be a string')
        if self.isin is not None and self.isin not in value:
            raise ValueError(f'Expected {self.isin} in "{name}" value {repr(value)}')
        if self.one_of is not None and value not in self.one_of:
            raise ValueError(f'Expected "{value}" to be one of {self.one_of}')
class Offset:
    ''' entry that should receive time offset '''
    # offset: plain string; value: cyclestr-style string (must contain '@').
    offset = String()
    value = String(contains='@')

    def __init__(self, value, offset):
        self.offset = offset
        self.value = value

    def to_element(self, name, **kwargs):
        # Build <name ...><cyclestr offset="...">value</cyclestr></name>.
        E = Element(name, kwargs)
        Esub = Element('cyclestr', offset=self.offset)
        Esub.text = self.value
        E.append(Esub)
        return E
class Envar(Validator):
    """Descriptor converting a dict of environment variables into a list of
    <envar> XML elements."""

    def __init__(self, contains=None):
        self.isin = contains

    def validate(self, value):
        ''' stores xml from dict input '''
        if not isinstance(value, dict):
            raise TypeError(f'Expected envar value {value!r} to be a dictionary')
        envars = []
        for name, v in value.items():
            # Each entry becomes <envar><name>..</name><value>..</value></envar>.
            envar = Element('envar')
            name_element = Element('name')
            name_element.text = name
            envar.append(name_element)
            if isinstance(v, str):
                value_element = Element('value')
                value_element.text = v
                # Wrap in <cyclestr> when the text contains '@'.
                value_element = _cyclestr(value_element)
            else:
                # NOTE(review): ``to_element`` is not defined in this chunk —
                # presumably a module-level helper for non-string (Offset-like)
                # values; verify it exists elsewhere in the module.
                value_element = to_element(v, 'value')
            envar.append(value_element)
            envars.append(envar)
        return envars
class Meta(Validator):
    """Descriptor validating a metatask variable mapping (str -> str)."""

    def __init__(self, contains=None):
        self.isin = contains

    def validate(self, value):
        if not isinstance(value, dict):
            raise TypeError(f'Expected meta value {value!r} to be a dictionary')
        for v in value.values():
            if not isinstance(v, str):
                raise TypeError(f'Expected to find string values in meta dict, \
                                but found {repr(v)}')
class XmlElement(Validator):
    """Descriptor validating that an assigned value is an ElementTree Element."""

    def __init__(self):
        pass

    def validate(self, value):
        if isinstance(value, Element):
            return
        raise TypeError(f'Expected Element but got {type(value)}')
class Dependency():
    """Wrapper around an XML element describing a rocoto task dependency."""

    # Validated attribute: must be an ElementTree Element.
    elm = XmlElement()

    def __init__(self, elm):
        self.elm = elm

    @staticmethod
    def operator(oper, *args):
        ''' Return new dependency wrapped in an operator tag; the operator is not validated'''
        # FIX: error-message typo corrected ("atleast" -> "at least").
        if len(args) < 2:
            raise TypeError(f'Expected at least two args, but got {len(args)},{args}')
        # Validate every argument before building the element so that a bad
        # argument leaves no partially-constructed dependency behind.
        for arg in args:
            if not isinstance(arg, Dependency):
                raise TypeError(f'Expected Dependency but got {type(arg)},{arg}')
        elm = Element(oper)
        for arg in args:
            elm.append(arg.elm)
        return Dependency(elm)

    def to_element(self, name='dependency'):
        """Return this dependency's element wrapped in a ``<name>`` tag."""
        E = Element(name)
        E.append(self.elm)
        return E
class IsDependency(Validator):
    """Descriptor validating that an assigned value is a Dependency."""

    def __init__(self):
        pass

    def validate(self, value):
        if isinstance(value, Dependency):
            return
        name = super().get_name()
        raise TypeError(f'Expected "{name}" value {value!r} to be a Dependency')
class Cycledefs(Validator):
    """Descriptor normalizing cycle definitions to a list of group names."""

    def __init__(self, contains=None):
        self.isin = contains

    def validate(self, value):
        ''' return list of cycle definition group names (strings) '''
        # Single string or single CycleDefinition -> one-element list.
        if isinstance(value, str):
            return [value]
        if isinstance(value, CycleDefinition):
            return [value.group]
        if isinstance(value, list):
            groups = []
            for item in value:
                if isinstance(item, CycleDefinition):
                    groups.append(item.group)
                elif isinstance(item, str):
                    groups.append(item)
                else:
                    msg = f'Expected CycleDefinition or string, but got {type(item)}'
                    raise TypeError(msg)
            return groups
        raise TypeError(f'Expected Cycledefs value {value!r} to '
                        'be CycleDefinition or list of CycleDefinitions/strings\n')
class CycleDefinition():
    """A rocoto <cycledef>: a named group with a crontab-like definition.

    All fields are coerced with ``str``, so a missing ``activation_offset``
    is stored as the literal string ``'None'`` (the sentinel checked in
    ``_generate_xml``).
    """

    # add logic to verify user provides a valid definition
    def __init__(self, group, definition, activation_offset=None):
        self.group = str(group)
        self.definition = str(definition)
        self.activation_offset = str(activation_offset)

    def __repr__(self):
        return f"CycleDefinition({self.__dict__!r})"

    def __eq__(self, other):
        if not isinstance(other, CycleDefinition):
            return False
        mine = (self.group, self.definition, self.activation_offset)
        theirs = (other.group, other.definition, other.activation_offset)
        return mine == theirs

    def __hash__(self):
        # Hash on group alone: equal objects share a group, so this is
        # consistent with __eq__.
        return hash(self.group)

    def _generate_xml(self):
        """Return the <cycledef> Element for this definition."""
        elm = Element('cycledef', group=self.group)
        elm.text = self.definition
        if self.activation_offset != 'None':
            elm.attrib['activation_offset'] = self.activation_offset
        return elm
def _cyclestr(element):
''' Wrap text elements containing '@' for syclestr information with cyclestr tag.
Elements that do not contain '@' are returned unchanged'''
if not isinstance(element, Element):
raise TypeError('element passed must be of type Element')
if element.text is None:
raise ValueError('passed element does not have text')
if '@' in element.text:
text = element.text
element.text = None
cyclestr_element = Element('cyclestr')
cyclestr_element.text = text
element.append(cyclestr_element)
return element
class Workflow(Borg):
    ''' Implement an abstraction layer on top of the rocoto workflow management engine.

    The Workflow class serves as a central object that registers all units of work
    (tasks) for any number of desired cycle definitions.
    Workflow objects share state (Borg pattern): every instance created with
    _shared=True sees the same tasks, task names and cycle definitions.
    '''

    def __init__(self, realtime='T', scheduler='lsf', _shared=True, **kwargs):
        if _shared:
            Borg.__init__(self)
        # Only the first shared instance initializes the registry state.
        if not hasattr(self, 'tasks'):
            self.tasks = []
            self.task_names = set()  # set of unique task names, metatasks are expanded.
            self.metatask_names = set()
            self.cycle_definitions = dict()
            self.workflow_element = Element('workflow', realtime=realtime,
                                            scheduler=scheduler, **kwargs)
            self.log_element = None

    def define_cycle(self, group, definition, activation_offset=None):
        ''' Register a cycle definition; re-registering an identical one is a no-op.

        Raises ValueError when *group* already exists with a different definition.
        '''
        cycledef = CycleDefinition(group, definition, activation_offset)
        if group in self.cycle_definitions:
            if cycledef == self.cycle_definitions[group]:
                return cycledef
            raise ValueError('cannot add different cycle definition with same group name')
        self.cycle_definitions[cycledef.group] = cycledef
        return cycledef

    def set_log(self, logfile):
        ''' Set the workflow <log> element ('@' text gets <cyclestr>-wrapped). '''
        log = Element('log')
        log.text = logfile
        self.log_element = _cyclestr(log)

    def _validate_task_dependencies(self, task):
        ''' Raise ValueError if *task* depends on a task/metatask not in this workflow. '''
        if hasattr(task, 'dependency'):
            for elm in task.dependency.to_element().iter():
                if elm.tag == 'taskdep':
                    if elm.attrib['task'] not in self.task_names:
                        n = elm.attrib['task']
                        raise ValueError(f'Task dependency {repr(n)} is not in workflow')
                if elm.tag == 'metataskdep':
                    if elm.attrib['metatask'] not in self.metatask_names:
                        n = elm.attrib['metatask']
                        raise ValueError(f'Metatask dependency {repr(n)} is not in workflow')

    def _validate_task_cycles(self, task):
        ''' Raise ValueError if *task* references an unknown cycle definition group. '''
        for cycledef in task.cycledefs:
            if cycledef not in self.cycle_definitions:
                raise ValueError(f'cycle definition "{cycledef}" not in workflow')

    def add_task(self, task):
        ''' Validate *task* and register it (and its metatask name) with the workflow. '''
        task._validate()  # will raise error if aggregate of task info appears to have issues
        self._validate_task_dependencies(task)  # will raise errors if task dependency issues
        self._validate_task_cycles(task)  # will raise errors if task cycle issues
        # Check name uniqueness BEFORE mutating any state so that a rejected
        # task is not left half-registered in self.tasks.
        if not self.task_names.isdisjoint(task.task_names):  # if intersection
            raise ValueError(f'Task names must be unique; Error adding task {repr(task.name)}')
        self.tasks.append(task)
        self.task_names.update(task.task_names)
        if hasattr(task, 'metatask_name'):
            self.metatask_names.add(task.metatask_name)

    def task(self):
        ''' Decorator used to associate tasks with the workflow.

        Use to wrap functions that will return a task object:
            @flow.task()
            def task():
                namespace for defining task
                return Task(locals())
        '''
        def decorator(func):
            task = func()
            self.add_task(task)
            logger.info(f'adding task {repr(task.name)}')
            # Return the wrapped factory (previously None) so the decorated
            # name remains a callable instead of being clobbered.
            return func
        return decorator

    @staticmethod
    def prettify(elem):
        ''' Return a pretty-printed XML string for *elem*. '''
        rough_string = tostring(elem, 'UTF-8')
        reparsed = minidom.parseString(rough_string)
        return reparsed.toprettyxml(indent="  ", encoding=None)

    def write_xml(self, xmlfile, cycledefs=None):
        ''' Write the assembled XML workflow to *xmlfile*.

        NOTE(review): this appends log/cycledef/task elements to the shared
        workflow_element on every call, so calling write_xml twice would
        duplicate them -- confirm single-shot usage is intended.
        '''
        xml = self.workflow_element
        xml.append(self.log_element)
        for cycledef in self.cycle_definitions.values():
            E = cycledef._generate_xml()
            xml.append(E)
        for task in self.tasks:
            E = task._generate_xml()
            xml.append(E)
        with open(xmlfile, 'w') as f:
            f.write('<?xml version="1.0"?>\n<!DOCTYPE workflow []>')
            # prettify() emits its own XML declaration ('<?xml version="1.0" ?>',
            # 22 chars); slice it off because we already wrote our own header.
            f.write(self.prettify(self.workflow_element)[22:])
class Task:
''' Implement container for information pertaining to a single task '''
# validate and track class meta data
# note: validated data attributes are added to self._validated by the validators
name = String() # tasks added to workflow should have unique name
metatask_name = String()
jobname = String()
command = String()
join = String(contains='/')
stderr = String(contains='/')
account = String()
memory = String() # maybe validate this more
walltime = String(contains=':')
maxtries = String()
queue = String()
partition = String()
native = String()
cores = String()
envar = Envar()
meta = Meta()
cycledefs = Cycledefs()
dependency = IsDependency()
final = String(one_of=['true', 'false'])
defaults = {'maxtries': '2',
'walltime': '20:00',
'final': 'false'}
# Specify required metadata, multiple entries indicates atleast one of is required
# I.E atleast one of 'join' or 'stderr' is required
_required = [['name'],
['command'],
['join', 'stderr'],
['cores', 'nodes'],
['cycledefs'],
['queue'],
['account']]
# All metadata that is _for_xml should be validated and stored as
# a string, Element or an object that has method as_element
_for_xml = ['jobname',
'command',
'join',
'stderr',
'stdout',
'account',
'queue',
'partition',
'walltime',
'cores',
'nodes',
'native',
'memory',
'envar',
'dependency']
def __init__(self, d):
# set some defaults if not already set
for k, v in self.defaults.items():
if not hasattr(self, k):
setattr(self, k, v)
# set user passed data that will overwrite any defaults
self.task_names = set()
for var, value in d.items():
setattr(self, var, value)
def _validate(self):
# ensure that metadata that should be diffrent by job is.
# jobname, join/stderr,
# meta keys should be specified when meta and
# ensure the agregate of data for this task looks ok
# check for common mistakes that are based on a combination of data
# single data validation should occur within a validator
for req_attrs in self._required:
good = False
for attr in req_attrs:
if hasattr(self, attr):
good = True
break
if not good:
raise ValueError(f'Expected one | |
up0 = self.upsample_0(conv4)
up0 = self.decoderblock_0(up0)
deconv0 = torch.cat([up0,conv3],-1)
deconv0 = self.decoderlayer_0(deconv0,mask=mask)
up1 = self.upsample_1(deconv0)
up1 = self.decoderblock_1(up1)
deconv1 = torch.cat([up1,conv2],-1)
deconv1 = self.decoderlayer_1(deconv1,mask=mask)
up2 = self.upsample_2(deconv1)
up2 = self.decoderblock_2(up2)
deconv2 = torch.cat([up2,conv1],-1)
deconv2 = self.decoderlayer_2(deconv2,mask=mask)
up3 = self.upsample_3(deconv2)
up3 = self.decoderblock_3(up3)
deconv3 = torch.cat([up3,conv0],-1)
deconv3 = self.decoderlayer_3(deconv3,mask=mask)
up4 = self.decoderblock_4(deconv3)
# Output Projection
y = self.output_proj(up4)
return x + y
    def flops(self):
        """Estimate the FLOPs of one forward pass at resolution self.reso.

        Sums the per-module estimates of every stage; each downsample /
        upsample is evaluated at the spatial resolution it receives
        (halved after each encoder stage, doubled through the decoder).
        NOTE(review): the ConvBlock_1 decoder blocks invoked in forward()
        (decoderblock_0..4) are not counted here -- confirm intentional.
        """
        flops = 0
        # Input Projection
        flops += self.input_proj.flops(self.reso,self.reso)
        # Encoder
        flops += self.encoderlayer_0.flops()+self.dowsample_0.flops(self.reso,self.reso)
        flops += self.encoderlayer_1.flops()+self.dowsample_1.flops(self.reso//2,self.reso//2)
        flops += self.encoderlayer_2.flops()+self.dowsample_2.flops(self.reso//2**2,self.reso//2**2)
        flops += self.encoderlayer_3.flops()+self.dowsample_3.flops(self.reso//2**3,self.reso//2**3)
        # Bottleneck
        flops += self.conv.flops()
        # Decoder
        flops += self.upsample_0.flops(self.reso//2**4,self.reso//2**4)+self.decoderlayer_0.flops()
        flops += self.upsample_1.flops(self.reso//2**3,self.reso//2**3)+self.decoderlayer_1.flops()
        flops += self.upsample_2.flops(self.reso//2**2,self.reso//2**2)+self.decoderlayer_2.flops()
        flops += self.upsample_3.flops(self.reso//2,self.reso//2)+self.decoderlayer_3.flops()
        # Output Projection
        flops += self.output_proj.flops(self.reso,self.reso)
        return flops
# Ablation variant: remove the right-half (decoder-side) transformer pieces
# (translated from Chinese: 删掉右半部分的transformer)
class Lformer(nn.Module):
    """U-shaped Uformer ablation variant.

    Encoder, bottleneck and decoder stages all use window-attention
    transformer layers (BasicUformerLayer); the per-stage decoder conv
    blocks (decoderblock_0..3) are constructed but their calls are
    commented out in forward(), so only decoderblock_4 runs.
    NOTE(review): decoderblock_0..3 therefore carry unused parameters --
    confirm whether they should be removed or re-enabled.

    forward() is residual: it returns x + output_proj(decoder features).
    depths/num_heads index layout: [0..3] encoder, [4] bottleneck,
    [5..8] decoder.
    """
    def __init__(self, img_size=128, in_chans=3,
                 embed_dim=32, depths=[2, 2, 2, 2, 2, 2, 2, 2, 2], num_heads=[1, 2, 4, 8, 16, 16, 8, 4, 2],
                 win_size=8, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, patch_norm=True,
                 use_checkpoint=False, token_projection='linear', token_mlp='ffn', se_layer=False,
                 dowsample=Downsample, upsample=MyUpsample, **kwargs):
        super().__init__()
        self.num_enc_layers = len(depths)//2
        self.num_dec_layers = len(depths)//2
        self.embed_dim = embed_dim
        self.patch_norm = patch_norm
        self.mlp_ratio = mlp_ratio
        self.token_projection = token_projection
        self.mlp = token_mlp
        self.win_size =win_size
        self.reso = img_size
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth: drop-path rates increase linearly through the
        # encoder and are mirrored for the decoder
        enc_dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths[:self.num_enc_layers]))]
        conv_dpr = [drop_path_rate]*depths[4]
        dec_dpr = enc_dpr[::-1]
        # build layers
        # Input/Output projections (output takes 2*embed_dim from the last skip concat)
        self.input_proj = InputProj(in_channel=in_chans, out_channel=embed_dim, kernel_size=3, stride=1, act_layer=nn.LeakyReLU)
        self.output_proj = OutputProj(in_channel=2*embed_dim, out_channel=in_chans, kernel_size=3, stride=1)
        # Encoder: channel width doubles and resolution halves at each stage
        self.encoderlayer_0 = BasicUformerLayer(dim=embed_dim,
                            output_dim=embed_dim,
                            input_resolution=(img_size,
                                              img_size),
                            depth=depths[0],
                            num_heads=num_heads[0],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=enc_dpr[sum(depths[:0]):sum(depths[:1])],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.dowsample_0 = dowsample(embed_dim, embed_dim*2)
        self.encoderlayer_1 = BasicUformerLayer(dim=embed_dim*2,
                            output_dim=embed_dim*2,
                            input_resolution=(img_size // 2,
                                              img_size // 2),
                            depth=depths[1],
                            num_heads=num_heads[1],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=enc_dpr[sum(depths[:1]):sum(depths[:2])],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.dowsample_1 = dowsample(embed_dim*2, embed_dim*4)
        self.encoderlayer_2 = BasicUformerLayer(dim=embed_dim*4,
                            output_dim=embed_dim*4,
                            input_resolution=(img_size // (2 ** 2),
                                              img_size // (2 ** 2)),
                            depth=depths[2],
                            num_heads=num_heads[2],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=enc_dpr[sum(depths[:2]):sum(depths[:3])],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.dowsample_2 = dowsample(embed_dim*4, embed_dim*8)
        self.encoderlayer_3 = BasicUformerLayer(dim=embed_dim*8,
                            output_dim=embed_dim*8,
                            input_resolution=(img_size // (2 ** 3),
                                              img_size // (2 ** 3)),
                            depth=depths[3],
                            num_heads=num_heads[3],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=enc_dpr[sum(depths[:3]):sum(depths[:4])],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.dowsample_3 = dowsample(embed_dim*8, embed_dim*16)
        # Bottleneck at 1/16 resolution
        self.conv = BasicUformerLayer(dim=embed_dim*16,
                            output_dim=embed_dim*16,
                            input_resolution=(img_size // (2 ** 4),
                                              img_size // (2 ** 4)),
                            depth=depths[4],
                            num_heads=num_heads[4],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=conv_dpr,
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        # Decoder: each stage upsamples, concatenates the encoder skip
        # (doubling channels), then runs a transformer layer
        self.upsample_0 = upsample(embed_dim*16, embed_dim*8)
        self.decoderblock_0 = ConvBlock_1(embed_dim*8,embed_dim*8)
        self.decoderlayer_0 = BasicUformerLayer(dim=embed_dim*16,
                            output_dim=embed_dim*16,
                            input_resolution=(img_size // (2 ** 3),
                                              img_size // (2 ** 3)),
                            depth=depths[5],
                            num_heads=num_heads[5],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=dec_dpr[:depths[5]],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.upsample_1 = upsample(embed_dim*16, embed_dim*4)
        self.decoderblock_1 = ConvBlock_1(embed_dim*4,embed_dim*4)
        self.decoderlayer_1 = BasicUformerLayer(dim=embed_dim*8,
                            output_dim=embed_dim*8,
                            input_resolution=(img_size // (2 ** 2),
                                              img_size // (2 ** 2)),
                            depth=depths[6],
                            num_heads=num_heads[6],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=dec_dpr[sum(depths[5:6]):sum(depths[5:7])],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.upsample_2 = upsample(embed_dim*8, embed_dim*2)
        self.decoderblock_2 = ConvBlock_1(embed_dim*2,embed_dim*2)
        self.decoderlayer_2 = BasicUformerLayer(dim=embed_dim*4,
                            output_dim=embed_dim*4,
                            input_resolution=(img_size // 2,
                                              img_size // 2),
                            depth=depths[7],
                            num_heads=num_heads[7],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=dec_dpr[sum(depths[5:7]):sum(depths[5:8])],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.upsample_3 = upsample(embed_dim*4, embed_dim)
        self.decoderblock_3 = ConvBlock_1(embed_dim,embed_dim)
        self.decoderlayer_3 = BasicUformerLayer(dim=embed_dim*2,
                            output_dim=embed_dim*2,
                            input_resolution=(img_size,
                                              img_size),
                            depth=depths[8],
                            num_heads=num_heads[8],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=dec_dpr[sum(depths[5:8]):sum(depths[5:9])],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.decoderblock_4 = ConvBlock_1(embed_dim*2,embed_dim*2)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for Linear weights; zeros/ones for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names exempt from weight decay.
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        # Parameter-name keywords exempt from weight decay.
        return {'relative_position_bias_table'}

    def extra_repr(self) -> str:
        return f"embed_dim={self.embed_dim}, token_projection={self.token_projection}, token_mlp={self.mlp},win_size={self.win_size}"

    def forward(self, x, mask=None):
        """Residual restoration: return x + decoder output projection."""
        # Input Projection
        y = self.input_proj(x)
        y = self.pos_drop(y)
        # Encoder (skip features conv0..conv3 are kept for the decoder)
        conv0 = self.encoderlayer_0(y,mask=mask)
        pool0 = self.dowsample_0(conv0)
        conv1 = self.encoderlayer_1(pool0,mask=mask)
        pool1 = self.dowsample_1(conv1)
        conv2 = self.encoderlayer_2(pool1,mask=mask)
        pool2 = self.dowsample_2(conv2)
        conv3 = self.encoderlayer_3(pool2,mask=mask)
        pool3 = self.dowsample_3(conv3)
        # Bottleneck
        conv4 = self.conv(pool3, mask=mask)
        # Decoder (skip concat along the channel/last dim; conv blocks disabled)
        up0 = self.upsample_0(conv4)
        # up0 = self.decoderblock_0(up0)
        deconv0 = torch.cat([up0,conv3],-1)
        deconv0 = self.decoderlayer_0(deconv0,mask=mask)
        up1 = self.upsample_1(deconv0)
        # up1 = self.decoderblock_1(up1)
        deconv1 = torch.cat([up1,conv2],-1)
        deconv1 = self.decoderlayer_1(deconv1,mask=mask)
        up2 = self.upsample_2(deconv1)
        # up2 = self.decoderblock_2(up2)
        deconv2 = torch.cat([up2,conv1],-1)
        deconv2 = self.decoderlayer_2(deconv2,mask=mask)
        up3 = self.upsample_3(deconv2)
        # up3 = self.decoderblock_3(up3)
        deconv3 = torch.cat([up3,conv0],-1)
        deconv3 = self.decoderlayer_3(deconv3,mask=mask)
        up4 = self.decoderblock_4(deconv3)
        # Output Projection
        y = self.output_proj(up4)
        return x + y

    def flops(self):
        """Estimate the FLOPs of one forward pass at resolution self.reso."""
        flops = 0
        # Input Projection
        flops += self.input_proj.flops(self.reso,self.reso)
        # Encoder
        flops += self.encoderlayer_0.flops()+self.dowsample_0.flops(self.reso,self.reso)
        flops += self.encoderlayer_1.flops()+self.dowsample_1.flops(self.reso//2,self.reso//2)
        flops += self.encoderlayer_2.flops()+self.dowsample_2.flops(self.reso//2**2,self.reso//2**2)
        flops += self.encoderlayer_3.flops()+self.dowsample_3.flops(self.reso//2**3,self.reso//2**3)
        # Bottleneck
        flops += self.conv.flops()
        # Decoder
        flops += self.upsample_0.flops(self.reso//2**4,self.reso//2**4)+self.decoderlayer_0.flops()
        flops += self.upsample_1.flops(self.reso//2**3,self.reso//2**3)+self.decoderlayer_1.flops()
        flops += self.upsample_2.flops(self.reso//2**2,self.reso//2**2)+self.decoderlayer_2.flops()
        flops += self.upsample_3.flops(self.reso//2,self.reso//2)+self.decoderlayer_3.flops()
        # Output Projection
        flops += self.output_proj.flops(self.reso,self.reso)
        return flops
# Ablation variant: remove all transformer layers (conv blocks only)
# (translated from Chinese: 删掉所有的transformer)
class LLformer(nn.Module):
def __init__(self, img_size=128, in_chans=3,
embed_dim=32, depths=[2, 2, 2, 2, 2, 2, 2, 2, 2], num_heads=[1, 2, 4, 8, 16, 16, 8, 4, 2],
win_size=8, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, patch_norm=True,
use_checkpoint=False, token_projection='linear', token_mlp='ffn', se_layer=False,
dowsample=Downsample, upsample=MyUpsample, **kwargs):
super().__init__()
self.num_enc_layers = len(depths)//2
self.num_dec_layers = len(depths)//2
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.mlp_ratio = mlp_ratio
self.token_projection = token_projection
self.mlp = token_mlp
self.win_size =win_size
self.reso = img_size
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
enc_dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths[:self.num_enc_layers]))]
conv_dpr = [drop_path_rate]*depths[4]
dec_dpr = enc_dpr[::-1]
# build layers
# Input/Output
self.input_proj = InputProj(in_channel=in_chans, out_channel=embed_dim, kernel_size=3, stride=1, act_layer=nn.LeakyReLU)
self.output_proj = OutputProj(in_channel=2*embed_dim, out_channel=in_chans, kernel_size=3, stride=1)
# Encoder
self.encoderlayer_0 = BasicUformerLayer(dim=embed_dim,
output_dim=embed_dim,
input_resolution=(img_size,
img_size),
depth=depths[0],
num_heads=num_heads[0],
win_size=win_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=enc_dpr[sum(depths[:0]):sum(depths[:1])],
norm_layer=norm_layer,
use_checkpoint=use_checkpoint,
token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
self.encoderblock_0 = ConvBlock_1(embed_dim,embed_dim)
self.dowsample_0 = dowsample(embed_dim, embed_dim*2)
self.encoderlayer_1 = BasicUformerLayer(dim=embed_dim*2,
output_dim=embed_dim*2,
input_resolution=(img_size // 2,
img_size // 2),
depth=depths[1],
num_heads=num_heads[1],
win_size=win_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=enc_dpr[sum(depths[:1]):sum(depths[:2])],
norm_layer=norm_layer,
use_checkpoint=use_checkpoint,
token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
self.encoderblock_1 = ConvBlock_1(embed_dim*2,embed_dim*2)
self.dowsample_1 = dowsample(embed_dim*2, embed_dim*4)
self.encoderlayer_2 = BasicUformerLayer(dim=embed_dim*4,
output_dim=embed_dim*4,
input_resolution=(img_size // (2 ** 2),
img_size // (2 ** 2)),
depth=depths[2],
num_heads=num_heads[2],
win_size=win_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=enc_dpr[sum(depths[:2]):sum(depths[:3])],
norm_layer=norm_layer,
use_checkpoint=use_checkpoint,
token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
self.encoderblock_2 = ConvBlock_1(embed_dim*4,embed_dim*4)
self.dowsample_2 = dowsample(embed_dim*4, embed_dim*8)
self.encoderlayer_3 = BasicUformerLayer(dim=embed_dim*8,
output_dim=embed_dim*8,
input_resolution=(img_size // (2 ** 3),
img_size // (2 ** 3)),
depth=depths[3],
num_heads=num_heads[3],
win_size=win_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=enc_dpr[sum(depths[:3]):sum(depths[:4])],
norm_layer=norm_layer,
use_checkpoint=use_checkpoint,
token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
self.encoderblock_3 = ConvBlock_1(embed_dim*8,embed_dim*8)
self.dowsample_3 = dowsample(embed_dim*8, embed_dim*16)
# Bottleneck
self.conv = BasicUformerLayer(dim=embed_dim*16,
output_dim=embed_dim*16,
input_resolution=(img_size // (2 ** 4),
img_size // (2 ** 4)),
depth=depths[4],
num_heads=num_heads[4],
win_size=win_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=conv_dpr,
norm_layer=norm_layer,
use_checkpoint=use_checkpoint,
token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
self.conv_block = ConvBlock_1(embed_dim*16,embed_dim*16)
# Decoder
self.upsample_0 = upsample(embed_dim*16, embed_dim*8)
self.decoderblock_0 = ConvBlock_1(embed_dim*8,embed_dim*8)
self.decoderlayer_0 = BasicUformerLayer(dim=embed_dim*16,
output_dim=embed_dim*16,
input_resolution=(img_size // (2 ** 3),
img_size // (2 ** 3)),
depth=depths[5],
num_heads=num_heads[5],
win_size=win_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dec_dpr[:depths[5]],
norm_layer=norm_layer,
use_checkpoint=use_checkpoint,
token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
self.upsample_1 = upsample(embed_dim*16, embed_dim*4)
self.decoderblock_1 = ConvBlock_1(embed_dim*4,embed_dim*4)
self.decoderlayer_1 = BasicUformerLayer(dim=embed_dim*8,
output_dim=embed_dim*8,
input_resolution=(img_size // (2 ** 2),
img_size // (2 ** 2)),
depth=depths[6],
num_heads=num_heads[6],
win_size=win_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dec_dpr[sum(depths[5:6]):sum(depths[5:7])],
norm_layer=norm_layer,
use_checkpoint=use_checkpoint,
token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
self.upsample_2 = upsample(embed_dim*8, embed_dim*2)
self.decoderblock_2 = ConvBlock_1(embed_dim*2,embed_dim*2)
self.decoderlayer_2 = BasicUformerLayer(dim=embed_dim*4,
output_dim=embed_dim*4,
input_resolution=(img_size // 2,
img_size // 2),
depth=depths[7],
num_heads=num_heads[7],
win_size=win_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dec_dpr[sum(depths[5:7]):sum(depths[5:8])],
norm_layer=norm_layer,
use_checkpoint=use_checkpoint,
token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
self.upsample_3 = upsample(embed_dim*4, embed_dim)
self.decoderblock_3 = ConvBlock_1(embed_dim,embed_dim)
self.decoderlayer_3 = BasicUformerLayer(dim=embed_dim*2,
output_dim=embed_dim*2,
input_resolution=(img_size,
img_size),
depth=depths[8],
num_heads=num_heads[8],
win_size=win_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dec_dpr[sum(depths[5:8]):sum(depths[5:9])],
norm_layer=norm_layer,
use_checkpoint=use_checkpoint,
token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
self.decoderblock_4 = ConvBlock_1(embed_dim*2,embed_dim*2)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def extra_repr(self) -> str:
return f"embed_dim={self.embed_dim}, token_projection={self.token_projection}, token_mlp={self.mlp},win_size={self.win_size}"
def forward(self, x, mask=None):
# Input Projection
y = self.input_proj(x)
y = self.pos_drop(y)
#Encoder
conv0 = self.encoderblock_0(y)
pool0 = self.dowsample_0(conv0)
conv1 = self.encoderblock_1(pool0)
pool1 = self.dowsample_1(conv1)
conv2 = self.encoderblock_2(pool1)
pool2 = self.dowsample_2(conv2)
conv3 = self.encoderblock_3(pool2)
pool3 = self.dowsample_3(conv3)
# Bottleneck
conv4 = self.conv_block(pool3)
#Decoder
up0 = self.upsample_0(conv4)
up0 = self.decoderblock_0(up0)
deconv0 = torch.cat([up0,conv3],-1)
# deconv0 = self.decoderlayer_0(deconv0,mask=mask)
up1 = self.upsample_1(deconv0)
up1 = self.decoderblock_1(up1)
deconv1 = torch.cat([up1,conv2],-1)
# deconv1 = self.decoderlayer_1(deconv1,mask=mask)
up2 = self.upsample_2(deconv1)
up2 = self.decoderblock_2(up2)
deconv2 = torch.cat([up2,conv1],-1)
# deconv2 = self.decoderlayer_2(deconv2,mask=mask)
up3 = self.upsample_3(deconv2)
up3 = self.decoderblock_3(up3)
deconv3 = torch.cat([up3,conv0],-1)
# deconv3 = self.decoderlayer_3(deconv3,mask=mask)
up4 = self.decoderblock_4(deconv3)
# Output Projection
y = self.output_proj(up4)
return x + y
def flops(self):
flops = 0
# Input Projection
flops += self.input_proj.flops(self.reso,self.reso)
# Encoder
flops += self.encoderlayer_0.flops()+self.dowsample_0.flops(self.reso,self.reso)
flops += self.encoderlayer_1.flops()+self.dowsample_1.flops(self.reso//2,self.reso//2)
flops += self.encoderlayer_2.flops()+self.dowsample_2.flops(self.reso//2**2,self.reso//2**2)
flops += self.encoderlayer_3.flops()+self.dowsample_3.flops(self.reso//2**3,self.reso//2**3)
# Bottleneck
flops += self.conv.flops()
# Decoder
flops += self.upsample_0.flops(self.reso//2**4,self.reso//2**4)+self.decoderlayer_0.flops()
flops += self.upsample_1.flops(self.reso//2**3,self.reso//2**3)+self.decoderlayer_1.flops()
flops += self.upsample_2.flops(self.reso//2**2,self.reso//2**2)+self.decoderlayer_2.flops()
flops | |
# -*- coding: utf-8 -*-
"""
File name: quad_mdl.py
Author: <NAME>
Created: June 2019
Description: A fault model of a multi-rotor drone.
"""
import numpy as np
from fmdtools.modeldef import *
#Define specialized flows
class Direc(Flow):
    """Trajectory flow: a desired (x, y, z) direction plus a power flag."""

    def __init__(self):
        self.traj = [0, 0, 0]
        x, y, z = self.traj
        super().__init__({'x': x, 'y': y, 'z': z, 'power': 1}, 'Trajectory')

    def assign(self, traj):
        """Point the trajectory at *traj*, mirroring it into x/y/z."""
        self.traj = traj
        self.x, self.y, self.z = traj[0], traj[1], traj[2]

    def status(self):
        """Return a fresh dict snapshot of the flow state."""
        return {'x': self.traj[0], 'y': self.traj[1],
                'z': self.traj[2], 'power': self.power}
#Define functions
class StoreEE(FxnBlock):
    """Energy-storage function: a pack of Battery components.

    The pack architecture comes from params['bat']: 'monolithic' (1 cell),
    'series-split' (2 series), 'parallel-split' (2 parallel), or
    'split-both' (2x2).  Aggregates component behavior into the EEout flow
    and publishes health on HSig.
    """
    def __init__(self, flows, params):
        self.archtype=params['bat']
        #weight, cap, voltage, drag_factor
        if self.archtype == 'monolithic':
            self.batparams ={'s':1,'p':1,'w':params['weight'],'v':12,'d':params['drag']}
            components = {'S1P1': Battery('S1P1', self.batparams)}
        elif self.archtype =='series-split':
            self.batparams ={'s':2,'p':1,'w':params['weight'],'v':12,'d':params['drag']}
            components = {'S1P1': Battery('S1P1', self.batparams), 'S2P1': Battery('S2P1', self.batparams)}
        elif self.archtype == 'parallel-split':
            self.batparams ={'s':1,'p':2,'w':params['weight'],'v':12,'d':params['drag']}
            components = {'S1P1': Battery('S1P1', self.batparams),'S1P2': Battery('S1P2', self.batparams)}
        elif self.archtype == 'split-both':
            self.batparams ={'s':2,'p':2,'w':params['weight'],'v':12,'d':params['drag']}
            components = {'S1P1': Battery('S1P1', self.batparams), 'S2P1': Battery('S2P1', self.batparams),'S1P2': Battery('S1P2', self.batparams), 'S2P2': Battery('S2P2', self.batparams)}
        else: raise Exception("Invalid battery architecture")
        #failrate for function w- component only applies to function modes
        self.failrate=1e-4
        self.assoc_modes({'nocharge':[0.2,[0.6,0.2,0.2],0],'lowcharge':[0.7,[0.6,0.2,0.2],0]})
        super().__init__(['EEout', 'FS', 'HSig'], flows, {'soc': 100}, components)
    def condfaults(self, time):
        # Trip charge faults from the aggregate state-of-charge and push the
        # corresponding soc value down into every battery component.
        if self.soc<20: self.add_fault('lowcharge')
        elif self.has_fault('lowcharge'):
            for batname, bat in self.components.items(): bat.soc=19
        if self.soc<1: self.replace_fault('lowcharge','nocharge')
        elif self.has_fault('nocharge'):
            for batname, bat in self.components.items(): bat.soc=0
    def behavior(self, time):
        # Run every battery; rate_res carries the residual (unserved) current
        # draw from one battery into the next iteration.
        EE, soc = {}, {}
        rate_res=0
        for batname, bat in self.components.items():
            EE[bat.name], soc[bat.name], rate_res = bat.behavior(self.FS.support, self.EEout.rate/(self.batparams['s']*self.batparams['p'])+rate_res, time)
        #need to incorporate max current draw somehow + draw when reconfigured
        # Combine per-battery efforts according to the architecture.
        if self.archtype == 'monolithic': self.EEout.effort = EE['S1P1']
        elif self.archtype == 'series-split': self.EEout.effort = np.max(list(EE.values()))
        elif self.archtype == 'parallel-split': self.EEout.effort = np.sum(list(EE.values()))
        elif self.archtype == 'split-both':
            # 2x2 pack: sum of the two largest efforts.
            e=list(EE.values())
            e.sort()
            self.EEout.effort = e[-1]+e[-2]
        self.soc=np.mean(list(soc.values()))
        # Publish health state for the health-management function.
        if self.any_faults(): self.HSig.hstate = 'faulty'
        else: self.HSig.hstate = 'nominal'
class Battery(Component):
    """Single battery component used inside StoreEE."""
    def __init__(self, name, batparams):
        super().__init__(name, {'soc':100, 'EEe':1.0, 'Et':1.0})
        self.failrate=1e-4
        self.avail_eff = 1/batparams['p']  # effort share per parallel string
        self.maxa = 2/batparams['s']       # max current before 'break' trips
        self.p=batparams['p']
        self.s=batparams['s']
        # Usable charge, scaled by weight/drag/voltage -- units appear to be
        # minutes of capacity; TODO confirm against the model documentation.
        self.amt = 60*4.200/(batparams['w']*170/(batparams['d']*batparams['v']))
        # Fault modes are prefixed with this component's name (see behavior()).
        self.assoc_modes({'short':[0.2,[0.3,0.3,0.3],100], 'degr':[0.2,[0.3,0.3,0.3],100],
                          'break':[0.2,[0.2,0.2,0.2],100], 'nocharge':[0.6,[0.6,0.2,0.2],100],
                          'lowcharge':[0,[0.6,0.2,0.2],100]}, name=name)
    def behavior(self, FS, EEoutr, time):
        """Advance the battery one step; return (Et, soc, Er_res).

        FS: structural support level; EEoutr: requested output rate for this
        battery; Er_res is the residual (unserved) draw when discharged.
        """
        # Conditional faults from support loss, overcurrent, and low charge.
        if FS <1.0: self.add_fault(self.name+'break')
        if EEoutr>self.maxa: self.add_fault(self.name+'break')
        if self.soc<20: self.add_fault(self.name+'lowcharge')
        if self.soc<1: self.replace_fault(self.name+'lowcharge',self.name+'nocharge')
        Et=1.0 #default
        if self.has_fault(self.name+'short'): Et=0.0
        elif self.has_fault(self.name+'break'): Et=0.0
        elif self.has_fault(self.name+'degr'): Et=0.5
        self.Et = Et*self.avail_eff
        Er_res=0.0
        # Discharge only on time advance (avoids double-counting within a step).
        if time > self.time:
            self.soc=self.soc-100*EEoutr*self.p*self.s*(time-self.time)/self.amt
            self.time=time
        if self.has_fault(self.name+'nocharge'): self.soc, self.Et, Er_res = 0.0,0.0, EEoutr
        return self.Et, self.soc, Er_res
class DistEE(FxnBlock):
    """Distribute electrical energy from storage to the motors and control.

    States: EEtr (rate transfer factor), EEte (effort transfer factor).
    """
    def __init__(self,flows):
        super().__init__(['EEin','EEmot','EEctl','ST'],flows, {'EEtr':1.0, 'EEte':1.0}, timely=False)
        self.failrate=1e-5
        self.assoc_modes({'short':[0.3,[0.33, 0.33, 0.33],300], 'degr':[0.5,[0.33, 0.33, 0.33],100],\
                          'break':[0.2,[0.33, 0.33, 0.33],200]})
    def condfaults(self, time):
        # Structural failure or excessive downstream draw breaks distribution;
        # excessive input current shorts it.
        if self.ST.support<0.5 or max(self.EEmot.rate,self.EEctl.rate)>2: self.add_fault('break')
        if self.EEin.rate>2: self.add_fault('short')
    def behavior(self, time):
        # NOTE(review): 'EEre' is not declared in the states dict above and is
        # never read elsewhere in this block -- it looks like 'EEtr' was
        # intended; confirm before changing.
        if self.has_fault('short'): self.EEte, self.EEre = 0.0,10.0
        elif self.has_fault('break'): self.EEte, self.EEre = 0.0,0.0
        elif self.has_fault('degr'): self.EEte=0.5
        # Pass (possibly attenuated) effort through to motors and control.
        self.EEmot.effort=self.EEte*self.EEin.effort
        self.EEctl.effort=self.EEte*self.EEin.effort
        self.EEin.rate=m2to1([ self.EEin.effort, self.EEtr, 0.99*self.EEmot.rate+0.01*self.EEctl.rate])
class HoldPayload(FxnBlock):
    """Structural function tying the payload to the lines and airframe."""
    def __init__(self,flows):
        super().__init__(['DOF', 'Lin', 'ST'],flows, timely=False, states={'Force_GR':1.0})
        self.failrate=1e-6
        self.assoc_modes({'break':[0.2, [0.33, 0.33, 0.33], 1000], 'deform':[0.8, [0.33, 0.33, 0.33], 1000]})
    def condfaults(self, time):
        # Ground-reaction force on touchdown, from vertical/planar velocity.
        if self.DOF.elev<=0.0: self.Force_GR=min(-0.5, (self.DOF.vertvel/60-self.DOF.planvel/60)/7.5)
        else: self.Force_GR=0.0
        # NOTE(review): abs(x)>1.0 implies abs(x)>0.8, so the 'deform' elif
        # below can never fire -- the thresholds look swapped (deform at the
        # lower force, break at the higher); confirm intent before fixing.
        if abs(self.Force_GR/2)>0.8: self.add_fault('break')
        elif abs(self.Force_GR/2)>1.0: self.add_fault('deform')
    def behavior(self, time):
        #need to transfer FG to FA & FS???
        # Degrade the support delivered to the lines and structure.
        if self.has_fault('break'): self.Lin.support, self.ST.support = 0,0
        elif self.has_fault('deform'): self.Lin.support, self.ST.support = 0.5,0.5
        else: self.Lin.support, self.ST.support = 1.0,1.0
class ManageHealth(FxnBlock):
    """Resilience-policy function: reconfigures the trajectory on faults.

    respolicy maps fault sources to responses: {'line': mode, 'bat': mode},
    where mode is one of 'continue', 'to_home', 'to_nearest', 'emland'.
    """
    def __init__(self,flows,respolicy):
        self.respolicy = respolicy
        flownames=['EECtl','FS','DOFshealth', 'Bathealth', 'Trajconfig' ]
        super().__init__(flownames, flows)
        self.failrate=1e-6 #{'falsemaintenance':[0.8,[1.0, 0.0,0.0,0.0,0.0],1000],\
        self.assoc_modes({'falsemasking':[0.1,[1.0, 0.2,0.4,0.4,0.0],1000],\
                          'falseemland':[0.05,[0.0, 0.2,0.4,0.4,0.0],1000],\
                          'lostfunction':[0.05,[0.2, 0.2,0.2,0.2,0.2],1000]})
    def condfaults(self, time):
        # Loses function on structural failure or a control-power surge.
        if self.FS.support<0.5 or self.EECtl.effort>2.0: self.add_fault('lostfunction')
    def behavior(self, time):
        # When its own function is lost, the manager cannot respond and the
        # mission continues; otherwise apply the configured response policy.
        if self.has_fault('lostfunction'): self.Trajconfig.mode = 'continue'
        elif self.DOFshealth.hstate=='faulty': self.Trajconfig.mode = self.respolicy['line']
        elif self.Bathealth.hstate=='faulty': self.Trajconfig.mode = self.respolicy['bat']
        else: self.Trajconfig.mode = 'continue'
        # trajconfig: continue, to_home, to_nearest, emland
class AffectDOF(FxnBlock): #EEmot,Ctl1,DOFs,Force_Lin HSig_DOFs, RSig_DOFs
    """Propulsion function: owns the rotor Line components and converts
    electrical input plus control commands into climb/forward power, then
    integrates the drone's position and velocity (DOF flow) per time step.

    archtype selects the rotor architecture: 'quad', 'hex', or 'oct'.
    """
    def __init__(self, flows, archtype):
        self.archtype=archtype
        if archtype=='quad':
            components={'RF':Line('RF'), 'LF':Line('LF'), 'LR':Line('LR'), 'RR':Line('RR')}
            # per-rotor contribution factors to upward / forward thrust
            self.upward={'RF':1,'LF':1,'LR':1,'RR':1}
            self.forward={'RF':0.5,'LF':0.5,'LR':-0.5,'RR':-0.5}
            # rotor groupings used for the left/right and fore/aft stability checks
            self.LR = {'L':{'LF', 'LR'}, 'R':{'RF','RR'}}
            self.FR = {'F':{'LF', 'RF'}, 'R':{'LR', 'RR'}}
        elif archtype=='hex':
            # hex adds dedicated fore (F) and aft (R) rotors
            components={'RF':Line('RF'), 'LF':Line('LF'), 'LR':Line('LR'), 'RR':Line('RR'),'R':Line('R'), 'F':Line('F')}
            self.upward={'RF':1,'LF':1,'LR':1,'RR':1,'R':1,'F':1}
            self.forward={'RF':0.5,'LF':0.5,'LR':-0.5,'RR':-0.5, 'R':-0.75, 'F':0.75}
            self.LR = {'L':{'LF', 'LR'}, 'R':{'RF','RR'}}
            self.FR = {'F':{'LF', 'RF', 'F'}, 'R':{'LR', 'RR', 'R'}}
        elif archtype=='oct':
            # oct doubles up each quad rotor position
            components={'RF':Line('RF'), 'LF':Line('LF'), 'LR':Line('LR'), 'RR':Line('RR'),'RF2':Line('RF2'), 'LF2':Line('LF2'), 'LR2':Line('LR2'), 'RR2':Line('RR2')}
            self.upward={'RF':1,'LF':1,'LR':1,'RR':1,'RF2':1,'LF2':1,'LR2':1,'RR2':1}
            self.forward={'RF':0.5,'LF':0.5,'LR':-0.5,'RR':-0.5,'RF2':0.5,'LF2':0.5,'LR2':-0.5,'RR2':-0.5}
            self.LR = {'L':{'LF', 'LR','LF2', 'LR2'}, 'R':{'RF','RR','RF2','RR2'}}
            self.FR = {'F':{'LF', 'RF','LF2', 'RF2'}, 'R':{'LR', 'RR','LR2', 'RR2'}}
        super().__init__(['EEin', 'Ctlin','DOF','Dir','Force','HSig'], flows,{'LRstab':0.0, 'FRstab':0.0}, components)
    def behavior(self, time):
        Air,EEin={},{}
        #injects faults into lines
        for linname,lin in self.components.items():
            cmds={'up':self.upward[linname], 'for':self.forward[linname]}
            lin.behavior(self.EEin.effort, self.Ctlin, cmds, self.Force.support)
            Air[lin.name]=lin.Airout
            EEin[lin.name]=lin.EE_in
        # aggregate electrical draw over the rotor lines (capped at 10)
        if any(value>=10 for value in EEin.values()): self.EEin.rate=10
        elif any(value!=0.0 for value in EEin.values()): self.EEin.rate=sum(EEin.values())/len(EEin) #should it really be max?
        else: self.EEin.rate=0.0
        # stability metrics: normalized airflow imbalance between rotor groups
        self.LRstab = (sum([Air[comp] for comp in self.LR['L']])-sum([Air[comp] for comp in self.LR['R']]))/len(Air)
        self.FRstab = (sum([Air[comp] for comp in self.FR['R']])-sum([Air[comp] for comp in self.FR['F']]))/len(Air)
        # excessive imbalance -> no usable lift or planar power
        if abs(self.LRstab) >=0.25 or abs(self.FRstab)>=0.75: self.DOF.uppwr, self.DOF.planpwr = 0.0, 0.0
        else:
            Airs=list(Air.values())
            self.DOF.uppwr=np.mean(Airs)
            self.DOF.planpwr=self.Ctlin.forward
        if self.any_faults(): self.HSig.hstate='faulty'
        # --- position/velocity integration, once per time step ---
        if time> self.time:
            # vertical speed saturates at +/- 60*5 per time unit
            if self.DOF.uppwr > 1.0: self.DOF.vertvel = 60*min([(self.DOF.uppwr-1)*5, 5])
            elif self.DOF.uppwr < 1.0: self.DOF.vertvel = 60*max([(self.DOF.uppwr-1)*5, -5])
            else: self.DOF.vertvel = 0.0
            if self.DOF.elev<=0.0:
                # on the ground: cannot descend further, no planar motion
                self.DOF.vertvel=max(0,self.DOF.vertvel)
                self.DOF.planvel=0.0
            if self.DOF.vertvel<-self.DOF.elev:
                # descent would reach the ground this step: land, covering
                # whatever planar distance is achievable on the way down
                reqdist = np.sqrt(self.Dir.x**2 + self.Dir.y**2+0.0001)
                if self.DOF.planpwr>0.0:
                    maxdist = 600 * self.DOF.elev/(-self.DOF.vertvel+0.001)
                    # NOTE(review): these assignments write self.planvel (an
                    # attribute of the function block), not self.DOF.planvel
                    # like the rest of this method -- possibly a bug; confirm.
                    if reqdist > maxdist: self.planvel = maxdist
                    else: self.planvel = reqdist
                else: self.planvel = 0.1
                self.DOF.x=self.DOF.x+self.planvel*self.Dir.traj[0]/reqdist
                self.DOF.y=self.DOF.y+self.planvel*self.Dir.traj[1]/reqdist
                self.DOF.elev=0.0
            else:
                self.DOF.planvel=60*min([10*self.DOF.planpwr, 10]) # 600 m/m = 23 mph
                vect = np.sqrt(np.power(self.Dir.traj[0], 2)+ np.power(self.Dir.traj[1], 2))+0.001
                self.DOF.x=self.DOF.x+self.DOF.planvel*self.Dir.traj[0]/vect
                self.DOF.y=self.DOF.y+self.DOF.planvel*self.Dir.traj[1]/vect
                self.DOF.elev=self.DOF.elev + self.DOF.vertvel
class Line(Component):
    """One rotor line: turns electrical input and control commands into
    airflow (Airout), subject to electrical, control, and mechanical faults.

    States: Eto/Eti (electrical transfer out/in), Ct (control transfer),
    Mt (mechanical transfer), Pt (propeller transfer).
    """
    def __init__(self, name):
        super().__init__(name,{'Eto': 1.0, 'Eti':1.0, 'Ct':1.0, 'Mt':1.0, 'Pt':1.0}, timely=False)
        self.failrate=1e-5
        # fault mode names are prefixed with the component name via name=name
        self.assoc_modes({'short':[0.1, [0.33, 0.33, 0.33], 200],'openc':[0.1, [0.33, 0.33, 0.33], 200],\
                          'ctlbreak':[0.2, [0.33, 0.33, 0.33], 100], 'mechbreak':[0.1, [0.33, 0.33, 0.33], 500],\
                          'mechfriction':[0.05, [0.0, 0.5,0.5], 500], 'stuck':[0.02, [0.0, 0.5,0.5], 200]},name=name)
    def behavior(self, EEin, Ctlin, cmds, Force):
        # Loss of structural support induces mechanical faults; the check
        # order matters: a full break takes precedence over friction.
        if Force<=0.0: self.add_fault(self.name+'mechbreak')
        elif Force<=0.5: self.add_fault(self.name+'mechfriction')
        # Electrical faults: a short draws unbounded power (Eto=inf) while
        # passing nothing in; an open circuit passes nothing either way.
        if self.has_fault(self.name+'short'): self.Eti, self.Eto = 0.0, np.inf
        elif self.has_fault(self.name+'openc'): self.Eti, self.Eto =0.0, 0.0
        elif Ctlin.upward==0 and Ctlin.forward == 0: self.Eto = 0.0  # idle: no draw
        if self.has_fault(self.name+'ctlbreak'): self.Ct=0.0
        if self.has_fault(self.name+'mechbreak'): self.Mt=0.0
        elif self.has_fault(self.name+'mechfriction'): self.Mt, self.Eti = 0.5, 2.0  # friction halves output, doubles draw
        if self.has_fault(self.name+'stuck'): self.Pt, self.Mt, self.Eti = 0.0, 0.0, 4.0
        # m2to1 combines the transfer factors into a single value
        # (project helper -- see its definition for the exact semantics)
        self.Airout=m2to1([EEin,self.Eti,Ctlin.upward*cmds['up']+Ctlin.forward*cmds['for'],self.Ct,self.Mt,self.Pt])
        self.EE_in=m2to1([EEin,self.Eto])
class CtlDOF(FxnBlock):
    """Flight controller: translates the commanded direction (Dir) into
    upward/forward throttle signals on the Ctl flow, scaled by electrical
    effort and the controller's own health (Cs)."""
    def __init__(self, flows):
        super().__init__(['EEin', 'Dir', 'Ctl', 'DOFs', 'FS'], flows, {'vel': 0.0, 'Cs': 1.0})
        self.failrate = 1e-5
        self.assoc_modes({'noctl': [0.2, [0.6, 0.3, 0.1], 1000], 'degctl': [0.8, [0.6, 0.3, 0.1], 1000]})
    def condfaults(self, time):
        # Loss of structural support disables the controller entirely.
        if self.FS.support < 0.5:
            self.add_fault('noctl')
    def behavior(self, time):
        # Control-signal scaling degraded by faults.
        if self.has_fault('noctl'):
            self.Cs = 0.0
        elif self.has_fault('degctl'):
            self.Cs = 0.5
        # Sample the vertical velocity once per time step.
        if time > self.time:
            self.vel = self.DOFs.vertvel
        # throttle settings: 0 is off (-50 m/s), 1 is hover, 2 is max climb (5 m/s)
        climb_cmd = self.Dir.traj[2]
        if climb_cmd > 0:
            upthrottle = 1 + np.min([climb_cmd/(50*5), 1])
        elif climb_cmd < 0:
            upthrottle = 1 + np.max([climb_cmd/(50*5), -1])
        else:
            upthrottle = 1.0
        # Forward throttle from the planar magnitude of the commanded direction.
        plan_mag = np.sqrt(np.power(self.Dir.traj[0], 2) + np.power(self.Dir.traj[1], 2)) + 0.001
        forwardthrottle = np.min([plan_mag/(60*10), 1])
        # Both outputs scale with electrical effort, controller health, and power flag.
        self.Ctl.forward = self.EEin.effort*self.Cs*forwardthrottle*self.Dir.power
        self.Ctl.upward = self.EEin.effort*self.Cs*upthrottle*self.Dir.power
class PlanPath(FxnBlock):
    """Path planner: steps through the flightplan waypoints and sets the
    direction flow (Dir) according to the current flight mode.

    Modes: taxi, move, land, to_nearest (plus 'descend', only ever tested,
    not set here). goals is a dict keyed by waypoint index starting at 1.
    """
    def __init__(self, flows, params):
        super().__init__(['EEin','Env','Dir','FS','Rsig'], flows, states={'dx':0.0, 'dy':0.0, 'dz':0.0, 'pt':1, 'mode':'taxi'})
        # nearest safe landing point (x, y, ground level)
        self.nearest = params['safe'][0:2]+[0]
        self.goals = params['flightplan']
        self.goal = self.goals[1]
        self.failrate = 1e-5
        self.assoc_modes({'noloc':[0.2, [0.6, 0.3, 0.1], 1000], 'degloc':[0.8, [0.6, 0.3, 0.1], 1000]})
    def condfaults(self, time):
        # Without structural support the planner loses localization.
        if self.FS.support < 0.5:
            self.add_fault('noloc')
    def behavior(self, t):
        loc = [self.Env.x, self.Env.y, self.Env.elev]
        # goals is keyed 1..N, so max(self.goals) is the last waypoint index
        if self.pt <= max(self.goals):
            self.goal = self.goals[self.pt]
        dist = finddist(loc, self.goal)
        [self.dx, self.dy, self.dz] = vectdist(self.goal, loc)
        # --- mode transitions ---
        if self.mode == 'taxi' and t > 5:
            # BUGFIX: was `self.mode=='taxi'`, a no-op comparison statement.
            # The intent is to remain in taxi (the branch also guards the
            # elif chain below from firing), so assign the same value.
            self.mode = 'taxi'
        elif self.Rsig.mode == 'to_home':  # add a to_nearest option
            # Retarget the first waypoint. NOTE(review): self.mode is not
            # changed here, and goals may have no key 0 -- confirm intent.
            self.pt = 0
            self.goal = self.goals[self.pt]
            [self.dx, self.dy, self.dz] = vectdist(self.goal, loc)
        elif self.Rsig.mode == 'to_nearest':
            self.mode = 'to_nearest'
        elif self.Rsig.mode == 'emland':
            self.mode = 'land'
        elif self.Env.elev < 1 and (self.pt >= max(self.goals) or self.mode == 'land'):
            self.mode = 'taxi'
        elif dist < 10 and self.pt >= max(self.goals):
            self.mode = 'land'
        elif dist < 10 and self.mode == 'move':  # idiom: was {'move'}.issuperset({self.mode})
            # Waypoint reached: advance to the next one.
            if self.pt < max(self.goals):
                self.pt += 1
                self.goal = self.goals[self.pt]
        elif dist > 5 and self.mode != 'descend':
            self.mode = 'move'
        # --- nominal behaviors ---
        self.Dir.power = 1.0
        if self.mode == 'taxi':
            self.Dir.power = 0.0
        elif self.mode == 'move':
            self.Dir.assign([self.dx, self.dy, self.dz])
        elif self.mode == 'land':
            self.Dir.assign([0, 0, -self.Env.elev/2])
        elif self.mode == 'to_nearest':
            self.Dir.assign(vectdist(self.nearest, loc))
        # --- faulty behaviors ---
        if self.has_fault('noloc'):
            self.Dir.assign([0, 0, 0])
        elif self.has_fault('degloc'):
            self.Dir.assign([0, 0, -1])
        if self.EEin.effort < 0.5:
            # Insufficient power to the planner: stop commanding motion.
            self.Dir.power = 0.0
            self.Dir.assign([0, 0, 0])
class Drone(Model):
def __init__(self, params={'flightplan':{1:[0,0,100], 2:[100, 0,100], 3:[100, 100,100], 4:[150, 150,100], 5:[0,0,100], 6:[0,0,0]},'bat':'monolithic', 'linearch':'quad','respolicy':{'bat':'to_home','line':'emland'},
'start': [0.0,0.0, 10, 10], 'target': [0, 150, 160, 160], 'safe': [0, 50, 10, 10], 'loc':'rural', 'landtime':12}):
super().__init__()
super().__init__(modelparams={'phases': {'ascend':[0,1],'forward':[1,params['landtime']],'taxis':[params['landtime'], 20]},
'times':[0,30],'units':'min'}, params=params)
self.start_area = square(self.params['start'][0:2],self.params['start'][2],self.params['start'][3] )
self.safe_area = square(self.params['safe'][0:2],self.params['safe'][2],self.params['safe'][3] )
self.target_area = square(self.params['target'][0:2],self.params['target'][2],self.params['target'][3] )
#add flows to the model
self.add_flow('Force_ST', {'support':1.0})
self.add_flow('Force_Lin', {'support':1.0} )
self.add_flow('HSig_DOFs', {'hstate':'nominal', 'config':1.0})
self.add_flow('HSig_Bat', {'hstate':'nominal', 'config':1.0} )
self.add_flow('RSig_Traj', {'mode':'continue'})
self.add_flow('EE_1', {'rate':1.0, 'effort':1.0})
self.add_flow('EEmot', {'rate':1.0, 'effort':1.0})
self.add_flow('EEctl', {'rate':1.0, 'effort':1.0})
self.add_flow('Ctl1', {'forward':0.0, 'upward':1.0})
self.add_flow('DOFs', {'vertvel':0.0, 'planvel':0.0, 'planpwr':0.0, 'uppwr':0.0, 'x':0.0,'y':0.0,'elev':0.0})
# custom flows
self.add_flow('Dir1', Direc())
#add functions to the model
flows=['EEctl', 'Force_ST', 'HSig_DOFs', 'HSig_Bat', 'RSig_Traj']
# trajconfig: continue, to_home, to_nearest, emland
self.add_fxn('ManageHealth',flows,fclass = ManageHealth, fparams=params['respolicy'])
batweight = {'monolithic':0.4, 'series-split':0.5, 'parallel-split':0.5, 'split-both':0.6}[params['bat']]
archweight = {'quad':1.2, 'hex':1.6, 'oct':2.0}[params['linearch']]
archdrag = {'quad':0.95, 'hex':0.85, 'oct':0.75}[params['linearch']]
self.add_fxn('StoreEE',['EE_1', 'Force_ST', 'HSig_Bat'],fclass = StoreEE, fparams= {'bat': params['bat'], 'weight':(batweight+archweight)/2.2 , 'drag': archdrag })
self.add_fxn('DistEE', ['EE_1','EEmot','EEctl', 'Force_ST'], fclass = DistEE)
self.add_fxn('AffectDOF',['EEmot','Ctl1','DOFs','Dir1','Force_Lin', 'HSig_DOFs'], fclass=AffectDOF, fparams = params['linearch'])
self.add_fxn('CtlDOF',['EEctl', 'Dir1', 'Ctl1', 'DOFs', 'Force_ST'], fclass = CtlDOF)
self.add_fxn('Planpath', ['EEctl', 'DOFs','Dir1', 'Force_ST', 'RSig_Traj'], fclass=PlanPath, fparams=params)
| |
2400
)
self.assertEqual(
invoice.paid,
0
)
self.assertEqual(
invoice.total,
2400
)
url = reverse("sales:edit", kwargs={"pk": invoice.pk})
data = {}
header_data = create_header(
HEADER_FORM_PREFIX,
{
"type": invoice.type,
"customer": invoice.customer.pk,
"period": invoice.period.pk,
"ref": invoice.ref,
"date": invoice.date.strftime(DATE_INPUT_FORMAT),
"due_date": invoice.due_date.strftime(DATE_INPUT_FORMAT),
"total": invoice.total
}
)
data.update(header_data)
lines = SaleLine.objects.all().order_by("pk")
lines_as_dicts = [to_dict(line) for line in lines]
line_trans = [get_fields(line, ['id', 'description', 'goods',
'nominal', 'vat_code', 'vat']) for line in lines_as_dicts]
line_forms = line_trans
line_data = create_formset_data(LINE_FORM_PREFIX, line_forms)
line_data["line-INITIAL_FORMS"] = 20
data.update(line_data)
matching_trans = [invoice]
matching_trans_as_dicts = [to_dict(m) for m in matching_trans]
matching_trans = [get_fields(
m, ['type', 'ref', 'total', 'paid', 'due', 'id']) for m in matching_trans_as_dicts]
matching_forms = []
matching_forms += add_and_replace_objects(
matching_trans, {"id": "matched_by"}, {"value": 600})
matches = SaleMatching.objects.all().order_by("pk")
matching_forms[0]["matched_to"] = invoice.pk
matching_data = create_formset_data(match_form_prefix, matching_forms)
data.update(matching_data)
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Cannot match a transaction to itself.")
invoice.refresh_from_db()
self.assertEqual(
invoice.due,
2400
)
self.assertEqual(
invoice.paid,
0
)
self.assertEqual(
invoice.total,
2400
)
matches = SaleMatching.objects.all().order_by("pk")
self.assertEqual(
len(matches),
0
)
    def test_cannot_match_a_transaction_with_status_void_36(self):
        """Posting an edit that matches a receipt to a voided invoice must be
        rejected, leaving the invoice totals and matches untouched."""
        self.client.force_login(self.user)
        # Invoice of 2400 (20 lines of 100 goods + 20 vat), fully unpaid.
        create_invoice_with_nom_entries(
            {
                "type": "si",
                "customer": self.customer,
                "period": self.period,
                "ref": self.ref,
                "date": self.model_date,
                "due_date": self.model_due_date,
                "total": 2400,
                "paid": 0,
                "due": 2400,
                "goods": 2000,
                "vat": 400,
            },
            [
                {
                    'description': self.description,
                    'goods': 100,
                    'nominal': self.nominal,
                    'vat_code': self.vat_code,
                    'vat': 20
                }
            ] * 20,
            self.vat_nominal,
            self.sale_control
        )
        invoice = SaleHeader.objects.first()
        receipt1 = create_receipts(
            self.customer, "receipt", 1, self.period, 1200)[0]
        invoice.refresh_from_db()
        receipt1.refresh_from_db()
        self.assertEqual(
            invoice.due,
            2400
        )
        self.assertEqual(
            invoice.paid,
            0
        )
        self.assertEqual(
            invoice.total,
            2400
        )
        # here we match with models only so matching is possible
        self.assertEqual(
            receipt1.due,
            -1200
        )
        self.assertEqual(
            receipt1.paid,
            0
        )
        self.assertEqual(
            receipt1.total,
            -1200
        )
        cashbook = CashBook.objects.create(
            name="current", nominal=self.nominal)
        # Void the invoice; matching against it via the form must now fail.
        invoice.status = "v"
        invoice.save()
        url = reverse("sales:edit", kwargs={"pk": receipt1.pk})
        data = {}
        header_data = create_header(
            HEADER_FORM_PREFIX,
            {
                "cash_book": cashbook.pk,
                "type": receipt1.type,
                "customer": receipt1.customer.pk,
                "period": receipt1.period.pk,
                "ref": receipt1.ref,
                "date": receipt1.date.strftime(DATE_INPUT_FORMAT),
                "total": receipt1.total * -1
            }
        )
        data.update(header_data)
        line_data = create_formset_data(LINE_FORM_PREFIX, [])
        data.update(line_data)
        # Matching formset that attempts to match the (void) invoice.
        matching_trans = [invoice]
        matching_trans_as_dicts = [to_dict(m) for m in matching_trans]
        matching_trans = [get_fields(
            m, ['type', 'ref', 'total', 'paid', 'due', 'id']) for m in matching_trans_as_dicts]
        matching_forms = []
        matching_forms += add_and_replace_objects(
            matching_trans, {"id": "matched_to"}, {"value": 600})
        matching_data = create_formset_data(match_form_prefix, matching_forms)
        data.update(matching_data)
        response = self.client.post(url, data)
        # Form re-rendered (200, not redirect) with the validation error.
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            "Cannot match to a void transaction"
        )
    def test_cannot_change_customer_if_there_are_matches_38(self):
        """Editing a matched receipt to point at a different customer must be
        rejected while its matches still exist."""
        self.client.force_login(self.user)
        # Invoice of 2400 (20 lines of 100 goods + 20 vat), fully unpaid.
        create_invoice_with_nom_entries(
            {
                "type": "si",
                "customer": self.customer,
                "period": self.period,
                "ref": self.ref,
                "date": self.model_date,
                "due_date": self.model_due_date,
                "total": 2400,
                "paid": 0,
                "due": 2400,
                "goods": 2000,
                "vat": 400,
            },
            [
                {
                    'description': self.description,
                    'goods': 100,
                    'nominal': self.nominal,
                    'vat_code': self.vat_code,
                    'vat': 20
                }
            ] * 20,
            self.vat_nominal,
            self.sale_control
        )
        invoice = SaleHeader.objects.first()
        receipt1 = create_receipts(
            self.customer, "receipt", 1, self.period, 1200)[0]
        receipt2 = create_receipts(
            self.customer, "receipt", 1, self.period, -5000)[0]
        # Match both receipts to the invoice at model level.
        match(invoice, [(receipt1, -1200), (receipt2, 600)])
        invoice.refresh_from_db()
        receipt1.refresh_from_db()
        receipt2.refresh_from_db()
        self.assertEqual(
            invoice.due,
            1800
        )
        self.assertEqual(
            invoice.paid,
            600
        )
        self.assertEqual(
            invoice.total,
            2400
        )
        # here we match with models only so matching is possible
        self.assertEqual(
            receipt1.due,
            0
        )
        self.assertEqual(
            receipt1.paid,
            -1200
        )
        self.assertEqual(
            receipt1.total,
            -1200
        )
        self.assertEqual(
            receipt2.due,
            4400
        )
        self.assertEqual(
            receipt2.paid,
            600
        )
        self.assertEqual(
            receipt2.total,
            5000
        )
        matches = SaleMatching.objects.all().order_by("pk")
        self.assertEqual(
            len(matches),
            2
        )
        self.assertEqual(
            matches[0].matched_by,
            invoice
        )
        self.assertEqual(
            matches[0].matched_to,
            receipt1
        )
        self.assertEqual(
            matches[0].value,
            -1200
        )
        self.assertEqual(
            matches[0].period,
            self.period
        )
        self.assertEqual(
            matches[1].matched_by,
            invoice
        )
        self.assertEqual(
            matches[1].matched_to,
            receipt2
        )
        self.assertEqual(
            matches[1].value,
            600
        )
        self.assertEqual(
            matches[1].period,
            self.period
        )
        cashbook = CashBook.objects.create(
            name="current", nominal=self.nominal)
        new_customer = Customer.objects.create(name="new", code="new")
        # Now try to re-post receipt2 against a different customer.
        url = reverse("sales:edit", kwargs={"pk": receipt2.pk})
        data = {}
        header_data = create_header(
            HEADER_FORM_PREFIX,
            {
                "cash_book": cashbook.pk,
                "type": receipt2.type,
                "customer": new_customer.pk,
                "period": receipt2.period.pk,
                "ref": receipt2.ref,
                "date": receipt2.date.strftime(DATE_INPUT_FORMAT),
                "total": receipt2.total * -1
            }
        )
        data.update(header_data)
        line_data = create_formset_data(LINE_FORM_PREFIX, [])
        data.update(line_data)
        # Keep the existing invoice<->receipt2 match in the formset.
        matching_trans = [invoice]
        matching_trans_as_dicts = [to_dict(m) for m in matching_trans]
        matching_trans = [get_fields(
            m, ['type', 'ref', 'total', 'paid', 'due', 'id']) for m in matching_trans_as_dicts]
        matching_forms = []
        matching_forms += add_and_replace_objects(
            matching_trans, {"id": "matched_by"}, {"value": -600})
        matches = SaleMatching.objects.all().order_by("pk")
        matching_forms[0]["id"] = matches[1].pk
        matching_forms[0]["matched_to"] = receipt2.pk
        matching_data = create_formset_data(match_form_prefix, matching_forms)
        matching_data["match-INITIAL_FORMS"] = 1
        data.update(matching_data)
        response = self.client.post(url, data)
        # Form re-rendered (200, not redirect) with the validation error.
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            "You cannot change the customer if the transaction is matched to other transactions. Please remove the matches first."
        )
    def test_cannot_change_period_so_matched_by_period_is_less_than_matched_to_41(self):
        """Moving a matched-by invoice into an earlier period than its
        matched-to receipt must be rejected, leaving everything unchanged."""
        self.client.force_login(self.user)
        # Invoice of 2400 (20 lines of 100 goods + 20 vat), fully unpaid.
        create_invoice_with_nom_entries(
            {
                "type": "si",
                "customer": self.customer,
                "period": self.period,
                "ref": self.ref,
                "date": self.model_date,
                "due_date": self.model_due_date,
                "total": 2400,
                "paid": 0,
                "due": 2400,
                "goods": 2000,
                "vat": 400
            },
            [
                {
                    'description': self.description,
                    'goods': 100,
                    'nominal': self.nominal,
                    'vat_code': self.vat_code,
                    'vat': 20
                }
            ] * 20,
            self.vat_nominal,
            self.sale_control
        )
        invoice = SaleHeader.objects.first()
        receipt1 = create_receipts(
            self.customer, "receipt", 1, self.period, 1200)[0]
        # Match at model level; invoice is the matched_by side.
        match(invoice, [(receipt1, -1200)])
        invoice.refresh_from_db()
        receipt1.refresh_from_db()
        self.assertEqual(
            invoice.due,
            1200
        )
        self.assertEqual(
            invoice.paid,
            1200
        )
        self.assertEqual(
            invoice.total,
            2400
        )
        self.assertEqual(
            receipt1.due,
            0
        )
        self.assertEqual(
            receipt1.paid,
            -1200
        )
        self.assertEqual(
            receipt1.total,
            -1200
        )
        matches = SaleMatching.objects.all().order_by("pk")
        self.assertEqual(
            len(matches),
            1
        )
        self.assertEqual(
            matches[0].matched_by,
            invoice
        )
        self.assertEqual(
            matches[0].matched_to,
            receipt1
        )
        self.assertEqual(
            matches[0].value,
            -1200
        )
        self.assertEqual(
            matches[0].period,
            self.period
        )
        # later period
        # NOTE(review): despite the comment, 2019 is an *earlier* financial
        # year than the fixture period -- which is what this test needs.
        last_fy = FinancialYear.objects.create(financial_year=2019, number_of_periods=1)
        last_period = Period.objects.create(
            fy=last_fy, period="01", fy_and_period="201901", month_start=date(2019, 1, 31))
        # Try to re-post the invoice into that earlier period.
        url = reverse("sales:edit", kwargs={"pk": invoice.pk})
        data = {}
        header_data = create_header(
            HEADER_FORM_PREFIX,
            {
                "type": invoice.type,
                "customer": invoice.customer.pk,
                "period": last_period.pk,
                "ref": invoice.ref,
                "date": invoice.date.strftime(DATE_INPUT_FORMAT),
                "total": invoice.total
            }
        )
        data.update(header_data)
        lines = SaleLine.objects.all().order_by("pk")
        lines_as_dicts = [to_dict(line) for line in lines]
        line_trans = [get_fields(line, ['id', 'description', 'goods',
                                        'nominal', 'vat_code', 'vat']) for line in lines_as_dicts]
        line_forms = line_trans
        line_data = create_formset_data(LINE_FORM_PREFIX, line_forms)
        line_data["line-INITIAL_FORMS"] = 20
        data.update(line_data)
        # Keep the existing match in the posted matching formset.
        matching_trans = [receipt1]
        matching_trans_as_dicts = [to_dict(m) for m in matching_trans]
        matching_trans = [get_fields(
            m, ['type', 'ref', 'total', 'paid', 'due', 'id']) for m in matching_trans_as_dicts]
        matching_forms = []
        matching_forms += add_and_replace_objects(
            matching_trans, {"id": "matched_to"}, {"value": 1200})
        matches = SaleMatching.objects.all().order_by("pk")
        matching_forms[0]["id"] = matches[0].pk
        matching_data = create_formset_data(match_form_prefix, matching_forms)
        matching_data["match-INITIAL_FORMS"] = 1
        data.update(matching_data)
        response = self.client.post(url, data)
        # Form re-rendered (200, not redirect) with the validation error.
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            "Cannot do this. In the matched relationship already established the transaction you are editing is B. The transaction you have "
            "matched B to is this matched transaction A. The period of A cannot be after the period of B."
        )
        # Nothing should have changed on either side of the match.
        invoice.refresh_from_db()
        receipt1.refresh_from_db()
        self.assertEqual(
            invoice.due,
            1200
        )
        self.assertEqual(
            invoice.paid,
            1200
        )
        self.assertEqual(
            invoice.total,
            2400
        )
        self.assertEqual(
            receipt1.due,
            0
        )
        self.assertEqual(
            receipt1.paid,
            -1200
        )
        self.assertEqual(
            receipt1.total,
            -1200
        )
        matches = SaleMatching.objects.all().order_by("pk")
        self.assertEqual(
            len(matches),
            1
        )
        self.assertEqual(
            matches[0].matched_by,
            invoice
        )
        self.assertEqual(
            matches[0].matched_to,
            receipt1
        )
        self.assertEqual(
            matches[0].value,
            -1200
        )
        self.assertEqual(
            matches[0].period,
            self.period
        )
def test_cannot_change_period_so_matched_to_period_is_greater_than_matched_by_42(self):
self.client.force_login(self.user)
create_invoice_with_nom_entries(
{
"type": "si",
"customer": self.customer,
"period": self.period,
"ref": self.ref,
"date": self.model_date,
"due_date": self.model_due_date,
"total": 2400,
"paid": 0,
"due": 2400,
"goods": 2000,
"vat": 400,
},
[
{
'description': self.description,
'goods': 100,
'nominal': self.nominal,
'vat_code': self.vat_code,
'vat': 20
}
] * 20,
self.vat_nominal,
self.sale_control
)
invoice = SaleHeader.objects.first()
receipt1 = create_receipts(
self.customer, "receipt", 1, self.period, 1200)[0]
receipt2 = create_receipts(
self.customer, "receipt", 1, self.period, -5000)[0]
match(invoice, [(receipt1, -1200), (receipt2, 600)])
invoice.refresh_from_db()
receipt1.refresh_from_db()
receipt2.refresh_from_db()
self.assertEqual(
invoice.due,
1800
)
self.assertEqual(
invoice.paid,
600
)
self.assertEqual(
invoice.total,
2400
)
# here we match with models only so matching is possible
self.assertEqual(
receipt1.due,
0
)
self.assertEqual(
receipt1.paid,
-1200
)
self.assertEqual(
receipt1.total,
-1200
)
self.assertEqual(
receipt2.due,
4400
)
self.assertEqual(
receipt2.paid,
600
)
self.assertEqual(
receipt2.total,
5000
)
matches = SaleMatching.objects.all().order_by("pk")
self.assertEqual(
len(matches),
2
)
self.assertEqual(
matches[0].matched_by,
invoice
)
self.assertEqual(
matches[0].matched_to,
receipt1
)
self.assertEqual(
matches[0].value,
-1200
)
self.assertEqual(
matches[0].period,
self.period
)
self.assertEqual(
matches[1].matched_by,
invoice
)
self.assertEqual(
matches[1].matched_to,
receipt2
)
self.assertEqual(
matches[1].value,
600
)
self.assertEqual(
matches[1].period,
self.period
)
cashbook = CashBook.objects.create(
name="current", nominal=self.nominal)
# later period
next_period = Period.objects.create(
fy=self.fy, period="02", fy_and_period="202002", month_start=date(2020, 2, 29))
url = reverse("sales:edit", kwargs={"pk": receipt2.pk})
data = {}
header_data = create_header(
HEADER_FORM_PREFIX,
{
"cash_book": cashbook.pk,
"type": receipt2.type,
"customer": self.customer.pk,
"period": next_period.pk,
"ref": receipt2.ref,
"date": receipt2.date.strftime(DATE_INPUT_FORMAT),
"total": receipt2.total * -1
}
)
data.update(header_data)
line_data = create_formset_data(LINE_FORM_PREFIX, [])
data.update(line_data)
matching_trans = [invoice]
matching_trans_as_dicts = [to_dict(m) for m in matching_trans]
matching_trans = [get_fields(
m, ['type', 'ref', 'total', 'paid', 'due', 'id']) for m in matching_trans_as_dicts]
matching_forms = []
matching_forms += add_and_replace_objects(
matching_trans, {"id": "matched_by"}, {"value": -600})
matches = SaleMatching.objects.all().order_by("pk")
matching_forms[0]["id"] = matches[1].pk
matching_forms[0]["matched_to"] = receipt2.pk
matching_data = create_formset_data(match_form_prefix, matching_forms)
matching_data["match-INITIAL_FORMS"] = 1
data.update(matching_data)
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
"Cannot do this. In the matched relationship already established the transaction you are editing is A. The transaction you "
"have matched A to is this matched transaction B. The period of A cannot be after the | |
SconsBuilderConfig.UNITTEST_TEST_PREFIX,
SCB_UNITTEST_LIB_PREFIX = SconsBuilderConfig.UNITTEST_LIB_PREFIX,
SCB_UNITTEST_LIB_DIR = SconsBuilderConfig.UNITTEST_LIB_DIR,
SCB_UNITTEST_LIB = SconsBuilderConfig.UNITTEST_LIB,
SCB_UNITTEST_INCLUDE_DIR = SconsBuilderConfig.UNITTEST_INCLUDE_DIR,
SCB_UNITTEST_TESTRUNNER = SconsBuilderConfig.UNITTEST_TESTRUNNER,
SCB_UNITTEST_MODIFY = [],
SCB_UNITTESTRUNNER_MODIFY = [],
SCB_PLATFORM = baseenv['SCB_PLATFORM'],
SCB_PRINT_BUILD = printBuild,
SCB_PREPARE_CALLBACK = None,
PRINT_CMD_LINE_FUNC = printCmdLine,
platform = baseenv['SCB_PLATFORM']
)
# register our special builders
env['BUILDERS']['SconsBuilderHardLink'] = SCons.Script.Builder(
action=SCons.Script.Action(
HardLinkAction,
'Hard Link ${SOURCE} -> ${TARGETS}'
)
)
env['BUILDERS']['SconsBuilderSymLink'] = SCons.Script.Builder(
action=SCons.Script.Action(
SymLinkAction,
'Symbolic Link ${SOURCE} -> ${TARGETS}'
)
)
allEnvironments[None] = env
# allow to (re)build files by specifying the source(s)
if SCons.Script.GetOption('forcemodified'):
SCons.Script.Decider(forcedDecider)
return env
def prepareEnvironment(path = SCB_SCRIPT_DIR):
    """Recursively build the construction environment for *path*.

    Clones the parent directory's environment (or the base environment at the
    root), computes the variant/bin/lib directories for the active build
    target and configuration, applies the directory's SconsBuilder config
    file via updateEnvironment(), then recurses into the non-skipped
    subdirectories. Returns the environment created for *path*.

    NOTE: Python 2 code (uses Environment.has_key).
    """
    relpath = relativePath(path)
    path = os.path.abspath(path)
    parentenv = getEnvironment(os.path.dirname(path))
    log(4, ['']) # a newline as logging get's more detailed
    log(3, ['Preparing environment for %s' % (relpath)])
    if parentenv:
        allEnvironments[path] = parentenv.Clone()
    else:
        # clone the base environment
        allEnvironments[path] = buildBaseEnvironment().Clone()
        parentenv = allEnvironments[path]
    env = allEnvironments[path]
    rootenv = rootEnvironment()
    env['SCB_ABSOLUTE_PATH'] = path
    env['SCB_RELATIVE_PATH'] = relativePath(path, prefix=False)
    # always start with an empty list (can not be inherited)
    env['SCB_CXX_SKIP'] = SCons.Util.CLVar()
    # fullConfiguration = "<target>/<configuration>" (either part optional)
    fullConfiguration = ''
    if parentenv['SCB_BUILD_TARGET'] and parentenv['SCB_BUILD_TARGET'] != '':
        fullConfiguration = os.path.join(fullConfiguration, parentenv['SCB_BUILD_TARGET'])
    if parentenv['SCB_BUILD_CONFIGURATION'] and parentenv['SCB_BUILD_CONFIGURATION'] != '':
        fullConfiguration = os.path.join(fullConfiguration, parentenv['SCB_BUILD_CONFIGURATION'])
    # calculate the effective variant dir
    variantdir = parentenv['SCB_BASE_VARIANT_DIR']
    if fullConfiguration != '':
        variantdir = os.path.join(variantdir, fullConfiguration)
    env['SCB_VARIANT_DIR'] = os.path.join(variantdir, env['SCB_RELATIVE_PATH'])
    # bin/lib dirs default to the configured locations, per configuration
    if env['SCB_BIN_DIR'] is None:
        if fullConfiguration != '':
            env['SCB_BIN_DIR'] = os.path.join(
                SconsBuilderConfig.BIN_DIR,
                fullConfiguration
            )
        else:
            env['SCB_BIN_DIR'] = SconsBuilderConfig.BIN_DIR
    if env['SCB_LIB_DIR'] is None:
        if fullConfiguration != '':
            env['SCB_LIB_DIR'] = os.path.join(
                SconsBuilderConfig.LIB_DIR,
                fullConfiguration
            )
        else:
            env['SCB_LIB_DIR'] = SconsBuilderConfig.LIB_DIR
    # make sure the lib dir is on LIBPATH (creating LIBPATH if needed)
    if env.has_key('LIBPATH'):
        env.AppendUnique(LIBPATH=env['SCB_LIB_DIR'])
    else:
        env.Replace(LIBPATH=SCons.Util.CLVar())
        env.AppendUnique(LIBPATH=env['SCB_LIB_DIR'])
    log(12, [
        'env[\'LIBPATH\']=' + tryGetEnvironment(env, 'LIBPATH'),
        'env[\'SCB_VARIANT_DIR\']=' + tryGetEnvironment(env, 'SCB_VARIANT_DIR'),
        'env[\'SCB_RELATIVE_PATH\']=' + tryGetEnvironment(env, 'SCB_RELATIVE_PATH')
    ])
    dirname = os.path.basename(path)
    libname = dirname
    # handle unit tests
    if tryGetEnvironment(env, 'SCB_UNITTEST_ENABLED') and \
       os.path.basename(path) == env['SCB_UNITTEST_DIR_NAME']:
        # unit tests can have no executable name
        env['SCB_EXECUTABLE_NAME'] = None
        # the test library is named after the parent directory's library
        # (note: parentenv is deliberately rebound here)
        parentdirname = os.path.abspath(os.path.join(path, '..'))
        parentenv = allEnvironments[parentdirname]
        parentlib = tryGetEnvironment(parentenv, 'SCB_SHARED_LIB_NAME') or \
                    tryGetEnvironment(parentenv, 'SCB_STATIC_LIB_NAME') or \
                    os.path.basename(parentdirname)
        log(12, ['parentlib=%s' % (parentlib)])
        libname = env['SCB_UNITTEST_LIB_PREFIX'] + parentlib
        env['SCB_UNITTEST_PARENTLIB_NAME'] = parentlib
        env['SCB_UNITTEST_TEST_NAME'] = env['SCB_UNITTEST_TEST_PREFIX'] + parentlib
        log(12, [
            'SCB_UNITTEST_PARENTLIB_NAME=%s' % (env['SCB_UNITTEST_PARENTLIB_NAME']),
            'SCB_UNITTEST_TEST_NAME=%s' % (env['SCB_UNITTEST_TEST_NAME'])
        ])
    # reset some non inheritable settings
    env['SCB_EXECUTABLE_NAME'] = None
    env['SCB_STATIC_LIB_NAME'] = None
    env['SCB_SHARED_LIB_NAME'] = None
    # default is to build (shared) libraries
    env['SCB_SHARED_LIB_NAME'] = libname
    log(13, ['SCB_SHARED_LIB_NAME before: %s' % (env['SCB_SHARED_LIB_NAME'])])
    # apply the per-directory SconsBuilder config file (may override names)
    updateEnvironment(env, path)
    log(13, ['SCB_SHARED_LIB_NAME after=%s' % (env['SCB_SHARED_LIB_NAME'])])
    if tryGetEnvironment(env, 'SCB_UNITTEST_ENABLED') and \
       os.path.basename(path) == env['SCB_UNITTEST_DIR_NAME']:
        log(12, [
            'SCB_UNITTEST_PARENTLIB_NAME after=%s' % (env['SCB_UNITTEST_PARENTLIB_NAME']),
            'SCB_UNITTEST_TEST_NAME after=%s' % (env['SCB_UNITTEST_TEST_NAME'])
        ])
    log(12, [
        'relpath = %s' % (relpath),
        'SCB_EXECUTABLE_NAME: %s' % (tryGetEnvironment(env, 'SCB_EXECUTABLE_NAME')),
        'SCB_STATIC_LIB_NAME: %s' % (tryGetEnvironment(env, 'SCB_STATIC_LIB_NAME')),
        'SCB_SHARED_LIB_NAME: %s' % (tryGetEnvironment(env, 'SCB_SHARED_LIB_NAME')),
    ])
    printEnv(verbosity(), env)
    # register the variant dir and recurse into subdirectories
    env.VariantDir(env['SCB_VARIANT_DIR'], path, duplicate=0)
    dirsToScan = listDirectories(path, env['SCB_SKIP_DIRS'])
    for entry in dirsToScan:
        dirToScan = os.path.join(path, entry)
        prepareEnvironment(dirToScan)
    return env
def updateEnvironment(env, path, restrictList = []):
relpath = relativePath(path)
log(4, ['Updating environment for %s ' % (relpath)])
filename = os.path.join(path, SconsBuilderConfig.SCONSBUILDER_CONFIG_FILE_NAME)
try:
configfile = file(filename)
except:
return
dict = env.Dictionary()
config = {}
afterActionItems = {}
beforeActionItems = {}
fileActionItems = {}
unittestActionItems = []
unittestrunnerActionItems = []
actionItems = []
sconsfiles = []
lines = []
lineno = 0
for line in configfile:
lineno += 1
errorprefix = 'error in ' + filename + ' line ' + str(lineno)
line = string.strip(line)
if line == '':
continue
if line[0] == '#':
continue
items = line.split()
match = True
again = True
afterItems = None
beforeItems = None
fileItems = None
unittestItems = None
unittestrunnerItems = None
while again:
again = False;
if items[0] == 'platform':
if items[1] == str(env['SCB_PLATFORM']):
log(9, ['Detected %s match %s for line %s' % ('platform', str(env['SCB_PLATFORM']), line)])
again = True
items = items[2:]
else:
log(9, ['Detected %s mismatch %s for line %s' % ('platform', str(env['SCB_PLATFORM']), line)])
match = False
elif items[0] == 'configuration':
if items[1] == env['SCB_BUILD_CONFIGURATION']:
log(9, ['Detected %s match %s for line %s' % ('configuration', str(env['SCB_BUILD_CONFIGURATION']), line)])
again = True
items = items[2:]
else:
log(9, ['Detected %s mismatch %s for line %s' % ('configuration', str(env['SCB_BUILD_CONFIGURATION']), line)])
match = False
elif items[0] == 'target':
if items[1] == env['SCB_BUILD_TARGET']:
log(9, ['Detected %s match %s for line %s' % ('target', str(env['SCB_BUILD_TARGET']), line)])
again = True
items = items[2:]
else:
log(9, ['Detected %s mismatch %s for line %s' % ('target', str(env['SCB_BUILD_TARGET']), line)])
match = False
elif items[0] == 'after':
filename = items[1]
if os.path.exists(filename):
try:
afterItems = afterActionItems[filename]
except:
afterActionItems[filename] = []
afterItems = afterActionItems[filename]
log(9, ['Detected %s for file %s for line %s' % ('after', filename, line)])
again = True
items = items[2:]
else:
log(1, ['Detected %s ignored for nonexistent file %s for line %s' % ('after', filename, line)])
match = False
elif items[0] == 'before':
filename = items[1]
if os.path.exists(filename):
try:
beforeItems = beforeActionItems[filename]
except:
beforeActionItems[filename] = []
beforeItems = beforeActionItems[filename]
log(9, ['Detected %s for file %s for line %s' % ('before', filename, line)])
again = True
items = items[2:]
else:
log(1, ['Detected %s ignored for nonexistent file %s for line %s' % ('before', filename, line)])
match = False
elif items[0] == 'file':
filename = items[1]
filepath = os.path.join(path, filename)
if os.path.exists(filepath):
try:
fileItems = fileActionItems[filename]
except:
fileActionItems[filename] = []
fileItems = fileActionItems[filename]
log(9, ['Detected %s for file %s for line %s' % ('file', filename, line)])
again = True
items = items[2:]
else:
log(1, ['Detected %s ignored for nonexistent file %s for line %s' % ('file', filename, line)])
match = False
elif items[0] == 'unittest':
unittestItems = unittestActionItems
log(9, ['Detected %s for line %s' % ('unittest', line)])
again = True
items = items[1:]
elif items[0] == 'unittestrunner':
unittestrunnerItems = unittestrunnerActionItems
log(9, ['Detected %s for line %s' % ('unittestrunner', line)])
again = True
items = items[1:]
elif items[0] == 'sconsfile':
filename = items[1]
filepath = os.path.join(path, filename)
if os.path.exists(filepath):
log(9, ['Detected %s for file %s for line %s' % ('sconsfile', filename, line)])
sconsfiles.append(filename)
match = False
else:
log(1, ['Detected %s ignored for nonexistent file %s for line %s' % ('sconsfile', filename, line)])
match = False
if not match:
log(6, ['Ignoring unmatched line %s' % (line)])
else:
name = items[0]
action = items[1]
value = line[line.find(action) + len(action):].strip()
actionItem = (name, action, value)
log(13, ['actionItem=%s' % (repr(actionItem))])
if afterItems is not None:
afterItems.append(actionItem)
elif beforeItems is not None:
beforeItems.append(actionItem)
elif fileItems is not None:
fileItems.append(actionItem)
elif unittestItems is not None:
unittestItems.append(actionItem)
elif unittestrunnerItems is not None:
unittestrunnerItems.append(actionItem)
else:
actionItems.append(actionItem)
log(10, [
'afterItems=%s' % (afterItems),
'beforeItems=%s' % (beforeItems),
'fileItems=%s' % (fileItems),
'unittestItems=%s' % (unittestItems),
'unittestrunnerItems=%s' % (unittestrunnerItems)
])
log(10, [
'actionItems=%s' % (actionItems),
'afterActionItems=%s' % (afterActionItems),
'beforeActionItems=%s' % (beforeActionItems),
'fileActionItems=%s' % (fileActionItems),
'unittestActionItems=%s' % (unittestActionItems),
'unittestrunnerActionItems=%s' % (unittestrunnerActionItems)
])
for item in unittestActionItems:
env.AppendUnique(SCB_UNITTEST_MODIFY=item)
for item in unittestrunnerActionItems:
env.AppendUnique(SCB_UNITTESTRUNNER_MODIFY=item)
env.Replace(SCB_FILE_MODIFY=fileActionItems)
log(10, [
'env[\'SCB_UNITTEST_MODIFY\']=%s' % (repr(tryGetEnvironment(env, 'SCB_UNITTEST_MODIFY'))),
'env[\'SCB_UNITTESTRUNNER_MODIFY\']=%s' % (repr(tryGetEnvironment(env, 'SCB_UNITTESTRUNNER_MODIFY'))),
'env[\'SCB_FILE_MODIFY\']=%s' % (repr(tryGetEnvironment(env, 'SCB_FILE_MODIFY')))
])
if len(restrictList) > 0:
restrictedItems = []
for key, action, value in actionItems:
if key in restrictList:
restrictedItems.append((key, action, value))
try:
modifyEnvironment(env, restrictedItems)
except Exception, ex:
msg = 'ModifyEnvironment (restricted) for %s failed: %s' % (relpath, str(ex))
raise Exception(msg)
else:
try:
if actionItems:
log(4, ['Modifying environment for %s' % (relpath)])
modifyEnvironment(env, actionItems)
except Exception, ex:
msg = 'ModifyEnvironment for %s failed: %s' % (relpath, str(ex))
raise Exception(msg)
# if the environment belongs to a unit test add the unit test actions
unittestActionItems = tryGetEnvironment(env, 'SCB_UNITTEST_MODIFY')
unittestDirName = tryGetEnvironment(env, 'SCB_UNITTEST_DIR_NAME')
if unittestActionItems and \
unittestDirName and \
os.path.basename(path) == unittestDirName:
log(4, ['Modifying environment for %s (unit test modifications)' % (relpath)])
modifyEnvironment(env, unittestActionItems)
# execute the user SConscript files
for filename in sconsfiles:
path = os.path.join(env['SCB_RELATIVE_PATH'], filename)
actionItems = None
try:
actionItems = beforeActionItems[filename]
except:
pass
try:
if actionItems:
log(4, ['Modifying environment for %s (before %s)' % (relpath, filename)])
modifyEnvironment(env, actionItems)
except Exception, ex:
msg = 'ModifyEnvironment | |
raise KeyError("%s is not a tracked array"%key)
def __init__(
self,
UIname,
coordinates,
tracked_arrays = [],
tracked_names = [],
tracked_filter_flags = [],
decimation_factor = 1,
filenames_and_nparts = None,
**option_kwargs):
"""
`UIname` - Name of the particle group that shows up in the UI, 4-5 characters is best
`coordinates` - The coordinates of the points in 3d space, should have a shape of `(nparts,3)`.
`tracked_arrays=[]` - The arrays to associate with each coordinate in space, each array
should be one-dimensional and have `nparts` entries.
`tracked_names=[]` - Should be the same length as `tracked_arrays`, and gives a
name to each of the arrays when they show up in the UI dropdowns.
`tracked_filter_flags=[]` - Should be the same length as `tracked_arrays`,
and gives a flag for whether that array should be available as an interactive filter within Firefly.
`decimation_factor=1` - An integer factor to sub-sample the provided dataset at
(in addition to any manual subsampling you might do). This will choose
`nparts/decimation_factor` many points at random from the dataset to display in Firefly.
`filenames_and_nparts=None` - Allows you to manually control how the particles
are distributed among the JSON files, **highly recommended that
you leave this to** `None`, but if for whatever reason you need fine-tuning
you should pass a list of tuples in the form
`[("json_name0.json",nparts_this_file0),("json_name1.json",nparts_this_file1) ... ]`
where where the sum of `nparts_this_file%d` is exactly `nparts`. These files
will automatically be added to `filenames.json` if you use `reader.dumpToJSON`.
`**option_kwargs` - allows you to set default options like the color, particle sizes,
etc... for this particle group at the creation of the instance. You can see available
options by looking at `list(particleGroup.options_default.keys())`.
"""
## assert statements and user-friendly error messages
try:
assert len(tracked_names) == len(tracked_arrays)
except:
raise ValueError("Make sure each tracked_array has a tracked_name")
try:
assert len(tracked_names) == len(tracked_filter_flags)
except:
print(tracked_names,tracked_filter_flags)
warnings.warn("Make sure each tracked_array has a tracked_filter_flag, assuming True.")
new_tracked_filter_flags = np.append(
tracked_filter_flags,
[True]*(len(tracked_names)-len(tracked_filter_flags)),axis=0
)
print(tracked_filter_flags,"becomes ->",new_tracked_filter_flags)
tracked_filter_flags = new_tracked_filter_flags
if filenames_and_nparts is not None:
try:
assert type(filenames_and_nparts[0]) == tuple
assert type(filenames_and_nparts[0][0]) == str
assert type(filenames_and_nparts[0][1]) == int
except AssertionError:
ValueError("filenames_and_nparts should be a list of tuples of strings and ints")
self.decimation_factor = decimation_factor
## what do we want this to be called in the UI?
self.UIname = UIname
## the most important thing, where do you want these particles
## to live?
self.coordinates = coordinates
## initialize this badboy
self.nparts = len(coordinates)
## these are the values we're associating with each particle
## make sure each one has a name
for name,array in zip(tracked_names,tracked_arrays):
try:
assert len(array) == self.nparts
except:
raise ValueError("You passed me %s that is not the right shape!"%name)
self.tracked_names = tracked_names
self.tracked_arrays = tracked_arrays
self.tracked_filter_flags = tracked_filter_flags
self.filenames_and_nparts = filenames_and_nparts
## TODO how do these interface with javascript code?
self.radiusFunction = None
self.weightFunction = None
## setup the options for this particleGroup
self.options_default = {
'UIparticle':True,
'UIdropdown':True,
'UIcolorPicker':True,
'color': np.append(np.random.random(3),[1]),
'sizeMult':1.,
'showParts':True,
'filterVals':dict(),
'filterLims':dict(),
'showVel':False,
'plotNmax':None,
'velType':None
}
## setup default values for the initial filter limits (vals/lims represent the interactive
## "displayed" particles and the available boundaries for the limits)
for tracked_name,tracked_filter_flag in zip(self.tracked_names,self.tracked_filter_flags):
if tracked_filter_flag:
self.options_default['filterVals'][tracked_name] = None
self.options_default['filterLims'][tracked_name] = None
## now let the user overwrite the defaults if they'd like (e.g. the color, likely
## the most popular thing users will like to do
for option_kwarg in option_kwargs:
if option_kwarg in self.options_default.keys():
if option_kwarg == 'color':
try:
assert len(option_kwargs[option_kwarg]) == 4
except AssertionError:
raise ValueError("Make sure you pass the color as an RGBA array")
self.options_default[option_kwarg] = option_kwargs[option_kwarg]
else:
raise KeyError("Invalid option kwarg")
## functions that should happen after initialization:
## get that decimation index array so when we write out we
## have it ready (but still be able to add stuff whenever)
self.getDecimationIndexArray()
def trackArray(self,name,arr,filter_flag=1):
"""
Adds a new "tracked" array to the particle group
Input:
name - name of the tracked array in the UI
arr - the array itself
filter_flag=1 - whether this array should be filterable in the app
"""
## check that it's the correct length
assert self.nparts == len(arr)
## go ahead and put it in the tracked arrays
self.tracked_names += [name]
self.tracked_arrays += [arr]
self.tracked_filter_flags += [filter_flag]
## and add this to the filter limits arrays, see __init__ above
if filter_flag:
self.options_default['filterVals'][name] = None
self.options_default['filterLims'][name] = None
if self.linked_options is not None:
self.linked_options['filterVals'][self.UIname][name] = None
self.linked_options['filterLims'][self.UIname][name] = None
def getDecimationIndexArray(self):
"""
Creates a numpy index array to handle decimation (sub-sampling) of your
data. Chooses nparts/decimation_factor many particles randomly without
replacement.
"""
if self.decimation_factor > 1:
## use an array of indices
self.dec_inds = np.random.choice(
np.arange(self.nparts),int(self.nparts/self.decimation_factor),
replace=False)
else:
## use a boolean mask instead
self.dec_inds = np.ones(self.nparts,dtype=bool)
def outputToJSON(
self,
path, ## sub-directory name
path_prefix, ## absolute path to Firefly/data
prefix, ## prefix of JSON filename
loud=1,
nparts_per_file = 10**4,
clean=0):
"""
Outputs this ParticleGroup instance's data to JSON format, best used when coupled with a Reader
instance's dumpToJSON method.
Input:
path - the name of the sub-directory of Firefly/data you want to put these files into
path_prefix - the the path to Firefly/data
prefix - the string you want to prepend to the data JSONs
loud=1 - flag to print warnings that you should hear if you're not using a
reader that does these things for you
nparts_per_file=10**4 - maximum number of particles per JSON file
clean=0 - flag for whether the JSON directory should be purged before writing your files.
"""
if not os.path.isdir(path):
os.makedirs(path)
if loud:
warnings.warn("You will need to add the sub-filenames to"+
" filenames.json if this was not called by a Reader instance.")
print("Writing:",self,"JSON to %s"%path)
if clean:
warnings.warn("Removing data files from %s"%path)
for fname in os.listdir(path):
if "json" in fname:
os.remove(os.path.join(path,fname))
if self.filenames_and_nparts is None:
if self.dec_inds.dtype == bool:
nparts = np.sum(self.dec_inds)
else:
nparts = self.dec_inds.shape[0]
nfiles = int(nparts/nparts_per_file + ((nparts%nparts_per_file)!=0))
filenames = [os.path.join(path,"%s%s%03d.json"%(prefix,self.UIname,i_file)) for i_file in range(nfiles)]
nparts = [min(nparts_per_file,nparts-(i_file)*(nparts_per_file)) for i_file in range(nfiles)]
self.filenames_and_nparts = zip(filenames,nparts)
cur_index = 0
for i_file,(fname,nparts_this_file) in enumerate(self.filenames_and_nparts):
## which particles to save?
if self.decimation_factor > 1:
these_dec_inds = self.dec_inds[cur_index:cur_index+nparts_this_file]
else:
these_dec_inds = np.arange(cur_index,cur_index+nparts_this_file)
outDict = dict()
outDict['Coordinates'] = self.coordinates[these_dec_inds]
for tracked_name,tracked_arr in zip(self.tracked_names,self.tracked_arrays):
outDict[tracked_name]=tracked_arr[these_dec_inds]
cur_index+=nparts_this_file
if i_file == 0:
print(self.tracked_names,self.tracked_filter_flags)
outDict['filterKeys'] = np.array(self.tracked_names)[np.array(self.tracked_filter_flags,dtype=bool)]
## TODO this needs to be changed, this is a flag for having the
## opacity vary across a particle as the impact parameter projection
## of cubic spline kernel
outDict['doSPHrad'] = [0]
pd.Series(outDict).to_json(os.path.join(path_prefix,fname), orient='index')
return self.filenames_and_nparts
def outputToHDF5(self):
"""
Hook for a future implementation of Firefly that can use HDF5 formats.
"""
raise Exception("Unimplemented!")
class Reader(object):
"""
This class provides a framework to unify the Options and ParticleGroup classes
to make sure that the user can easily produce Firefly compatible files. It also
provides some rudimentary data validation. You should use this Reader as a base
for any custom readers you may build (and should use inheritance, as demonstrated
below in FIREreader!
"""
def __init__(self,
JSONdir=None, ## abs path, must be a sub-directory of Firefly/data
options=None,
write_startup = 'append',# True -> write | False -> leave alone | "append" -> adds to existing file
max_npart_per_file = 10**4,
prefix = 'Data',
clean_JSONdir = 0,
):
"""
`JSONdir=None` - This should be the name of the sub-directory that will
contain your JSON files, if you are not running python from
`/path/to/Firefly/data` it should be the absolute path.
`options=None` - An `Options` instance, if you have created one you can
pass it here. `None` will generate default options. `reader.options.listKeys()`
will give you a list of the different available options you can set
using `reader.options["option_name"] = option_value`.
`write_startup='append'` - This is a flag for whether `startup.json` file
should be written. It has 3 values: `True` -> writes a new `startup.json`
that will contain only this visualization, | |
cluster members, and resolve any connectivity issues before upgrade process '
return ping_output, ping_output_failed,check_result,check_analysis,check_action
except Exception as e:
log_file_logger.exception(e)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Warning Checks
#01:Check:vManage:CPU Speed
def warningCheckone(cpu_speed):
    """
    #01:Check:vManage:CPU Speed

    Flags CPU clock speeds below the 2.8 GHz hardware-guide recommendation.

    Returns (check_result, check_analysis, check_action).
    """
    below_recommendation = cpu_speed < 2.8
    if below_recommendation:
        return (
            'Failed',
            'CPU clock speed is {}, it is below recommended range as per the hardware guide. CPU clock speed should be greater than 2.8.'.format(cpu_speed),
            'Upgrade the hardware type',
        )
    return (
        'SUCCESSFUL',
        'CPU Clock speed is {}, matches hardware recommendations'.format(cpu_speed),
        None,
    )
#02:Check:vManage:Network Card type
def warningChecktwo():
    """
    #02:Check:vManage:Network Card type

    Inspects every ethN interface driver via ethtool; Intel e1000 virtual
    NICs are known to cause stability problems on vManage VMs.

    Returns (eth_drivers, check_action, check_analysis, check_result) where
    eth_drivers maps interface name -> driver for any e1000 NICs found.
    """
    interface_dump = executeCommand("ifconfig | grep '^eth[0-9]'")
    interface_names = [token for token in interface_dump.split() if 'eth' in token]

    eth_drivers = {}
    for interface in interface_names:
        driver_line = executeCommand('ethtool -i {} | grep driver'.format(interface))
        driver_name = driver_line.split()[1]
        if 'e1000' in driver_name:
            eth_drivers[interface] = driver_name

    if len(eth_drivers) == 0:
        check_result = 'SUCCESSFUL'
        check_action = None
        check_analysis = 'VM is not using Intel e1000 card type'
    else:
        check_action = 'Intel e1000 controller types can lead to crashes and other stability issues. Customer should change NIC hardware type used for the VM as soon as possible'
        check_analysis = 'VM is using Intel e1000 card type'
        check_result = 'Failed'
    return eth_drivers, check_action, check_analysis, check_result
#03:Check:vManage:Backup status
def warningCheckthree():
    """
    #03:Check:vManage:Backup status

    Verify that a Neo4j database backup completed within the last 48 hours by
    scanning the tail of /var/log/nms/neo4j-backup.log.

    Returns (date_time_obj, check_result, check_analysis, check_action) where
    date_time_obj is the datetime of the last completed backup, or the string
    'unknown' when it cannot be determined.
    """
    if os.path.isfile('/var/log/nms/neo4j-backup.log') == False:
        date_time_obj = 'unknown'
        check_result = 'Failed'
        check_analysis = '/var/log/nms/neo4j-backup.log file not found'
        check_action = 'Investigate why the /var/log/nms/neo4j-backup.log is missing'
    elif os.path.isfile('/var/log/nms/neo4j-backup.log') == True:
        last_48hr_date = datetime.now() - timedelta(hours = 48)
        # only the last 50 log lines are inspected, so completions older than
        # that window are not seen
        backup_log_data = executeCommand('tail -n 50 /var/log/nms/neo4j-backup.log')
        # capturing split keeps each timestamp as its own list element,
        # immediately preceding the message text that followed it
        backup_log_data_list = re.split('(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})', str(backup_log_data))
        if 'Backup complete' not in str(backup_log_data):
            check_result = 'Failed'
            check_analysis = 'Unable to identify when the last backup was performed.'
            check_action = 'Please validate if there has been any recent backup available, before performing the upgrade'
            date_time_obj = 'unknown'
        elif 'Backup complete' in str(backup_log_data):
            # the loop overwrites date_time_obj on every match, so the MOST
            # RECENT 'Backup complete' timestamp wins
            for line in backup_log_data_list:
                if 'Backup complete' in line:
                    # the element before this message is its timestamp
                    last_backup_date = (backup_log_data_list[backup_log_data_list.index(line)-1])
                    date_time_obj = datetime.strptime(last_backup_date, '%Y-%m-%d %H:%M:%S')
            if date_time_obj < last_48hr_date:
                check_result = 'Failed'
                check_analysis = 'The last backup is older than 48h, it is advisable to have a recent upgrade before attempting an upgrade.'
                check_action = 'Perform a Backup before upgrading'
            elif date_time_obj >= last_48hr_date:
                check_result = 'SUCCESSFUL'
                check_analysis = 'Last Backup preformed recently, it meets best practices recommendations'
                check_action = None
    return date_time_obj, check_result, check_analysis, check_action
#04:Check:vManage:Evaluate Neo4j performance
def warningCheckfour():
    """
    #04:Check:vManage:Evaluate Neo4j performance

    Scans /var/log/nms/query.log for queries slower than 5 seconds within the
    last 24 hours; five or more such queries indicates a database performance
    problem.

    Returns (check_result, check_analysis, check_action).
    """
    if not os.path.isfile('/var/log/nms/query.log'):
        check_result = 'Failed'
        check_analysis = '/var/log/nms/query.log file not found'
        check_action = 'Investigate why the /var/log/nms/query.log is missing'
    else:
        with open('/var/log/nms/query.log') as query_log_file:
            query_text = query_log_file.readlines()

        ## pair each line's timestamp(s) with its reported duration(s)
        matches = []
        for line in query_text:
            timestamps = re.findall(r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})', line)
            durations = re.findall(r"\d+ ms", line)
            if timestamps != [] and durations != []:
                matches.append((timestamps, durations))

        last_24hr_date_time = datetime.now() - timedelta(hours = 24)
        slow_queries = []
        for match in matches:
            date = match[0][0]
            time = int((match[1][0].split())[0])
            date_time_obj = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
            ## "slow" means more than 5000 ms, within the last 24 hours
            if date_time_obj > last_24hr_date_time and time > 5000:
                slow_queries.append(match)

        ## NOTE: removed unused locals (number, num) and merged the two
        ##  byte-identical SUCCESS branches from the original implementation
        if len(slow_queries) >= 5:
            check_result = 'Failed'
            check_analysis = 'More than 5 slow queries found in /var/log/nms/query.log during the last 24 hours. Slow queries are queries that take more than 5 sec.'
            check_action = 'Open TAC case to investigate possible root causes. Most common cause is use of IDE as disk controller, they may point towards perfomance issues'
        else:
            check_result = 'SUCCESSFUL'
            check_analysis = 'No database performance issues found.'
            check_action = None
    return check_result, check_analysis, check_action
#05:Check:vManage:Confirm there are no pending tasks
def warningCheckfive(tasks):
    """
    #05:Check:vManage:Confirm there are no pending tasks

    `tasks` is the parsed task-status payload; an empty 'runningTasks' list
    means nothing is stuck or pending.

    Returns (tasks_running, check_result, check_analysis, check_action) where
    tasks_running maps each running task name to its start time.
    """
    tasks_running = {}
    if tasks['runningTasks'] == []:
        return (tasks_running, 'SUCCESSFUL',
                'There are no stuck or pending tasks on the server', None)

    ## record name/startTime of the first entry under every key
    for task_key in tasks:
        task_entry = tasks[task_key][0]
        tasks_running[task_entry['name']] = task_entry['startTime']
    return (tasks_running, 'Failed', 'Stuck/Pending Tasks found',
            'Clear pending tasks, wait for them to complete, or open TAC case to get task removed')
#06:Check:vManage:Validate there are no empty password users
def warningChecksix(version_tuple):
    """
    #06:Check:vManage:Validate there are no empty password users

    Only relevant on the 20.3.x release train; on other versions the check
    is skipped and reported as successful.

    Returns (users_emptypass, check_result, check_analysis, check_action).
    """
    if version_tuple[0:2] != ('20','3'):
        ## empty-password accounts are only a concern on 20.3.x
        return [], 'SUCCESSFUL', '#06:Check is not required on the current version', None

    json_userinfo = json.loads(showCommand('show aaa users | display json'))
    ## users lacking an 'auth-type' entry have no password configured
    users_emptypass = [
        user['name']
        for user in json_userinfo['data']['viptela-oper-system:aaa']['users']
        if 'auth-type' not in user.keys()
    ]
    if len(users_emptypass) == 0:
        return users_emptypass, 'SUCCESSFUL', 'All users have authentication configured', None
    return (users_emptypass, 'Failed', 'Users with missing passwords found',
            'Add password to the users with missing password, or remove them')
#07:Check:Controllers:Controller versions
def warningCheckseven(controllers_info):
    """
    #07:Check:Controllers:Controller versions

    All overlay controllers should run the same major.minor software version.

    Returns (check_result, check_analysis, check_action).
    """
    ## reduce every controller version string to its major.minor prefix
    major_minor_versions = set()
    for info in controllers_info.values():
        major_minor_versions.add('.'.join(info[2].split('.')[0:2]))

    if len(major_minor_versions) == 1:
        return 'SUCCESSFUL', 'Versions of all the controllers are same', None
    return ('Failed', 'Versions of all the controllers do not match',
            'All overlay components should belong to the same major.minor version family')
#08:Check:Controllers:Confirm Certificate Expiration Dates
def warningCheckeight(controllers_info):
    """
    #08:Check:Controllers:Confirm Certificate Expiration Dates

    Splits controllers into those whose certificate expires within 30 days
    (2592000 seconds) and those with more time remaining.

    Returns (controllers_exp, controllers_notexp, check_result,
    check_analysis, check_action).
    """
    thirty_days = timedelta(seconds=2592000)
    controllers_exp = {}
    controllers_notexp = {}
    for controller, info in controllers_info.items():
        ## info[5] holds the certificate validity remaining, in seconds
        time_remaining = timedelta(seconds=info[5])
        if time_remaining <= thirty_days:
            controllers_exp[controller] = str(time_remaining)
        else:
            controllers_notexp[controller] = str(time_remaining)

    if len(controllers_exp) == 0:
        check_result = 'SUCCESSFUL'
        check_analysis = 'Certificates are ok'
        check_action = None
    else:
        check_result = 'Failed'
        check_analysis = 'Controllers with certificates close to expiration present'
        check_action = 'Renew respective certificates'
    return controllers_exp, controllers_notexp, check_result, check_analysis, check_action
#09:Check:Controllers:vEdge list sync
def warningChecknine(controllers_info):
    """
    #09:Check:Controllers:vEdge list sync

    Finds controllers whose vEdge list state (info[6]) is not 'Sync'.

    Returns (state_vedgeList, check_result, check_analysis, check_action)
    where state_vedgeList is a list of (controller, info[0], info[1]) tuples
    for every out-of-sync controller.
    """
    state_vedgeList = []
    for controller in controllers_info:
        if controllers_info[controller][6] != 'Sync':
            ## BUG FIX: list.append takes a single argument; the original
            ##  passed three and raised TypeError. Append one tuple instead.
            state_vedgeList.append(
                (controller, controllers_info[controller][0], controllers_info[controller][1]))
    if state_vedgeList == []:
        check_result = 'SUCCESSFUL'
        check_analysis = 'All the controllers have consistent state_vedgeList '
        check_action = None
    else:
        check_result = 'Failed'
        check_analysis = 'All the controllers do not have consistent state_vedgeList'
        check_action = 'Customer should do controller sync on vManage'
    return state_vedgeList ,check_result, check_analysis, check_action
#10:Check:Controllers: Confirm control connections
def warningCheckten(vsmart_count, vbond_count):
    """
    #10:Check:Controllers: Confirm control connections

    Compares the vBond/vSmart counts reported per instance by
    'show control summary' against the expected counts.

    Returns (control_sum_tab, discrepancy, check_result, check_analysis,
    check_action).
    """
    control_sum_json = json.loads(showCommand('show control summary | display json'))
    control_sum_tab = showCommand('show control summary | tab')

    ## collect every instance whose reported counts differ from expectation
    discrepancy = [
        instance
        for instance in control_sum_json['data']["viptela-security:control"]["summary"]
        if instance['vbond_counts'] != vbond_count or instance['vsmart_counts'] != vsmart_count
    ]

    if len(discrepancy) != 0:
        check_result = 'Failed'
        check_analysis = 'The vbond and vsmart count on API call does not match the currently control connected devices.'
        check_action = 'Troubleshoot: vBond and vSmart count showing discrepancy.'
    else:
        check_result = 'SUCCESSFUL'
        check_analysis = 'The vBond and vSmart count on API call matches the currently control connected devices. '
        check_action = None
    return control_sum_tab, discrepancy, check_result, check_analysis, check_action
#11:Check:xEdge:Version compatibility
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Information Checks
#01:Check:vManage:Disk controller type
def infoCheckone(server_type, disk_controller):
    """
    #01:Check:vManage:Disk controller type

    On on-prem deployments, flags IDE ('hd*') disk controllers, which can
    hurt disk IO performance; on-cloud deployments skip the check.

    Returns (check_result, check_analysis, check_action).
    """
    if server_type == 'on-prem':
        ## second non-empty path segment names the device (e.g. 'hda', 'sda')
        segments = [segment for segment in disk_controller.split('/') if segment != '']
        if 'hd' in segments[1]:
            check_result = 'Failed'
            check_analysis = 'Disk Type is IDE'
            check_action = 'On most scenarios, changing IDE to SCSI can improve disk IO performance. When using 20.3 or higher, it is advisable to change the controller type on the VM configuration.'
        else:
            check_result = 'SUCCESSFUL'
            check_analysis = 'Disk type is not IDE, safe to upgrade. '
            check_action = None
    elif server_type == 'on-cloud':
        check_result = 'SUCCESSFUL'
        check_analysis = 'Check is not required for on-cloud deployments'
        check_action = None
    return check_result, check_analysis, check_action
#02:Check:Controllers:Validate there is at minimum vBond, vSmart present
def infoChecktwo(vsmart_count, vbond_count):
    """
    #02:Check:Controllers:Validate there is at minimum vBond, vSmart present

    Returns (check_result, check_analysis, check_action).
    """
    has_minimum_controllers = vsmart_count >= 1 and vbond_count >= 1
    if has_minimum_controllers:
        return ('SUCCESSFUL',
                'One or more than one vBond and vSmart present, safe to upgrade',
                None)
    return ('Failed',
            'At Minimum one vBond and vSmart not present',
            'Customer to confirm if this is a lab scenario (and not full overlay)')
#03:Check:Controllers:Validate all controllers are reachable
def infoChecktthree(controllers_info):
    """
    #03:Check:Controllers:Validate all controllers are reachable

    info[3] holds the reachability state reported by vManage.

    Returns (unreach_controllers, check_result, check_analysis, check_action)
    where unreach_controllers lists (controller, info[1], info[2]) tuples for
    every unreachable controller.
    """
    unreach_controllers = [
        (controller, controllers_info[controller][1], controllers_info[controller][2])
        for controller in controllers_info
        if controllers_info[controller][3] != 'reachable'
    ]
    if len(unreach_controllers) != 0:
        check_result = 'Failed'
        check_analysis = 'The vManage reported Controllers that are not reachable. '
        check_action = 'Either troubleshoot why the controller is down, or delete any invalid device from the overlay '
    else:
        check_result = 'SUCCESSFUL'
        check_analysis = 'All the controllers are reachable'
        check_action = None
    return unreach_controllers,check_result, check_analysis, check_action
if __name__ == "__main__":
start_time = datetime.now().strftime("%d-%m-%Y %H:%M:%S")
#Validating the vmanage sever
try:
is_vmanage = is_vmanage()
except:
raise SystemExit('\033[1;31m \n\n ERROR: Failed to identify if the server you are currently executing the script on is a vManage server, verify if you are running the script on a vManage server. \033[0;0m \n\n')
if is_vmanage == False:
raise SystemExit('\033[1;31m \n\n ERROR: The server on which you are currently executing the script is not a vManage server, AURA tool is specifically for vManage servers. \033[0;0m \n\n')
#Parsing the arguments and validating the flag
try:
args = argumentParser()
argValidation(args)
except:
raise SystemExit('\033[1;31m \n\n ERROR: Error validating the command line arguments. \033[0;0m \n\n')
#Getting the password and validating it
try:
password = getpass.getpass('vManage Password:')
if len(password) == 0:
raise SystemExit('\033[1;31m \n\nERROR: Invalid Password provided \033[0;0m \n\n')
except:
raise SystemExit('\033[1;31m \n\nERROR: Invalid Password provided \033[0;0m \n\n')
#vManage version and loopback ip address
try:
version, version_tuple = vManageVersion()
except:
raise SystemExit('\033[1;31m ERROR: Error identifying the current vManage version. \033[0;0m \n\n')
try:
vmanage_lo_ip = getLoip()
except:
vmanage_lo_ip = '127.0.0.1'
print('\033[1;31m ERROR: Error retrieving the vManage loopback IP address. This may be related to issues on server name resolution (check with hostname -f). \033[0;0m \n\n')
#Creating Directory
directory_name = 'sdwan_sure'
dir_path = '{}'.format(directory_name)
try:
createDir(dir_path)
except:
raise SystemExit('\033[1;31m ERROR: Error creating {} directory. \033[0;0m \n\n'.format(dir_path))
#Creating Log file and Report File
try:
report_file_path = '{}/sure_report_{}.txt'.format(dir_path, datetime.now().strftime("%d_%m_%Y_%H_%M_%S"))
log_file_path = '{}/sure_logs_{}.log'.format(dir_path, datetime.now().strftime("%d_%m_%Y_%H_%M_%S"))
report_file = createFile(report_file_path)
log_file = createFile(log_file_path)
setup_logger('log_file_logger', log_file_path)
log_file_logger = logging.getLogger('log_file_logger')
except:
raise SystemExit('\033[1;31m ERROR: Error creating Report file and Log file. \033[0;0m \n\n')
writeFile(report_file, | |
integer, a tuple, a string that makes sense to R, or an RElement
OUTPUT: RElement
EXAMPLES::
sage: x = r([10.4,5.6,3.1,6.4,21.7])
sage: x[0]
numeric(0)
sage: x[1]
[1] 10.4
sage: x[-1]
[1] 5.6 3.1 6.4 21.7
sage: x[-2]
[1] 10.4 3.1 6.4 21.7
sage: x[-3]
[1] 10.4 5.6 6.4 21.7
sage: x['c(2,3)']
[1] 5.6 3.1
sage: key = r.c(2,3)
sage: x[key]
[1] 5.6 3.1
sage: m = r.array('1:3',r.c(2,4,2))
sage: m
, , 1
[,1] [,2] [,3] [,4]
[1,] 1 3 2 1
[2,] 2 1 3 2
, , 2
[,1] [,2] [,3] [,4]
[1,] 3 2 1 3
[2,] 1 3 2 1
sage: m[1,2,2]
[1] 2
sage: m[1,r.c(1,2),1]
[1] 1 3
"""
P = self._check_valid()
if isinstance(n, six.string_types):
n = n.replace('self', self._name)
return P.new('%s[%s]'%(self._name, n))
elif parent(n) is P: # the key is RElement itself
return P.new('%s[%s]'%(self._name, n.name()))
elif not isinstance(n,tuple):
return P.new('%s[%s]'%(self._name, n))
else:
L = []
for i in range(len(n)):
if parent(n[i]) is P:
L.append(n[i].name())
else:
L.append(str(n[i]))
return P.new('%s[%s]'%(self._name, ','.join(L)))
def __bool__(self):
"""
Implements bool(self).
.. note::
bool(self) will only return True if self == 0 contains a FALSE in its representation.
EXAMPLES::
sage: x = r([10.4,5.6,3.1,6.4,21.7])
sage: bool(x)
True
sage: y = r([0,0,0,0])
sage: bool(y)
False
sage: bool(r(0))
False
sage: bool(r(1))
True
"""
return "FALSE" in repr(self==0)
__nonzero__ = __bool__
def _comparison(self, other, symbol):
"""
Used to implement comparison of two objects.
INPUT:
- other -- RElement
- symbol -- string
OUTPUT: RElement -- output is an R element; not a bool!
TESTS::
sage: x = r([10.4,5.6,3.1,6.4,21.7])
sage: x._comparison(10.4, "==")
[1] TRUE FALSE FALSE FALSE FALSE
"""
P = self.parent()
other = P(other)
return P('%s %s %s'%(self.name(), symbol, other.name()))
def __eq__(self, other):
"""
Equality testing term by term.
INPUT:
- other -- RElement
OUTPUT: RElement -- an R element; not a bool!
EXAMPLES:
Notice that comparison is term by term and returns an R element. ::
sage: x = r([10.4,5.6,3.1,6.4,21.7])
sage: x == 10.4
[1] TRUE FALSE FALSE FALSE FALSE
"""
return self._comparison(other, "==")
def __lt__(self, other):
"""
Less than testing term by term.
INPUT:
- other -- RElement
OUTPUT: RElement -- an R element; not a bool!
EXAMPLES:
Notice that comparison is term by term and returns an R element. ::
sage: x = r([10.4,5.6,3.1,6.4,21.7])
sage: x < 7
[1] FALSE TRUE TRUE TRUE FALSE
"""
return self._comparison(other, "<")
def __gt__(self, other):
"""
Greater than testing term by term.
INPUT:
- other -- RElement
OUTPUT: RElement -- an R element; not a bool!
EXAMPLES:
Notice that comparison is term by term and returns an R element. ::
sage: x = r([10.4,5.6,3.1,6.4,21.7])
sage: x > 8
[1] TRUE FALSE FALSE FALSE TRUE
"""
return self._comparison(other, ">")
def __le__(self, other):
"""
Less than or equal testing term by term.
INPUT:
- other -- RElement
OUTPUT: RElement -- an R element; not a bool!
EXAMPLES::
sage: x = r([10.4,5.6,3.1,6.4,21.7])
sage: x <= 10.4
[1] TRUE TRUE TRUE TRUE FALSE
"""
return self._comparison(other, "<=")
def __ge__(self, other):
"""
Greater than or equal testing term by term.
INPUT:
- other -- RElement
OUTPUT: RElement -- an R element; not a bool!
EXAMPLES::
sage: x = r([10.4,5.6,3.1,6.4,21.7])
sage: x >= 10.4
[1] TRUE FALSE FALSE FALSE TRUE
"""
return self._comparison(other, ">=")
def __ne__(self, other):
"""
Not equal testing term by term.
INPUT:
- other -- RElement
OUTPUT: RElement -- an R element; not a bool!
EXAMPLES::
sage: x = r([10.4,5.6,3.1,6.4,21.7])
sage: x != 10.4
[1] FALSE TRUE TRUE TRUE TRUE
"""
return self._comparison(other, "!=")
def dot_product(self, other):
"""
Implements the notation self . other.
INPUT:
- self, other -- R elements
OUTPUT: R element
EXAMPLES::
sage: c = r.c(1,2,3,4)
sage: c.dot_product(c.t())
[,1] [,2] [,3] [,4]
[1,] 1 2 3 4
[2,] 2 4 6 8
[3,] 3 6 9 12
[4,] 4 8 12 16
sage: v = r([3,-1,8])
sage: v.dot_product(v)
[,1]
[1,] 74
"""
P = self._check_valid()
Q = P(other)
# the R operator is %*% for matrix multiplication
return P('%s %%*%% %s'%(self.name(), Q.name()))
def _sage_(self):
r"""
Returns Sage representation of the R object.
R objects are basic C structures, of different kind, that can
be stacked together. This is similar to Python lists with
variable objects, including lists of lists. If R lists have
names, they are translated to a Python dictionary, with anonymous
list entries called ``#{number}``.
OUTPUT: object -- Python object
EXAMPLES::
sage: rs = r.summary(r.c(1,4,3,4,3,2,5,1))
sage: d = rs._sage_()
sage: sorted(d.items())
[('DATA', [1, 1.75, 3, 2.875, 4, 5]),
('_Names', ['Min.', '1st Qu.', 'Median', 'Mean', '3rd Qu.', 'Max.']),
('_r_class', ['summaryDefault', 'table'])]
"""
self._check_valid()
P = self.parent()
with localconverter(P._r_to_sage_converter) as cv:
parsed = robjects.r(self.name())
return parsed
def _latex_(self):
r"""
Return LaTeX representation of this R object.
This calls the ``latex`` command in R.
OUTPUT: a latex expression (basically a string)
EXAMPLES::
sage: latex(r(2)) # optional - Hmisc (R package)
2
"""
from sage.misc.latex import LatexExpr
self._check_valid()
P = self.parent()
# latex is in Hmisc, this is currently not part of Sage's R!!!
try:
P.library('Hmisc')
except ImportError:
raise RuntimeError("The R package 'Hmisc' is required for R to LaTeX conversion, but it is not available.")
return LatexExpr(P.eval('latex(%s, file="");' % self.name()))
@instancedoc
class RFunctionElement(InterfaceFunctionElement):
    """A method bound to a particular R element, e.g. ``a.mean``."""

    def __reduce__(self):
        """
        Bound R element methods cannot be pickled.

        EXAMPLES::
            sage: a = r([1,2,3])
            sage: a.mean
            mean
            sage: dumps(a.mean)
            Traceback (most recent call last):
            ...
            NotImplementedError: pickling of R element methods is not yet supported
        """
        raise NotImplementedError("pickling of R element methods is not yet supported")

    def _instancedoc_(self):
        """
        Return the R help text for this method as a string.

        EXAMPLES::
            sage: a = r([1,2,3])
            sage: length = a.length
            sage: print(length.__doc__)
            title
            -----
            <BLANKLINE>
            Length of an Object
            <BLANKLINE>
            name
            ----
            <BLANKLINE>
            length
            ...
        """
        interface = self._obj.parent()
        return interface.help(self._name)

    def _sage_src_(self):
        """
        Return the R source code of this function.

        EXAMPLES::
            sage: a = r([1,2,3])
            sage: length = a.length
            sage: print(length._sage_src_())
            function (x) .Primitive("length")
        """
        interface = self._obj.parent()
        return interface.source(self._name)

    def __call__(self, *args, **kwds):
        """
        Call the R function with the bound element as first argument.

        EXAMPLES::
            sage: a = r([1,2,3])
            sage: length = a.length
            sage: length()
            [1] 3
        """
        interface = self._obj.parent()
        all_args = [self._obj] + list(args)
        return interface.function_call(self._name, args=all_args, kwds=kwds)
@instancedoc
class RFunction(InterfaceFunction):
    def __init__(self, parent, name, r_name=None):
        """
        A Function in the R interface.

        INPUT:
        - parent -- the R interface
        - name -- the name of the function for Python
        - r_name -- the name of the function in R itself (which can have dots in it)

        EXAMPLES::
            sage: length = r.length
            sage: type(length)
            <class 'sage.interfaces.r.RFunction'>
            sage: loads(dumps(length))
            length
        """
        self._parent = parent
        # a truthy r_name means ``name`` is already the R-side spelling;
        # otherwise translate the Python name (underscores -> dots).
        self._name = name if r_name else parent._sage_to_r_name(name)

    def __eq__(self, other):
        """
        Two RFunctions are equal when they wrap the same R name.

        EXAMPLES::
            sage: r.mean == loads(dumps(r.mean))
            True
            sage: r.mean == r.lr
            False
        """
        if not isinstance(other, RFunction):
            return False
        return self._name == other._name

    def __ne__(self, other):
        """
        Negation of ``__eq__``.

        EXAMPLES::
            sage: r.mean != loads(dumps(r.mean))
            False
            sage: r.mean != r.lr
            True
        """
        return not self.__eq__(other)

    def _instancedoc_(self):
        """
        Return the R help text for this function.

        EXAMPLES::
            sage: length = r.length
            sage: print(length.__doc__)
            title
            -----
            <BLANKLINE>
            Length of an Object
            <BLANKLINE>
            name
            ----
            <BLANKLINE>
            length
            ...
        """
        interface = self._parent
        return interface.help(self._name)

    def _sage_src_(self):
        """
        Return the R source of this function.

        EXAMPLES::
            sage: length = r.length
            sage: print(length._sage_src_())
            function (x) .Primitive("length")
        """
        interface = self._parent
        return interface.source(self._name)

    def __call__(self, *args, **kwds):
        """
        Call the underlying R function with the given arguments.

        EXAMPLES::
            sage: length = r.length
            sage: length([1,2,3])
            [1] 3
        """
        return self._parent.function_call(self._name, args=list(args), kwds=kwds)
def is_RElement(x):
    """
    Return True if x is an element in an R interface.

    INPUT:
    - x -- object

    OUTPUT: bool

    EXAMPLES::
        sage: from sage.interfaces.r import is_RElement
        sage: is_RElement(2)
        False
        sage: is_RElement(r(2))
        True
    """
    result = isinstance(x, RElement)
    return result
# The shared, module-level instance of the R interpreter interface;
# importing ``r`` from this module gives every caller the same session.
r = R()
def reduce_load_R():
    """
    Used for reconstructing a copy of the R interpreter from a pickle.

    Unpickling simply hands back the shared module-level instance ``r``.

    EXAMPLES::
        sage: from sage.interfaces.r import reduce_load_R
        sage: reduce_load_R()
        R Interpreter
    """
    return r
import os
def r_console():
"""
Spawn a new R command-line session.
EXAMPLES::
sage: r.console() # not tested
R version 2.6.1 (2007-11-26)
Copyright (C) 2007 The R Foundation for Statistical Computing
ISBN 3-900051-07-0
...
"""
from sage.repl.rich_output.display_manager import get_display_manager
if not get_display_manager().is_in_terminal():
raise RuntimeError('Can | |
51, 46, 49, 52, 47, 50, 53, 38, 37, 36, 41, 40, 39, 44, 43,
42, 35, 32, 29, 34, 31, 28, 33, 30, 27, 6, 7, 8, 3, 4, 5, 0,
1, 2, 26, 23, 20, 25, 22, 19, 24, 21, 18, 17, 14, 11, 16, 13, 10,
15, 12, 9]])
# 48 x 12 integer table: each row is a permutation of the action indices
# 0..11 for the opponent's action space.  NOTE(review): the meaning of
# each row index (presumably one row per board/state symmetry) is not
# visible in this chunk -- confirm against the code that indexes this
# table before relying on row semantics.
opp_action_permutations = \
    np.array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
              [ 1, 0, 3, 2, 5, 4, 7, 6, 11, 10, 9, 8],
              [ 0, 1, 2, 3, 10, 11, 8, 9, 4, 5, 6, 7],
              [ 1, 0, 3, 2, 9, 8, 11, 10, 5, 4, 7, 6],
              [ 0, 1, 2, 3, 6, 7, 4, 5, 10, 11, 8, 9],
              [ 1, 0, 3, 2, 7, 6, 5, 4, 9, 8, 11, 10],
              [ 0, 1, 2, 3, 8, 9, 10, 11, 6, 7, 4, 5],
              [ 1, 0, 3, 2, 11, 10, 9, 8, 7, 6, 5, 4],
              [ 4, 5, 6, 7, 0, 1, 2, 3, 10, 11, 8, 9],
              [ 5, 4, 7, 6, 1, 0, 3, 2, 9, 8, 11, 10],
              [ 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 6, 7],
              [11, 10, 9, 8, 1, 0, 3, 2, 5, 4, 7, 6],
              [ 6, 7, 4, 5, 0, 1, 2, 3, 8, 9, 10, 11],
              [ 7, 6, 5, 4, 1, 0, 3, 2, 11, 10, 9, 8],
              [10, 11, 8, 9, 0, 1, 2, 3, 6, 7, 4, 5],
              [ 9, 8, 11, 10, 1, 0, 3, 2, 7, 6, 5, 4],
              [ 4, 5, 6, 7, 8, 9, 10, 11, 0, 1, 2, 3],
              [ 5, 4, 7, 6, 11, 10, 9, 8, 1, 0, 3, 2],
              [10, 11, 8, 9, 4, 5, 6, 7, 0, 1, 2, 3],
              [ 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2],
              [ 8, 9, 10, 11, 6, 7, 4, 5, 0, 1, 2, 3],
              [11, 10, 9, 8, 7, 6, 5, 4, 1, 0, 3, 2],
              [ 6, 7, 4, 5, 10, 11, 8, 9, 0, 1, 2, 3],
              [ 7, 6, 5, 4, 9, 8, 11, 10, 1, 0, 3, 2],
              [ 4, 5, 6, 7, 2, 3, 0, 1, 8, 9, 10, 11],
              [ 5, 4, 7, 6, 3, 2, 1, 0, 11, 10, 9, 8],
              [10, 11, 8, 9, 2, 3, 0, 1, 4, 5, 6, 7],
              [ 9, 8, 11, 10, 3, 2, 1, 0, 5, 4, 7, 6],
              [ 6, 7, 4, 5, 2, 3, 0, 1, 10, 11, 8, 9],
              [ 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 11, 10],
              [ 8, 9, 10, 11, 2, 3, 0, 1, 6, 7, 4, 5],
              [11, 10, 9, 8, 3, 2, 1, 0, 7, 6, 5, 4],
              [ 2, 3, 0, 1, 4, 5, 6, 7, 10, 11, 8, 9],
              [ 3, 2, 1, 0, 5, 4, 7, 6, 9, 8, 11, 10],
              [ 2, 3, 0, 1, 8, 9, 10, 11, 4, 5, 6, 7],
              [ 3, 2, 1, 0, 11, 10, 9, 8, 5, 4, 7, 6],
              [ 2, 3, 0, 1, 6, 7, 4, 5, 8, 9, 10, 11],
              [ 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8],
              [ 2, 3, 0, 1, 10, 11, 8, 9, 6, 7, 4, 5],
              [ 3, 2, 1, 0, 9, 8, 11, 10, 7, 6, 5, 4],
              [ 4, 5, 6, 7, 10, 11, 8, 9, 2, 3, 0, 1],
              [ 5, 4, 7, 6, 9, 8, 11, 10, 3, 2, 1, 0],
              [ 8, 9, 10, 11, 4, 5, 6, 7, 2, 3, 0, 1],
              [11, 10, 9, 8, 5, 4, 7, 6, 3, 2, 1, 0],
              [10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1],
              [ 9, 8, 11, 10, 7, 6, 5, 4, 3, 2, 1, 0],
              [ 6, 7, 4, 5, 8, 9, 10, 11, 2, 3, 0, 1],
              [ 7, 6, 5, 4, 11, 10, 9, 8, 3, 2, 1, 0]])
action_permutations = \
np.array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[ 1, 0, 3, 2, 5, 4, 7, 6, 11, 10, 9, 8],
[ 0, 1, 2, 3, 8, 9, 10, 11, 6, 7, 4, 5],
[ 1, 0, 3, 2, 9, 8, 11, 10, 5, 4, 7, 6],
[ 0, 1, 2, 3, 6, 7, 4, 5, 10, 11, 8, 9],
[ 1, 0, 3, 2, 7, 6, 5, 4, 9, 8, 11, 10],
[ 0, 1, 2, 3, 10, 11, 8, 9, 4, 5, 6, 7],
[ 1, 0, 3, 2, 11, 10, 9, 8, 7, 6, 5, 4],
[ 4, 5, 6, 7, 0, 1, 2, 3, 10, 11, 8, 9],
[ 5, 4, 7, 6, 1, 0, 3, 2, 9, 8, 11, 10],
[ 4, 5, 6, 7, 8, 9, 10, 11, 0, 1, 2, 3],
[ 5, 4, 7, 6, 9, 8, 11, 10, 3, 2, 1, 0],
[ 4, 5, 6, 7, 2, 3, 0, 1, 8, 9, 10, 11],
[ 5, 4, 7, 6, 3, 2, 1, 0, 11, 10, 9, 8],
[ 4, 5, 6, 7, 10, 11, 8, 9, 2, 3, 0, 1],
[ 5, 4, 7, 6, 11, 10, 9, 8, 1, 0, 3, 2],
[ 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 6, 7],
[ 9, 8, 11, 10, 1, 0, 3, 2, 7, 6, 5, 4],
[ 8, 9, 10, 11, 4, 5, 6, 7, 2, 3, 0, 1],
[ 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2],
[ 8, 9, 10, 11, 6, 7, 4, 5, 0, 1, 2, 3],
[ 9, 8, 11, 10, 7, 6, 5, 4, 3, 2, 1, 0],
[ 8, 9, 10, 11, 2, 3, 0, 1, 6, 7, 4, 5],
[ 9, 8, 11, 10, 3, 2, 1, 0, 5, 4, 7, 6],
[ 6, 7, 4, 5, 0, 1, 2, 3, 8, 9, 10, 11],
[ 7, 6, 5, 4, 1, 0, 3, 2, 11, 10, 9, 8],
[ 6, 7, 4, 5, 8, 9, 10, 11, 2, 3, 0, 1],
[ 7, 6, 5, 4, 9, 8, 11, 10, 1, 0, 3, 2],
[ 6, 7, 4, 5, 2, 3, 0, 1, 10, 11, 8, 9],
[ 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 11, 10],
[ 6, 7, 4, 5, 10, 11, 8, 9, 0, 1, 2, 3],
[ 7, 6, 5, 4, 11, 10, 9, 8, 3, 2, 1, 0],
[ 2, 3, 0, 1, 4, 5, 6, 7, 10, 11, 8, 9],
[ 3, 2, 1, 0, 5, 4, 7, 6, 9, 8, 11, 10],
[ 2, 3, 0, 1, 8, 9, 10, 11, 4, 5, 6, 7],
[ 3, 2, 1, 0, 9, 8, 11, 10, 7, 6, 5, 4],
[ 2, 3, 0, 1, 6, 7, 4, 5, 8, 9, 10, 11],
[ 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8],
[ 2, 3, 0, 1, 10, 11, 8, 9, 6, 7, 4, 5],
[ 3, 2, 1, 0, 11, 10, 9, 8, 5, 4, 7, 6],
[10, 11, 8, 9, 0, 1, 2, 3, 6, 7, 4, 5],
[11, 10, 9, 8, 1, 0, 3, 2, 5, 4, 7, 6],
[10, 11, 8, 9, 4, 5, 6, 7, 0, 1, 2, 3],
[11, 10, 9, 8, 5, 4, 7, 6, 3, 2, 1, 0],
[10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1],
[11, 10, 9, 8, 7, 6, 5, 4, 1, 0, 3, 2],
[10, 11, 8, 9, 2, 3, 0, 1, 4, 5, 6, 7],
[11, 10, 9, 8, | |
import os
import numpy as np

# pymake and flopy are hard requirements for this test; fail fast with
# install instructions if either is missing.
# Fix: the guards previously used a bare ``except:``, which also swallows
# SystemExit/KeyboardInterrupt -- only an ImportError should trigger them.
try:
    import pymake
except ImportError:
    msg = "Error. Pymake package is not available.\n"
    msg += "Try installing using the following command:\n"
    msg += " pip install https://github.com/modflowpy/pymake/zipball/master"
    raise Exception(msg)
try:
    import flopy
except ImportError:
    msg = "Error. FloPy package is not available.\n"
    msg += "Try installing using the following command:\n"
    msg += " pip install flopy"
    raise Exception(msg)

from framework import testing_framework
from simulation import Simulation
paktest = "lak"
budtol = 1e-2
ex = ["ts_lak01"]
# one workspace per example, all under ./temp
exdirs = [os.path.join("temp", s) for s in ex]
ddir = "data"
# run all examples on Travis
continuous_integration = [True] * len(exdirs)
# set replace_exe to None to use default executable
replace_exe = None
# static model data
# spatial discretization
nlay, nrow, ncol = 5, 17, 17
shape3d = (nlay, nrow, ncol)
size3d = nlay * nrow * ncol
# symmetric spacing: thin 250 m edge cells, 500 m cells around the lake,
# 1000 m cells elsewhere (17 columns/rows in total)
delr = delc = [250.0] + [1000.0] * 5 + [500.0] * 5 + [1000.0] * 5 + [250.0]
top = 500.0
botm = [107.0, 97.0, 87.0, 77.0, 67.0]
# deactivate the cells occupied by the lake (layer 1 and a smaller
# footprint in layer 2)
idomain = np.ones(shape3d, dtype=int)
idomain[0, 6:11, 6:11] = 0
idomain[1, 7:10, 7:10] = 0
# temporal discretization: nper equal stress periods of 10 time steps
nper = 10
sim_time = 15000.0
pertime = sim_time / float(nper)
period_data = [(pertime, 10, 1.0) for _ in range(nper)]
strt = 115.0
icelltype = iconvert = 1
kh, kv, sy, ss = 30.0, [1179.0, 30.0, 30.0, 30.0], 3e-4, 0.2
storage_coefficient = True
# chd data: fixed heads on every cell of the first and last rows, and on
# the two end columns of each interior row, in every layer; head values
# ramp linearly from 160 (west) to 140 (east)
chd_arr = np.linspace(160, 140, ncol)
chd_spd = []
for k in range(nlay):
    for j in range(nrow):
        if 0 < j < nrow - 1:
            # interior row: only the west and east edge cells
            chd_spd.extend(
                [[(k, j, 0), chd_arr[0]], [(k, j, ncol - 1), chd_arr[-1]]]
            )
        else:
            # boundary row: every cell in the row
            chd_spd.extend([(k, j, i), chd_arr[i]] for i in range(ncol))
# recharge data
recharge = 0.116e-1
# lake data
# one lake with aux variables (temperature, salinity); 57 is the number
# of cell connections and matches len(connectiondata) below
stage, temp, conc = 110.0, 75.0, 0.5
packagedata = [(0, stage, 57, temp, conc)]
# single outlet; NOTE(review): field order presumably follows the MF6 LAK
# OUTLETS block (outletno, lakein, lakeout, couttype, invert, width,
# rough, slope) with -999 for unused SPECIFIED-rate fields -- confirm
# against the MF6 input docs.
outlets = [(0, 0, -1, "SPECIFIED", -999, -999, -999, -999)]
nlakes = len(packagedata)
noutlets = len(outlets)
# (lakeno, iconn, (layer, row, col), claktype, bedleak, belev, telev,
# connlen, connwidth) for each of the 57 lake/aquifer connections
connectiondata = [
    (0, 0, (0, 6, 5), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 1, (0, 7, 5), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 2, (0, 8, 5), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 3, (0, 9, 5), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 4, (0, 10, 5), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 5, (0, 5, 6), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 6, (1, 6, 6), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 7, (1, 7, 6), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 8, (1, 7, 6), "HORIZONTAL", 0.1, 0, 0, 250, 500),
    (0, 9, (1, 8, 6), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 10, (1, 8, 6), "HORIZONTAL", 0.1, 0, 0, 250, 500),
    (0, 11, (1, 9, 6), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 12, (1, 9, 6), "HORIZONTAL", 0.1, 0, 0, 250, 500),
    (0, 13, (1, 10, 6), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 14, (0, 11, 6), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 15, (0, 5, 7), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 16, (1, 6, 7), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 17, (1, 6, 7), "HORIZONTAL", 0.1, 0, 0, 250, 500),
    (0, 18, (2, 7, 7), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 19, (2, 8, 7), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 20, (2, 9, 7), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 21, (1, 10, 7), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 22, (1, 10, 7), "HORIZONTAL", 0.1, 0, 0, 250, 500),
    (0, 23, (0, 11, 7), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 24, (0, 5, 8), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 25, (1, 6, 8), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 26, (1, 6, 8), "HORIZONTAL", 0.1, 0, 0, 250, 500),
    (0, 27, (2, 7, 8), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 28, (2, 8, 8), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 29, (2, 9, 8), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 30, (1, 10, 8), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 31, (1, 10, 8), "HORIZONTAL", 0.1, 0, 0, 250, 500),
    (0, 32, (0, 11, 8), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 33, (0, 5, 9), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 34, (1, 6, 9), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 35, (1, 6, 9), "HORIZONTAL", 0.1, 0, 0, 250, 500),
    (0, 36, (2, 7, 9), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 37, (2, 8, 9), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 38, (2, 9, 9), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 39, (1, 10, 9), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 40, (1, 10, 9), "HORIZONTAL", 0.1, 0, 0, 250, 500),
    (0, 41, (0, 11, 9), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 42, (0, 5, 10), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 43, (1, 6, 10), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 44, (1, 7, 10), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 45, (1, 7, 10), "HORIZONTAL", 0.1, 0, 0, 250, 500),
    (0, 46, (1, 8, 10), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 47, (1, 8, 10), "HORIZONTAL", 0.1, 0, 0, 250, 500),
    (0, 48, (1, 9, 10), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 49, (1, 9, 10), "HORIZONTAL", 0.1, 0, 0, 250, 500),
    (0, 50, (1, 10, 10), "VERTICAL", 0.1, 0, 0, 0, 0),
    (0, 51, (0, 11, 10), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 52, (0, 6, 11), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 53, (0, 7, 11), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 54, (0, 8, 11), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 55, (0, 9, 11), "HORIZONTAL", 0.1, 0, 0, 500, 500),
    (0, 56, (0, 10, 11), "HORIZONTAL", 0.1, 0, 0, 500, 500),
]
# stress values used by the literal (non-time-series) period data
stage, evap, runoff, withdrawal, rate = (
    110.0,
    0.0103,
    1000.0,
    10000.0,
    -225000.0,
)
# period 0: full set of literal LAK settings (rainfall reuses the areal
# recharge rate defined above)
lakeperioddata0 = [
    (0, "status", "active"),
    (0, "stage", stage),
    (0, "rainfall", recharge),
    (0, "evaporation", evap),
    (0, "runoff", runoff),
    (0, "withdrawal", withdrawal),
    (0, "rate", rate),
    (0, "AUXILIARY", "temperature", temp),
    (0, "AUXILIARY", "salinity", conc),
]
lakeperioddata1 = [(0, "rainfall", 0.0)]
lakeperioddata2 = [(0, "rainfall", recharge)]
# NOTE(review): withdrawal and rate values are swapped (and negated) here
# relative to period 0; the time-series variant below mirrors this via
# "outlet2"/"withdrawal2", so it appears intentional -- confirm.
lakeperioddata3 = [(0, "withdrawal", -rate), (0, "rate", -withdrawal)]
# literal perioddata keyed by stress period (periods 4+ reuse period 3)
lakeperioddata = {
    0: lakeperioddata0,
    1: lakeperioddata1,
    2: lakeperioddata2,
    3: lakeperioddata3,
}
# time-series variant: string values name records in the TS file
lakeperioddatats0 = [
    (0, "status", "active"),
    (0, "stage", "stage"),
    (0, "rainfall", "rainfall"),
    (0, "evaporation", "evap"),
    (0, "runoff", "runoff"),
    (0, "withdrawal", "withdrawal"),
    (0, "rate", "outlet"),
    (0, "AUXILIARY", "salinity", "concentration"),
    (0, "AUXILIARY", "temperature", "temperature"),
]
lakeperioddatats1 = [(0, "rainfall", 0.0), (0, "rate", rate)]
lakeperioddatats2 = [(0, "rainfall", "rainfall2"), (0, "rate", "outlet")]
lakeperioddatats3 = [
    (0, "stage", "stage"),
    (0, "rainfall", "rainfall"),
    (0, "evaporation", "evap"),
    (0, "runoff", "runoff"),
    (0, "withdrawal", "outlet2"),
    (0, "rate", "withdrawal2"),
]
# time-series perioddata keyed by stress period
lakeperioddatats = {
    0: lakeperioddatats0,
    1: lakeperioddatats1,
    2: lakeperioddatats2,
    3: lakeperioddatats3,
}
# names of the records in the LAK time-series file; "rainfall2",
# "outlet2" and "withdrawal2" carry the alternate values referenced by
# the later stress periods
ts_names = [
    "stage",
    "rainfall",
    "evap",
    "runoff",
    "withdrawal",
    "outlet",
    "concentration",
    "temperature",
    "rainfall2",
    "outlet2",
    "withdrawal2",
]
ts_methods = ["linearend"] * len(ts_names)
# time-series times extend one period past the end of the simulation
ts_times = np.arange(0.0, sim_time + 2.0 * pertime, pertime, dtype=float)
# the record values are constant in time; only the time column varies
_constant_values = (
    stage,
    recharge,
    evap,
    runoff,
    withdrawal,
    rate,
    temp,
    conc,
    recharge,
    -rate,
    -withdrawal,
)
ts_data = [(t,) + _constant_values for t in ts_times]
# observe the stage of the single lake
lak_obs = {"lak_obs.csv": [("lake1", "STAGE", (0,))]}
def build_model(ws, name, timeseries=False):
    """
    Build the MODFLOW 6 simulation for the LAK time-series test.

    Parameters
    ----------
    ws : str
        Workspace directory the simulation files are written to.
    name : str
        Simulation/model name, also used for the output file names.
    timeseries : bool
        If True, LAK stress data reference time-series records and a TS
        file is attached; otherwise literal period values are used.

    Returns
    -------
    flopy.mf6.MFSimulation
        The fully populated (but not yet written or run) simulation.
    """
    # name of the binary head file written by the OC package
    # (fix: this local was previously computed but never used)
    hdsfile = "{}.hds".format(name)
    # build the model
    sim = flopy.mf6.MFSimulation(sim_name=name, exe_name="mf6", sim_ws=ws)
    tdis = flopy.mf6.ModflowTdis(sim, nper=nper, perioddata=period_data)
    ims = flopy.mf6.ModflowIms(
        sim,
        print_option="NONE",
        linear_acceleration="CG",
        outer_maximum=500,
        inner_maximum=100,
        outer_dvclose=1e-6,
        inner_dvclose=1e-3,
        rcloserecord=[0.01, "strict"],
    )
    gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
    dis = flopy.mf6.ModflowGwfdis(
        gwf,
        nlay=nlay,
        nrow=nrow,
        ncol=ncol,
        delr=delr,
        delc=delc,
        top=top,
        botm=botm,
        idomain=idomain,
    )
    # NOTE(review): module-level ``kv`` is never passed here (no k33=kv);
    # confirm whether vertical hydraulic conductivity was meant to be set.
    npf = flopy.mf6.ModflowGwfnpf(gwf, k=kh, icelltype=icelltype)
    sto = flopy.mf6.ModflowGwfsto(
        gwf,
        storagecoefficient=storage_coefficient,
        sy=sy,
        ss=ss,
        transient={0: True},
        iconvert=iconvert,
    )
    chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chd_spd)
    rch = flopy.mf6.ModflowGwfrcha(gwf, recharge=recharge)
    # choose literal vs time-series LAK stress period data
    if timeseries:
        lakpd = lakeperioddatats
    else:
        lakpd = lakeperioddata
    lak = flopy.mf6.ModflowGwflak(
        gwf,
        nlakes=nlakes,
        noutlets=noutlets,
        print_input=True,
        print_stage=True,
        print_flows=True,
        auxiliary=["temperature", "salinity"],
        packagedata=packagedata,
        connectiondata=connectiondata,
        outlets=outlets,
        perioddata=lakpd,
        pname="lak-1",
    )
    lak.obs.initialize(
        filename="{}.lak.obs".format(name),
        digits=20,
        print_input=True,
        continuous=lak_obs,
    )
    if timeseries:
        # attach the TS file that supplies the named records
        fname = "{}.lak.ts".format(name)
        lak.ts.initialize(
            filename=fname,
            timeseries=ts_data,
            time_series_namerecord=ts_names,
            interpolation_methodrecord=ts_methods,
        )
    ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
    oc = flopy.mf6.ModflowGwfoc(
        gwf,
        head_filerecord=hdsfile,
        budget_filerecord="{}.cbc".format(name),
        saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
        printrecord=[("BUDGET", "LAST")],
    )
    return sim
def get_model(idx, dir):
    """Build the paired simulations for example ``idx``.

    Returns the literal-value simulation rooted at ``dir`` and the
    time-series comparison model rooted at ``dir/mf6``.
    """
    name = ex[idx]
    # build MODFLOW 6 files
    sim = build_model(dir, name)
    # build MODFLOW 6 files with the LAK time-series variant
    mc = build_model(os.path.join(dir, "mf6"), name, timeseries=True)
    return sim, mc
def eval_budget(sim):
print("evaluating budgets...")
from budget_file_compare import eval_bud_diff
# get ia/ja from | |
body='{"data": {"key": "value"}}',
status=200,
content_type="application/json",
)
with pytest.raises(TypeError):
await none_semaphore_client.test().get()
"""
test iterator features
"""
async def test_simple_pages_iterator(mocked, client):
    """Two pages linked via ``paging.next`` are iterated to exhaustion."""
    next_url = "http://api.example.org/next_batch"
    first_page = '{"data": [{"key": "value"}], "paging": {"next": "%s"}}' % next_url
    last_page = '{"data": [{"key": "value"}], "paging": {"next": ""}}'
    mocked.get(
        client.test().data,
        body=first_page,
        status=200,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=last_page,
        status=200,
        content_type="application/json",
    )
    response = await client.test().get()
    await check_pages_responses(response, total_pages=2)
async def test_simple_pages_with_max_pages_iterator(mocked, client):
    """``max_pages`` stops iteration even though more pages are available."""
    next_url = "http://api.example.org/next_batch"
    one_item_page = '{"data": [{"key": "value"}], "paging": {"next": "%s"}}' % next_url
    three_item_page = (
        '{"data": [{"key": "value"}, {"key": "value"}, {"key": "value"}], "paging": {"next": "%s"}}'
        % next_url
    )
    three_item_last_page = '{"data": [{"key": "value"}, {"key": "value"}, {"key": "value"}], "paging": {"next": ""}}'
    mocked.get(
        client.test().data,
        body=one_item_page,
        status=200,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=three_item_page,
        status=200,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=three_item_page,
        status=200,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=three_item_last_page,
        status=200,
        content_type="application/json",
    )
    response = await client.test().get()
    await check_pages_responses(response, total_pages=7, max_pages=3)
async def test_simple_pages_with_max_items_iterator(mocked, client):
    """``max_items`` caps the number of yielded items across pages."""
    next_url = "http://api.example.org/next_batch"
    one_item_page = '{"data": [{"key": "value"}], "paging": {"next": "%s"}}' % next_url
    three_item_page = (
        '{"data": [{"key": "value"}, {"key": "value"}, {"key": "value"}], "paging": {"next": "%s"}}'
        % next_url
    )
    three_item_last_page = '{"data": [{"key": "value"}, {"key": "value"}, {"key": "value"}], "paging": {"next": ""}}'
    mocked.get(
        client.test().data,
        body=one_item_page,
        status=200,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=three_item_page,
        status=200,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=three_item_page,
        status=200,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=three_item_last_page,
        status=200,
        content_type="application/json",
    )
    response = await client.test().get()
    await check_pages_responses(response, total_pages=3, max_items=3)
async def test_simple_pages_with_max_pages_and_max_items_iterator(mocked, client):
    """``max_pages`` and ``max_items`` combine; the tighter bound wins."""
    next_url = "http://api.example.org/next_batch"
    first_page = '{"data": [{"key": "value"}], "paging": {"next": "%s"}}' % next_url
    last_page = '{"data": [{"key": "value"}, {"key": "value"}, {"key": "value"}], "paging": {"next": ""}}'
    mocked.get(
        client.test().data,
        body=first_page,
        status=200,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=last_page,
        status=200,
        content_type="application/json",
    )
    response = await client.test().get()
    await check_pages_responses(response, total_pages=3, max_pages=2, max_items=3)
async def test_simple_pages_max_pages_zero_iterator(mocked, client):
    """``max_pages=0`` yields no pages at all.

    Fixed: the second response was registered with ``mocked.add``, which
    only worked because the HTTP method defaulted to GET; every sibling
    test uses the ``mocked.get`` shorthand, so use it here as well.
    """
    next_url = "http://api.example.org/next_batch"
    mocked.get(
        client.test().data,
        body='{"data": [{"key": "value"}], "paging": {"next": "%s"}}' % next_url,
        status=200,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body='{"data": [{"key": "value"}], "paging": {"next": ""}}',
        status=200,
        content_type="application/json",
    )
    response = await client.test().get()
    await check_pages_responses(response, total_pages=0, max_pages=0)
async def test_simple_pages_max_items_zero_iterator(mocked, client):
    """``max_items=0`` short-circuits before any page is consumed."""
    next_url = "http://api.example.org/next_batch"
    first_page = '{"data": [{"key": "value"}], "paging": {"next": "%s"}}' % next_url
    last_page = '{"data": [{"key": "value"}], "paging": {"next": ""}}'
    mocked.get(
        client.test().data,
        body=first_page,
        status=200,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=last_page,
        status=200,
        content_type="application/json",
    )
    response = await client.test().get()
    await check_pages_responses(response, total_pages=0, max_items=0)
async def test_simple_pages_max_pages_ans_max_items_zero_iterator(mocked, client):
    """Both limits at zero: nothing is iterated.

    NOTE: "ans" in the name is a typo for "and"; kept so the test id
    stays stable.
    """
    next_url = "http://api.example.org/next_batch"
    first_page = '{"data": [{"key": "value"}], "paging": {"next": "%s"}}' % next_url
    last_page = '{"data": [{"key": "value"}], "paging": {"next": ""}}'
    mocked.get(
        client.test().data,
        body=first_page,
        status=200,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=last_page,
        status=200,
        content_type="application/json",
    )
    response = await client.test().get()
    await check_pages_responses(response, total_pages=0, max_pages=0, max_items=0)
async def test_pages_iterator_with_client_error(mocked, client):
    """A 4xx on the third page aborts iteration with ClientError after two pages."""
    next_url = "http://api.example.org/next_batch"
    linked_page = '{"data": [{"key": "value"}], "paging": {"next": "%s"}}' % next_url
    last_page = '{"data": [{"key": "value"}], "paging": {"next": ""}}'
    mocked.get(
        client.test().data,
        body=linked_page,
        status=200,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=linked_page,
        status=200,
        content_type="application/json",
    )
    # third page fails with a client error (408)
    mocked.get(
        next_url,
        body=linked_page,
        status=408,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=last_page,
        status=200,
        content_type="application/json",
    )
    response = await client.test().get()
    expected = {
        response: {
            "data": [{"key": "value"}],
            "paging": {"next": "http://api.example.org/next_batch"},
        },
        response.data: [{"key": "value"}],
        response.paging: {"next": "http://api.example.org/next_batch"},
        response.paging.next: "http://api.example.org/next_batch",
    }
    for resp, data in expected.items():
        check_response(resp, data)
    seen_pages = 0
    with pytest.raises(ClientError):
        async for item in response().pages():
            for resp, data in {item: {"key": "value"}, item.key: "value"}.items():
                check_response(resp, data)
            seen_pages += 1
    # only the two good pages were delivered before the error
    assert seen_pages == 2
async def test_pages_iterator_with_server_error(mocked, client):
    """A 5xx on the third page aborts iteration with ServerError after two pages."""
    next_url = "http://api.example.org/next_batch"
    linked_page = '{"data": [{"key": "value"}], "paging": {"next": "%s"}}' % next_url
    last_page = '{"data": [{"key": "value"}], "paging": {"next": ""}}'
    mocked.get(
        client.test().data,
        body=linked_page,
        status=200,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=linked_page,
        status=200,
        content_type="application/json",
    )
    # third page fails with a server error (504)
    mocked.get(
        next_url,
        body=linked_page,
        status=504,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=last_page,
        status=200,
        content_type="application/json",
    )
    response = await client.test().get()
    expected = {
        response: {
            "data": [{"key": "value"}],
            "paging": {"next": "http://api.example.org/next_batch"},
        },
        response.data: [{"key": "value"}],
        response.paging: {"next": "http://api.example.org/next_batch"},
        response.paging.next: "http://api.example.org/next_batch",
    }
    for resp, data in expected.items():
        check_response(resp, data)
    seen_pages = 0
    with pytest.raises(ServerError):
        async for item in response().pages():
            for resp, data in {item: {"key": "value"}, item.key: "value"}.items():
                check_response(resp, data)
            seen_pages += 1
    # only the two good pages were delivered before the error
    assert seen_pages == 2
async def test_pages_iterator_with_error_on_single_page(mocked, client):
    """A 204 page with an empty record does not stop iteration; all four pages arrive."""
    next_url = "http://api.example.org/next_batch"
    linked_page = '{"data": [{"key": "value"}], "paging": {"next": "%s"}}' % next_url
    empty_page = '{"data": [{}], "paging": {"next": "%s"}}' % next_url
    last_page = '{"data": [{"key": "value"}], "paging": {"next": ""}}'
    mocked.get(
        client.test().data,
        body=linked_page,
        status=200,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=linked_page,
        status=200,
        content_type="application/json",
    )
    # third page is a 204 carrying an empty record
    mocked.get(
        next_url,
        body=empty_page,
        status=204,
        content_type="application/json",
    )
    mocked.get(
        next_url,
        body=last_page,
        status=200,
        content_type="application/json",
    )
    response = await client.test().get()
    expected = {
        response: {
            "data": [{"key": "value"}],
            "paging": {"next": "http://api.example.org/next_batch"},
        },
        response.data: [{"key": "value"}],
        response.paging: {"next": "http://api.example.org/next_batch"},
        response.paging.next: "http://api.example.org/next_batch",
    }
    for resp, data in expected.items():
        check_response(resp, data)
    seen_pages = 0
    async for item in response().pages():
        if seen_pages == 2:
            status, result_page = 204, {item: dict()}
        else:
            status, result_page = 200, {item: {"key": "value"}, item.key: "value"}
        for resp, data in result_page.items():
            check_response(resp, data, status)
        seen_pages += 1
    assert seen_pages == 4
"""
test XML requests
"""
async def test_xml_post_string(mocked, xml_client):
    """A raw XML string posted as ``data`` is sent UTF-8 encoded, otherwise untouched."""
    mocked.post(
        xml_client.test().data,
        body="Any response",
        status=200,
        content_type="application/json",
    )
    xml_string = '<tag1 attr1="val1"><tag2>text1</tag2><tag3>text2</tag3></tag1>'
    await xml_client.test().post(data=xml_string)
    sent_body = mocked.requests[("POST", URL(xml_client.test().data))][0].kwargs["data"]
    assert sent_body == xml_string.encode("utf-8")
async def test_xml_post_dict(mocked, xml_client):
    """A dict body is serialized with ``xmltodict.unparse`` before posting."""
    mocked.post(
        xml_client.test().data,
        body="Any response",
        status=200,
        content_type="application/json",
    )
    payload = OrderedDict(
        [
            (
                "tag1",
                OrderedDict([("@attr1", "val1"), ("tag2", "text1"), ("tag3", "text2")]),
            )
        ]
    )
    await xml_client.test().post(data=payload)
    sent_body = mocked.requests[("POST", URL(xml_client.test().data))][0].kwargs["data"]
    assert sent_body == xmltodict.unparse(payload).encode("utf-8")
async def test_xml_post_dict_passes_unparse_param(mocked, xml_client):
    """``xmltodict_unparse__``-prefixed kwargs are forwarded to ``xmltodict.unparse``."""
    mocked.post(
        xml_client.test().data,
        body="Any response",
        status=200,
        content_type="application/json",
    )
    payload = OrderedDict(
        [
            (
                "tag1",
                OrderedDict([("@attr1", "val1"), ("tag2", "text1"), ("tag3", "text2")]),
            )
        ]
    )
    await xml_client.test().post(data=payload, xmltodict_unparse__full_document=False)
    sent_body = mocked.requests[("POST", URL(xml_client.test().data))][0].kwargs["data"]
    assert sent_body == xmltodict.unparse(payload, full_document=False).encode("utf-8")
async def test_xml_returns_text_if_response_not_xml(mocked, xml_client):
    """A non-XML response body is exposed as plain text under ``data["text"]``."""
    mocked.post(
        xml_client.test().data,
        body="Any response",
        status=200,
        content_type="any content",
    )
    payload = OrderedDict(
        [
            (
                "tag1",
                OrderedDict([("@attr1", "val1"), ("tag2", "text1"), ("tag3", "text2")]),
            )
        ]
    )
    response = await xml_client.test().post(data=payload)
    assert response().data["text"] == "Any response"
async def test_xml_post_dict_returns_dict_if_response_xml(mocked, xml_client):
    """An XML response body is parsed back into a dict via ``xmltodict.parse``."""
    xml_body = '<tag1 attr1="val1">text1</tag1>'
    mocked.post(
        xml_client.test().data,
        body=xml_body,
        status=200,
        content_type="application/xml",
    )
    payload = OrderedDict(
        [
            (
                "tag1",
                OrderedDict([("@attr1", "val1"), ("tag2", "text1"), ("tag3", "text2")]),
            )
        ]
    )
    response = await xml_client.test().post(data=payload)
    assert response().data == xmltodict.parse(xml_body)
"""
test token refreshing
"""
async def test_not_token_refresh_client_propagates_client_error(mocked, client):
    """A client without refresh support lets the 401 ClientError bubble up."""
    plain_client = client
    mocked.post(
        plain_client.test().data,
        callback=callback_401,
        content_type="application/json",
    )
    with pytest.raises(ClientError):
        await plain_client.test().post()
async def test_disable_token_refreshing(mocked, refresh_token_possible_false_values):
    """Refreshing must not kick in when the refresh_token flag is falsy.

    Covers three paths: no flag at all, every falsy constructor value from the
    fixture, and a falsy per-request override. In each case the mocked 401
    must surface as a ClientError instead of triggering a token refresh.
    """
    # No refresh_token argument at all: the 401 propagates.
    async with TokenRefreshClient(token="token") as token_refreshing_client:
        mocked.post(
            token_refreshing_client.test().data,
            callback=callback_401,
            content_type="application/json",
        )
        with pytest.raises(ClientError):
            await token_refreshing_client.test().post()
    # Every falsy constructor value disables refreshing as well.
    for refresh_token in refresh_token_possible_false_values:
        async with TokenRefreshClient(
            token="token", refresh_token=refresh_token
        ) as token_refreshing_client:
            mocked.post(
                token_refreshing_client.test().data,
                callback=callback_401,
                content_type="application/json",
            )
            with pytest.raises(ClientError):
                await token_refreshing_client.test().post()
    # A falsy per-request override also disables refreshing.
    # NOTE(review): ``refresh_token`` here is the value leaked from the loop
    # above, i.e. the *last* fixture value.
    async with TokenRefreshClient(token="token") as token_refreshing_client:
        mocked.post(
            token_refreshing_client.test().data,
            callback=callback_401,
            content_type="application/json",
        )
        with pytest.raises(ClientError):
            await token_refreshing_client.test().post(refresh_token=refresh_token)
async def test_token_expired_automatically_refresh_authentication(mocked):
    """On a 401 the client refreshes its token and retries the request.

    Exercises both ways of enabling refreshing: the per-request
    ``refresh_token=True`` flag and the constructor-level argument. The mock
    queue is order-dependent: a 401 followed by a 201 means the retry after
    refreshing succeeds; a 401/401 pair then proves the refresh-and-retry
    cycle does not loop forever.
    """
    async with TokenRefreshClient(token="token") as token_refresh_client:
        # Queue a 401 (triggers refresh) followed by a 201 (retry succeeds).
        mocked.post(
            token_refresh_client.test().data,
            callback=callback_401,
            content_type="application/json",
        )
        mocked.post(
            token_refresh_client.test().data,
            callback=callback_201,
            content_type="application/json",
        )
        response = await token_refresh_client.test().post(refresh_token=True)
        # refresh_authentication method should be able to update api_params
        assert response._api_params["token"] == "new_token"
        # Queue two 401s: the retry after the refresh fails as well.
        mocked.post(
            token_refresh_client.test().data,
            callback=callback_401,
            content_type="application/json",
        )
        mocked.post(
            token_refresh_client.test().data,
            callback=callback_401,
            content_type="application/json",
        )
        # check that the refresh_token flag is not cyclic
        with pytest.raises(ClientError):
            await token_refresh_client.test().post(refresh_token=True)
    # Same scenario, but refreshing is enabled via the constructor instead
    # of the per-request flag.
    async with TokenRefreshClient(
        token="token", refresh_token=True
    ) as token_refresh_client:
        mocked.post(
            token_refresh_client.test().data,
            callback=callback_401,
            content_type="application/json",
        )
        mocked.post(
            token_refresh_client.test().data,
            callback=callback_201,
            content_type="application/json",
        )
        response = await token_refresh_client.test().post()
        # refresh_authentication method should be able to update api_params
        assert response._api_params["token"] == "new_token"
        mocked.post(
            token_refresh_client.test().data,
            callback=callback_401,
            content_type="application/json",
        )
        mocked.post(
            token_refresh_client.test().data,
            callback=callback_401,
            content_type="application/json",
        )
        # check that the refresh_token flag is not cyclic
        with pytest.raises(ClientError):
            await token_refresh_client.test().post()
async def test_token_expired_automatically_refresh_authentication_by_default(
    mocked, token_refresh_by_default_client
):
    """A client that refreshes by default behaves like ``refresh_token=True``:
    401 -> refresh -> retry (201) without any explicit flag, and a 401/401
    pair still raises instead of refreshing forever.
    """
    # 401 then 201: refresh is triggered implicitly and the retry succeeds.
    mocked.post(
        token_refresh_by_default_client.test().data,
        callback=callback_401,
        content_type="application/json",
    )
    mocked.post(
        token_refresh_by_default_client.test().data,
        callback=callback_201,
        content_type="application/json",
    )
    response = await token_refresh_by_default_client.test().post()
    # refresh_authentication method should be able to update api_params
    assert response._api_params["token"] == "new_token"
    # 401 then 401: the retry after the refresh fails as well.
    mocked.post(
        token_refresh_by_default_client.test().data,
        callback=callback_401,
        content_type="application/json",
    )
    mocked.post(
        token_refresh_by_default_client.test().data,
        callback=callback_401,
        content_type="application/json",
    )
    # check that the refresh_token flag is not cyclic
    with pytest.raises(ClientError):
        await token_refresh_by_default_client.test().post()
async def test_raises_error_if_refresh_authentication_method_returns_false_value(
    mocked, refresh_token_possible_false_values
):
    """When refresh_authentication() returns a falsy value the original
    ClientError must propagate: with no flag, with every constructor value
    (truthy and falsy alike), and with a per-request override.
    """
    # Default configuration: the 401 propagates.
    async with FailTokenRefreshClient(token="token") as fail_client:
        mocked.post(
            fail_client.test().data,
            callback=callback_401,
            content_type="application/json",
        )
        with pytest.raises(ClientError):
            await fail_client.test().post()
    # Any constructor value — including True — still ends in ClientError,
    # because the refresh itself yields a falsy result.
    for refresh_token in (True, *refresh_token_possible_false_values):
        async with FailTokenRefreshClient(
            token="token", refresh_token=refresh_token
        ) as fail_client:
            mocked.post(
                fail_client.test().data,
                callback=callback_401,
                content_type="application/json",
            )
            with pytest.raises(ClientError):
                await fail_client.test().post()
    # Per-request override.
    # NOTE(review): ``refresh_token`` is the value leaked from the loop above,
    # i.e. the last fixture value.
    async with FailTokenRefreshClient(token="token") as fail_client:
        mocked.post(
            fail_client.test().data,
            callback=callback_401,
            content_type="application/json",
        )
        with pytest.raises(ClientError):
            await fail_client.test().post(refresh_token=refresh_token)
"""
Test PydanticAdapterMixin.
"""
async def test_pydantic_model_not_found(mocked):
    """A route with no registered pydantic model raises ValueError on GET."""
    async with PydanticForcedClient() as pydantic_client:
        mocked.get(
            pydantic_client.test_not_found().data,
            body="{}",
            status=200,
            content_type="application/json",
        )
        with pytest.raises(ValueError):
            await pydantic_client.test_not_found().get()
async def test_bad_pydantic_model(mocked):
    """A route registered with an invalid pydantic model raises ValueError."""
    async with PydanticForcedClient() as pydantic_client:
        mocked.get(
            pydantic_client.test_bad_pydantic_model().data,
            body="{}",
            status=200,
            content_type="application/json",
        )
        with pytest.raises(ValueError):
            await pydantic_client.test_bad_pydantic_model().get()
async def test_bad_dataclass_model(mocked):
    """A route registered with an invalid dataclass model raises TypeError."""
    async with PydanticForcedClient() as pydantic_client:
        mocked.get(
            pydantic_client.test_bad_dataclass_model().data,
            body="{}",
            status=200,
            content_type="application/json",
        )
        with pytest.raises(TypeError):
            await pydantic_client.test_bad_dataclass_model().get()
async def test_pydantic_mixin_response_to_native(mocked):
response_body_root = (
'[{"key1": "value1", "key2": 123}, {"key1": "value2", "key2": 321}]'
)
response_body = '{"data": %s}' % response_body_root
validate_data_received_list = [True, False]
validate_data_sending_list = [True, False]
extract_root_list | |
<gh_stars>0
from typing import Dict, List, Tuple, Optional, Union, Any
import demistomock as demisto
import urllib3
from CommonServerPython import *
# Disable insecure warnings
urllib3.disable_warnings()

# Prefix used for every entry-context key this integration writes.
INTEGRATION_CONTEXT_NAME = 'MSGraphGroups'
# Sentinel for commands that return human-readable output but no raw outputs.
NO_OUTPUTS: dict = {}
# App name handed to MicrosoftClient (see MsGraphClient.__init__).
APP_NAME = 'ms-graph-groups'
def camel_case_to_readable(text: str) -> str:
    """'camelCase' -> 'Camel Case'

    Args:
        text: the text to transform

    Returns:
        A Camel Cased string.
    """
    if text == 'id':
        # Keep the common identifier field fully capitalized.
        return 'ID'
    pieces = []
    for char in text:
        if char.isupper():
            # Break the word before each capital letter.
            pieces.append(' ' + char)
        else:
            # strip() drops whitespace characters, keeps everything else.
            pieces.append(char.strip())
    return ''.join(pieces).strip().title()
def parse_outputs(groups_data: Union[Dict[str, str], List[Dict[str, str]]]) -> Tuple[Any, Any]:
    """Parse group data as received from Microsoft Graph API into Demisto's conventions.

    Args:
        groups_data: a dictionary, or a list of dictionaries, containing the group data.
            (BUGFIX: the annotation previously claimed ``Dict`` only, although the
            list branch below is explicitly handled.)

    Returns:
        A pair (readable, outputs) — dicts for a single group, parallel lists for
        a list of groups:
        groups_readable: Camel Cased keys, for the human readable output
        groups_outputs: space-free keys, for the entry context
    """
    # Unnecessary fields, dropping as to not load the incident context.
    fields_to_drop = ['@odata.context', '@odata.nextLink', '@odata.deltaLink', '@odata.type', '@removed',
                      'resourceProvisioningOptions', 'securityIdentifier', 'onPremisesSecurityIdentifier',
                      'onPremisesNetBiosName', 'onPremisesProvisioningErrors', 'onPremisesSamAccountName',
                      'resourceBehaviorOptions', 'creationOptions', 'preferredDataLocation']
    if isinstance(groups_data, list):
        groups_readable, groups_outputs = [], []
        for group_data in groups_data:
            group_readable = {camel_case_to_readable(i): j for i, j in group_data.items() if i not in fields_to_drop}
            if '@removed' in group_data:
                # Delta queries mark deleted groups with '@removed'.
                group_readable['Status'] = 'deleted'
            groups_readable.append(group_readable)
            # The comprehension already builds a new dict — no .copy() needed.
            groups_outputs.append({k.replace(' ', ''): v for k, v in group_readable.items()})

        return groups_readable, groups_outputs

    group_readable = {camel_case_to_readable(i): j for i, j in groups_data.items() if i not in fields_to_drop}
    if '@removed' in groups_data:
        group_readable['Status'] = 'deleted'
    group_outputs = {k.replace(' ', ''): v for k, v in group_readable.items()}

    return group_readable, group_outputs
class MsGraphClient:
    """
    Microsoft Graph Groups client: a thin wrapper around ``MicrosoftClient``
    that issues authenticated requests against the Graph ``groups`` endpoints
    (list/get/create/delete groups; list/add/remove group members).

    NOTE(review): the original docstring described the Graph *Mail* client
    (copy/paste); every request below targets a ``groups`` URL, so the text
    was corrected here.
    """

    def __init__(self, tenant_id, auth_id, enc_key, app_name, base_url, verify, proxy, self_deployed,
                 certificate_thumbprint: Optional[str] = None, private_key: Optional[str] = None):
        # Authentication and transport are fully delegated to MicrosoftClient.
        self.ms_client = MicrosoftClient(tenant_id=tenant_id, auth_id=auth_id, enc_key=enc_key, app_name=app_name,
                                         base_url=base_url, verify=verify, proxy=proxy, self_deployed=self_deployed,
                                         certificate_thumbprint=certificate_thumbprint, private_key=private_key)

    def test_function(self):
        """Performs basic GET request to check if the API is reachable and authentication is successful.

        Returns:
            ok if successful.
        """
        self.ms_client.http_request(method='GET', url_suffix='groups', params={'$orderby': 'displayName'})
        demisto.results('ok')

    def list_groups(self, order_by: Optional[str] = None, next_link: Optional[str] = None,
                    top: Optional[int] = None, filter_: Optional[str] = None):
        """Returns all groups by sending a GET request.

        Args:
            order_by: the group fields to order by the response.
            next_link: the link for the next page of results, if exists. see Microsoft documentation for more details.
                docs.microsoft.com/en-us/graph/api/group-list?view=graph-rest-1.0
            top: sets the page size of results.
            filter_: filters results.

        Returns:
            Response from API.
        """
        if next_link:  # pagination
            return self.ms_client.http_request(method='GET', full_url=next_link)
        # default value = 100 (server-side default when top is None — TODO confirm)
        params = {'$top': top}
        if order_by:
            params['$orderby'] = order_by  # type: ignore
        if filter_:
            params['$filter'] = filter_  # type: ignore
        return self.ms_client.http_request(
            method='GET',
            url_suffix='groups',
            params=params)

    def get_group(self, group_id: str) -> Dict:
        """Returns a single group by sending a GET request.

        Args:
            group_id: the group id.

        Returns:
            Response from API.
        """
        group = self.ms_client.http_request(method='GET', url_suffix=f'groups/{group_id}')
        return group

    def create_group(self, properties: Dict[str, Optional[Any]]) -> Dict:
        """Create a single group by sending a POST request.

        Args:
            properties: the group properties.

        Returns:
            Response from API.
        """
        group = self.ms_client.http_request(method='POST', url_suffix='groups', json_data=properties)
        return group

    def delete_group(self, group_id: str):
        """Delete a single group by sending a DELETE request.

        Args:
            group_id: the group id to delete.
        """
        # If successful, this method returns 204 No Content response code.
        # It does not return anything in the response body.
        # Using resp_type="text" to avoid parsing error in the calling method.
        self.ms_client.http_request(method='DELETE', url_suffix=f'groups/{group_id}', resp_type="text")

    def list_members(self, group_id: str, next_link: Optional[str] = None,
                     top: Optional[int] = None, filter_: Optional[str] = None):
        """List all group members by sending a GET request.

        Args:
            group_id: the group id to list its members.
            next_link: the link for the next page of results, if exists. see Microsoft documentation for more details.
                docs.microsoft.com/en-us/graph/api/group-list-members?view=graph-rest-1.0
            top: sets the page size of results.
            filter_: filters results.

        Returns:
            Response from API.
        """
        if next_link:  # pagination
            return self.ms_client.http_request(method='GET', full_url=next_link)
        params = {'$top': top}
        if filter_:
            params['$filter'] = filter_  # type: ignore
        return self.ms_client.http_request(
            method='GET',
            url_suffix=f'groups/{group_id}/members',
            params=params)

    def add_member(self, group_id: str, properties: Dict[str, str]):
        """Add a single member to a group by sending a POST request.

        Args:
            group_id: the group id to add the member to.
            properties: the member properties (an @odata.id reference — TODO confirm against callers).
        """
        # If successful, this method returns 204 No Content response code.
        # It does not return anything in the response body.
        # Using resp_type="text" to avoid parsing error in the calling method.
        self.ms_client.http_request(
            method='POST',
            url_suffix=f'groups/{group_id}/members/$ref',
            json_data=properties,
            resp_type="text")

    def remove_member(self, group_id: str, user_id: str):
        """Remove a single member from a group by sending a DELETE request.

        Args:
            group_id: the group id to remove the member from.
            user_id: the user id to remove.
        """
        # If successful, this method returns 204 No Content response code.
        # It does not return anything in the response body.
        # Using resp_type="text" to avoid parsing error in the calling method.
        self.ms_client.http_request(
            method='DELETE',
            url_suffix=f'groups/{group_id}/members/{user_id}/$ref', resp_type="text")
def test_function_command(client: MsGraphClient, args: Dict):
    """Performs a basic GET request to check if the API is reachable and authentication is successful.

    Args:
        client: Client object with request
        args: Usually demisto.args() (unused; kept for the uniform command signature)
    """
    client.test_function()
def list_groups_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """Lists all groups and return outputs in Demisto's format.

    Args:
        client: Client object with request
        args: Usually demisto.args()

    Returns:
        Outputs.
    """
    groups = client.list_groups(
        args.get('order_by'), args.get('next_link'), args.get('top'), args.get('filter'))
    groups_readable, groups_outputs = parse_outputs(groups['value'])

    # A '@odata.nextLink' key means there are more pages of results.
    next_link_response = groups.get('@odata.nextLink', '')

    if next_link_response:
        entry_context = {f'{INTEGRATION_CONTEXT_NAME}NextLink': {'GroupsNextLink': next_link_response},
                         f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': groups_outputs}
        title = ('Groups (Note that there are more results. Please use the next_link argument to see them. The value '
                 'can be found in the context under MSGraphGroupsNextLink.GroupsNextLink): ')
    else:
        entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': groups_outputs}
        title = 'Groups:'

    human_readable = tableToMarkdown(name=title, t=groups_readable,
                                     headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail'],
                                     removeNull=True)

    return human_readable, entry_context, groups
def get_group_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """Get a group by group id and return outputs in Demisto's format.

    Args:
        client: Client object with request
        args: Usually demisto.args()

    Returns:
        Outputs.
    """
    group_id = str(args.get('group_id'))
    group = client.get_group(group_id)

    group_readable, group_outputs = parse_outputs(group)
    human_readable = tableToMarkdown(name="Groups:", t=group_readable,
                                     headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',
                                              'Security Enabled', 'Visibility'],
                                     removeNull=True)
    # BUGFIX: the context key was f'...(obj.ID === {group_id})', which interpolates
    # the raw, unquoted group id into the DT expression and does not follow the
    # 'val.ID === obj.ID' linking convention every sibling command uses, so the
    # result would not merge with existing context entries for the same group.
    entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_outputs}

    return human_readable, entry_context, group
def create_group_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """Create a group and return outputs in Demisto's format.

    Args:
        client: Client object with request
        args: Usually demisto.args()

    Returns:
        Outputs.
    """
    required_properties = {
        'displayName': str(args.get('display_name')),
        'mailNickname': str(args.get('mail_nickname')),
        'mailEnabled': args.get('mail_enabled') == 'true',
        # BUGFIX: demisto args arrive as strings; convert to a real boolean the
        # same way mail_enabled is handled — the Graph API expects JSON booleans.
        'securityEnabled': args.get('security_enabled') == 'true'
    }

    # create the group
    group = client.create_group(required_properties)

    # display the new group and its properties
    group_readable, group_outputs = parse_outputs(group)
    human_readable = tableToMarkdown(name=f"{required_properties['displayName']} was created successfully:",
                                     t=group_readable,
                                     headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',
                                              'Security Enabled', 'Mail Enabled'],
                                     removeNull=True)
    entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_outputs}

    return human_readable, entry_context, group
def delete_group_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
    """Delete a group by group id and return outputs in Demisto's format.

    Args:
        client: Client object with request
        args: Usually demisto.args()

    Returns:
        Outputs.
    """
    group_id = str(args.get('group_id'))
    client.delete_group(group_id)

    # get the group data from the context
    group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === "{group_id}")')
    if isinstance(group_data, list):
        # BUGFIX: guard the [0] access — an empty list previously raised IndexError.
        group_data = group_data[0] if group_data else None

    # BUGFIX: demisto.dt returns None when the group was never fetched into
    # context; previously this crashed with a TypeError on item assignment.
    # The deletion itself succeeded, so still report success either way.
    entry_context: Dict = {}
    if group_data:
        # add a field that indicates that the group was deleted
        group_data['Deleted'] = True
        entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_data}

    human_readable = f'Group: "{group_id}" was deleted successfully.'
    return human_readable, entry_context, NO_OUTPUTS
def list_members_command(client: MsGraphClient, args: Dict) -> Tuple[str, Dict, Dict]:
"""List a group members by group id. return outputs in Demisto's format.
Args:
client: Client object with request
args: Usually demisto.args()
Returns:
Outputs.
"""
group_id = str(args.get('group_id'))
next_link = args.get('next_link')
top = args.get('top')
filter_ = args.get('filter')
members = client.list_members(group_id, next_link, top, filter_)
if not members['value']:
human_readable = f'The group {group_id} has no members.'
return human_readable, NO_OUTPUTS, NO_OUTPUTS
members_readable, members_outputs = parse_outputs(members['value'])
# get the group data from the context
group_data = demisto.dt(demisto.context(), f'{INTEGRATION_CONTEXT_NAME}(val.ID === "{group_id}")')
if not group_data:
return_error('Could not find group data in the context, please run "!msgraph-groups-get-group" to retrieve group data.')
if isinstance(group_data, list):
group_data = group_data[0]
if '@odata.nextLink' |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.