input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>degoo/API.py
###########################################################################
# A Python front end for the Degoo GraphQL API
import os
import sys
import json
import time
import datetime
import hashlib
import base64
import requests
import humanize
from shutil import copyfile
from appdirs import user_config_dir
from dateutil import parser
class API:
###########################################################################
# URL configuration
#
# The URL the API is found at
URL = "https://production-appsync.degoo.com/graphql"
# The URLS used for logging in
URL_login = "https://rest-api.degoo.com/login"
###########################################################################
# Local files configuration
#
# Class properties, that can be altered on the class
conf_dir = user_config_dir("degoo")
cred_file = os.path.join(conf_dir, "credentials.json")
keys_file = os.path.join(conf_dir, "keys.json")
DP_file = os.path.join(conf_dir, "default_properties.txt")
# Ensure the user configuration directory exists
if not os.path.exists(conf_dir):
os.makedirs(conf_dir)
###########################################################################
# Empirically determined, largest value degoo supports for the Limit
# on the Limit parameter to the GetFileChildren3 operation. It's used
# for paging, and if more items exist there'll be a NextToken returned.
# TODO: Determine the syntax and use of that NextToken so that paged
# results can be fecthed reliably as well. For now we just make calls
# with the max limit and avoid dealing with paging.
LIMIT_MAX = int('1' * 31, 2) - 1
# This appears to be an invariant key that the API expects in the header
# x-api-key:
API_KEY = "<KEY>"
# Keys needed to interact with the API. Provided during login.
KEYS = None
# Known Degoo API item categories
CATS = { 0: "File",
1: "Device",
2: "Folder",
3: "Image",
4: "Video",
5: "Music",
6: "Document",
10: "Recycle Bin",
}
# The types of folder we see on a Degoo account
# These are characteristically different kinds of item to Degoo
# But we will try to provide a more unifor folder style interface
# to them herein.
folder_types = ["Folder", "Device", "Recycle Bin"]
# A guess at the plans available
PLANS = { 0: "Free 100 GB",
1: "Pro 500 GB",
2: "Ultimate 10 TB",
3: "Ultimate Stackcommerce offer 10 TB"
}
# Width of a Category field in text output we produce
# Should be wide enough to handle the longest entry in CATS
# Updated in __init__
CATLEN = 10
# Width of Name field for text output we produce
# Used when listing files, updated to teh width needed to display
# the longest filename. Updated by getFileChildren3 when it returns
# a list of filenames.
NAMELEN = 20
# A list of Degoo Item properties. The three API calls:
# getOverlay3
# getFileChildren3
# getFilesFromPaths
# all want a list of explicit propeties it seems, that they will
# return. We want them all basically, and the superset of all known
# properties that Degoo Items have should be stored in DP_file,
# which is by default:
#
# ~/.config/degoo/default_properties.txt
#
# A sample file should accompany this script. One property per
# line in the file.
#
# TODO: Are there any further properties? It would be nice for
# example if we could ask for the checksum that is canculated
# when the file is upladed and provided to SetUploadFile2.
PROPERTIES = ""
class Error(Exception):
    '''Generic exception to raise and log different fatal errors.'''

    def __init__(self, msg):
        # Mirror the original template: the base Exception is initialised
        # with the exception's own type, while the message is kept on msg.
        super().__init__(type(self))
        self.msg = msg

    def __str__(self):
        return self.msg

    # Python 2 relic kept for interface compatibility; same text as str().
    __unicode__ = __str__
def __init__(self):
    '''
    Reads config and state files to initialise the API.

    Specifically:

    - loads authentication keys from keys_file if available (the
      product of logging in),
    - loads the superset of known Degoo item properties that we can
      ask for when sending queries to the remote API,
    - sets CATLEN to the length of the longest CAT name.
    '''
    # Keys default to empty until a login has produced keys_file.
    self.KEYS = {}
    if os.path.isfile(self.keys_file):
        with open(self.keys_file, "r") as fh:
            self.KEYS = json.load(fh)

    if os.path.isfile(self.DP_file):
        with open(self.DP_file, "r") as fh:
            self.PROPERTIES = fh.read()

    # Width needed to display the longest category name.
    self.CATLEN = max(len(name) for name in self.CATS.values())
def _human_readable_times(self, creation, modification, upload):
    '''
    Given three Degoo timestamps, convert them to human readable
    text strings. These three timestamps are provided for every
    Degoo item.

    :param creation:     The time of creation (ISO-style string,
                         parsed by dateutil).
    :param modification: The time of last modification (epoch
                         milliseconds).
    :param upload:       The time of last upload (epoch milliseconds).
    :returns: A tuple of 3 strings.
    '''
    fmt = "%Y-%m-%d %H:%M:%S"
    unavailable = "Unavailable"

    def from_iso(ts):
        # Creation times arrive as parseable date strings.
        return parser.parse(ts).strftime(fmt) if ts else unavailable

    def from_epoch_ms(ts):
        # Modification/upload times arrive as epoch milliseconds.
        if not ts:
            return unavailable
        return datetime.datetime.utcfromtimestamp(int(ts) / 1000).strftime(fmt)

    return (from_iso(creation), from_epoch_ms(modification), from_epoch_ms(upload))
def check_sum(self, filename, blocksize=65536):
    '''
    Return Degoo's base64 checksum for a local file.

    When uploading files Degoo uses a 2 step process:

    1) Get authorisation from the Degoo API (provides metadata needed
       for step 2).
    2) Upload the file to a nominated URL (in practice this appears
       to be Google Cloud Services).

    The upload wants a checksum for upload-integrity assurance. This
    appears to be a base64 encoded SHA1 hash of the file, seeded with
    a hardcoded byte string (based on JS analysis) and framed with a
    few extra bytes.

    :param filename:  The name of the file (full path so it can be read).
    :param blocksize: Optionally a block size used for reading the file.
    :returns: The checksum as a base64 string.
    '''
    # Hardcoded seed recovered from the (obfuscated) Degoo web client.
    seed = bytes([13, 7, 2, 2, 15, 40, 75, 117, 13, 10, 19, 16, 29, 23, 3, 36])
    sha = hashlib.sha1(seed)
    with open(filename, "rb") as stream:
        for chunk in iter(lambda: stream.read(blocksize), b""):
            sha.update(chunk)

    digest = list(sha.digest())
    # Empirically the digest is framed as: 10, len(digest), digest, 16, 0.
    # Hypothesis: the trailing 0 is an upload "type" code (always 0 for
    # file uploads). This has passed all tests so far but remains a
    # hypothesis and is not well understood.
    framed = bytes([10, len(digest)] + digest + [16, 0])
    return base64.b64encode(framed).decode()
# Lazily-populated cache for the devices property.
__devices__ = None

@property
def devices(self):
    '''
    Returns a dictionary of devices, keyed on Degoo ID, containing
    the name of the device. Top level folders in the Degoo
    filesystem are called devices.

    TODO: Degoo's web interface does not currently allow creation of
    devices even when licensed to. Thus we have no way of working out
    an API call that does so and we're stuck with the devices they
    give us (even when licensed to have as many as you like).
    '''
    if self.__devices__ is not None:
        return self.__devices__

    # Device entries live at the root (ID 0) of the Degoo filesystem.
    device_map = {
        int(item['DeviceID']): item['Name']
        for item in self.getFileChildren3(0)
        if item['CategoryName'] == "Device"
    }
    self.__devices__ = device_map
    return device_map
###########################################################################
# # Login
def login(self, username=None, password=<PASSWORD>):
'''
Logs into a Degoo account.
The login is lasting, i.e. does not seem to expire (not subject to any autologout)
The reply provides a couple of keys that must be provided with each subsequent
API call, as authentication, to prove we're logged in. These are written in JSON
format to keys_file which is by default: ~/.config/degoo/keys.json
TODO: Support logout as well (which will POST a logout request and remove these keys)
:returns: True if successful, False if not
'''
CREDS = {}
if username and password:
CREDS = {"Username": username, "Password": password}
elif os.path.isfile(self.cred_file):
with open(self.cred_file, "r") as file:
CREDS = json.loads(file.read())
if CREDS:
response | |
- np.outer(np.ones((self.X_scaled.shape[0], 1)), self.X_scaled[:,d]))
def getNumParams(self):
    """Two hyperparameters: log length-scale and log period."""
    return 2
def getTrainKernel(self, params):
    """Training Gram matrix: cos(2*pi*dp/p) modulated RBF of the
    squared distances, with ell = exp(params[0]), p = exp(params[1])."""
    self.checkParams(params)
    ell = np.exp(params[0])
    period = np.exp(params[1])

    # Squared distances, scaled by the length-scale (use the cache if
    # it was precomputed in the constructor).
    if self.K_sq is None:
        sq = sq_dist(self.X_scaled.T / ell)
    else:
        sq = self.K_sq / ell ** 2

    # Pairwise signed differences, scaled by the period.
    phase = self.dp / period
    return np.cos(2 * np.pi * phase) * np.exp(-sq / 2.0)
def deriveKernel(self, params, i):
    """Derivative of the train kernel w.r.t. hyperparameter i
    (0 = log length-scale, 1 = log period)."""
    self.checkParamsI(params, i)
    ell = np.exp(params[0])
    period = np.exp(params[1])

    if self.K_sq is None:
        sq = sq_dist(self.X_scaled.T / ell)
    else:
        sq = self.K_sq / ell ** 2

    phase = self.dp / period
    rbf = np.exp(-sq / 2.0)

    if i == 0:
        return sq * rbf * np.cos(2 * np.pi * phase)
    if i == 1:
        return 2 * np.pi * phase * np.sin(2 * np.pi * phase) * rbf
    raise Exception('invalid parameter index:' + str(i))
def getTrainTestKernel(self, params, Xtest):
    """Cross-kernel between the training inputs and ``Xtest`` (same
    formula as getTrainKernel, with train-vs-test distances)."""
    self.checkParams(params)
    ell = np.exp(params[0])
    period = np.exp(params[1])

    Xtest_scaled = Xtest / np.sqrt(Xtest.shape[1])
    sq = sq_dist(self.X_scaled.T / ell, Xtest_scaled.T / ell)

    # Pairwise signed coordinate differences, summed over dimensions.
    diff = np.zeros(sq.shape)
    for dim in range(self.X_scaled.shape[1]):
        diff += (np.outer(self.X_scaled[:, dim],
                          np.ones((1, Xtest_scaled.shape[0])))
                 - np.outer(np.ones((self.X_scaled.shape[0], 1)),
                            Xtest_scaled[:, dim]))
    diff /= period

    return np.cos(2 * np.pi * diff) * np.exp(-sq / 2.0)
def getTestKernelDiag(self, params, Xtest):
    """Diagonal of the test kernel; unit variance for this kernel."""
    self.checkParams(params)
    return np.ones(Xtest.shape[0])
class RQKernel(Kernel):
    """Rational Quadratic kernel: K = (1 + d2/(2*alpha))**(-alpha),
    with hyperparameters log length-scale and log alpha."""

    def __init__(self, X):
        Kernel.__init__(self)
        self.X_scaled = X / np.sqrt(X.shape[1])
        # The original guard ended in 'or True', i.e. squared distances
        # are always precomputed and cached here.
        self.K_sq = sq_dist(self.X_scaled.T)

    def getNumParams(self):
        """log length-scale and log alpha."""
        return 2

    def _scaled_sq_dist(self, ell):
        # Squared distances divided by ell^2; falls back to recomputing
        # if the cache is absent.
        if self.K_sq is None:
            return sq_dist(self.X_scaled.T / ell)
        return self.K_sq / ell ** 2

    def getTrainKernel(self, params):
        self.checkParams(params)
        ell, alpha = np.exp(params[0]), np.exp(params[1])
        base = 1 + 0.5 * self._scaled_sq_dist(ell) / alpha
        return base ** (-alpha)

    def deriveKernel(self, params, i):
        self.checkParamsI(params, i)
        ell, alpha = np.exp(params[0]), np.exp(params[1])
        D2 = self._scaled_sq_dist(ell)
        base = 1 + 0.5 * D2 / alpha
        if i == 0:
            return base ** (-alpha - 1) * D2
        if i == 1:
            return base ** (-alpha) * (0.5 * D2 / base - alpha * np.log(base))
        raise Exception('invalid parameter index')

    def getTrainTestKernel(self, params, Xtest):
        self.checkParams(params)
        ell, alpha = np.exp(params[0]), np.exp(params[1])
        Xtest_scaled = Xtest / np.sqrt(Xtest.shape[1])
        D2 = sq_dist(self.X_scaled.T / ell, Xtest_scaled.T / ell)
        return (1 + 0.5 * D2 / alpha) ** (-alpha)

    def getTestKernelDiag(self, params, Xtest):
        self.checkParams(params)
        # Unit variance on the diagonal.
        return np.ones(Xtest.shape[0])
class RBFRegionsKernel(Kernel):
    # RBF (squared-exponential) kernel with one length-scale per feature
    # "region".  ``regions`` appears to be a list of column selectors
    # (boolean masks, given the np.sum(r) scaling) -- TODO confirm.
    def __init__(self, X, regions):
        Kernel.__init__(self)
        self.X_scaled = X.copy()
        # Normalise each region by sqrt(#columns in it) so distances are
        # comparable across regions of different sizes.
        for r in regions: self.X_scaled[:, r] /= np.sqrt(np.sum(r))
        self.regions = regions
        # Per-region squared distances are parameter-independent, so
        # compute them once here; the params only rescale them later.
        self.K_sq = []
        for r in regions: self.K_sq.append(sq_dist(self.X_scaled[:,r].T))

    def getNumParams(self): return len(self.regions)  # one log length-scale per region

    def getTrainKernel(self, params):
        self.checkParams(params)
        # Full-result cache: valid only if *all* params are unchanged.
        if (self.sameParams(params)): return self.cache['getTrainKernel']
        if ('K_sq_scaled' not in list(self.cache.keys())): self.cache['K_sq_scaled'] = [None for i in range(self.getNumParams())]
        ell = np.exp(params)
        K = 0
        for i in range(self.getNumParams()):
            # Per-region cache: reuse scaled distances for regions whose
            # length-scale did not change since the last call.
            if (self.sameParams(params, i)): K += self.cache['K_sq_scaled'][i]
            else:
                self.cache['K_sq_scaled'][i] = self.K_sq[i] / ell[i]**2
                K += self.cache['K_sq_scaled'][i]
        K_exp = np.exp(-K / 2.0)
        self.cache['getTrainKernel'] = K_exp
        self.saveParams(params)
        return K_exp

    def deriveKernel(self, params, i):
        self.checkParamsI(params, i)
        # Calling getTrainKernel first guarantees cache['K_sq_scaled'] is
        # populated for these params before it is read below.
        K_exp = self.getTrainKernel(params)
        #ell = np.exp(params)
        #return K_exp*(self.K_sq[i] / ell[i]**2)
        return K_exp * self.cache['K_sq_scaled'][i]

    def getTrainTestKernel(self, params, Xtest):
        self.checkParams(params)
        ell = np.exp(params)
        K = 0
        for r_i, r in enumerate(self.regions):
            # Test inputs get the same per-region normalisation as X.
            Xtest_r = Xtest[:,r]/np.sqrt(np.sum(r))
            K += sq_dist(self.X_scaled[:,r].T, Xtest_r.T) / ell[r_i]**2
        return np.exp(-K / 2.0)

    def getTestKernelDiag(self, params, Xtest):
        self.checkParams(params)
        # RBF kernels have unit variance on the diagonal.
        return np.ones(Xtest.shape[0])
class AdditiveKernel(Kernel):
    """Additive kernel: a weighted sum of elementary symmetric polynomials
    (``elsympol``) of the component kernels' Gram matrices.

    Parameter vector layout: the first ``len(kernels)`` entries are
    per-order log scale factors (sf2_r = exp(2*params[r])); the remainder
    are the concatenated hyperparameters of the component kernels.
    """

    def __init__(self, kernels, n):
        Kernel.__init__(self)
        self.kernels = kernels
        self.n = n  # number of training points (rows of each Gram matrix)
        # Previously-seen parameter vectors, used to validate cache entries.
        self.prevKdimParams = None
        self.prevEEParams = None
        self.prevHyp0Params = None

    def getNumParams(self):
        """One scale parameter per kernel plus each kernel's own params."""
        return len(self.kernels) + np.sum([k.getNumParams() for k in self.kernels])

    def getTrainKernel(self, params):
        self.checkParams(params)
        if (self.sameParams(params)): return self.cache['getTrainKernel']
        params_kernels = params[len(self.kernels):]
        EE = self.getEE(params_kernels)
        K = 0
        for i in range(len(self.kernels)):
            K += self.getScaledE(params, i, EE)
        self.cache['getTrainKernel'] = K
        self.saveParams(params)
        return K

    def deriveKernel(self, params, i):
        """Derivative of the train kernel w.r.t. params[i]."""
        self.checkParamsI(params, i)
        params_kernels = params[len(self.kernels):]
        # sf2 (scale) derivatives
        if (i < len(self.kernels)):
            EE = self.getEE(params_kernels)
            return 2*np.exp(2*params[i]) * EE[:,:,i+1]
        # component-kernel hyperparameter derivatives
        else:
            params_ind = len(self.kernels)
            for k_i, k in enumerate(self.kernels):
                numHyp = k.getNumParams()
                if (i not in range(params_ind, params_ind+numHyp)):
                    params_ind += numHyp
                    continue
                # now we found our kernel
                dKj = k.deriveKernel(params[params_ind:params_ind+numHyp], i-params_ind)
                Kd = self.Kdim(params_kernels)
                # Chain rule: factor kernel k_i out of each symmetric
                # polynomial by recomputing them over the other kernels.
                # NOTE: dtype=int (np.int was removed in NumPy 1.24).
                range1 = np.array(range(0, k_i), dtype=int)
                range2 = np.array(range(k_i+1, len(self.kernels)), dtype=int)
                Kd_nocov = Kd[:, :, np.concatenate((range1, range2))]
                E = elsympol(Kd_nocov, len(self.kernels)-1)  # R-1th elementary sym polyn
                K = 0
                for ii in range(len(self.kernels)):
                    K += E[:,:,ii]*np.exp(2*params[ii])
                return dKj * K
            raise Exception('Invalid parameter')

    def getTrainTestKernel(self, params, Xtest):
        """Cross-kernel between training data and ``Xtest`` (a sequence
        with one test input matrix per component kernel)."""
        self.checkParams(params)
        params_kernels = params[len(self.kernels):]
        # compute Kd and EE
        Kd = np.zeros((self.n, Xtest[0].shape[0], len(self.kernels)))
        params_ind = 0
        kernel_paramsArr = params[len(self.kernels):]
        for k_i, k in enumerate(self.kernels):
            numHyp = k.getNumParams()
            kernelParams_range = np.array(range(params_ind, params_ind+numHyp), dtype=int)
            kernel_params = kernel_paramsArr[kernelParams_range]
            Kd[:,:,k_i] = k.getTrainTestKernel(kernel_params, Xtest[k_i])
            params_ind += numHyp
        EE = elsympol(Kd, len(self.kernels))
        # combine the orders with their scale factors
        K = 0
        for i in range(len(self.kernels)): K += np.exp(2*params[i]) * EE[:,:,i+1]
        return K

    def getTestKernelDiag(self, params, Xtest):
        """Diagonal of the test kernel, combined per order like above."""
        self.checkParams(params)
        params_kernels = params[len(self.kernels):]
        # compute Kd and EE
        Kd = np.zeros((Xtest[0].shape[0], 1, len(self.kernels)))
        params_ind = 0
        kernel_paramsArr = params[len(self.kernels):]
        for k_i, k in enumerate(self.kernels):
            numHyp = k.getNumParams()
            kernelParams_range = np.array(range(params_ind, params_ind+numHyp), dtype=int)
            kernel_params = kernel_paramsArr[kernelParams_range]
            Kd[:,0,k_i] = k.getTestKernelDiag(kernel_params, Xtest[k_i])
            params_ind += numHyp
        EE = elsympol(Kd, len(self.kernels))
        K = 0
        for i in range(len(self.kernels)): K += np.exp(2*params[i]) * EE[:,:,i+1]
        return K

    def getEE(self, EEParams):
        """Elementary symmetric polynomials of the per-kernel Gram
        matrices, cached against the kernel-hyperparameter vector."""
        if (self.prevEEParams is not None):
            # BUGFIX: the original tested np.max(np.abs(diff < eps)),
            # which is truthy whenever ANY element was close; compare the
            # maximum absolute difference against epsilon (as Kdim does).
            if (EEParams.shape[0] == 0 or np.max(np.abs(EEParams - self.prevEEParams)) < self.epsilon):
                return self.cache['EE']
        Kd = self.Kdim(EEParams)
        EE = elsympol(Kd, len(self.kernels))
        self.prevEEParams = EEParams.copy()
        self.cache['EE'] = EE
        return EE

    def getScaledE(self, params, i, E):
        """Return sf2_i * E_i with per-index caching on the scale params.

        NOTE(review): the cache is invalidated only by changes in the
        scale parameters, not by changes in ``E`` itself -- callers must
        pass an ``E`` consistent with ``params`` (getTrainKernel does).
        """
        if (self.prevHyp0Params is not None and np.abs(self.prevHyp0Params[i]-params[i]) < self.epsilon):
            return self.cache['E_scaled'][i]
        if ('E_scaled' not in list(self.cache.keys())):
            self.cache['E_scaled'] = [None for j in range(len(self.kernels))]
        for j in range(len(self.kernels)):
            # Recompute only the entries whose scale factor changed.
            if (self.prevHyp0Params is not None and np.abs(self.prevHyp0Params[j]-params[j]) < self.epsilon):
                continue
            E_scaled = E[:,:,j+1]*np.exp(2*params[j])
            self.cache['E_scaled'][j] = E_scaled
        self.prevHyp0Params = params.copy()
        return self.cache['E_scaled'][i]

    def Kdim(self, kdimParams):
        """Stack of per-kernel training Gram matrices (n x n x R), with
        per-kernel cache reuse for unchanged hyperparameters."""
        if (self.prevKdimParams is not None and np.max(np.abs(kdimParams-self.prevKdimParams)) < self.epsilon):
            return self.cache['Kdim']
        K = np.zeros((self.n, self.n, len(self.kernels)))
        params_ind = 0
        for k_i, k in enumerate(self.kernels):
            numHyp = k.getNumParams()
            kernelParams_range = np.array(range(params_ind, params_ind+numHyp), dtype=int)
            kernel_params = kdimParams[kernelParams_range]
            if ((numHyp == 0 and 'Kdim' in self.cache) or (numHyp > 0 and self.prevKdimParams is not None and np.max(np.abs(kernel_params-self.prevKdimParams[kernelParams_range])) < self.epsilon)):
                # This kernel's slice is unchanged -- reuse the cache.
                K[:,:,k_i] = self.cache['Kdim'][:,:,k_i]
            else:
                K[:,:,k_i] = k.getTrainKernel(kernel_params)
            params_ind += numHyp
        self.prevKdimParams = kdimParams.copy()
        self.cache['Kdim'] = K
        return K
class MaternKernel(Kernel):
    # Matern covariance, shared implementation for the d in {1,3,5} family.
    # Subclasses (Matern1/3/5Kernel) set self.d (degree), self.f (polynomial
    # factor) and self.df (derivative helper) *before* calling this
    # constructor, since self.d is used below.
    def __init__(self, X):
        Kernel.__init__(self)
        self.X_scaled = X/np.sqrt(X.shape[1])
        # The 'or True' makes the guard unconditional: squared distances
        # are always precomputed and cached.
        if (X.shape[1] >= X.shape[0] or True): self.K_sq = sq_dist(self.X_scaled.T * np.sqrt(self.d))
        else: self.K_sq = None
        # Matern form: m(t) = f(t)*exp(-t); dm looks like its derivative
        # w.r.t. the log length-scale (hence the extra t) -- TODO confirm.
        self.m = lambda t,f: f(t)*np.exp(-t)
        self.dm = lambda t,df: df(t)*np.exp(-t)*t

    def getNumParams(self): return 1  # a single log length-scale

    def getTrainKernel(self, params):
        self.checkParams(params)
        if (self.sameParams(params)): return self.cache['getTrainKernel']
        ell = np.exp(params[0])
        if (self.K_sq is None): K = sq_dist(self.X_scaled.T * np.sqrt(self.d)/ell) #precompute squared distances
        else: K = self.K_sq / ell**2
        # Cached for deriveKernel, which needs the same scaled distances.
        self.cache['K_sq_scaled'] = K
        K_exp = self.m(np.sqrt(K), self.f)
        self.cache['getTrainKernel'] = K_exp
        self.saveParams(params)
        return K_exp

    def deriveKernel(self, params, i):
        self.checkParamsI(params, i)
        self.getTrainKernel(params) #make sure that cache is updated
        return self.dm(np.sqrt(self.cache['K_sq_scaled']), self.df)

    def getTrainTestKernel(self, params, Xtest):
        self.checkParams(params)
        ell = np.exp(params[0])
        K = sq_dist(self.X_scaled.T*np.sqrt(self.d)/ell, ((Xtest/np.sqrt(Xtest.shape[1])).T)*np.sqrt(self.d)/ell) #precompute squared distances
        return self.m(np.sqrt(K), self.f)

    def getTestKernelDiag(self, params, Xtest):
        self.checkParams(params)
        # m(0) = f(0)*exp(0): unit diagonal for the standard Matern forms.
        return self.m(np.zeros(Xtest.shape[0]), self.f)
class Matern1Kernel(MaternKernel):
    """Matern kernel with degree d=1: polynomial factor f(t) = 1."""

    def __init__(self, X):
        # Must be set before the base constructor runs (it uses self.d).
        self.d = 1
        self.f = lambda t: 1   # polynomial part of m(t) = f(t)*exp(-t)
        self.df = lambda t: 1  # helper used by the derivative dm
        super().__init__(X)
class Matern3Kernel(MaternKernel):
    """Matern kernel with degree d=3: polynomial factor f(t) = 1 + t."""

    def __init__(self, X):
        # Must be set before the base constructor runs (it uses self.d).
        self.d = 3
        self.f = lambda t: 1+t   # polynomial part of m(t) = f(t)*exp(-t)
        self.df = lambda t: t    # helper used by the derivative dm
        super().__init__(X)
class Matern5Kernel(MaternKernel):
    """Matern kernel with degree d=5: f(t) = 1 + t*(1 + t/3)."""

    def __init__(self, X):
        # Must be set before the base constructor runs (it uses self.d).
        self.d = 5
        self.f = lambda t: 1+t*(1+t/3.0)  # polynomial part of m(t)
        self.df = lambda t: t*(1+t)/3.0   # helper used by the derivative dm
        super().__init__(X)
class PPKernel(Kernel):
def __init__(self, X):
    # Piecewise-polynomial (compact support) kernel constructor.
    # NOTE(review): relies on self.v (and later self.f/self.df) being
    # provided by a subclass before/around this constructor running --
    # confirm against the subclasses (not visible here).
    Kernel.__init__(self)
    self.X_scaled = X/np.sqrt(X.shape[1])
    # The 'or True' makes the guard unconditional: squared distances are
    # always precomputed and cached.
    if (X.shape[1] >= X.shape[0] or True): self.K_sq = sq_dist(self.X_scaled.T)
    else: self.K_sq = None
    # j = floor(D/2) + v + 1, the usual piecewise-polynomial exponent.
    self.j = np.floor(X.shape[1]/2.0)+self.v+1
    # NOTE(review): pp ignores its f argument and uses self.f instead,
    # while dpp uses the f argument but self.df -- looks inconsistent,
    # though behaviour may be identical if f is always self.f.
    self.pp = lambda r,j,v,f: np.maximum(1-r, 0)**(j+v) * self.f(r,j)
    self.dpp = lambda r,j,v,f: np.maximum(1-r, 0)**(j+v-1) * r * ((j+v)*f(r,j) - np.maximum(1-r,0)*self.df(r,j))
def getNumParams(self):
    """A single hyperparameter: the log length-scale."""
    return 1
def getTrainKernel(self, params):
self.checkParams(params)
if (self.sameParams(params)): return self.cache['getTrainKernel']
ell = np.exp(params[0])
if (self.K_sq is None): K = sq_dist(self.X_scaled.T / ell) #precompute squared distances
else: K = self.K_sq / ell**2
self.cache['K_sq_scaled'] | |
matrix:
[3 4 5]
Notice how the basis for the same subspace is different if we
merely use the ``subspace`` command.
::
sage: W2 = W.subspace([[3,4,5]]); W2
Vector space of degree 3 and dimension 1 over Finite Field of size 7
Basis matrix:
[1 6 4]
Nonetheless the two subspaces are equal (as mathematical objects)::
sage: W1 == W2
True
"""
return self.submodule_with_basis(gens, check=check, already_echelonized=already_echelonized)
def complement(self):
    r"""
    Return the complement of ``self`` in the
    :meth:`~sage.modules.free_module.FreeModule_ambient_field.ambient_vector_space`.

    EXAMPLES::

        sage: V = QQ^3
        sage: V.complement()
        Vector space of degree 3 and dimension 0 over Rational Field
        Basis matrix:
        []
        sage: V == V.complement().complement()
        True
        sage: W = V.span([[1, 0, 1]])
        sage: X = W.complement(); X
        Vector space of degree 3 and dimension 2 over Rational Field
        Basis matrix:
        [ 1  0 -1]
        [ 0  1  0]
        sage: X.complement() == W
        True
        sage: X + W == V
        True

    Even though we construct a subspace of a subspace, the
    orthogonal complement is still done in the ambient vector
    space `\QQ^3`::

        sage: V = QQ^3
        sage: W = V.subspace_with_basis([[1,0,1],[-1,1,0]])
        sage: X = W.subspace_with_basis([[1,0,1]])
        sage: X.complement()
        Vector space of degree 3 and dimension 2 over Rational Field
        Basis matrix:
        [ 1  0 -1]
        [ 0  1  0]

    All these complements are only done with respect to the inner
    product in the usual basis.  Over finite fields, this means
    we can get complements which are only isomorphic to a vector
    space decomposition complement. ::

        sage: F2 = GF(2)
        sage: V = F2^6
        sage: W = V.span([[1,1,0,0,0,0]])
        sage: W
        Vector space of degree 6 and dimension 1 over Finite Field of size 2
        Basis matrix:
        [1 1 0 0 0 0]
        sage: W.complement()
        Vector space of degree 6 and dimension 5 over Finite Field of size 2
        Basis matrix:
        [1 1 0 0 0 0]
        [0 0 1 0 0 0]
        [0 0 0 1 0 0]
        [0 0 0 0 1 0]
        [0 0 0 0 0 1]
        sage: W.intersection(W.complement())
        Vector space of degree 6 and dimension 1 over Finite Field of size 2
        Basis matrix:
        [1 1 0 0 0 0]
    """
    # Check simple cases
    if self.dimension() == 0:
        # The zero subspace: its complement is the whole ambient space.
        return self.ambient_vector_space()
    if self.dimension() == self.ambient_vector_space().dimension():
        # The whole space: its complement is the zero submodule.
        return self.submodule([])
    # The rows of the basis matrix span self, so its right kernel is the
    # orthogonal complement with respect to the standard inner product.
    return self.basis_matrix().right_kernel()
def vector_space(self, base_field=None):
    """
    Return the vector space associated to ``self``.

    Since ``self`` is already a vector space this simply returns
    ``self``, unless a different ``base_field`` is requested, in
    which case the ring is changed.

    EXAMPLES::

        sage: V = span([[1,2,3]],QQ); V
        Vector space of degree 3 and dimension 1 over Rational Field
        Basis matrix:
        [1 2 3]
        sage: V.vector_space()
        Vector space of degree 3 and dimension 1 over Rational Field
        Basis matrix:
        [1 2 3]
    """
    if base_field is not None:
        return self.change_ring(base_field)
    return self
def zero_submodule(self):
    """
    Return the zero submodule of ``self``.

    For a vector space this is simply an alias for
    :meth:`zero_subspace`.

    EXAMPLES::

        sage: (QQ^4).zero_submodule()
        Vector space of degree 4 and dimension 0 over Rational Field
        Basis matrix:
        []
    """
    return self.zero_subspace()
def zero_subspace(self):
    """
    Return the zero subspace of ``self``.

    EXAMPLES::

        sage: (QQ^4).zero_subspace()
        Vector space of degree 4 and dimension 0 over Rational Field
        Basis matrix:
        []
    """
    # The empty generating set spans the zero space; it is trivially in
    # echelon form already, so skip both checking and re-echelonizing.
    return self.submodule([], check=False, already_echelonized=True)
def linear_dependence(self, vectors, zeros='left', check=True):
r"""
Returns a list of vectors giving relations of linear dependence for the input list of vectors.
Can be used to check linear independence of a set of vectors.
INPUT:
- ``vectors`` - A list of vectors, all from the same vector space.
- ``zeros`` - default: ``'left'`` - ``'left'`` or ``'right'`` as a general
preference for where zeros are located in the returned coefficients
- ``check`` - default: ``True`` - if ``True`` each item in the list ``vectors``
is checked for membership in ``self``. Set to ``False`` if you
can be certain the vectors come from the vector space.
OUTPUT:
Returns a list of vectors. The scalar entries of each vector provide
the coefficients for a linear combination of the input vectors that
will equal the zero vector in ``self``. Furthermore, the returned list
is linearly independent in the vector space over the same base field
with degree equal to the length of the list ``vectors``.
The linear independence of ``vectors`` is equivalent to the returned
list being empty, so this provides a test - see the examples below.
The returned vectors are always independent, and with ``zeros`` set to
``'left'`` they have 1's in their first non-zero entries and a qualitative
disposition to having zeros in the low-index entries. With ``zeros`` set
to ``'right'`` the situation is reversed with a qualitative disposition
for zeros in the high-index entries.
If the vectors in ``vectors`` are made the rows of a matrix `V` and
the returned vectors are made the rows of a matrix `R`, then the
matrix product `RV` is a zero matrix of the proper size. And
`R` is a matrix of full rank. This routine uses kernels of
matrices to compute these relations of linear dependence,
but handles all the conversions between sets of vectors
and matrices. If speed is important, consider working with
the appropriate matrices and kernels instead.
EXAMPLES:
We begin with two linearly independent vectors, and add three
non-trivial linear combinations to the set. We illustrate
both types of output and check a selected relation of linear
dependence. ::
sage: v1 = vector(QQ, [2, 1, -4, 3])
sage: v2 = vector(QQ, [1, 5, 2, -2])
sage: V = QQ^4
sage: V.linear_dependence([v1,v2])
[
<BLANKLINE>
]
sage: v3 = v1 + v2
sage: v4 = 3*v1 - 4*v2
sage: v5 = -v1 + 2*v2
sage: L = [v1, v2, v3, v4, v5]
sage: relations = V.linear_dependence(L, zeros='left')
sage: relations
[
(1, 0, 0, -1, -2),
(0, 1, 0, -1/2, -3/2),
(0, 0, 1, -3/2, -7/2)
]
sage: v2 + (-1/2)*v4 + (-3/2)*v5
(0, 0, 0, 0)
sage: relations = V.linear_dependence(L, zeros='right')
sage: relations
[
(-1, -1, 1, 0, 0),
(-3, 4, 0, 1, 0),
(1, -2, 0, 0, 1)
]
sage: z = sum([relations[2][i]*L[i] for i in range(len(L))])
sage: z == zero_vector(QQ, 4)
True
A linearly independent set returns an empty list,
a result that can be tested. ::
sage: v1 = vector(QQ, [0,1,-3])
sage: v2 = vector(QQ, [4,1,0])
sage: V = QQ^3
sage: relations = V.linear_dependence([v1, v2]); relations
[
<BLANKLINE>
]
sage: relations == []
True
Exact results result from exact fields. We start with three
linearly independent vectors and add in two linear combinations
to make a linearly dependent set of five vectors. ::
sage: F = FiniteField(17)
sage: v1 = vector(F, [1, 2, 3, 4, 5])
sage: v2 = vector(F, [2, 4, 8, 16, 15])
sage: v3 = vector(F, [1, 0, 0, 0, 1])
sage: (F^5).linear_dependence([v1, v2, v3]) == []
True
sage: L = [v1, v2, v3, 2*v1+v2, 3*v2+6*v3]
sage: (F^5).linear_dependence(L)
[
(1, 0, 16, 8, 3),
(0, 1, 2, 0, 11)
]
sage: v1 + 16*v3 + 8*(2*v1+v2) + 3*(3*v2+6*v3)
(0, 0, 0, 0, 0)
sage: v2 + 2*v3 + 11*(3*v2+6*v3)
(0, 0, 0, 0, 0)
sage: (F^5).linear_dependence(L, zeros='right')
[
(15, 16, 0, 1, 0),
(0, 14, 11, 0, 1)
]
TESTS:
With ``check=True`` (the default) a mismatch between vectors
and the vector space is caught. ::
sage: v1 = vector(RR, [1,2,3])
sage: v2 = vector(RR, [1,2,3,4])
sage: (RR^3).linear_dependence([v1,v2], check=True)
Traceback (most recent call last):
...
ValueError: vector (1.00000000000000, 2.00000000000000, 3.00000000000000, 4.00000000000000) is not an element of Vector space of dimension 3 over Real Field with 53 bits of precision
The ``zeros`` keyword is checked. ::
sage: (QQ^3).linear_dependence([vector(QQ,[1,2,3])], zeros='bogus')
Traceback (most recent call last):
...
ValueError: 'zeros' keyword must be 'left' or 'right', not 'bogus'
An empty input set is linearly independent, vacuously. ::
sage: (QQ^3).linear_dependence([]) == []
True
"""
if check:
for v in vectors:
if not v in self:
raise ValueError('vector %s is not an element of %s' % (v, self))
if zeros == 'left':
basis = 'echelon'
elif zeros == 'right':
basis = | |
schema
class AlterObjectFragment(AlterObjectOrFragment[so.Object_T]):
    """An ALTER subcommand operating on a *fragment* of an object
    (e.g. a rename); it always runs nested inside a parent AlterObject
    and borrows that parent's target object (``scls``)."""

    def apply(
        self,
        schema: s_schema.Schema,
        context: CommandContext,
    ) -> s_schema.Schema:
        # AlterObjectFragment must be executed in the context
        # of a parent AlterObject command.
        scls = self.get_parent_op(context).scls
        self.scls = cast(so.Object_T, scls)
        # Standard three-phase alter: begin -> innards -> finalize.
        schema = self._alter_begin(schema, context)
        schema = self._alter_innards(schema, context)
        schema = self._alter_finalize(schema, context)
        return schema

    @classmethod
    def get_parent_op(
        cls,
        context: CommandContext,
    ) -> ObjectCommand[so.Object]:
        """Return the enclosing ObjectCommand from the context stack."""
        op = context.current().op
        assert isinstance(op, ObjectCommand)
        return op
class RenameObject(AlterObjectFragment[so.Object_T]):
_delta_action = 'rename'
astnode = qlast.Rename
new_name = struct.Field(sn.Name)
def is_data_safe(self) -> bool:
    """Renaming never destroys user data, so it is always data-safe."""
    return True
def get_verb(self) -> str:
    """Verb used when describing this command to humans."""
    return 'rename'
def get_friendly_description(
    self,
    *,
    parent_op: Optional[Command] = None,
    schema: Optional[s_schema.Schema] = None,
    object: Any = None,
    object_desc: Optional[str] = None,
) -> str:
    """Produce a human-readable description such as
    ``rename link foo to 'bar'``."""
    desc = self.get_friendly_object_name_for_description(
        parent_op=parent_op,
        schema=schema,
        object=object,
        object_desc=object_desc,
    )
    display_name = self.get_schema_metaclass().get_displayname_static(
        self.new_name)
    return f"rename {desc} to '{display_name}'"
def _fix_referencing_expr(
    self,
    schema: s_schema.Schema,
    cmd: ObjectCommand[so.Object],
    fn: str,
    context: CommandContext,
    expr: s_expr.Expression,
) -> s_expr.Expression:
    """Rewrite references to this (renamed) object inside the expression
    stored in field *fn* of command *cmd*, returning the fixed expression."""
    from edb.ir import ast as irast
    # Recompile the expression with reference tracking on so that we
    # can clean up the ast.
    field = cmd.get_schema_metaclass().get_field(fn)
    compiled = cmd.compile_expr_field(
        schema, context, field, expr,
        track_schema_ref_exprs=True)
    assert isinstance(compiled.irast, irast.Statement)
    assert compiled.irast.schema_ref_exprs is not None
    # Now that the compilation is done, try to do the fixup.
    new_shortname = sn.shortname_from_fullname(self.new_name)
    old_shortname = sn.shortname_from_fullname(self.classname).name
    for ref in compiled.irast.schema_ref_exprs.get(self.scls, []):
        if isinstance(ref, qlast.Ptr):
            # Pointer nodes wrap the ObjectRef that actually needs patching.
            ref = ref.ptr
        assert isinstance(ref, (qlast.ObjectRef, qlast.FunctionCall)), (
            f"only support object refs and func calls but got {ref}")
        if isinstance(ref, qlast.FunctionCall):
            # Qualified function names become (module, name) tuples.
            ref.func = ((new_shortname.module, new_shortname.name)
                        if isinstance(new_shortname, sn.QualName)
                        else new_shortname.name)
        elif (
            isinstance(ref, qlast.ObjectRef)
            and ref.name == old_shortname
        ):
            ref.name = new_shortname.name
            if (
                isinstance(new_shortname, sn.QualName)
                and new_shortname.module != "__"
            ):
                ref.module = new_shortname.module
    # say as_fragment=True as a hack to avoid renormalizing it
    out = s_expr.Expression.from_ast(
        compiled.qlast, schema, modaliases={}, as_fragment=True)
    return out
def _alter_begin(
    self,
    schema: s_schema.Schema,
    context: CommandContext,
) -> s_schema.Schema:
    """Record the rename in the context, fix up any expressions that
    reference the old name, and set the object's 'name' attribute."""
    scls = self.scls
    # Make the old -> new mapping visible to sibling/child commands.
    context.renames[self.classname] = self.new_name
    context.renamed_objs.add(scls)
    vn = scls.get_verbosename(schema)
    # Recompile (and patch) expressions elsewhere that refer to us.
    schema = self._propagate_if_expr_refs(
        schema,
        context,
        action=f'rename {vn}',
        fixer=self._fix_referencing_expr,
    )
    if not context.canonical:
        # Only materialize the attribute change when not replaying an
        # already-canonicalized delta.
        self.set_attribute_value(
            'name',
            value=self.new_name,
            orig_value=self.classname,
        )
    return super()._alter_begin(schema, context)
def _alter_innards(
    self,
    schema: s_schema.Schema,
    context: CommandContext,
) -> s_schema.Schema:
    """Cascade the rename to referenced objects (canonicalization)
    before running the regular alter innards; skipped when replaying
    canonical deltas."""
    if not context.canonical:
        self._canonicalize(schema, context, self.scls)
    return super()._alter_innards(schema, context)
def init_rename_branch(
    self,
    ref: so.Object,
    new_ref_name: sn.Name,
    schema: s_schema.Schema,
    context: CommandContext,
) -> Command:
    """Build an ALTER command tree that renames *ref* to *new_ref_name*
    (a RenameObject nested inside an AlterObject branch)."""
    ref_root, ref_alter, _ = ref.init_delta_branch(
        schema, context, AlterObject)
    ref_alter.add(
        ref.init_delta_command(
            schema,
            RenameObject,
            new_name=new_ref_name,
        ),
    )
    return ref_root
def _canonicalize(
    self,
    schema: s_schema.Schema,
    context: CommandContext,
    scls: so.Object_T,
) -> None:
    """Schedule renames of all owned references whose specialized names
    embed the old name of this object.
    """
    mcls = self.get_schema_metaclass()

    for refdict in mcls.get_refdicts():
        all_refs = set(
            scls.get_field_value(schema, refdict.attr).objects(schema)
        )

        ref: so.Object
        for ref in all_refs:
            ref_name = ref.get_name(schema)
            quals = list(sn.quals_from_fullname(ref_name))
            assert isinstance(self.new_name, sn.QualName)
            # The first qualifier of a specialized ref name is the
            # owner's name; substitute the new one.
            quals[0] = str(self.new_name)
            shortname = sn.shortname_from_fullname(ref_name)
            new_ref_name = sn.QualName(
                name=sn.get_specialized_name(shortname, *quals),
                module=self.new_name.module,
            )
            self.add(self.init_rename_branch(
                ref,
                new_ref_name,
                schema=schema,
                context=context,
            ))
def _get_ast(
    self,
    schema: s_schema.Schema,
    context: CommandContext,
    *,
    parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
    """Produce a RENAME AST node, or None if the visible name is unchanged."""
    node_cls = self._get_ast_node(schema, context)
    new_ref = self._deparse_name(schema, context, self.new_name)
    new_ref.itemclass = None
    old_ref = self._deparse_name(schema, context, self.classname)
    # Recurse so that any renames of children are recorded as well.
    self._log_all_renames(context)
    if (old_ref.module, old_ref.name) == (new_ref.module, new_ref.name):
        # Effectively a no-op rename: emit nothing.
        return None
    return node_cls(new_name=new_ref)  # type: ignore
@classmethod
def _cmd_from_ast(
    cls,
    schema: s_schema.Schema,
    astnode: qlast.DDLOperation,
    context: CommandContext,
) -> RenameObject[so.Object_T]:
    """Dispatch to the RENAME command class of the enclosing object command."""
    enclosing = context.current().op
    assert isinstance(enclosing, ObjectCommand)
    metaclass = enclosing.get_schema_metaclass()
    cmdclass = get_object_command_class_or_die(RenameObject, metaclass)
    return cmdclass._rename_cmd_from_ast(schema, astnode, context)
@classmethod
def _rename_cmd_from_ast(
    cls,
    schema: s_schema.Schema,
    astnode: qlast.DDLOperation,
    context: CommandContext,
) -> RenameObject[so.Object_T]:
    """Construct the concrete RenameObject command for a RENAME AST node."""
    assert isinstance(astnode, qlast.Rename)

    parent_ctx = context.current()
    parent_op = parent_ctx.op
    assert isinstance(parent_op, ObjectCommand)
    parent_class = parent_op.get_schema_metaclass()
    # Use the rename command registered for the parent's metaclass.
    rename_class = get_object_command_class_or_die(
        RenameObject, parent_class)

    new_name = cls._classname_from_ast(schema, astnode, context)

    # Populate the early_renames map of the context as we go, since
    # in-flight renames will affect the generated names of later
    # operations.
    context.early_renames[parent_op.classname] = new_name

    return rename_class(
        classname=parent_op.classname,
        new_name=new_name,
    )
class AlterObject(AlterObjectOrFragment[so.Object_T], Generic[so.Object_T]):
    """ALTER command for schema objects."""

    _delta_action = 'alter'

    #: If True, apply the command only if the object exists.
    if_exists = struct.Field(bool, default=False)

    #: If True, only apply changes to properties, not "real" schema changes
    metadata_only = struct.Field(bool, default=False)

    def get_verb(self) -> str:
        return 'alter'

    @classmethod
    def _cmd_tree_from_ast(
        cls,
        schema: s_schema.Schema,
        astnode: qlast.DDLOperation,
        context: CommandContext,
    ) -> Command:
        cmd = super()._cmd_tree_from_ast(schema, astnode, context)

        assert isinstance(cmd, AlterObject)

        # SET ABSTRACT in DDL is carried as an attribute on the AST node.
        if getattr(astnode, 'abstract', False):
            cmd.set_attribute_value('abstract', True)

        return cmd

    def _get_ast(
        self,
        schema: s_schema.Schema,
        context: CommandContext,
        *,
        parent_node: Optional[qlast.DDLOperation] = None,
    ) -> Optional[qlast.DDLOperation]:
        node = super()._get_ast(schema, context, parent_node=parent_node)
        if (node is not None and hasattr(node, 'commands') and
                not node.commands):
            # Alter node without subcommands.  Occurs when all
            # subcommands have been filtered out of DDL stream,
            # so filter it out as well.
            node = None
        return node

    def canonicalize_alter_from_external_ref(
        self,
        schema: s_schema.Schema,
        context: CommandContext,
    ) -> None:
        """Canonicalize an ALTER command triggered by a modification of
        an object referred to by an expression in this object."""
        pass

    def apply(
        self,
        schema: s_schema.Schema,
        context: CommandContext,
    ) -> s_schema.Schema:
        if not context.canonical and self.if_exists:
            # ALTER ... IF EXISTS: silently drop the command when the
            # target object is absent.
            scls = self.get_object(schema, context, default=None)
            if scls is None:
                context.current().op.discard(self)
                return schema
        else:
            scls = self.get_object(schema, context)
        self.scls = scls

        with self.new_context(schema, context, scls):
            schema = self._alter_begin(schema, context)
            schema = self._alter_innards(schema, context)
            schema = self._alter_finalize(schema, context)

        return schema
class DeleteObject(ObjectCommand[so.Object_T], Generic[so.Object_T]):
    """DROP command for schema objects."""

    _delta_action = 'delete'

    #: If True, apply the command only if the object exists.
    if_exists = struct.Field(bool, default=False)

    #: If True, apply the command only if the object has no referrers
    #: in the schema.
    if_unused = struct.Field(bool, default=False)

    def get_verb(self) -> str:
        return 'drop'

    def is_data_safe(self) -> bool:
        # Deletions are only safe if the entire object class
        # has been declared as data-safe.
        return self.get_schema_metaclass()._data_safe
def _delete_begin(
    self,
    schema: s_schema.Schema,
    context: CommandContext,
) -> s_schema.Schema:
    """Validate the drop and expand it into cascaded deletions."""
    from . import ordering

    self._validate_legal_command(schema, context)

    if not context.canonical:
        schema = self.populate_ddl_identity(schema, context)
        schema = self.canonicalize_attributes(schema, context)

        if not context.get_value(('delcanon', self)):
            # Generate implicit deletions of owned references and
            # linearize them into a dependency-respecting order.
            commands = self._canonicalize(schema, context, self.scls)
            root = DeltaRoot()
            root.update(commands)
            root = ordering.linearize_delta(root, schema, schema)
            self.update(root.get_subcommands())

    return schema
def _canonicalize(
    self,
    schema: s_schema.Schema,
    context: CommandContext,
    scls: so.Object_T,
) -> List[Command]:
    """Return implicit DELETE commands for owned refs not deleted explicitly."""
    mcls = self.get_schema_metaclass()
    commands: List[Command] = []

    for refdict in mcls.get_refdicts():
        deleted_refs = set()

        all_refs = set(
            scls.get_field_value(schema, refdict.attr).objects(schema)
        )

        refcmds = cast(
            Tuple[ObjectCommand[so.Object], ...],
            self.get_subcommands(metaclass=refdict.ref_cls),
        )

        for op in refcmds:
            deleted_ref: so.Object = schema.get(op.classname)
            deleted_refs.add(deleted_ref)

        # Add implicit Delete commands for any local refs not
        # deleted explicitly.
        for ref in all_refs - deleted_refs:
            op = ref.init_delta_command(schema, DeleteObject)
            assert isinstance(op, DeleteObject)
            subcmds = op._canonicalize(schema, context, ref)
            op.update(subcmds)
            commands.append(op)

    # Record the fact that DeleteObject._canonicalize
    # was called on this object to guard against possible
    # duplicate calls.
    context.store_value(('delcanon', self), True)

    return commands
def _delete_innards(
    self,
    schema: s_schema.Schema,
    context: CommandContext,
) -> s_schema.Schema:
    """Apply every nested object subcommand to the schema in turn."""
    for subcmd in self.get_subcommands(metaclass=so.Object):
        schema = subcmd.apply(schema, context=context)
    return schema
def _delete_finalize(
    self,
    schema: s_schema.Schema,
    context: CommandContext,
) -> s_schema.Schema:
    """Verify nothing still depends on the object, then delete it."""
    ref_strs = []

    if not context.canonical and not context.disable_dep_verification:
        refs = schema.get_referrers(self.scls)
        ctx = context.current()
        assert ctx is not None
        orig_schema = ctx.original_schema
        if refs:
            for ref in refs:
                # Referrers that are themselves being deleted in this
                # operation do not block the drop.
                if (not context.is_deleting(ref)
                        and ref.is_blocking_ref(orig_schema, self.scls)):
                    ref_strs.append(
                        ref.get_verbosename(orig_schema, with_parent=True))

        if ref_strs:
            vn = self.scls.get_verbosename(orig_schema, with_parent=True)
            dn = self.scls.get_displayname(orig_schema)
            detail = '; '.join(f'{ref_str} depends on {dn}'
                               for ref_str in ref_strs)
            raise errors.SchemaError(
                f'cannot drop {vn} because '
                f'other objects in the schema depend on it',
                details=detail,
            )

    schema = schema.delete(self.scls)
    return schema
def _has_outside_references(
    self,
    schema: s_schema.Schema,
    context: CommandContext,
) -> bool:
    """Report whether the subject of this command has any outside
    references, ignoring refs that are currently being deleted and
    structural child refs (e.g. source backref in pointers of an
    object type).
    """
    for ref in schema.get_referrers(self.scls):
        if (not ref.is_parent_ref(schema, self.scls)
                and not context.is_deleting(ref)):
            return True
    return False
def apply(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
if self.if_exists:
scls = self.get_object(schema, context, default=None)
if scls is None:
context.current().op.discard(self)
return schema
else:
scls = self.get_object(schema, context)
self.scls = scls
with self.new_context(schema, context, scls):
if (
not self.canonical
and self.if_unused
and self._has_outside_references(schema, context)
):
parent_ctx = context.parent()
if parent_ctx is not None:
parent_ctx.op.discard(self)
return schema
schema = self._delete_begin(schema, context)
schema = self._delete_innards(schema, context)
schema = self._delete_finalize(schema, context)
return | |
'draft' and payment.payment_method_code in self._get_method_codes_needing_bank_account()
@api.depends('partner_id')
def _compute_partner_bank_id(self):
    """Default the bank account to the first one available on the partner."""
    for pay in self:
        candidates = pay.partner_id.bank_ids.filtered(
            lambda bank: bank.company_id in (False, pay.company_id))
        if not candidates:
            pay.partner_bank_id = False
        elif pay.partner_bank_id not in candidates:
            pay.partner_bank_id = candidates[0]._origin
@api.depends('partner_id', 'destination_account_id', 'journal_id')
def _compute_is_internal_transfer(self):
    """Flag payments whose partner and destination account both point at
    the journal's own company, i.e. liquidity transfers."""
    for payment in self:
        company = payment.journal_id.company_id
        payment.is_internal_transfer = (
            payment.partner_id == company.partner_id
            and payment.destination_account_id
            and payment.destination_account_id == company.transfer_account_id
        )
@api.depends('payment_type', 'journal_id')
def _compute_payment_method_id(self):
    ''' Compute the 'payment_method_id' field.
    This field is not computed in '_compute_payment_method_fields' because it's a stored editable one.
    '''
    for pay in self:
        journal = pay.journal_id
        available = (journal.inbound_payment_method_ids
                     if pay.payment_type == 'inbound'
                     else journal.outbound_payment_method_ids)

        if pay.payment_method_id in available:
            # Keep the current selection (re-assign to trigger the compute).
            pay.payment_method_id = pay.payment_method_id
        elif available:
            # Select the first available one by default.
            pay.payment_method_id = available[0]._origin
        else:
            pay.payment_method_id = False
@api.depends('payment_type',
             'journal_id.inbound_payment_method_ids',
             'journal_id.outbound_payment_method_ids')
def _compute_payment_method_fields(self):
    """Expose the journal's methods matching the payment direction and
    decide whether the selector widget should be hidden."""
    for pay in self:
        journal = pay.journal_id
        pay.available_payment_method_ids = (
            journal.inbound_payment_method_ids
            if pay.payment_type == 'inbound'
            else journal.outbound_payment_method_ids)

        pay.hide_payment_method = (
            len(pay.available_payment_method_ids) == 1
            and pay.available_payment_method_ids.code == 'manual')
@api.depends('journal_id')
def _compute_currency_id(self):
    """Currency defaults to the journal's, falling back to the company's."""
    for pay in self:
        journal = pay.journal_id
        pay.currency_id = journal.currency_id or journal.company_id.currency_id
@api.depends('is_internal_transfer')
def _compute_partner_id(self):
    """Force the company partner on internal transfers; clear it when it
    was the company partner but the payment is no longer a transfer."""
    for pay in self:
        company_partner = pay.journal_id.company_id.partner_id
        if pay.is_internal_transfer:
            pay.partner_id = company_partner
        elif pay.partner_id == company_partner:
            pay.partner_id = False
        else:
            # Re-assign to satisfy the compute; value is unchanged.
            pay.partner_id = pay.partner_id
@api.depends('journal_id', 'partner_id', 'partner_type', 'is_internal_transfer')
def _compute_destination_account_id(self):
    """Pick the counterpart account: the company transfer account for
    internal transfers, otherwise the partner's receivable/payable, with
    a company-level fallback when no partner is set."""
    self.destination_account_id = False
    for pay in self:
        if pay.is_internal_transfer:
            pay.destination_account_id = pay.journal_id.company_id.transfer_account_id
            continue

        if pay.partner_type == 'customer':
            # Receive money from invoice or send money to refund it.
            internal_type = 'receivable'
            property_field = 'property_account_receivable_id'
        elif pay.partner_type == 'supplier':
            # Send money to pay a bill or receive money to refund it.
            internal_type = 'payable'
            property_field = 'property_account_payable_id'
        else:
            continue

        if pay.partner_id:
            pay.destination_account_id = pay.partner_id.with_company(
                pay.company_id)[property_field]
        else:
            pay.destination_account_id = self.env['account.account'].search([
                ('company_id', '=', pay.company_id.id),
                ('internal_type', '=', internal_type),
                ('deprecated', '=', False),
            ], limit=1)
@api.depends('partner_bank_id', 'amount', 'ref', 'currency_id', 'journal_id', 'move_id.state',
             'payment_method_id', 'payment_type')
def _compute_qr_code(self):
    """Build the HTML snippet embedding a payment QR code, when applicable.

    A QR code is only rendered for draft/posted manual outbound payments
    that have both a recipient bank account and a currency; otherwise the
    field is reset to None.

    Fix: the original re-checked ``pay.partner_bank_id`` inside the outer
    condition that already required it, leaving an unreachable
    ``qr_code = None`` branch; that dead code is removed.
    """
    for pay in self:
        qr_code = None
        if pay.state in ('draft', 'posted') \
                and pay.partner_bank_id \
                and pay.payment_method_id.code == 'manual' \
                and pay.payment_type == 'outbound' \
                and pay.currency_id:
            qr_code = pay.partner_bank_id.build_qr_code_url(
                pay.amount, pay.ref, pay.ref, pay.currency_id, pay.partner_id)

        if qr_code:
            pay.qr_code = '''
                <br/>
                <img class="border border-dark rounded" src="{qr_code}"/>
                <br/>
                <strong class="text-center">{txt}</strong>
                '''.format(txt = _('Scan me with your banking app.'),
                           qr_code = qr_code)
        else:
            pay.qr_code = None
@api.depends('move_id.line_ids.matched_debit_ids', 'move_id.line_ids.matched_credit_ids')
def _compute_stat_buttons_from_reconciliation(self):
    ''' Retrieve the invoices reconciled to the payments through the reconciliation (account.partial.reconcile). '''
    stored_payments = self.filtered('id')
    if not stored_payments:
        # New (unsaved) records cannot be reconciled with anything yet.
        self.reconciled_invoice_ids = False
        self.reconciled_invoices_count = 0
        self.reconciled_bill_ids = False
        self.reconciled_bills_count = 0
        self.reconciled_statement_ids = False
        self.reconciled_statements_count = 0
        return

    # Make sure pending ORM writes are visible to the raw SQL below.
    self.env['account.move'].flush()
    self.env['account.move.line'].flush()
    self.env['account.partial.reconcile'].flush()

    # Invoices/bills reconciled with this payment, grouped by move_type.
    self._cr.execute('''
        SELECT
            payment.id,
            ARRAY_AGG(DISTINCT invoice.id) AS invoice_ids,
            invoice.move_type
        FROM account_payment payment
        JOIN account_move move ON move.id = payment.move_id
        JOIN account_move_line line ON line.move_id = move.id
        JOIN account_partial_reconcile part ON
            part.debit_move_id = line.id
            OR
            part.credit_move_id = line.id
        JOIN account_move_line counterpart_line ON
            part.debit_move_id = counterpart_line.id
            OR
            part.credit_move_id = counterpart_line.id
        JOIN account_move invoice ON invoice.id = counterpart_line.move_id
        JOIN account_account account ON account.id = line.account_id
        WHERE account.internal_type IN ('receivable', 'payable')
            AND payment.id IN %(payment_ids)s
            AND line.id != counterpart_line.id
            AND invoice.move_type in ('out_invoice', 'out_refund', 'in_invoice', 'in_refund', 'out_receipt', 'in_receipt')
        GROUP BY payment.id, invoice.move_type
    ''', {
        'payment_ids': tuple(stored_payments.ids)
    })
    query_res = self._cr.dictfetchall()
    self.reconciled_invoice_ids = self.reconciled_invoices_count = False
    self.reconciled_bill_ids = self.reconciled_bills_count = False
    for res in query_res:
        pay = self.browse(res['id'])
        # Sale-type moves feed the "invoices" button, others the "bills" one.
        if res['move_type'] in self.env['account.move'].get_sale_types(True):
            pay.reconciled_invoice_ids += self.env['account.move'].browse(res.get('invoice_ids', []))
            pay.reconciled_invoices_count = len(res.get('invoice_ids', []))
        else:
            pay.reconciled_bill_ids += self.env['account.move'].browse(res.get('invoice_ids', []))
            pay.reconciled_bills_count = len(res.get('invoice_ids', []))

    # Bank statements reconciled with this payment through the journal's
    # outstanding payment/receipt accounts.
    self._cr.execute('''
        SELECT
            payment.id,
            ARRAY_AGG(DISTINCT counterpart_line.statement_id) AS statement_ids
        FROM account_payment payment
        JOIN account_move move ON move.id = payment.move_id
        JOIN account_journal journal ON journal.id = move.journal_id
        JOIN account_move_line line ON line.move_id = move.id
        JOIN account_account account ON account.id = line.account_id
        JOIN account_partial_reconcile part ON
            part.debit_move_id = line.id
            OR
            part.credit_move_id = line.id
        JOIN account_move_line counterpart_line ON
            part.debit_move_id = counterpart_line.id
            OR
            part.credit_move_id = counterpart_line.id
        WHERE (account.id = journal.payment_debit_account_id OR account.id = journal.payment_credit_account_id)
            AND payment.id IN %(payment_ids)s
            AND line.id != counterpart_line.id
            AND counterpart_line.statement_id IS NOT NULL
        GROUP BY payment.id
    ''', {
        'payment_ids': tuple(stored_payments.ids)
    })
    query_res = dict((payment_id, statement_ids) for payment_id, statement_ids in self._cr.fetchall())

    for pay in self:
        statement_ids = query_res.get(pay.id, [])
        pay.reconciled_statement_ids = [(6, 0, statement_ids)]
        pay.reconciled_statements_count = len(statement_ids)
# -------------------------------------------------------------------------
# ONCHANGE METHODS
# -------------------------------------------------------------------------
@api.onchange('posted_before', 'state', 'journal_id', 'date')
def _onchange_journal_date(self):
    """Reset the name on unsaved payments so it gets recomputed.

    Before the record is created the move_id does not exist yet, and the
    name would not be recomputed correctly after a journal or date change,
    leading to inconsistencies.
    """
    if not self.move_id:
        self.name = False
# -------------------------------------------------------------------------
# CONSTRAINT METHODS
# -------------------------------------------------------------------------
@api.constrains('payment_method_id')
def _check_payment_method_id(self):
    """Reject payments without a payment method.

    A plain ``required=True`` cannot be used because the field is a
    computed editable stored one.
    """
    if any(not pay.payment_method_id for pay in self):
        raise ValidationError(_("Please define a payment method on your payment."))
# -------------------------------------------------------------------------
# LOW-LEVEL METHODS
# -------------------------------------------------------------------------
@api.model_create_multi
def create(self, vals_list):
    # OVERRIDE
    """Create payments together with their underlying journal entries,
    then mirror the shared stored fields onto the created moves.
    """
    write_off_line_vals_list = []

    for vals in vals_list:

        # Hack to add a custom write-off line.
        write_off_line_vals_list.append(vals.pop('write_off_line_vals', None))

        # Force the move_type to avoid inconsistency with residual 'default_move_type' inside the context.
        vals['move_type'] = 'entry'

        # Force the computation of 'journal_id' since this field is set on account.move but must have the
        # bank/cash type.
        if 'journal_id' not in vals:
            vals['journal_id'] = self._get_default_journal().id

        # Since 'currency_id' is a computed editable field, it will be computed later.
        # Prevent the account.move to call the _get_default_currency method that could raise
        # the 'Please define an accounting miscellaneous journal in your company' error.
        if 'currency_id' not in vals:
            journal = self.env['account.journal'].browse(vals['journal_id'])
            vals['currency_id'] = journal.currency_id.id or journal.company_id.currency_id.id

    payments = super().create(vals_list)

    for i, pay in enumerate(payments):
        write_off_line_vals = write_off_line_vals_list[i]

        # Write payment_id on the journal entry plus the fields being stored in both models but having the same
        # name, e.g. partner_bank_id. The ORM is currently not able to perform such synchronization and make things
        # more difficult by creating related fields on the fly to handle the _inherits.
        # Then, when partner_bank_id is in vals, the key is consumed by account.payment but is never written on
        # account.move.
        to_write = {'payment_id': pay.id}
        for k, v in vals_list[i].items():
            if k in self._fields and self._fields[k].store and k in pay.move_id._fields and pay.move_id._fields[k].store:
                to_write[k] = v

        if 'line_ids' not in vals_list[i]:
            # No explicit lines given: generate the default liquidity /
            # counterpart (and optional write-off) lines.
            to_write['line_ids'] = [(0, 0, line_vals) for line_vals in pay._prepare_move_line_default_vals(write_off_line_vals=write_off_line_vals)]

        pay.move_id.write(to_write)

    return payments
def write(self, vals):
    # OVERRIDE to keep the linked journal entries in sync after writing.
    result = super().write(vals)
    self._synchronize_to_moves(set(vals.keys()))
    return result
def unlink(self):
    # OVERRIDE to unlink the inherited account.move (move_id field) as well.
    linked_moves = self.with_context(force_delete=True).move_id
    result = super().unlink()
    linked_moves.unlink()
    return result
@api.depends('move_id.name')
def name_get(self):
    """Display each payment under its journal entry's name."""
    result = []
    for payment in self:
        result.append((payment.id, payment.move_id.name or _('Draft Payment')))
    return result
# -------------------------------------------------------------------------
# SYNCHRONIZATION account.payment <-> account.move
# -------------------------------------------------------------------------
def _synchronize_from_moves(self, changed_fields):
''' Update the account.payment regarding its related account.move.
Also, check both models are still consistent.
:param changed_fields: A set containing all modified fields on account.move.
'''
if self._context.get('skip_account_move_synchronization'):
return
for pay in self.with_context(skip_account_move_synchronization=True):
# After the migration to 14.0, the journal entry could be shared between the account.payment and the
# account.bank.statement.line. In that case, the synchronization will only be made with the statement line.
if pay.move_id.statement_line_id:
continue
move = pay.move_id
move_vals_to_write = {}
payment_vals_to_write = {}
if 'journal_id' in changed_fields:
if pay.journal_id.type not in ('bank', 'cash'):
raise UserError(_("A payment must always belongs to a bank or cash journal."))
if 'line_ids' in changed_fields:
all_lines = move.line_ids
liquidity_lines, counterpart_lines, writeoff_lines = pay._seek_for_lines()
if len(liquidity_lines) != 1 or len(counterpart_lines) != 1:
raise UserError(_(
"The journal entry %s reached an invalid state relative to its payment.\n"
"To be consistent, the journal entry must always contains:\n"
"- one journal item involving the outstanding payment/receipts account.\n"
"- one journal item involving a receivable/payable account.\n"
"- optional journal items, all sharing the same account.\n\n"
) % move.display_name)
if writeoff_lines and len(writeoff_lines.account_id) != 1:
raise UserError(_(
"The journal entry %s reached an invalid state relative to its payment.\n"
"To be consistent, all the write-off journal items must share the same account."
) % move.display_name)
if any(line.currency_id != all_lines[0].currency_id for line in all_lines):
raise UserError(_(
| |
<filename>devstack/tools/nsxv3_cleanup.py
# Copyright 2015 VMware Inc
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import optparse
from oslo_serialization import jsonutils
import requests
import six.moves.urllib.parse as urlparse
import sqlalchemy as sa
from vmware_nsx.db import nsx_models
requests.packages.urllib3.disable_warnings()
class NeutronNsxDB(object):
    """Read-only helper around the Neutron DB NSX mapping tables."""

    def __init__(self, db_connection):
        super(NeutronNsxDB, self).__init__()
        engine = sa.create_engine(db_connection)
        self.session = sa.orm.session.sessionmaker()(bind=engine)

    def query_all(self, column, model):
        """Return the distinct values of *column* over all rows of *model*."""
        rows = self.session.query(model).all()
        return list({row[column] for row in rows})

    def get_logical_ports(self):
        return self.query_all('nsx_port_id',
                              nsx_models.NeutronNsxPortMapping)

    def get_nsgroups(self):
        return self.query_all('nsx_id',
                              nsx_models.NeutronNsxSecurityGroupMapping)

    def get_firewall_sections(self):
        return self.query_all('nsx_id',
                              nsx_models.NeutronNsxFirewallSectionMapping)

    def get_logical_routers(self):
        return self.query_all('nsx_id',
                              nsx_models.NeutronNsxRouterMapping)

    def get_logical_switches(self):
        return self.query_all('nsx_id',
                              nsx_models.NeutronNsxNetworkMapping)

    def get_logical_dhcp_servers(self):
        return self.query_all('nsx_service_id',
                              nsx_models.NeutronNsxServiceBinding)
class NSXClient(object):
    """Base NSX REST client"""
    # NSX API version segment used when building request URLs.
    API_VERSION = "v1"
    # Cursor prefix that NSX returns to signal "no more pages".
    NULL_CURSOR_PREFIX = '0000'

    def __init__(self, host, username, password, db_connection):
        self.host = host
        self.username = username
        self.password = password
        self.version = None
        self.endpoint = None
        self.content_type = "application/json"
        self.accept_type = "application/json"
        # Self-signed NSX manager certs are common; TLS is not verified.
        self.verify = False
        self.secure = True
        self.interface = "json"
        self.url = None
        self.headers = None
        self.api_version = NSXClient.API_VERSION
        # Optional Neutron DB used to restrict cleanup to mapped objects.
        self.neutron_db = (NeutronNsxDB(db_connection)
                           if db_connection else None)
        self.__set_headers()
def __set_endpoint(self, endpoint):
self.endpoint = endpoint
def get_endpoint(self):
return self.endpoint
def __set_content_type(self, content_type):
self.content_type = content_type
def get_content_type(self):
return self.content_type
def __set_accept_type(self, accept_type):
self.accept_type = accept_type
def get_accept_type(self):
return self.accept_type
def __set_api_version(self, api_version):
self.api_version = api_version
def get_api_version(self):
return self.api
def __set_url(self, api=None, secure=None, host=None, endpoint=None):
api = self.api_version if api is None else api
secure = self.secure if secure is None else secure
host = self.host if host is None else host
endpoint = self.endpoint if endpoint is None else endpoint
http_type = 'https' if secure else 'http'
self.url = '%s://%s/api/%s%s' % (http_type, host, api, endpoint)
def get_url(self):
return self.url
def __set_headers(self, content=None, accept=None):
content_type = self.content_type if content is None else content
accept_type = self.accept_type if accept is None else accept
auth_cred = self.username + ":" + self.password
auth = base64.b64encode(auth_cred)
headers = {}
headers['Authorization'] = "Basic %s" % auth
headers['Content-Type'] = content_type
headers['Accept'] = accept_type
# allow admin user to delete entities created
# under openstack principal identity
headers['X-Allow-Overwrite'] = 'true'
self.headers = headers
def get(self, endpoint=None, params=None):
    """
    Basic query method for json API request
    """
    self.__set_url(endpoint=endpoint)
    return requests.get(self.url, headers=self.headers,
                        verify=self.verify, params=params)
def get_list_results(self, endpoint=None, params=None):
    """
    Query method for json API get for list (takes care of pagination)
    """
    self.__set_url(endpoint=endpoint)
    response = requests.get(self.url, headers=self.headers,
                            verify=self.verify, params=params).json()
    results = response['results']
    # Number of items reported by the server but not yet fetched.
    missing = response['result_count'] - len(results)
    cursor = response.get('cursor', self.NULL_CURSOR_PREFIX)
    # Append the cursor with '&' if the URL already has a query string.
    op = '&' if urlparse.urlparse(self.url).query else '?'
    url = self.url + op + 'cursor='
    # we will enter the loop if response does not fit into single page
    while missing > 0 and not cursor.startswith(self.NULL_CURSOR_PREFIX):
        response = requests.get(url + cursor, headers=self.headers,
                                verify=self.verify, params=params).json()
        cursor = response.get('cursor', self.NULL_CURSOR_PREFIX)
        missing -= len(response['results'])
        results += response['results']
    return results
def put(self, endpoint=None, body=None):
    """
    Basic put API method on endpoint
    """
    self.__set_url(endpoint=endpoint)
    return requests.put(self.url, headers=self.headers,
                        verify=self.verify, data=jsonutils.dumps(body))
def delete(self, endpoint=None, params=None):
    """
    Basic delete API method on endpoint
    """
    self.__set_url(endpoint=endpoint)
    return requests.delete(self.url, headers=self.headers,
                           verify=self.verify, params=params)
def post(self, endpoint=None, body=None):
    """
    Basic post API method on endpoint
    """
    self.__set_url(endpoint=endpoint)
    return requests.post(self.url, headers=self.headers,
                         verify=self.verify,
                         data=jsonutils.dumps(body))
def get_transport_zones(self):
    """
    Retrieve all transport zones
    """
    # Paginated fetch; returns the complete list of zone dicts.
    return self.get_list_results(endpoint="/transport-zones")
def get_logical_ports(self):
    """
    Retrieve all logical ports on NSX backend
    """
    # Paginated fetch; returns the complete list of port dicts.
    return self.get_list_results(endpoint="/logical-ports")
def get_os_logical_ports(self):
"""
Retrieve all logical ports created from OpenStack
"""
lports = self.get_os_resources(
self.get_logical_ports())
if self.neutron_db:
db_lports = self.neutron_db.get_logical_ports()
lports = [lp for lp in lports if lp['id'] in db_lports]
return lports
def update_logical_port_attachment(self, lports):
    """
    In order to delete logical ports, we need to detach
    the VIF attachment on the ports first.
    """
    for port in lports:
        port['attachment'] = None
        response = self.put(endpoint="/logical-ports/%s" % port['id'],
                            body=port)
        if response.status_code != requests.codes.ok:
            print("ERROR: Failed to update lport %s" % port['id'])
def _remove_port_from_exclude_list(self, p):
    """Best-effort removal of port *p* from the firewall exclude list."""
    try:
        endpoint = ('/firewall/excludelist?action=remove_member&'
                    'object_id=%s' % p['id'])
        self.post(endpoint)
    except Exception:
        # The port may simply not be in the list; failures are ignored.
        pass
def _cleanup_logical_ports(self, lports):
    """Detach, de-exclude and delete each of the given logical ports."""
    # logical port vif detachment
    self.update_logical_port_attachment(lports)
    for port in lports:
        # delete this port from the exclude list (if in it)
        self._remove_port_from_exclude_list(port)
        response = self.delete(endpoint='/logical-ports/%s' % port['id'])
        if response.status_code == requests.codes.ok:
            print("Successfully deleted logical port %s" % port['id'])
        else:
            print("ERROR: Failed to delete lport %s, response code %s" %
                  (port['id'], response.status_code))
def cleanup_os_logical_ports(self):
    """
    Delete all logical ports created by OpenStack
    """
    ports = self.get_os_logical_ports()
    print("Number of OS Logical Ports to be deleted: %s" % len(ports))
    self._cleanup_logical_ports(ports)
def get_os_resources(self, resources):
    """
    Get all logical resources created by OpenStack

    A resource counts as OpenStack-created when any of its tags carries
    an 'os-api-version' value.

    Fix: the original double-for comprehension emitted the resource once
    per matching tag, so a resource with several 'os-api-version' tags
    appeared multiple times in the result (and would be deleted twice by
    the cleanup routines).  Each resource is now returned at most once.
    """
    return [r for r in resources if 'tags' in r
            and any('os-api-version' in tag.values()
                    for tag in r['tags'])]
def get_logical_switches(self):
    """
    Retrieve all logical switches on NSX backend
    """
    # Paginated fetch; returns the complete list of switch dicts.
    return self.get_list_results(endpoint="/logical-switches")
def get_os_logical_switches(self):
    """
    Retrieve all logical switches created from OpenStack
    """
    lswitches = self.get_os_resources(self.get_logical_switches())
    if self.neutron_db:
        # Keep only the switches the Neutron DB knows about.
        known_ids = set(self.neutron_db.get_logical_switches())
        lswitches = [ls for ls in lswitches if ls['id'] in known_ids]
    return lswitches
def get_lswitch_ports(self, ls_id):
"""
Return all the logical ports that belong to this lswitch
"""
lports = self.get_logical_ports()
return [p for p in lports if p['logical_switch_id'] == ls_id]
def cleanup_os_logical_switches(self):
    """
    Delete all logical switches created from OpenStack
    """
    lswitches = self.get_os_logical_switches()
    print("Number of OS Logical Switches to be deleted: %s" %
          len(lswitches))
    for ls in lswitches:
        # A switch may still carry ports neutron does not know about
        # (e.g. a metadata proxy port is not stored in the DB, so it is
        # never deleted when reading ports from the DB); blow them away.
        orphan_ports = self.get_lswitch_ports(ls['id'])
        if orphan_ports:
            print("Number of orphan OS Logical Ports to be "
                  "deleted: %s" % len(orphan_ports))
            self._cleanup_logical_ports(orphan_ports)

        response = self.delete(endpoint='/logical-switches/%s' % ls['id'])
        if response.status_code == requests.codes.ok:
            print("Successfully deleted logical switch %s-%s" %
                  (ls['display_name'], ls['id']))
        else:
            print("Failed to delete lswitch %s-%s, and response is %s" %
                  (ls['display_name'], ls['id'], response.status_code))
def get_firewall_sections(self):
    """
    Retrieve all firewall sections
    """
    # Paginated fetch; returns the complete list of section dicts.
    return self.get_list_results(endpoint="/firewall/sections")
def get_os_firewall_sections(self):
    """
    Retrieve all firewall sections created from OpenStack
    """
    sections = self.get_os_resources(self.get_firewall_sections())
    if self.neutron_db:
        # Keep only the sections the Neutron DB knows about.
        known_ids = set(self.neutron_db.get_firewall_sections())
        sections = [fws for fws in sections if fws['id'] in known_ids]
    return sections
def cleanup_os_firewall_sections(self):
    """
    Cleanup all firewall sections created from OpenStack
    """
    sections = self.get_os_firewall_sections()
    print("Number of OS Firewall Sections to be deleted: %s" %
          len(sections))
    for fw in sections:
        # cascade=true also removes the rules contained in the section.
        response = self.delete(
            endpoint="/firewall/sections/%s?cascade=true" % fw['id'])
        if response.status_code == requests.codes.ok:
            print("Successfully deleted firewall section %s" %
                  fw['display_name'])
        else:
            print("Failed to delete firewall section %s" %
                  fw['display_name'])
    def get_ns_groups(self):
        """
        Retrieve all NSGroups on NSX backend.

        When a Neutron DB connection is available, only NSGroups whose id
        is also recorded in the Neutron DB are returned.
        """
        ns_groups = self.get_os_resources(
            self.get_list_results(endpoint="/ns-groups"))
        if self.neutron_db:
            db_nsgroups = self.neutron_db.get_nsgroups()
            ns_groups = [nsg for nsg in ns_groups
                         if nsg['id'] in db_nsgroups]
        return ns_groups
def cleanup_os_ns_groups(self):
"""
Cleanup all NSGroups created from OpenStack plugin
"""
ns_groups = self.get_ns_groups()
print("Number of OS NSGroups to be deleted: %s" % len(ns_groups))
for nsg in ns_groups:
endpoint = "/ns-groups/%s?force=true" % nsg['id']
response = self.delete(endpoint=endpoint)
if response.status_code == requests.codes.ok:
print("Successfully deleted NSGroup: %s" % nsg['display_name'])
else:
print("Failed to delete NSGroup: %s" % nsg['display_name'])
    def get_switching_profiles(self):
        """
        Retrieve all Switching Profiles on NSX backend.

        :return: list of switching-profile dicts
        """
        return self.get_list_results(endpoint="/switching-profiles")
    def get_os_switching_profiles(self):
        """
        Retrieve all Switching Profiles created from OpenStack.
        """
        sw_profiles = self.get_os_resources(
            self.get_switching_profiles())
        # NOTE(review): when a Neutron DB connection exists the result is
        # emptied, i.e. no profiles are selected for cleanup in DB mode —
        # confirm this is intentional (other get_os_* methods filter by
        # DB ids instead of discarding everything).
        if self.neutron_db:
            sw_profiles = []
        return sw_profiles
def cleanup_os_switching_profiles(self):
"""
Cleanup all Switching Profiles created from OpenStack plugin
"""
sw_profiles = self.get_os_switching_profiles()
print("Number of OS SwitchingProfiles to be deleted: %s" %
len(sw_profiles))
for swp in sw_profiles:
endpoint = "/switching-profiles/%s" % swp['id']
response = self.delete(endpoint=endpoint)
if response.status_code == requests.codes.ok:
print("Successfully deleted Switching Profile: %s" %
swp['display_name'])
else:
print("Failed to delete Switching Profile: %s" %
swp['display_name'])
def get_logical_routers(self, tier=None):
"""
Retrieve | |
parse_source_value(self._raw_data.get('renderamt', 255))
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "255 255 255"))
@property
def rendermode(self):
return self._raw_data.get('rendermode', "5")
class func_reflective_glass(func_brush):
    """Same keyvalues as func_brush; adds nothing of its own."""
    pass
class env_particle_performance_monitor(Targetname):
    """env_particle_performance_monitor entity: keyvalue accessors over self._raw_data."""
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    pass
class npc_puppet(Studiomodel, BaseNPC, Parentname):
    """npc_puppet entity: keyvalue accessors over self._raw_data with baked-in defaults."""
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def animationtarget(self):
        return self._raw_data.get('animationtarget', "")
    @property
    def attachmentname(self):
        return self._raw_data.get('attachmentname', "")
class point_gamestats_counter(EnableDisable, Origin, Targetname):
    """point_gamestats_counter entity: keyvalue accessors over self._raw_data."""
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def Name(self):
        # Reads the lower-case 'name' keyvalue despite the capitalised property
        return self._raw_data.get('name', None)
class func_instance(Angles):
    """func_instance entity: instance placement keyvalues (file, fixup style, replace01..10)."""
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def targetname(self):
        return self._raw_data.get('targetname', None)
    @property
    def file(self):
        return self._raw_data.get('file', None)
    @property
    def fixup_style(self):
        return self._raw_data.get('fixup_style', "2")
    @property
    def replace01(self):
        return self._raw_data.get('replace01', None)
    @property
    def replace02(self):
        return self._raw_data.get('replace02', None)
    @property
    def replace03(self):
        return self._raw_data.get('replace03', None)
    @property
    def replace04(self):
        return self._raw_data.get('replace04', None)
    @property
    def replace05(self):
        return self._raw_data.get('replace05', None)
    @property
    def replace06(self):
        return self._raw_data.get('replace06', None)
    @property
    def replace07(self):
        return self._raw_data.get('replace07', None)
    @property
    def replace08(self):
        return self._raw_data.get('replace08', None)
    @property
    def replace09(self):
        return self._raw_data.get('replace09', None)
    @property
    def replace10(self):
        return self._raw_data.get('replace10', None)
class func_instance_parms(Base):
    """func_instance_parms entity: parameter slots parm1..parm10 read from self._raw_data."""
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def parm1(self):
        return self._raw_data.get('parm1', None)
    @property
    def parm2(self):
        return self._raw_data.get('parm2', None)
    @property
    def parm3(self):
        return self._raw_data.get('parm3', None)
    @property
    def parm4(self):
        return self._raw_data.get('parm4', None)
    @property
    def parm5(self):
        return self._raw_data.get('parm5', None)
    @property
    def parm6(self):
        return self._raw_data.get('parm6', None)
    @property
    def parm7(self):
        return self._raw_data.get('parm7', None)
    @property
    def parm8(self):
        return self._raw_data.get('parm8', None)
    @property
    def parm9(self):
        return self._raw_data.get('parm9', None)
    @property
    def parm10(self):
        return self._raw_data.get('parm10', None)
class func_instance_io_proxy(Base):
    """func_instance_io_proxy entity: keyvalue accessors over self._raw_data."""
    # NOTE(review): icon is the func_instance_parms sprite — confirm intended
    icon_sprite = "editor/func_instance_parms.vmt"
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def targetname(self):
        return self._raw_data.get('targetname', None)
class TalkNPC(BaseNPC):
    """Base class for talking NPCs: use/un-use sentence keyvalues."""
    @property
    def UseSentence(self):
        return self._raw_data.get('usesentence', None)
    @property
    def UnUseSentence(self):
        return self._raw_data.get('unusesentence', None)
    @property
    def DontUseSpeechSemaphore(self):
        return self._raw_data.get('dontusespeechsemaphore', "0")
class PlayerCompanion(BaseNPC):
    """Base class for player-companion NPCs: transition/weapon/ally keyvalues."""
    @property
    def AlwaysTransition(self):
        return self._raw_data.get('alwaystransition', "No")
    @property
    def DontPickupWeapons(self):
        return self._raw_data.get('dontpickupweapons', "No")
    @property
    def GameEndAlly(self):
        return self._raw_data.get('gameendally', "No")
class RappelNPC(BaseNPC):
    """Base class for rappelling NPCs."""
    @property
    def waitingtorappel(self):
        return self._raw_data.get('waitingtorappel', "No")
class trigger_physics_trap(Angles, Trigger):
    """trigger_physics_trap entity: dissolve-type keyvalue."""
    @property
    def dissolvetype(self):
        return self._raw_data.get('dissolvetype', "Energy")
class trigger_weapon_dissolve(Trigger):
    """trigger_weapon_dissolve entity: emitter-name keyvalue."""
    @property
    def emittername(self):
        return self._raw_data.get('emittername', "")
class trigger_weapon_strip(Trigger):
    """trigger_weapon_strip entity: kill-weapons keyvalue."""
    @property
    def KillWeapons(self):
        return self._raw_data.get('killweapons', "No")
class npc_crow(BaseNPC):
    """npc_crow entity: crow model plus 'deaf' keyvalue."""
    model_ = "models/crow.mdl"
    @property
    def deaf(self):
        return self._raw_data.get('deaf', "0")
class npc_seagull(BaseNPC):
    """npc_seagull entity: seagull model plus 'deaf' keyvalue."""
    model_ = "models/seagull.mdl"
    @property
    def deaf(self):
        return self._raw_data.get('deaf', "0")
class npc_pigeon(BaseNPC):
    """npc_pigeon entity: pigeon model plus 'deaf' keyvalue."""
    model_ = "models/pigeon.mdl"
    @property
    def deaf(self):
        return self._raw_data.get('deaf', "0")
class npc_bullseye(BaseNPC, Parentname):
    """npc_bullseye entity: target-dummy keyvalues (health, angles, autoaim)."""
    icon_sprite = "editor/bullseye.vmt"
    @property
    def health(self):
        return parse_source_value(self._raw_data.get('health', 35))
    @property
    def minangle(self):
        return self._raw_data.get('minangle', "360")
    @property
    def mindist(self):
        return self._raw_data.get('mindist', "0")
    @property
    def autoaimradius(self):
        return parse_source_value(self._raw_data.get('autoaimradius', 0))
class npc_enemyfinder(BaseNPC, Parentname):
    """npc_enemyfinder entity: search-range and free-pass keyvalues."""
    @property
    def FieldOfView(self):
        return self._raw_data.get('fieldofview', "0.2")
    @property
    def MinSearchDist(self):
        return parse_source_value(self._raw_data.get('minsearchdist', 0))
    @property
    def MaxSearchDist(self):
        return parse_source_value(self._raw_data.get('maxsearchdist', 2048))
    @property
    def freepass_timetotrigger(self):
        return parse_source_value(self._raw_data.get('freepass_timetotrigger', 0))
    @property
    def freepass_duration(self):
        return parse_source_value(self._raw_data.get('freepass_duration', 0))
    @property
    def freepass_movetolerance(self):
        return parse_source_value(self._raw_data.get('freepass_movetolerance', 120))
    @property
    def freepass_refillrate(self):
        return parse_source_value(self._raw_data.get('freepass_refillrate', 0.5))
    @property
    def freepass_peektime(self):
        return parse_source_value(self._raw_data.get('freepass_peektime', 0))
    @property
    def StartOn(self):
        return self._raw_data.get('starton', "1")
class env_gunfire(Targetname, Parentname, EnableDisable):
    """env_gunfire entity: simulated-gunfire keyvalues (bursts, spread, sounds)."""
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def target(self):
        return self._raw_data.get('target', "")
    @property
    def minburstsize(self):
        return parse_source_value(self._raw_data.get('minburstsize', 2))
    @property
    def maxburstsize(self):
        return parse_source_value(self._raw_data.get('maxburstsize', 7))
    @property
    def minburstdelay(self):
        return parse_source_value(self._raw_data.get('minburstdelay', 2))
    @property
    def maxburstdelay(self):
        return parse_source_value(self._raw_data.get('maxburstdelay', 5))
    @property
    def rateoffire(self):
        return parse_source_value(self._raw_data.get('rateoffire', 10))
    @property
    def spread(self):
        return self._raw_data.get('spread', "5")
    @property
    def bias(self):
        return self._raw_data.get('bias', "1")
    @property
    def collisions(self):
        return self._raw_data.get('collisions', "0")
    @property
    def shootsound(self):
        return self._raw_data.get('shootsound', "Weapon_AR2.NPC_Single")
    @property
    def tracertype(self):
        return self._raw_data.get('tracertype', "AR2TRACER")
class ai_goal_operator(Targetname, EnableDisable):
    """ai_goal_operator entity: actor/target/state keyvalues."""
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def actor(self):
        return self._raw_data.get('actor', "")
    @property
    def target(self):
        return self._raw_data.get('target', "")
    @property
    def contexttarget(self):
        return self._raw_data.get('contexttarget', "")
    @property
    def state(self):
        return self._raw_data.get('state', "0")
    @property
    def moveto(self):
        return self._raw_data.get('moveto', "1")
class info_darknessmode_lightsource(Targetname, EnableDisable):
    """info_darknessmode_lightsource entity: origin and light radius."""
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def LightRadius(self):
        return parse_source_value(self._raw_data.get('lightradius', 256.0))
class monster_generic(BaseNPC):
    """monster_generic entity: model/body keyvalues."""
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def model(self):
        return self._raw_data.get('model', None)
    @property
    def body(self):
        return parse_source_value(self._raw_data.get('body', 0))
class generic_actor(BaseNPC, Parentname):
    """generic_actor entity: model and hull-name keyvalues."""
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def model(self):
        return self._raw_data.get('model', None)
    @property
    def hull_name(self):
        return self._raw_data.get('hull_name', "Human")
class cycler_actor(BaseNPC):
    """cycler_actor entity: model and sentence keyvalues."""
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def model(self):
        return self._raw_data.get('model', None)
    @property
    def Sentence(self):
        return self._raw_data.get('sentence', "")
class npc_maker(BaseNPCMaker):
    """npc_maker entity: keyvalues describing the NPC type to spawn."""
    icon_sprite = "editor/npc_maker.vmt"
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def NPCType(self):
        return self._raw_data.get('npctype', None)
    @property
    def NPCTargetname(self):
        return self._raw_data.get('npctargetname', None)
    @property
    def NPCSquadname(self):
        return self._raw_data.get('npcsquadname', None)
    @property
    def NPCHintGroup(self):
        return self._raw_data.get('npchintgroup', None)
    @property
    def additionalequipment(self):
        return self._raw_data.get('additionalequipment', "0")
class player_control(Targetname):
    """Same keyvalues as Targetname; adds nothing of its own."""
    pass
class BaseScripted(Targetname, Parentname, Angles):
    """Base class for scripted sequences: m_isz* animation and control keyvalues."""
    @property
    def m_iszEntity(self):
        return self._raw_data.get('m_iszentity', None)
    @property
    def m_iszIdle(self):
        return self._raw_data.get('m_iszidle', "")
    @property
    def m_iszEntry(self):
        return self._raw_data.get('m_iszentry', "")
    @property
    def m_iszPlay(self):
        return self._raw_data.get('m_iszplay', "")
    @property
    def m_iszPostIdle(self):
        return self._raw_data.get('m_iszpostidle', "")
    @property
    def m_iszCustomMove(self):
        return self._raw_data.get('m_iszcustommove', "")
    @property
    def m_bLoopActionSequence(self):
        return self._raw_data.get('m_bloopactionsequence', "0")
    @property
    def m_bNoBlendedMovement(self):
        return self._raw_data.get('m_bnoblendedmovement', "0")
    @property
    def m_bSynchPostIdles(self):
        return self._raw_data.get('m_bsynchpostidles', "0")
    @property
    def m_flRadius(self):
        return parse_source_value(self._raw_data.get('m_flradius', 0))
    @property
    def m_flRepeat(self):
        return parse_source_value(self._raw_data.get('m_flrepeat', 0))
    @property
    def m_fMoveTo(self):
        return self._raw_data.get('m_fmoveto', "1")
    @property
    def m_iszNextScript(self):
        return self._raw_data.get('m_isznextscript', None)
    @property
    def m_bIgnoreGravity(self):
        return self._raw_data.get('m_bignoregravity', "0")
    @property
    def m_bDisableNPCCollisions(self):
        return self._raw_data.get('m_bdisablenpccollisions', "0")
class scripted_sentence(Targetname):
    """scripted_sentence entity: sentence playback keyvalues."""
    icon_sprite = "editor/scripted_sentence.vmt"
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def sentence(self):
        return self._raw_data.get('sentence', "")
    @property
    def entity(self):
        return self._raw_data.get('entity', None)
    @property
    def delay(self):
        return self._raw_data.get('delay', "0")
    @property
    def radius(self):
        return parse_source_value(self._raw_data.get('radius', 512))
    @property
    def refire(self):
        return self._raw_data.get('refire', "3")
    @property
    def listener(self):
        return self._raw_data.get('listener', None)
    @property
    def volume(self):
        return self._raw_data.get('volume', "10")
    @property
    def attenuation(self):
        return self._raw_data.get('attenuation', "0")
class scripted_target(Targetname, Parentname):
    """scripted_target entity: scripted movement-target keyvalues."""
    icon_sprite = "editor/info_target.vmt"
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def StartDisabled(self):
        return self._raw_data.get('startdisabled', "1")
    @property
    def m_iszEntity(self):
        return self._raw_data.get('m_iszentity', None)
    @property
    def m_flRadius(self):
        return parse_source_value(self._raw_data.get('m_flradius', 0))
    @property
    def MoveSpeed(self):
        return parse_source_value(self._raw_data.get('movespeed', 5))
    @property
    def PauseDuration(self):
        return parse_source_value(self._raw_data.get('pauseduration', 0))
    @property
    def EffectDuration(self):
        return parse_source_value(self._raw_data.get('effectduration', 2))
    @property
    def target(self):
        return self._raw_data.get('target', None)
class ai_relationship(Targetname):
    """ai_relationship entity: subject/target disposition keyvalues."""
    icon_sprite = "editor/ai_relationship.vmt"
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def subject(self):
        return self._raw_data.get('subject', "")
    @property
    def target(self):
        return self._raw_data.get('target', "")
    @property
    def disposition(self):
        return self._raw_data.get('disposition', "3")
    @property
    def radius(self):
        return parse_source_value(self._raw_data.get('radius', 0))
    @property
    def rank(self):
        return parse_source_value(self._raw_data.get('rank', 0))
    @property
    def StartActive(self):
        return self._raw_data.get('startactive', "0")
    @property
    def Reciprocal(self):
        return self._raw_data.get('reciprocal', "0")
class ai_ally_manager(Targetname):
    """ai_ally_manager entity: max allies/medics keyvalues."""
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def maxallies(self):
        return parse_source_value(self._raw_data.get('maxallies', 5))
    @property
    def maxmedics(self):
        return parse_source_value(self._raw_data.get('maxmedics', 1))
class LeadGoalBase(Targetname):
    """Base class for lead goals: distances, behaviour flags and concept modifiers."""
    @property
    def actor(self):
        return self._raw_data.get('actor', None)
    @property
    def goal(self):
        return self._raw_data.get('goal', None)
    @property
    def WaitPointName(self):
        return self._raw_data.get('waitpointname', None)
    @property
    def WaitDistance(self):
        # No default in the FGD: missing keyvalue yields parse_source_value(None)
        return parse_source_value(self._raw_data.get('waitdistance', None))
    @property
    def LeadDistance(self):
        return parse_source_value(self._raw_data.get('leaddistance', 64))
    @property
    def RetrieveDistance(self):
        return parse_source_value(self._raw_data.get('retrievedistance', 96))
    @property
    def SuccessDistance(self):
        return parse_source_value(self._raw_data.get('successdistance', 0))
    @property
    def Run(self):
        return self._raw_data.get('run', "0")
    @property
    def Retrieve(self):
        return self._raw_data.get('retrieve', "1")
    @property
    def ComingBackWaitForSpeak(self):
        return self._raw_data.get('comingbackwaitforspeak', "1")
    @property
    def RetrieveWaitForSpeak(self):
        return self._raw_data.get('retrievewaitforspeak', "1")
    @property
    def DontSpeakStart(self):
        return self._raw_data.get('dontspeakstart', "0")
    @property
    def LeadDuringCombat(self):
        return self._raw_data.get('leadduringcombat', "0")
    @property
    def GagLeader(self):
        return self._raw_data.get('gagleader', "0")
    @property
    def AttractPlayerConceptModifier(self):
        return self._raw_data.get('attractplayerconceptmodifier', "")
    @property
    def WaitOverConceptModifier(self):
        return self._raw_data.get('waitoverconceptmodifier', "")
    @property
    def ArrivalConceptModifier(self):
        return self._raw_data.get('arrivalconceptmodifier', "")
    @property
    def PostArrivalConceptModifier(self):
        return self._raw_data.get('postarrivalconceptmodifier', None)
    @property
    def SuccessConceptModifier(self):
        return self._raw_data.get('successconceptmodifier', "")
    @property
    def FailureConceptModifier(self):
        return self._raw_data.get('failureconceptmodifier', "")
    @property
    def ComingBackConceptModifier(self):
        return self._raw_data.get('comingbackconceptmodifier', "")
    @property
    def RetrieveConceptModifier(self):
        return self._raw_data.get('retrieveconceptmodifier', "")
class ai_goal_lead(LeadGoalBase):
    """ai_goal_lead entity: origin and search-type keyvalues."""
    icon_sprite = "editor/ai_goal_lead.vmt"
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def SearchType(self):
        return self._raw_data.get('searchtype', "0")
class ai_goal_lead_weapon(LeadGoalBase):
    """ai_goal_lead_weapon entity: weapon-specific lead-goal keyvalues."""
    icon_sprite = "editor/ai_goal_lead.vmt"
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def WeaponName(self):
        return self._raw_data.get('weaponname', "weapon_bugbait")
    @property
    def MissingWeaponConceptModifier(self):
        return self._raw_data.get('missingweaponconceptmodifier', None)
    @property
    def SearchType(self):
        return self._raw_data.get('searchtype', "0")
class FollowGoal(Targetname):
    """Base class for follow goals: actor/goal/formation keyvalues."""
    @property
    def actor(self):
        return self._raw_data.get('actor', None)
    @property
    def goal(self):
        return self._raw_data.get('goal', None)
    @property
    def SearchType(self):
        return self._raw_data.get('searchtype', "0")
    @property
    def StartActive(self):
        return self._raw_data.get('startactive', "0")
    @property
    def MaximumState(self):
        return self._raw_data.get('maximumstate', "1")
    @property
    def Formation(self):
        return self._raw_data.get('formation', "0")
class ai_goal_follow(FollowGoal):
    """ai_goal_follow entity: adds only origin on top of FollowGoal."""
    icon_sprite = "editor/ai_goal_follow.vmt"
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    pass
class ai_goal_injured_follow(FollowGoal):
    """ai_goal_injured_follow entity: adds only origin on top of FollowGoal."""
    icon_sprite = "editor/ai_goal_follow.vmt"
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    pass
class env_detail_controller(Angles):
    """env_detail_controller entity: detail-prop fade distances."""
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def fademindist(self):
        return parse_source_value(self._raw_data.get('fademindist', 400))
    @property
    def fademaxdist(self):
        return parse_source_value(self._raw_data.get('fademaxdist', 1200))
class env_global(EnvGlobal):
    """env_global entity: origin and global-state name."""
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def globalstate(self):
        return self._raw_data.get('globalstate', None)
class BaseCharger(Angles, Targetname, BaseFadeProp):
    """Marker base class for wall chargers; no keyvalues of its own."""
    pass
class item_healthcharger(BaseCharger):
    """item_healthcharger entity: health-charger model, charge and skin."""
    model_ = "models/props_blackmesa/health_charger.mdl"
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def charge(self):
        return parse_source_value(self._raw_data.get('charge', 50))
    @property
    def skintype(self):
        return self._raw_data.get('skintype', "0")
class item_suitcharger(BaseCharger):
    """item_suitcharger entity: HEV-charger model, charge and skin."""
    model_ = "models/props_blackmesa/hev_charger.mdl"
    @property
    def origin(self):
        return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
    @property
    def charge(self):
        return parse_source_value(self._raw_data.get('charge', 75))
    @property
    def skintype(self):
        return self._raw_data.get('skintype', "0")
class | |
+ str(status) )
# Remove unused
tempMat["data"] = tempMat["data"][0:idx.value]
tempMat["row"] = tempMat["row"][0:idx.value]
tempMat["col"] = tempMat["col"][0:idx.value]
return tempMat
def acquireSmallerMatrix( COOMat ):
    """
    Compress a sparse COO matrix by dropping its all-zero rows.

    The input matrix is modified in place: its row indices are renumbered
    onto a dense 0..k-1 range and the matrix is shrunk to k rows.

    :param COOMat: The large sparse matrix in COO format.
    :return Dict with "CSRMatrix" (the compressed matrix in CSR format) and
        "originalRow" (the original index of each remaining row).
    """
    # Unique occupied rows, plus the mapping of each entry onto them
    occupiedRows, _, newRowIndex = np.unique(
        COOMat.row, return_index=True, return_inverse=True )
    # Renumber entries into the compact row indexing
    COOMat.row = newRowIndex
    # Shrink the matrix so only the occupied rows remain
    COOMat.resize( (occupiedRows.size, COOMat.shape[1]) )
    return { "CSRMatrix" : COOMat.tocsr(), "originalRow" : occupiedRows }
def mapTriVals2Mat( matrix, vector, N ):
    """
    Map values at triangles to a system matrix on the nodes.

    :param matrix: Dict from acquireSmallerMatrix (keys "CSRMatrix" and
        "originalRow"), as produced by computeGeneric or its wrappers
        (computeM, computeB, computeG, computeU).
    :param vector: Constant per simplex, or a scalar broadcast to all simplices.
    :param N: Output shape; a scalar N means an N x N matrix.
    :return A N[0] x N[1] CSR matrix for the finite-element linear system.
    """
    if np.isscalar(N):
        N = N * np.array([1,1])
    if np.isscalar(vector):
        # Broadcast the scalar constant over all simplices
        vector = vector * np.ones(matrix["CSRMatrix"].shape[1], dtype="float64" )
    # Map from simplex values to basis-function values
    vals = matrix["CSRMatrix"] * vector
    origRows = matrix["originalRow"]
    if N[1] == 1:
        # Column vector: row index is the original row, single column
        rowInd = origRows.astype(np.uintc)
        colInd = np.zeros( (origRows.size) ).astype(np.uintc)
    else:
        # Full matrix: unflatten the combined row index into (row, col)
        rowInd = np.floor( origRows / N[0] ).astype(np.uintc)
        colInd = ( origRows % N[0] ).astype(np.uintc)
    return sparse.coo_matrix( (vals, (rowInd, colInd)), shape = N ).tocsr()
class abstractDeformed(FEM):
    """
    Generic FEM child class for the deformed Matern models.
    The childParams parameter completely decides the model.
    """
    # parameters of inherited model
    childParams = None
    # Dictionary of what matMaps parameters to calculate
    matMapsCalculate = None
    matMapsCalculateEdges = None
    @abc.abstractmethod
    def paramsFunction(self):
        # Function for computing FEM params from child params
        return
    def __init__( self, mesh, childParams, nu, sigma, mu = 0, libPath = None, BCDirichlet = None, BCRobin = None, sourceCoeff = None, factorize = True ):
        """
        :param mesh: Mesh to assemble the FEM system on (None for copy()).
        :param childParams: Model-specific parameters; see paramsFunction.
        :param nu: Smoothness parameter of the field.
        :param sigma: Marginal standard deviation.
        :param mu: Mean value.
        :param libPath: Optional path to the compiled library.
        :param BCDirichlet / BCRobin: Boundary-condition specifications.
        :param sourceCoeff: Source-term coefficients per simplex.
        :param factorize: Whether to factorize the assembled system.
        """
        # Acquire necessary maps from mesh cells to system matrices.
        # Rebind instead of appending in place: matMapsCalculate is a
        # class-level list shared by all instances of a subclass, so an
        # in-place append would leak an extra 'U' into every instance
        # created afterwards.
        if sourceCoeff is not None:
            self.matMapsCalculate = list(self.matMapsCalculate) + ['U']
        # Acquire necessary maps from mesh edges to system matrices
        if BCRobin is not None:
            self.matMapsCalculateEdges = []
            if np.any(BCRobin[:, 0] != 0):
                self.matMapsCalculateEdges.append('U')
            if np.any(BCRobin[:, 1] != 0):
                self.matMapsCalculateEdges.append('M')
        # Parent init
        super(abstractDeformed, self).__init__(mesh, \
            matMapsCalculate = self.matMapsCalculate, \
            matMapsCalculateEdges = self.matMapsCalculateEdges, \
            libPath = libPath\
            )
        # Update system
        self.updateSystem( childParams = childParams, nu = nu, mu = mu, sigma = sigma, sourceCoeff = sourceCoeff, BCRobin = BCRobin, BCDirichlet = BCDirichlet, factorize = factorize )
        return
    def copy(self):
        """Return a copy sharing the child parameters and map settings."""
        out = type(self)( None, None, None, None)
        out = super(abstractDeformed, self).copyParent(out)
        out.childParams = self.childParams
        out.matMapsCalculate = self.matMapsCalculate
        out.matMapsCalculateEdges = self.matMapsCalculateEdges
        return out
    def updateSystem( self, childParams, nu, sigma, mu = None, BCDirichlet = None, BCRobin = None, sourceCoeff = None, factorize = True ):
        """Recompute coefficients from the child parameters and rebuild the system."""
        # Nothing to assemble without a mesh (e.g. during copy())
        if self.mesh is None:
            return
        # Setup system
        self.childParams = childParams
        self.nu = nu
        d = self.mesh.topD
        alpha = nu + d / 2.0
        # Normalization so that sigma acts as the marginal standard deviation
        tau = np.sqrt( special.gamma(nu) / ( special.gamma(alpha) * (4 * np.pi)**(d/2) ) ) / ( sigma )
        if np.isscalar(tau):
            tau = tau * np.ones((self.mesh.N))
        MCoeff, BCoeff, GCoeff = self.paramsFunction( )
        # Parent init
        super(abstractDeformed, self).updateSystem( \
            MCoeff = MCoeff, \
            tau = tau, nu = nu, mu = mu, \
            BCoeff = BCoeff, \
            GCoeff = GCoeff, \
            sourceCoeff = sourceCoeff, \
            BCRobin = BCRobin, \
            BCDirichlet = BCDirichlet, \
            factorize = factorize \
            )
        return
class MaternFEM(abstractDeformed):
    """
    Class representing the classical matern model.
    """
    matMapsCalculate = ['M', 'G']
    matMapsCalculateEdges = None
    def paramsFunction( self ):
        """
        Defines the standard Matérn SPDE model parameterized by a correlation range ('r').
        :return The coefficients for the M, B, and G matrices.
        """
        if self.childParams is None:
            raise Exception("No r-parameter given")
        r = self.childParams
        if isinstance( r, dict):
            r = r["r"]
        embDim = self.mesh.embD
        alpha = self.nu + embDim / 2
        # Isotropic metric determined by the correlation range
        logGSqrt = - embDim * np.log( r / np.sqrt(8 * self.nu) )
        GInv = ( np.exp( - 2 / embDim * logGSqrt ) * np.eye(embDim) ).flatten()
        MCoeff = np.exp( 1 / alpha * logGSqrt )
        # Scale only the non-zero entries of the (flattened) inverse metric
        GCoeff = [ MCoeff * entry if entry != 0 else None for entry in GInv ]
        return (MCoeff, None, GCoeff)
class anisotropicMaternFEM(abstractDeformed):
    """
    Class representing the anisotropic matern model in two dimensions.
    """
    matMapsCalculate = ['M', 'G']
    matMapsCalculateEdges = None
    def paramsFunction( self ):
        """
        Generate FEM model for anisotropic Matérn model in two dimensions given an angle of the main direction ('angle') and two correlation ranges ('r').
        :return The coefficients for the M, B, and G matrices.
        """
        if not self.mesh.embD == 2:
            raise Exception("Current class only defined for manifolds embedded in R^2!")
        if self.childParams is None:
            raise Exception("No parameters given")
        if not isinstance( self.childParams, dict):
            raise Exception("Parameters were not given in dictionary format")
        alpha = self.nu + self.mesh.topD / 2
        # Build the metric from the two principal directions and their ranges
        logGSqrt, GInv = orthVectorsToG( angleToVecs2D(self.childParams["angle"]).transpose(), self.childParams["r"] / np.sqrt(8*self.nu) )
        # Set FEM parameters
        MCoeff = np.exp( 1/alpha * logGSqrt )
        BCoeff = None
        GCoeff = [None] * (self.mesh.embD**2)
        if GInv is not None:
            for iterGInv in range(self.mesh.embD**2):
                # Only non-zero entries of the flattened inverse metric are kept
                if GInv[iterGInv] != 0:
                    GCoeff[iterGInv] = MCoeff * GInv[iterGInv]
        return (MCoeff, BCoeff, GCoeff)
class nonStatFEM(abstractDeformed):
    """
    Class representing the general deformed Matern model.
    """
    matMapsCalculate = ['M', 'G']
    matMapsCalculateEdges = None
    def paramsFunction( self ):
        """
        Function to map child parameters to FEM parameters.
        In the member object 'childParams', either a function is provided under the name 'f' or it is assumed that the parameters 'logGSqrt' and 'GInv' are provided.
        If a function was provided under the name 'f', this function should take the dictionary self.childParams as its argument.
        It should output a tuple corresponding to logGSqrt and GInv.
        logGSqrt is the logarithm of the squared determinant of G
        GInv is the inverse of G
        :return: a tuple corresponding to the coefficients of M, B, and G.
        """
        if self.childParams is None:
            raise Exception("No parameters given")
        if not isinstance( self.childParams, dict):
            raise Exception("Parameters were not given in dictionary format")
        # Compute kappa and H
        logGSqrt = None
        GInv = None
        if "f" in self.childParams:
            # Callable form: derive the metric from the parameter dict
            logGSqrt, GInv = self.childParams["f"]( self.childParams )
        else:
            # Direct form: metric terms supplied explicitly
            logGSqrt = self.childParams["logGSqrt"]
            GInv = self.childParams["GInv"]
        alpha = self.nu + self.mesh.topD / 2
        # Set FEM parameters
        MCoeff = np.exp( 1/alpha * logGSqrt )
        BCoeff = None
        GCoeff = [None] * (self.mesh.embD**2)
        if GInv is not None:
            for iterGInv in range(self.mesh.embD**2):
                # np.any: entries may be per-simplex arrays, not scalars
                if np.any(GInv[iterGInv] != 0):
                    GCoeff[iterGInv] = MCoeff * GInv[iterGInv]
        return (MCoeff, BCoeff, GCoeff)
def angleToVecs2D( angle ):
    """
    :param angle: An angle [radians] for the main direction (from x-axis).
        May be a Python scalar, a 0-d numpy array, or an array of angles.
    :return A 2D-array which columns are two orthogonal unit vectors, the
        first pointing in the direction of angle. For an array of k angles
        a (k, 2, 2) stack of such matrices is returned.
    """
    if isinstance( angle, np.ndarray):
        # Shape (4,) for 0-d input, (4, k) for k angles
        rotMat = np.stack( ( np.cos(angle), -np.sin(angle), np.sin(angle), np.cos(angle) ) )
        # Branch on ndim, not size: a 0-d array must take the scalar path
        # (size is 1 there, so the old `size > 0` test crashed in transpose),
        # and an empty array can still be reshaped to (0, 2, 2).
        if angle.ndim > 0:
            rotMat = rotMat.reshape( (4, angle.size) ).transpose((1,0)).reshape( (angle.size, 2, 2) )
        else:
            rotMat = rotMat.reshape( (2, 2) )
    else:
        rotMat = np.array( [ [ np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)] ] )
    # Columns of the identity rotated by the angle(s)
    vectors = np.matmul( rotMat, np.eye(2) )
    return vectors
def tangentVectorsOnSphere( points, northPole = np.array([0.0,0.0,1.0]) ):
"""
Acquire a basis for the tangent space at given points | |
# models/00_tables.py
# -*- coding: utf-8 -*-
"""
Global tables and re-usable fields
"""
# =============================================================================
# Import models
#
from s3.s3model import S3Model
import eden as models
current.models = models
current.s3db = s3db = S3Model()
# Explicit import statements to have them reload automatically in debug mode
import eden.asset
import eden.auth
import eden.cms
import eden.delphi
import eden.doc
import eden.dvi
import eden.event
import eden.fire
import eden.gis
import eden.hms
import eden.hrm
import eden.inv
import eden.irs
import eden.member
import eden.msg
import eden.ocr
import eden.org
import eden.patient
import eden.pr
import eden.sit
import eden.proc
import eden.project
import eden.req
import eden.scenario
import eden.supply
import eden.support
import eden.survey
import eden.sync
import eden.vehicle
# =============================================================================
# Import S3 meta fields into global namespace
#
from s3.s3fields import *
# =============================================================================
# Record authorship meta-fields
# Author of a record
# Defaults to the logged-in user at record creation; hidden from forms
s3_meta_created_by = S3ReusableField("created_by", db.auth_user,
                                     readable=False,
                                     writable=False,
                                     requires=None,
                                     default=session.auth.user.id
                                             if auth.is_logged_in()
                                             else None,
                                     represent=s3_user_represent,
                                     ondelete="RESTRICT")
# Last author of a record
# 'update' re-stamps the field with the current user on every update
s3_meta_modified_by = S3ReusableField("modified_by", db.auth_user,
                                      readable=False,
                                      writable=False,
                                      requires=None,
                                      default=session.auth.user.id
                                              if auth.is_logged_in()
                                              else None,
                                      update=session.auth.user.id
                                             if auth.is_logged_in()
                                             else None,
                                      represent=s3_user_represent,
                                      ondelete="RESTRICT")
def s3_authorstamp():
    """Return the record-authorship meta-fields (created_by, modified_by)."""
    return (s3_meta_created_by(),
            s3_meta_modified_by())
# =============================================================================
# Record ownership meta-fields
# Individual user who owns the record
# Hidden ownership field; defaults to the logged-in user
s3_meta_owned_by_user = S3ReusableField("owned_by_user", db.auth_user,
                                        readable=False,
                                        writable=False,
                                        requires=None,
                                        default=session.auth.user.id
                                                if auth.is_logged_in()
                                                else None,
                                        represent=lambda id: \
                                                  id and s3_user_represent(id) or UNKNOWN_OPT,
                                        ondelete="RESTRICT")
# Role of users who collectively own the record
s3_meta_owned_by_group = S3ReusableField("owned_by_group", "integer",
                                         readable=False,
                                         writable=False,
                                         requires=None,
                                         default=None,
                                         represent=s3_auth_group_represent)
# Role of the Organisation the record belongs to
s3_meta_owned_by_organisation = S3ReusableField("owned_by_organisation", "integer",
                                                readable=False,
                                                writable=False,
                                                requires=None,
                                                default=None,
                                                represent=s3_auth_group_represent)
# Person Entity owning the record
s3_meta_owned_by_entity = S3ReusableField("owned_by_entity", "integer",
                                          readable=False,
                                          writable=False,
                                          requires=None,
                                          default=None,
                                          # use a lambda here as we don't
                                          # want the model to be loaded yet
                                          represent=lambda val: \
                                                    s3db.pr_pentity_represent(val))
def s3_ownerstamp():
    """Return the record-ownership meta-fields (user, group, organisation, entity)."""
    return (s3_meta_owned_by_user(),
            s3_meta_owned_by_group(),
            s3_meta_owned_by_organisation(),
            s3_meta_owned_by_entity())
# Make available for S3Models
s3.ownerstamp = s3_ownerstamp
# =============================================================================
def s3_timestamp():
    """Return the record-timestamp meta-fields (created_on, modified_on)."""
    return (s3_meta_created_on(),
            s3_meta_modified_on(),
            )
# Make available for S3Models
s3.timestamp = s3_timestamp
# =============================================================================
# Common meta-fields
# @todo: can this be moved into s3fields.py?
#
def s3_meta_fields():
    """
    Return the standard set of meta-fields to append to table
    definitions: record identity (uuid, mci), deletion tracking,
    timestamps, author-stamps and ownership fields.
    """
    identity = (s3_meta_uuid(),
                s3_meta_mci(),
                s3_meta_deletion_status(),
                s3_meta_deletion_fk())
    stamps = (s3_meta_created_on(),
              s3_meta_modified_on(),
              s3_meta_created_by(),
              s3_meta_modified_by())
    ownership = (s3_meta_owned_by_user(),
                 s3_meta_owned_by_group(),
                 s3_meta_owned_by_organisation(),
                 s3_meta_owned_by_entity())
    return identity + stamps + ownership
# Make available for S3Models
s3.meta_fields = s3_meta_fields
# =============================================================================
# Names of all meta-fields (e.g. to exclude them from forms/exports).
# Derived from s3_meta_fields() so this list cannot drift out of sync
# with the fields actually appended to tables (the original repeated
# the same twelve field constructors inline).
response.s3.all_meta_field_names = [field.name for field in s3_meta_fields()]
# =============================================================================
# Reusable field for scheduler task links
#
# Reusable FK to scheduler tasks; CASCADE so that task links are
# removed together with the task itself
scheduler_task_id = S3ReusableField("scheduler_task_id",
                                    "reference %s" % s3base.S3Task.TASK_TABLENAME,
                                    ondelete="CASCADE")
s3.scheduler_task_id = scheduler_task_id
# =============================================================================
# Reusable roles fields for map layer permissions management (GIS)
# Restrict access to a single role (zero-option reads "Public")
role_required = S3ReusableField("role_required", db.auth_group,
                                sortby="role",
                                requires = IS_NULL_OR(IS_ONE_OF(db,
                                                                "auth_group.id",
                                                                "%(role)s",
                                                                zero=T("Public"))),
                                widget = S3AutocompleteWidget(
                                            "auth",
                                            "group",
                                            fieldname="role"),
                                represent = s3_auth_group_represent,
                                label = T("Role Required"),
                                comment = DIV(_class="tooltip",
                                              _title="%s|%s" % (T("Role Required"),
                                                                T("If this record should be restricted then select which role is required to access the record here."))),
                                ondelete = "RESTRICT")
# Permit access to any of a list of roles
roles_permitted = S3ReusableField("roles_permitted", 'list:reference auth_group',
                                  sortby="role",
                                  requires = IS_NULL_OR(IS_ONE_OF(db,
                                                                  "auth_group.id",
                                                                  "%(role)s",
                                                                  multiple=True)),
                                  # @ToDo
                                  #widget = S3CheckboxesWidget(lookup_table_name = "auth_group",
                                  #                            lookup_field_name = "role",
                                  #                            multiple = True),
                                  represent = s3_auth_group_represent,
                                  label = T("Roles Permitted"),
                                  comment = DIV(_class="tooltip",
                                                _title="%s|%s" % (T("Roles Permitted"),
                                                                  T("If this record should be restricted then select which role(s) are permitted to access the record here."))),
                                  ondelete = "RESTRICT")
# Make available for S3Models
s3.role_required = role_required
s3.roles_permitted = roles_permitted
# =============================================================================
# Other reusable fields
# -----------------------------------------------------------------------------
# Reusable comments field to include in other table definitions
# Free-text comments field, shared across many table definitions
s3_comments = S3ReusableField("comments", "text",
                              label = T("Comments"),
                              widget = s3_comments_widget,
                              comment = DIV(_class="tooltip",
                                            _title="%s|%s" % (T("Comments"),
                                                              T("Please use this field to record any additional information, including a history of the record if it is updated."))))
s3.comments = s3_comments
# -----------------------------------------------------------------------------
# Reusable currency field to include in other table definitions
#
# @ToDo: Move to a Finance module
#
# Currency options & default come from deployment settings
currency_type_opts = deployment_settings.get_fin_currencies()
default_currency = deployment_settings.get_fin_currency_default()
# 3-character currency code (presumably ISO 4217 -- confirm)
currency_type = S3ReusableField("currency_type", "string",
                                length = 3,
                                #notnull=True,
                                requires = IS_IN_SET(currency_type_opts.keys(),
                                                     zero=None),
                                default = default_currency,
                                label = T("Currency"),
                                #represent = lambda opt: \
                                #    currency_type_opts.get(opt, UNKNOWN_OPT),
                                writable = deployment_settings.get_fin_currency_writable())
response.s3.currency_type = currency_type
# =============================================================================
# Lx
#
# These fields are populated onaccept from location_id
# - for many reads to fewer writes, this is faster than Virtual Fields
#
# Labels that vary by country are set by gis.update_table_hierarchy_labels()
#
# Lx location-hierarchy fields (denormalised from location_id onaccept)
address_L4 = S3ReusableField("L4",
                             #label=gis.get_location_hierarchy("L4"),
                             readable=False,
                             writable=False)
address_L3 = S3ReusableField("L3",
                             #label=gis.get_location_hierarchy("L3"),
                             readable=False,
                             writable=False)
address_L2 = S3ReusableField("L2",
                             #label=gis.get_location_hierarchy("L2"),
                             readable=False,
                             writable=False)
address_L1 = S3ReusableField("L1",
                             #label=gis.get_location_hierarchy("L1"),
                             readable=False,
                             writable=False)
address_L0 = S3ReusableField("L0",
                             # L0 Location Name never varies except with a Translation
                             label=T("Country"),
                             readable=False,
                             writable=False)
# -----------------------------------------------------------------------------
def lx_fields():
    """Return the reusable Lx location-hierarchy fields (L4 down to L0)."""
    return (address_L4(),
            address_L3(),
            address_L2(),
            address_L1(),
            address_L0())
s3.lx_fields = lx_fields
# -----------------------------------------------------------------------------
# Hide Lx fields in Create forms
# inc list_create (list_fields over-rides)
def lx_hide(table):
    """
    Hide all Lx location-hierarchy fields on the given table, e.g. in
    Create forms (inc list_create; list_fields over-rides).

    :param table: the table whose L0..L4 fields should be hidden
    """
    for level in ("L0", "L1", "L2", "L3", "L4"):
        getattr(table, level).readable = False
# Make available for S3Models
s3.lx_hide = lx_hide
# -----------------------------------------------------------------------------
def lx_onvalidation(form):
    """
    Write the Lx location-hierarchy fields (L0..L4) into form.vars from
    the selected location_id
    - used by pr_person, hrm_training, irs_ireport
    @ToDo: Allow the reverse operation.
           If these fields are populated then create/update the location

    :param form: the FORM being validated (reads & updates form.vars)
    """
    vars = form.vars  # NOTE(review): shadows builtin vars() (pre-existing)
    if "location_id" in vars and vars.location_id:
        table = s3db.gis_location
        query = (table.id == vars.location_id)
        location = db(query).select(table.name,
                                    table.level,
                                    table.parent,
                                    table.path,
                                    limitby=(0, 1)).first()
        if location:
            if location.level == "L0":
                # the location is a country itself
                vars.L0 = location.name
            elif location.level == "L1":
                vars.L1 = location.name
                if location.parent:
                    # the parent of an L1 is the country
                    query = (table.id == location.parent)
                    country = db(query).select(table.name,
                                               limitby=(0, 1)).first()
                    if country:
                        vars.L0 = country.name
            else:
                # Get Names of ancestors at each level
                vars = gis.get_parent_per_level(vars,
                                                vars.location_id,
                                                feature=location,
                                                ids=False,
                                                names=True)
s3.lx_onvalidation = lx_onvalidation
# -----------------------------------------------------------------------------
def lx_update(table, record_id):
    """
    Write the Lx location-hierarchy fields (L0..L4) of a record from
    its location_id
    - used by hrm_human_resource & pr_address
    @ToDo: Allow the reverse operation.
           If these fields are populated then create/update the location

    :param table: the table containing the record
    :param record_id: the id of the record to update
    """
    if "location_id" in table:
        ltable = s3db.gis_location
        # join the record with its location
        query = (table.id == record_id) & \
                (ltable.id == table.location_id)
        location = db(query).select(ltable.id,
                                    ltable.name,
                                    ltable.level,
                                    ltable.parent,
                                    ltable.path,
                                    limitby=(0, 1)).first()
        if location:
            vars = Storage()
            if location.level == "L0":
                vars.L0 = location.name
            elif location.level == "L1":
                vars.L1 = location.name
                if location.parent:
                    # the parent of an L1 is the country
                    query = (ltable.id == location.parent)
                    country = db(query).select(ltable.name,
                                               limitby=(0, 1)).first()
                    if country:
                        vars.L0 = country.name
            else:
                # Get Names of ancestors at each level
                vars = gis.get_parent_per_level(vars,
                                                location.id,
                                                feature=location,
                                                ids=False,
                                                names=True)
            # Update record
            db(table.id == record_id).update(**vars)
s3.lx_update = lx_update
# =============================================================================
# Addresses
#
# These fields are populated onaccept from location_id
#
# @ToDo: Add Postcode to gis.update_table_hierarchy_labels()
#
# Street-address fields (denormalised from location_id onaccept)
address_building_name = S3ReusableField("building_name",
                                        label=T("Building Name"),
                                        readable=False,
                                        writable=False)
address_address = S3ReusableField("address",
                                  label=T("Address"),
                                  readable=False,
                                  writable=False)
address_postcode = S3ReusableField("postcode",
                                   # label is deployment-specific (e.g. "Zip")
                                   label=deployment_settings.get_ui_label_postcode(),
                                   readable=False,
                                   writable=False)
# -----------------------------------------------------------------------------
def address_fields():
    """Return the reusable address fields (building name, street
    address, postcode plus the L4..L0 hierarchy)."""
    return (address_building_name(),
            address_address(),
            address_postcode(),
            address_L4(),
            address_L3(),
            address_L2(),
            address_L1(),
            address_L0())
s3.address_fields = address_fields
# -----------------------------------------------------------------------------
# Hide Address fields in Create forms
# inc list_create (list_fields over-rides)
def address_hide(table):
    """
    Hide the address-related fields on the given table, e.g. in Create
    forms (inc list_create; list_fields over-rides).

    :param table: the table whose address fields should be hidden
    """
    hidden = ("building_name", "address",
              "L4", "L3", "L2", "L1", "L0",
              "postcode")
    for fname in hidden:
        getattr(table, fname).readable = False
# Make available for S3Models
s3.address_hide = address_hide
# -----------------------------------------------------------------------------
def address_onvalidation(form):
    """
    Write the Address fields (street, postcode, building name, Lx)
    into form.vars from the selected location_id
    - used by pr_address, org_office & cr_shelter
    @ToDo: Allow the reverse operation.
           If these fields are populated then create/update the location

    :param form: the FORM being validated (reads & updates form.vars)
    """
    vars = form.vars
    if "location_id" in vars and vars.location_id:
        table = s3db.gis_location
        # Read Postcode & Street Address
        query = (table.id == vars.location_id)
        location = db(query).select(table.addr_street,
                                    table.addr_postcode,
                                    table.name,
                                    table.level,
                                    table.parent,
                                    table.path,
                                    limitby=(0, 1)).first()
        if location:
            vars.address = location.addr_street
            vars.postcode = location.addr_postcode
            if location.level == "L0":
                vars.L0 = location.name
            elif location.level == "L1":
                vars.L1 = location.name
                if location.parent:
                    # the parent of an L1 is the country
                    query = (table.id == location.parent)
                    country = db(query).select(table.name,
                                               limitby=(0, 1)).first()
                    if country:
                        vars.L0 = country.name
            else:
                if location.level is None:
                    # a specific site: its name is the building name
                    vars.building_name = location.name
                # Get Names of ancestors at each level
                vars = gis.get_parent_per_level(vars,
                                                vars.location_id,
                                                feature=location,
                                                ids=False,
                                                names=True)
s3.address_onvalidation = address_onvalidation
# -----------------------------------------------------------------------------
def address_update(table, record_id):
    """
    Write the Address fields (street, postcode, building name, Lx) of
    a record from its location_id
    - used by asset_asset
    @ToDo: Allow the reverse operation.
           If these fields are populated then create/update the location

    :param table: the table containing the record
    :param record_id: the id of the record to update
    """
    if "location_id" in table:
        ltable = s3db.gis_location
        # Read Postcode & Street Address (join record with its location)
        query = (table.id == record_id) & \
                (ltable.id == table.location_id)
        location = db(query).select(ltable.id,
                                    ltable.addr_street,
                                    ltable.addr_postcode,
                                    ltable.name,
                                    ltable.level,
                                    ltable.parent,
                                    ltable.path,
                                    limitby=(0, 1)).first()
        if location:
            vars = Storage()
            vars.address = location.addr_street
            vars.postcode = location.addr_postcode
            if location.level == "L0":
                vars.L0 = location.name
            elif location.level == "L1":
                vars.L1 = location.name
                if location.parent:
                    # the parent of an L1 is the country
                    query = (ltable.id == location.parent)
                    country = db(query).select(ltable.name,
                                               limitby=(0, 1)).first()
                    if country:
                        vars.L0 = country.name
            else:
                if location.level is None:
                    # a specific site: its name is the building name
                    vars.building_name = location.name
                # Get Names of ancestors at each level
                vars = gis.get_parent_per_level(vars,
                                                location.id,
                                                feature=location,
                                                ids=False,
                                                names=True)
            # Update record
            db(table.id == record_id).update(**vars)
s3.address_update = address_update
# =============================================================================
# Default CRUD strings
#
ADD_RECORD = T("Add Record")
LIST_RECORDS = T("List Records")
s3.crud_strings = Storage(
title_create = ADD_RECORD,
title_display = T("Record Details"),
title_list = LIST_RECORDS,
title_update = T("Edit Record"),
title_search = T("Search Records"),
subtitle_create = T("Add New Record"),
subtitle_list = T("Available Records"),
label_list_button = LIST_RECORDS,
label_create_button | |
# Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC.
# Under the terms of Contract DE-NA0003525 with National Technology and Engineering Solutions
# of Sandia, LLC, the U.S. Government retains certain rights in this software.
# standard library
import os
import hashlib
import pickle
import time
import base64
import inspect
import queue
import threading
# 3rd party library
import cherrypy
# local imports
import slycat.web.server
# public exports from this module
__all__ = ["CacheError", "Cache"]
# error catching for the cache
class CacheError(Exception):
    """Base class for all cache-related errors raised by this module."""
class TimeError(CacheError):
    """Raised when a time value is supplied in the wrong format."""
class LifetimeError(CacheError):
    """Raised when the lifetime of a cached object is invalid or has
    expired."""
# a cached object consists of a value and an expiration
# as well as a thread lock
class CachedObjectWrapper(object):
    """
    Wraps a value stored in the cache together with its expiration time
    and a lock guarding concurrent access to this entry.
    """

    def __init__(self, value, expiration=None):
        """
        Create a cached object with a value and an optional expiration.

        :param value: object being wrapped
        :param expiration: absolute expiry time (seconds since the
            epoch, as returned by time.time()), or None for no expiry
        """
        self._value = value
        self._expiration = expiration
        # Per-instance lock. The original declared the lock at class
        # level, so every wrapper shared ONE lock -- contradicting the
        # documented "lock on cached object" and serializing access
        # across all entries.
        self.__lock = threading.Lock()

    @property
    def lock(self):
        """
        threading.Lock() used to control crud operations to this entry.
        """
        return self.__lock

    @property
    def value(self):
        """
        Return the object that is being wrapped by the cache.
        """
        return self._value

    @property
    def expiration(self):
        """
        Return the absolute expiry time for the cached object; may be
        None if there is no expiration.
        """
        return self._expiration

    @expiration.setter
    def expiration(self, expiration):
        """
        Set the absolute expiry time for the cached object (or None to
        disable expiry).
        """
        self._expiration = expiration

    def expired(self):
        """
        Return True if the entry has passed its expiry time; entries
        without an expiration never expire.

        :return: boolean
        """
        if self.expiration is None:
            return False
        return self.expiration < time.time()
class Cache(object):
"""
decorator class used to cache
"""
# lock on entire cache
_lock = threading.Lock()
    def __init__(self, fs_cache_path=None, **kwargs):
        """
        Create a cache, optionally backed by the file system.

        The keyword arguments give the entry lifetime and may combine:
          - years (31,556,900 seconds per year)
          - months (2,629,740 seconds per month)
          - weeks (604,800 seconds per week)
          - days (86,400 seconds per day)
          - hours (3600 seconds per hour)
          - minutes (60 seconds per minute)
          - seconds
        With no keyword arguments, cached entries never expire.

        :param fs_cache_path: path (string) to the file-system cache
            directory; if None, it is resolved later by check_fs_path()
        :param kwargs: lifetime components as described above
        :raises LifetimeError: if the computed lifetime is <= 0 seconds
        """
        if kwargs:
            # self.to_seconds (defined elsewhere in this class) converts
            # the keyword components into a total number of seconds
            self._init_expire_time = self.to_seconds(**kwargs)
            # we need a time greater than 0
            if self._init_expire_time <= 0:
                msg = "[CACHE] Lifetime (%s seconds) is 0 or less." % self._init_expire_time
                cherrypy.log.error(msg)
                raise LifetimeError(msg)
        else:
            # no expiration time
            self._init_expire_time = None
        # set up an in memory cache
        self._loaded = {}
        # set path for file system
        if fs_cache_path:
            self._fs_cache_path = os.path.abspath(fs_cache_path)
            # make cache directory unless it already exists
            if not os.path.exists(self._fs_cache_path):
                os.makedirs(self._fs_cache_path)
        else:
            self._fs_cache_path = None
def check_fs_path(self):
"""
This function is used to set the file path as it does
not exist when the cache is created in the server/__init__.py
:return:
"""
# creates slycat web server cache, if it doesn't already exist
if not self._fs_cache_path:
cherrypy.log.error("[CACHE] %s is the cache location." %
(slycat.web.server.config["slycat-web-server"]["cache-store"]))
self._fs_cache_path = os.path.abspath(
slycat.web.server.config["slycat-web-server"]["cache-store"])
if not os.path.exists(self._fs_cache_path):
os.makedirs(self._fs_cache_path)
def __getitem__(self, key):
"""
get the item from the cache
:param key: hashed key for item in cache
:return: value associate with key or None if not found
"""
# check for slycat path
self.check_fs_path()
# is item in cache?
if key in self:
# get hash and value
digest = self.digest_hash(key)
value = self._loaded[digest].value
expired = self._loaded[digest].expired()
# if expired, erase and return None
if expired:
self.expire(digest)
return None
else:
return None
# cherrypy.log.error("[CACHE] Retrieving %s from cache." % str(digest))
return value
def __setitem__(self, key, value):
"""
set the key:value in the cache. if it is already in
the cache it gets replaced by new value
:param key: hashed representation of the function
:param value: stored result from the function
:return: not used
"""
# create slycat file path if it doesn't exist
self.check_fs_path()
# get hash and path
digest_hash = self.digest_hash(key)
path = os.path.join(self._fs_cache_path, digest_hash)
# if item exists, erase it
if (digest_hash in self._loaded) or os.path.exists(path):
self.expire(digest_hash)
# create new copy in cache
cached_contents = CachedObjectWrapper(value, expiration=self.cached_item_expire_time())
self.write(cached_contents, path)
self._loaded[digest_hash] = cached_contents
# cherrypy.log.error ("[CACHE] Added %s to cache." % str(digest_hash))
def __delitem__(self, digest_hash):
"""
Removes the hash keyed object from memory
but not from the filesystem.
see function expire to remove from both
:param key: item to be removed from memory
:return: not used
"""
# check slycat path
self.check_fs_path()
if digest_hash in self._loaded:
del self._loaded[digest_hash]
else:
msg = "[CACHE] Cannot delete object at %s -- not loaded in memory" % str(digest_hash)
raise CacheError(msg)
def __contains__(self, item):
"""
check if item is in the cache, true if in the cache
false otherwise
:param item: item to search for in cache
:return: boolean
"""
# check for slycat path
self.check_fs_path()
# create hash from item
digest = self.digest_hash(item)
# get the item from the cache
if digest in self._loaded:
value = self._loaded[digest]
# item was not in memory, check file system
else:
try:
value = self._load(digest, item)
except CacheError:
# item was not in the cache or the file system
return False
# check if it has expired
if value.expired():
# cherrypy.log.error("[CACHE] value is expired for %s." % str(item))
# contents were expired so we should delete them and return false
self.expire(digest)
return False
return True
def __call__(self, f):
"""
This is the decorator cache call
:param f: function to be wrapped
:return: results of the function either from
the cache or the function itself
"""
# retrieve function id?
function_meta_data = inspect.getmembers(f)
try:
fid = (function_meta_data.__name__, inspect.getargspec(f))
except (AttributeError, TypeError):
fid = (f.__name__, repr(type(f)))
def _f(*args, **kwargs):
key = (fid, args, kwargs)
# check if we have cached the result
if key in self:
result = self[key]
# adding a null guard
if result is None:
# cherrypy.log.error("[CACHE] Cache key error adding object to cache.")
result = f(*args, **kwargs)
self[key] = result
# we have not cached the result so lets get it
else:
# cherrypy.log.error("[CACHE] NOT found in cache")
result = f(*args, **kwargs)
self[key] = result
return result
return _f
    def expire(self, digest_hash):
        """
        Permanently remove the item keyed by `digest_hash`, both from
        memory and from the file system.

        :param digest_hash: hashed cache key naming the entry
        """
        # remove from filesystem
        if digest_hash in self.fs_keys:
            self._remove(digest_hash)
        # remove from memory (logged, not raised, if already gone)
        if digest_hash in self.v_keys:
            try:
                del self[digest_hash]
            except CacheError as e:
                cherrypy.log.error("[CACHE] error deleting item %s" % str(e))
def _remove(self, digest):
"""
Removes the cache item keyed by `key` from the file system.
"""
path = os.path.join(self._fs_cache_path, digest)
if os.path.exists(path):
try:
os.remove(path)
except:
msg = "[CACHE] No object for key `%s` stored." % str(path)
cherrypy.log.error(msg)
else:
msg = "[CACHE] No object for key `%s` stored." % str(path)
cherrypy.log.error(msg)
def unload(self, k):
"""
Removes the object keyed by k
from virtual memory only.
:param k:
:return:
"""
digest = self.digest_hash(k)
if digest in self._loaded:
del(self._loaded[digest])
def load(self, key):
"""
Causes the object keyed by `k` to be loaded from the
file system and returned. It therefore causes this object
to reside in memory (if it exists in the cache).
"""
return self[key]
def _load(self, digest, k):
"""
Loads the :class:`CacheObject` keyed by `k` from the
file system (residing in a file named by `digest`)
and returns the object.
This method is part of the implementation of :class:`FSCache`,
so don't use it as part of the API.
"""
# load from file, if possible
path = os.path.join(self._fs_cache_path, digest)
if os.path.exists(path):
# cherrypy.log.error("[CACHE] %s fs path cache found" % (path))
contents = self.read(path)
else:
msg = "[CACHE] Object for key `%s` does not exist." % (k,)
raise CacheError(msg)
# store in cache
self._loaded[digest] = | |
<gh_stars>0
# Copyright (c) 2020. CSIRO Australia.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import argparse
import ast
import csv
import json
import logging
import math
import os
import statistics
import time
from argparse import Namespace
import sys
from collections import defaultdict
from copy import deepcopy
from datetime import datetime
from pathlib import Path
import re
from typing import Dict, Optional
from tqdm import tqdm
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from OpenKI import logger, logging_formatter
from OpenKI.Constants import MODEL_ARGS_GROUP, DATA_SOURCES, TRAIN_DATA_HANDLERS, RELATION_SCORERS, EVAL_DATA_HANDLERS, \
EVALUATORS, BEST_MODEL_LABEL, OPTIMIZERS, FINAL_MODEL_LABEL, NEGATIVE_SAMPLE_STRATEGIES, EVALUATOR_NAMES, \
TEXT_ENCODINGS, WORD_EMBEDDINGS, TEXT_ENCODING_AGGREGATIONS, STATIC_STATE_RE, IGNORE_OPENKI_EMBEDS, \
ENTITY_WORD_EMBEDS, PREDICATE_WORD_EMBEDS, FC_TANH_THEN_SUM, CONCAT_THEN_FC_TANH, CONCAT_THEN_FC_RELU, \
FASTTEXT_EMBEDDING, EMBEDS_CACHE_FILES, NO_EMBEDDING, update_state_dict_names, \
ENTITY_WORD_DELIMITERS, BERT_EMBEDDING, RANDOM_EMBEDDING, DEGENERATE_MODEL_MARKER, DATA_VARIANTS, \
CACHE_FILE_VARIANT_SUFFIXES, NYT_FB_ENT_SUFFIX, CACHE_FILE_VARIANTS, STATE_DICT_SCORER_KEY_RE, OPTIMIZER_PARAMS
from OpenKI.LossFunctions import PairwiseRankingLoss
from OpenKI.OpenKI_Data import OpenKIMemoryIndexedTrainDataLoader
from OpenKI.RelationScorers import NeighbouringRelationsEntityEncoder, EntityNeighbourhoodScorer, QueryRelationScorer, \
DualAttentionRelationScorer, EModelEntityEncoder, MultipleRelationScorer, average_aggregator, ConcatFcCombiner, \
RelationEncoder, SumCombiner, average_aggregator_normed
from OpenKI.TextEncoders import CachedEmbeddings
from OpenKI.UtilityFunctions import update_args, diff_args
def main_loop(args, action_groups):
start_time = time.time()
def save_args(report_text="", report=True, extra_label=""):
nonlocal last_tensorboard_args
with open(output_folder / (args.file_name_base + extra_label + "args.txt"), "w") as f_args_:
json.dump(vars(args), f_args_, indent=4)
if report:
logger.info(f"saved args file {args.file_name_base}{extra_label}args.txt {report_text}")
if tensorboard is not None:
args_delta = {dif[0]: dif[2] if dif[3] != "removed" else "removed"
for dif in diff_args(last_tensorboard_args, args)}
if 'eval_next' in args_delta:
del args_delta['eval_next']
if args_delta:
tensorboard.add_text("args", re.sub(r'(^|\n)', r'\1 ', json.dumps(args_delta, indent=4)),
global_step=args.epoch) # indent 4 spaces for verbatim formatting in tensorboard
last_tensorboard_args = deepcopy(args)
main_scorer = None
tensorboard = None
if args.load_model is not None:
name_match = None
if args.load_model_newname_regex is not None:
name_match = re.match(args.load_model_newname_regex, args.load_model)
if name_match is not None:
name_groups = name_match.groups()
name_start = name_groups[0]
name_end = name_groups[-1] if len(name_groups) > 1 else ""
if name_end is None:
name_end = ""
file_name_base = name_start + name_end + "_"
else:
file_name_base = f"{args.load_model}_"
if args.jobid is not None:
file_name_base += f"{args.jobid}_"
else:
if args.jobid is None:
args.jobid = f"OKI-{datetime.now()}".replace(' ', '_')
file_name_base = f"{args.label}_{args.jobid}_"
while Path(file_name_base + "args.txt").exists():
file_name_base += "~"
args.file_name_base = file_name_base
output_folder = Path(args.output_folder)
(output_folder / args.file_name_base).parent.mkdir(parents=True, exist_ok=True)
logger_file_handler = logging.FileHandler(output_folder / f"{args.file_name_base}.log")
logger_file_handler.setLevel(logging.INFO)
logger_file_handler.setFormatter(logging_formatter)
logger.addHandler(logger_file_handler)
logger.info(f"--epochs is {args.epochs} before merging args.")
logger.info("------------------------------ New Run ---------------------------------")
logger.info(f"logger to file {args.file_name_base}.log")
model_file_name = ""
if args.load_model is not None:
if args.run_to_load:
model_file_name = f"{args.load_model}model_{args.run_to_load}.pt"
# Load and update the program arguments
new_args = args
try:
with open(output_folder / (new_args.load_model + new_args.run_to_load + "args.txt")) as f_args:
args = Namespace(**json.load(f_args))
logger.info(f"loaded run specific args for {new_args.run_to_load}")
except FileNotFoundError:
logger.warning(f'{(new_args.load_model + new_args.run_to_load + "args.txt")} not found! Trying without run')
with open(output_folder / (new_args.load_model+"args.txt")) as f_args:
args = Namespace(**json.load(f_args))
logger.info(f"loaded generic run args {(new_args.load_model+'args.txt')}")
if new_args.load_model != args.file_name_base:
logger.warning(f"file name base mismatch: '{new_args.load_model}' passed, '{args.file_name_base}' found in "
f"args file!")
update_args(args, new_args, action_groups, exclude=(MODEL_ARGS_GROUP,), silent=("load_model", "run_to_load"),
force=new_args.force_default_args + ["train", "test", "validate", "print_args_only", "run_to_load"])
args.file_name_base = new_args.file_name_base
# # for backward compatibility to add newly included program arguments
if getattr(args, "print_args_only", None) is None:
args.print_args_only = False
if getattr(args, "last_epoch_loss", None) is None:
args.last_epoch_loss = None
if getattr(args, "epoch", None) is None:
args.epoch = 0
if getattr(args, "tensorboard_dir", None) is None:
tensorboard_dir = Path("runs") / new_args.run_to_load # likely candidate for previous folder
args.tensorboard_dir = str(tensorboard_dir)
else:
tensorboard_dir = Path(args.tensorboard_dir)
if getattr(args, "output_folder", None) is None:
args.output_folder = "output"
if getattr(args, "embed_dim_pairs", None) is None:
args.embed_dim_pairs = new_args.embed_dim_pairs
if getattr(args, "data_folder", None) is None:
if getattr(args, "nyt_folder", None) is not None:
dataset_folders = (args.nyt_folder,
getattr(args, 'reverb_folder', None),
getattr(args, 'nyt_folder', None))
assert getattr(args, "reverb_folder", None) is None, \
f"Which folder, nyt, reverb or data_folder? " \
f"{' or '.join(dataset_folders)}"
assert DATA_SOURCES[1] == "nyt", "DATA_SOURCES list has been changed! Second element is not 'nyt'!"
args.data_folder = args.nyt_folder
args.data_source = DATA_SOURCES[1] # this should be nyt...
args.nyt_folder = None
elif getattr(args, "reverb_folder", None) is not None:
assert DATA_SOURCES[0] == "reverb", "DATA_SOURCES list has been changed! First element is not 'reverb'!"
args.data_folder = args.reverb_folder
args.data_source = DATA_SOURCES[0] # this should be reverb...
args.reverb_folder = None
elif getattr(args, "nyt_folder", None) is not None or getattr(args, "reverb_folder", None) is not None:
old_folders = ' or '.join(folder for folder in (getattr(args, 'nyt_folder', None),
getattr(args, 'reverb_folder', None)) if folder is not None)
logger.warning(f"Overriding old folder ({old_folders}) with specified data-folder {args.data_folder}")
if getattr(args, "eval_with", None) is None:
args.eval_with = []
if getattr(args, "text_encodings", None) is None:
args.text_encodings = None
elif "ENE-entity-word-embeds" in args.text_encodings:
i = args.text_encodings.index("ENE-entity-word-embeds")
args.text_encodings[i] = f"ENE-entity-word-embeds,concat-then-FC-relu"
elif type(args.text_encodings) is list:
for te_i, te in enumerate(args.text_encodings):
if type(te) is not list:
break
if te[0] == "ENE-entity-word-embeds":
args.text_encodings[te_i][0] = ENTITY_WORD_EMBEDS
if getattr(args, "eval_next", None) is None:
args.eval_next = True
if getattr(args, "embeds_on_cpu", None) is None:
args.embeds_on_cpu = None
if getattr(args, "single_gpu", None) is None:
args.single_gpu = True # before we had the option, it was always on single gpu
if getattr(args, "text_embeds_static", None) is None:
logger.info(f"text_embeds_static was not present, setting to None!")
args.text_embeds_static = None # it looks like early versions did not have static text embeddings!
if getattr(args, "no_learned_relation_embeds", None) is None:
args.no_learned_relation_embeds = False
if getattr(args, "sqrt_normed_neighbour_ag", None) is None:
args.sqrt_normed_neighbour_ag = False
if getattr(args, "check-nans", None) is None:
args.check_nans = False
if getattr(args, "layer_norm_on_fc", None) is None:
args.layer_norm_on_fc = False
if getattr(args, "no_activation_on_fc", None) is None:
args.no_activation_on_fc = False
if getattr(args, "degenerate_epochs", None) is None:
args.degenerate_epochs = []
args.first_degenerate_epoch = None
if getattr(args, "schedule_to_epoch", None) is None:
args.schedule_to_epoch = None
if getattr(args, "lr_final", None) is None:
args.lr_final = None
if getattr(args, "batch_size_final", None) is None:
args.batch_size_final = None
# if getattr(args, "batch_size_by_lr_schedule", None) is None:
# args.batch_size_by_lr_schedule = False
if getattr(args, "predicate_text_flag", None) is None:
args.predicate_text_flag = False
if getattr(args, "opt_params", None) is None:
args.opt_params = None
if getattr(args, "stop_on_no_best_for", None) is None:
# noinspection PyTypeHints
args.stop_on_no_best_for: Optional[int] = None
if getattr(args, "with_entailments_to", None) is None:
args.with_entailments_to = None
if getattr(args, "untyped_entailments", None) is None:
args.untyped_entailments = False
if getattr(args, "score_triples", None) is None:
args.score_triples = None
if getattr(args, "score_triples_entity_by_index", None) is None:
args.score_triples_entity_by_index = None
if getattr(args, "score_triples_relation_by_index", None) is None:
args.score_triples_relation_by_index = None
output_folder = Path(args.output_folder)
torch_device = torch.device(args.device)
if torch_device.index is None:
torch_device = torch.device(args.device, torch.cuda.current_device())
last_tensorboard_args = deepcopy(args)
else:
if not args.no_cuda:
torch_device = torch.device("cuda", torch.cuda.current_device())
else:
torch_device = torch.device("cpu")
args.device = str(torch_device)
args.last_epoch_loss = None
args.epoch = 0
tensorboard_dir = Path("runs") / file_name_base
args.tensorboard_dir = str(tensorboard_dir)
args.eval_next = True
args.degenerate_epochs = []
args.first_degenerate_epoch = None
last_tensorboard_args = argparse.Namespace() # empty for first run so all args get dumped to tensorboard
if args.data_folder is None:
args.data_folder = f"data/{args.data_source}"
if args.detect_anomaly:
args.check_nans = True
assert getattr(args, 'data_folder', None) is not None, f"No data folder specified!"
# setup model statistic for dev scoring and args entry for score values
if args.eval_with is None or len(args.eval_with) == 0:
args.eval_with = ("AUC_PR" if args.data_source in ("nyt", "nyt_ccg") else "MAP_ranking_pairs",)
if args.eval_with[0] == "AUC_PR":
best_models_by_map = False
if getattr(args, "max_AUC", None) is None:
args.max_AUC = 0.
elif args.eval_with[0].startswith("MAP"):
best_models_by_map = True
if getattr(args, "max_MAP", None) is None:
args.max_MAP = 0.
else:
raise NotImplementedError(f"Unable to determine best model statistic from '{args.eval_with[0]}'")
# Extract entailment threshold and whether tye're untyped from label string (or None if not present)
# ...dev-NC_0.20_untyped
if "use_entailments" | |
This method
is called by System.Windows.Automation.Peers.AutomationPeer.GetAcceleratorKey.
Returns: The System.Windows.Automation.AutomationProperties.AcceleratorKey that is
returned by
System.Windows.Automation.AutomationProperties.GetAcceleratorKey(System.Windows.
DependencyObject).
"""
pass
def GetAccessKeyCore(self, *args): #cannot find CLR method
    """
    GetAccessKeyCore(self: UIElementAutomationPeer) -> str
    Gets the access key for the System.Windows.UIElement that is associated with
    this System.Windows.Automation.Peers.UIElementAutomationPeer.This method is
    called by System.Windows.Automation.Peers.AutomationPeer.GetAccessKey.
    Returns: The access key for the System.Windows.UIElement that is associated with this
    System.Windows.Automation.Peers.UIElementAutomationPeer.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetAutomationControlTypeCore(self, *args): #cannot find CLR method
    """
    GetAutomationControlTypeCore(self: SliderAutomationPeer) -> AutomationControlType
    Gets the control type for the System.Windows.Controls.Slider that is associated
    with this System.Windows.Automation.Peers.SliderAutomationPeer. Called by
    System.Windows.Automation.Peers.AutomationPeer.GetAutomationControlType.
    Returns: System.Windows.Automation.Peers.AutomationControlType.Slider
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetAutomationIdCore(self, *args): #cannot find CLR method
    """
    GetAutomationIdCore(self: FrameworkElementAutomationPeer) -> str
    Gets the string that uniquely identifies the System.Windows.FrameworkElement
    that is associated with this
    System.Windows.Automation.Peers.FrameworkElementAutomationPeer. Called by
    System.Windows.Automation.Peers.AutomationPeer.GetAutomationId.
    Returns: The automation identifier for the element associated with the
    System.Windows.Automation.Peers.FrameworkElementAutomationPeer, or
    System.String.Empty if there isn't an automation identifier.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetBoundingRectangleCore(self, *args): #cannot find CLR method
    """
    GetBoundingRectangleCore(self: UIElementAutomationPeer) -> Rect
    Gets the System.Windows.Rect that represents the bounding rectangle of the
    System.Windows.UIElement that is associated with this
    System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
    by System.Windows.Automation.Peers.AutomationPeer.GetBoundingRectangle.
    Returns: The System.Windows.Rect that contains the coordinates of the element.
    Optionally, if the element is not both a System.Windows.Interop.HwndSource and
    a System.Windows.PresentationSource, this method returns
    System.Windows.Rect.Empty.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetChildrenCore(self, *args): #cannot find CLR method
    """
    GetChildrenCore(self: UIElementAutomationPeer) -> List[AutomationPeer]
    Gets the collection of child elements of the System.Windows.UIElement that is
    associated with this System.Windows.Automation.Peers.UIElementAutomationPeer.
    This method is called by
    System.Windows.Automation.Peers.AutomationPeer.GetChildren.
    Returns: A list of child System.Windows.Automation.Peers.AutomationPeer elements.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetClassNameCore(self, *args): #cannot find CLR method
    """
    GetClassNameCore(self: SliderAutomationPeer) -> str
    Gets the name of the System.Windows.Controls.Slider that is associated with
    this System.Windows.Automation.Peers.SliderAutomationPeer. Called by
    System.Windows.Automation.Peers.AutomationPeer.GetClassName.
    Returns: A string that contains "Slider".
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetClickablePointCore(self, *args): #cannot find CLR method
    """
    GetClickablePointCore(self: SliderAutomationPeer) -> Point
    Called by System.Windows.Automation.Peers.AutomationPeer.GetClickablePoint.
    Returns: A System.Windows.Point containing System.Double.NaN, System.Double.NaN; the
    only clickable points in a System.Windows.Controls.TabControl are the child
    System.Windows.Controls.TabItem elements.
    """
    # NOTE(review): docstring mentions TabControl although the peer is a
    # SliderAutomationPeer -- looks copied from another peer's stub; verify.
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetHelpTextCore(self, *args): #cannot find CLR method
    """
    GetHelpTextCore(self: FrameworkElementAutomationPeer) -> str
    Gets the string that describes the functionality of the
    System.Windows.ContentElement that is associated with this
    System.Windows.Automation.Peers.ContentElementAutomationPeer. Called by
    System.Windows.Automation.Peers.AutomationPeer.GetHelpText.
    Returns: The help text, usually from the System.Windows.Controls.ToolTip, or
    System.String.Empty if there is no help text.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetHostRawElementProviderCore(self, *args): #cannot find CLR method
    """
    GetHostRawElementProviderCore(self: AutomationPeer) -> HostedWindowWrapper
    Tells UI Automation where in the UI Automation tree to place the hwnd being
    hosted by a Windows Presentation Foundation (WPF) element.
    Returns: This method returns the hosted hwnd to UI Automation for controls that host
    hwnd objects.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetItemStatusCore(self, *args): #cannot find CLR method
    """
    GetItemStatusCore(self: UIElementAutomationPeer) -> str
    Gets a string that communicates the visual status of the
    System.Windows.UIElement that is associated with this
    System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
    by System.Windows.Automation.Peers.AutomationPeer.GetItemStatus.
    Returns: The string that contains the
    System.Windows.Automation.AutomationProperties.ItemStatus that is returned by
    System.Windows.Automation.AutomationProperties.GetItemStatus(System.Windows.Depe
    ndencyObject).
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetItemTypeCore(self, *args): #cannot find CLR method
    """
    GetItemTypeCore(self: UIElementAutomationPeer) -> str
    Gets a human-readable string that contains the item type that the
    System.Windows.UIElement for this
    System.Windows.Automation.Peers.UIElementAutomationPeer represents. This method
    is called by System.Windows.Automation.Peers.AutomationPeer.GetItemType.
    Returns: The string that contains the
    System.Windows.Automation.AutomationProperties.ItemType that is returned by
    System.Windows.Automation.AutomationProperties.GetItemType(System.Windows.Depend
    encyObject).
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetLabeledByCore(self, *args): #cannot find CLR method
    """
    GetLabeledByCore(self: UIElementAutomationPeer) -> AutomationPeer
    Gets the System.Windows.Automation.Peers.AutomationPeer for the element that is
    targeted to the System.Windows.UIElement for this
    System.Windows.Automation.Peers.UIElementAutomationPeer. This method is called
    by System.Windows.Automation.Peers.AutomationPeer.GetLabeledBy.
    Returns: The System.Windows.Automation.Peers.AutomationPeer for the element that is
    targeted to the System.Windows.UIElement for this
    System.Windows.Automation.Peers.UIElementAutomationPeer.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetLocalizedControlTypeCore(self, *args): #cannot find CLR method
    """
    GetLocalizedControlTypeCore(self: AutomationPeer) -> str
    When overridden in a derived class, is called by
    System.Windows.Automation.Peers.AutomationPeer.GetLocalizedControlType.
    Returns: The type of the control.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetNameCore(self, *args): #cannot find CLR method
    """
    GetNameCore(self: FrameworkElementAutomationPeer) -> str
    Gets the text label of the System.Windows.ContentElement that is associated
    with this System.Windows.Automation.Peers.ContentElementAutomationPeer. Called
    by System.Windows.Automation.Peers.AutomationPeer.GetName.
    Returns: The text label of the element that is associated with this automation peer.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetOrientationCore(self, *args): #cannot find CLR method
    """
    GetOrientationCore(self: UIElementAutomationPeer) -> AutomationOrientation
    Gets a value that indicates whether the System.Windows.UIElement that is
    associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
    laid out in a specific direction. This method is called by
    System.Windows.Automation.Peers.AutomationPeer.GetOrientation.
    Returns: The System.Windows.Automation.Peers.AutomationOrientation.None enumeration
    value.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def GetPeerFromPointCore(self, *args): #cannot find CLR method
    """ GetPeerFromPointCore(self: AutomationPeer, point: Point) -> AutomationPeer """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def HasKeyboardFocusCore(self, *args): #cannot find CLR method
    """
    HasKeyboardFocusCore(self: UIElementAutomationPeer) -> bool
    Gets a value that indicates whether the System.Windows.UIElement that is
    associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
    currently has keyboard input focus. This method is called by
    System.Windows.Automation.Peers.AutomationPeer.HasKeyboardFocus.
    Returns: true if the element has keyboard input focus; otherwise, false.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def IsContentElementCore(self, *args): #cannot find CLR method
    """
    IsContentElementCore(self: UIElementAutomationPeer) -> bool
    Gets a value that indicates whether the System.Windows.UIElement that is
    associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
    an element that contains data that is presented to the user. This method is
    called by System.Windows.Automation.Peers.AutomationPeer.IsContentElement.
    Returns: true.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def IsControlElementCore(self, *args): #cannot find CLR method
    """
    IsControlElementCore(self: UIElementAutomationPeer) -> bool
    Gets or sets a value that indicates whether the System.Windows.UIElement that
    is associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
    is understood by the end user as interactive. Optionally, the user might
    understand the System.Windows.UIElement as contributing to the logical
    structure of the control in the GUI. This method is called by
    System.Windows.Automation.Peers.AutomationPeer.IsControlElement.
    Returns: true.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def IsEnabledCore(self, *args): #cannot find CLR method
    """
    IsEnabledCore(self: UIElementAutomationPeer) -> bool
    Gets a value that indicates whether the System.Windows.UIElement that is
    associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
    can accept keyboard focus. This method is called by
    System.Windows.Automation.Peers.AutomationPeer.IsKeyboardFocusable.
    Returns: A boolean that contains the value of System.Windows.UIElement.IsEnabled.
    """
    # NOTE(review): the docstring body appears copied from IsKeyboardFocusableCore;
    # the Returns line (UIElement.IsEnabled) matches the method name -- verify
    # against the WPF AutomationPeer.IsEnabledCore documentation.
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def IsKeyboardFocusableCore(self, *args): #cannot find CLR method
    """
    IsKeyboardFocusableCore(self: UIElementAutomationPeer) -> bool
    Gets a value that indicates whether the System.Windows.UIElement that is
    associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
    can accept keyboard focus. This method is called by
    System.Windows.Automation.Peers.AutomationPeer.IsKeyboardFocusable.
    Returns: true if the element is focusable by the keyboard; otherwise false.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def IsOffscreenCore(self, *args): #cannot find CLR method
    """
    IsOffscreenCore(self: UIElementAutomationPeer) -> bool
    Gets a value that indicates whether the System.Windows.UIElement that is
    associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
    off the screen. This method is called by
    System.Windows.Automation.Peers.AutomationPeer.IsOffscreen.
    Returns: true if the element is not on the screen; otherwise, false.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def IsPasswordCore(self, *args): #cannot find CLR method
    """
    IsPasswordCore(self: UIElementAutomationPeer) -> bool
    Gets a value that indicates whether the System.Windows.UIElement that is
    associated with this System.Windows.Automation.Peers.UIElementAutomationPeer
    contains protected content. This method is called by
    System.Windows.Automation.Peers.AutomationPeer.IsPassword.
    Returns: false.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def IsRequiredForFormCore(self, *args): #cannot find CLR method
    """
    IsRequiredForFormCore(self: UIElementAutomationPeer) -> bool
    Gets a value that indicates whether the System.Windows.UIElement that is
    associated with this System.Windows.Automation.Peers.UIElementAutomationPeer is
    required to be completed on a form. This method is called by
    System.Windows.Automation.Peers.AutomationPeer.IsRequiredForForm.
    Returns: A boolean that contains the value that is returned by
    System.Windows.Automation.AutomationProperties.GetIsRequiredForForm(System.Windo
    ws.DependencyObject), if it's set; otherwise false.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
def PeerFromProvider(self, *args): #cannot find CLR method
    """
    PeerFromProvider(self: AutomationPeer, provider: IRawElementProviderSimple) -> AutomationPeer
    Gets an System.Windows.Automation.Peers.AutomationPeer for the specified
    System.Windows.Automation.Provider.IRawElementProviderSimple proxy.
    provider: The class that implements
    System.Windows.Automation.Provider.IRawElementProviderSimple.
    Returns: The System.Windows.Automation.Peers.AutomationPeer.
    """
    # Stub only: no Python implementation; the CLR member could not be resolved.
    pass
| |
# -*- coding: utf-8 -*-
"""The DataSource class contains all needed information for importing, processing, and saving data.
@author: <NAME>
Created on Jul 31, 2020
"""
import itertools
import numpy as np
import pandas as pd
from . import utils
from .excel_writer import ExcelWriterHandler
from .functions import CalculationFunction, PreprocessFunction, SummaryFunction
class DataSource:
"""
Used to give default settings for importing data and various functions based on the source.
Parameters
----------
name : str
The name of the DataSource. Used when displaying the DataSource in a GUI.
functions : list or tuple, optional
A list or tuple of various Function objects (:class:`.CalculationFunction` or
:class:`.PreprocessFunction` or :class:`.SummaryFunction`) that will be used to
process data for the DataSource. The order the Functions are performed in is
as follows: PreprocessFunctions, CalculationFunctions, SummaryFunctions,
with functions of the same type being performed in the same order as input.
column_labels : tuple(str) or list(str), optional
A list/tuple of strings that will be used to label columns in the
Excel output, and to label the pandas DataFrame columns for the data.
column_numbers : tuple(int) or list(int), optional
The indices of the columns to import from raw data files.
start_row : int, optional
The first row of data to use when importing from raw data files.
end_row : int, optional
The last row of data to use when importing from raw data files.
Counts up from the last row, so the last row is 0, the second
to last row is 1, etc.
separator : str, optional
The separator or delimiter to use to separate data columns when
importing from raw data files. For example, ',' for csv files.
file_type : str, optional
The file extension associated with the data files for the DataSource.
For example, 'txt' or 'csv'.
num_files : int, optional
The number of data files per sample for the DataSource. Only used
when using keyword search for files.
unique_variables : list(str) or tuple(str), optional
The names of all columns from the imported raw data that are
needed for calculations. For example, if importing thermogravimetric
analysis (TGA) data, the unique_variables could be ['temperature', 'mass'].
unique_variable_indices : list(int) or tuple(int), optional
The indices of the columns within column_numbers that correspond with
each of the input unique_variables.
xy_plot_indices : list(int, int) or tuple(int, int), optional
The indices of the columns after processing that will be the default
columns for plotting in Excel.
figure_rcparams : dict, optional
A dictionary containing any changes to Matplotlib's rcParams to
use if fitting or plotting.
excel_writer_styles : dict(str, None or dict or str or openpyxl.styles.named_styles.NamedStyle), optional
A dictionary of styles used to format the output Excel workbook.
The following keys are used when writing data from files to Excel:
'header_even', 'header_odd', 'subheader_even', 'subheader_odd',
'columns_even', 'columns_odd'
The following keys are used when writing data fit results to Excel:
'fitting_header_even', 'fitting_header_odd', 'fitting_subheader_even',
'fitting_subheader_odd', 'fitting_columns_even', 'fitting_columns_odd',
'fitting_descriptors_even', 'fitting_descriptors_odd'
The values for the dictionaries must be either dictionaries, with
keys corresponding to keyword inputs for openpyxl's NamedStyle, or NamedStyle
objects.
excel_row_offset : int, optional
The first row to use when writing to Excel. A value of 0 would start
at row 1 in Excel, 1 would start at row 2, etc.
excel_column_offset : int, optional
The first column to use when writing to Excel. A value of 0 would
start at column 'A' in Excel, 1 would start at column 'B', etc.
entry_separation : int, optional
The number of blank columns to insert between data entries when writing
to Excel.
sample_separation : int, optional
The number of blank columns to insert between samples when writing
to Excel.
label_entries : bool, optional
If True, will add a number to the column labels for each
entry in a sample if there is more than one entry. For example, the
column label 'data' would become 'data, 1', 'data, 2', etc.
Attributes
----------
excel_styles : dict(dict)
A nested dictionary of dictionaries, used to create openpyxl
NamedStyle objects to format the output Excel file.
lengths : list
A list of lists of lists of integers, corresponding to the number of columns
in each individual entry in the total dataframes for the DataSource.
Used to split the concatted dataframe back into individual dataframes for
each dataset.
references : list
A list of dictionaries, with each dictionary containing the column numbers
for each unique variable and calculation for the merged dataframe of each
dataset.
"""
excel_styles = {
    # Default style definitions for the output Excel workbook. Each value is a
    # dict of keyword arguments for an openpyxl NamedStyle. The 'even'/'odd'
    # variants alternate fill colors (orange vs. blue palettes).
    'header_even': {
        'font': dict(size=12, bold=True),
        'fill': dict(fill_type='solid', start_color='F9B381', end_color='F9B381'),
        'border': dict(bottom=dict(style='thin')),
        'alignment': dict(horizontal='center', vertical='center', wrap_text=True)
    },
    'header_odd': {
        'font': dict(size=12, bold=True),
        'fill': dict(fill_type='solid', start_color='73A2DB', end_color='73A2DB'),
        'border': dict(bottom=dict(style='thin')),
        'alignment': dict(horizontal='center', vertical='center', wrap_text=True)
    },
    'subheader_even': {
        'font': dict(bold=True),
        'fill': dict(fill_type='solid', start_color='FFEAD6', end_color='FFEAD6'),
        'border': dict(bottom=dict(style='thin')),
        'alignment': dict(horizontal='center', vertical='center', wrap_text=True)
    },
    'subheader_odd': {
        'font': dict(bold=True),
        'fill': dict(fill_type='solid', start_color='DBEDFF', end_color='DBEDFF'),
        'border': dict(bottom=dict(style='thin')),
        'alignment': dict(horizontal='center', vertical='center', wrap_text=True)
    },
    'columns_even': {
        'fill': dict(fill_type='solid', start_color='FFEAD6', end_color='FFEAD6'),
        'alignment': dict(horizontal='center', vertical='center'),
        'number_format': '0.00'
    },
    'columns_odd': {
        'fill': dict(fill_type='solid', start_color='DBEDFF', end_color='DBEDFF'),
        'alignment': dict(horizontal='center', vertical='center'),
        'number_format': '0.00'
    },
    # Base styles supplied by the writer handler (includes the 'fitting_*' keys
    # listed in the class docstring); merged last so they fill in any key not
    # defined above.
    **ExcelWriterHandler.styles
}
def __init__(
self,
name,
*,
functions=None,
column_labels=None,
column_numbers=None,
start_row=0,
end_row=0,
separator=None,
file_type=None,
num_files=1,
unique_variables=None,
unique_variable_indices=None,
xy_plot_indices=None,
figure_rcparams=None,
excel_writer_styles=None,
excel_row_offset=0,
excel_column_offset=0,
entry_separation=0,
sample_separation=0,
label_entries=True):
"""
Raises
------
ValueError
Raised if the input name is a blank string, or if either excel_row_offset
or excel_column_offset is < 0.
TypeError
Raised if one of the input functions is not a valid mcetl.FunctionBase
object.
IndexError
Raised if the number of data columns is less than the number of unique
variables.
"""
if name:
self.name = name
else:
raise ValueError('DataSource name cannot be a blank string.')
# attributes that will be set later
self.lengths = None # used for splitting merged datasets
self.references = None # used to reference columns within a dataframe
# _added_separators signifies that entry and sample separation columns were added
# to the merged dataset and will need to be removed in split_into_entries method
self._added_separators = False
self.start_row = start_row
self.end_row = end_row
self.file_type = file_type
self.num_files = num_files
self.sample_separation = sample_separation
self.entry_separation = entry_separation
self.label_entries = label_entries
self.column_labels = column_labels if column_labels is not None else []
self.figure_rcparams = figure_rcparams if figure_rcparams is not None else {}
self.separator = separator
# Ensures excel_row_offset and excel_column_offset are >= 0
if any(value < 0 for value in (excel_column_offset, excel_row_offset)):
raise ValueError('excel_column_offset and excel_row_offset must be >= 0.')
else:
self.excel_row_offset = excel_row_offset
self.excel_column_offset = excel_column_offset
if unique_variables is None:
self.unique_variables = []
elif isinstance(unique_variables, str):
self.unique_variables = [unique_variables]
else:
self.unique_variables = list(unique_variables)
if column_numbers is not None:
self.column_numbers = column_numbers
else:
self.column_numbers = list(range(len(self.unique_variables)))
# ensures the number of imported columns can accommodate all variables
if len(self.column_numbers) < len(self.unique_variables):
raise IndexError((
f'The number of columns specified for {self} '
'must be greater or equal to the number of unique variables.'
))
# sorts the functions by their usage
self.preprocess_functions = []
self.calculation_functions = []
self.sample_summary_functions = []
self.dataset_summary_functions = []
if functions is not None:
if not isinstance(functions, (list, tuple)):
functions = (functions,)
for function in functions:
if isinstance(function, SummaryFunction):
if function.sample_summary:
self.sample_summary_functions.append(function)
else:
self.dataset_summary_functions.append(function)
elif isinstance(function, CalculationFunction):
self.calculation_functions.append(function)
elif isinstance(function, PreprocessFunction):
self.preprocess_functions.append(function)
else:
raise TypeError(f'{function} is not a valid Function object.')
self._validate_target_columns()
# self._deleted_columns tracks how many columns will be deleted after
# preprocessing so the number of needed column labels can be adjusted
self._deleted_columns = sum(
len(cols) for func in self.preprocess_functions for cols in func.deleted_columns
)
# indices for each unique variable for data processing
if unique_variable_indices is None:
self.unique_variable_indices = list(range(len(self.unique_variables)))
elif isinstance(unique_variable_indices, (str, int)):
self.unique_variable_indices = [unique_variable_indices]
else:
self.unique_variable_indices = unique_variable_indices
# ensure all unique variables have a unique column index
unused_indices = (i for i in range(len(self.unique_variables)) if i not in self.unique_variable_indices)
for i in range(len(self.unique_variables)):
if i > len(self.unique_variable_indices) - 1:
self.unique_variable_indices.append(next(unused_indices))
# x and y indices for plotting
if isinstance(xy_plot_indices, (list, tuple)) and len(xy_plot_indices) >= 2:
self.x_plot_index = xy_plot_indices[0]
self.y_plot_index = xy_plot_indices[1]
else:
self.x_plot_index = 0
self.y_plot_index = 1
# sets styles for writing to Excel
self.excel_styles | |
<gh_stars>1-10
'''
Launches multiple simulations (simulation.py) in parallel. Each simulation is configured
using command line arguments. Configurations are generated based on factor matrices.
A factor matrix is specified by extending CommandBuilder and implementing all required methods.
Parameters:
[output file name] # the results file that the launched simulations write to
'''
import itertools
import numpy as np
import subprocess
import sys
import configuration
####################
# CONFIGURATION ##
# Number of simulation processes launched in parallel (see module docstring).
PARALLELITY = 5
# Number of repetitions of each factor-level combination generated by
# CommandBuilder.build (each repetition gets its own -RUN / scrambler seed).
RUNS = 1
####################
class CommandBuilder(object):
    '''
    Base class that turns a factor map into a list of simulation commands.
    Subclasses must implement get_factors(), get_script_name() and
    fill_static_params() to describe one experiment design.
    '''

    def __build_level_matrix_alt(self, factor_levels_map, center_level):
        # Full factorial design: Cartesian product of the level indices of
        # every factor. After the transpose below, each column is one
        # treatment combination and each row is one factor.
        t = list(itertools.product(*[range(len(factor_levels_map[f][1])) for f in xrange(len(factor_levels_map))]))
        if center_level:
            # Append a center-point combination using level index 2 for every
            # factor. NOTE(review): this assumes each factor defines at least
            # three levels -- verify before using center_level with 2-level maps.
            c = np.array([2 for _ in xrange(len(factor_levels_map))])
            t.append(c)
        t = np.transpose(np.array(t))
        return t

    def build(self, filename, center_level):
        '''
        Build one command line per run and treatment combination.
        filename is passed to every command via -f; returns the list of
        argv-style command lists.
        '''
        # Which factor map to use
        factor_levels_map = self.get_factors()
        level_matrix = self.__build_level_matrix_alt(factor_levels_map, center_level)
        print level_matrix
        # List of commands to build
        commands = []
        # For all runs
        for run in xrange(0, RUNS):
            # One command per column (treatment combination) of the level matrix
            for col in xrange(len(level_matrix[0])):
                # Treatment params (parameter) base
                treatment_params = []
                treatment_params_map = {}
                # Build treatment params
                for row in xrange(len(factor_levels_map)):
                    factor_name = factor_levels_map[row][0]
                    level_index = level_matrix[row][col]
                    factor_value = factor_levels_map[row][1][level_index]
                    # Add treatment params as '-NAME value' argument pairs
                    treatment_params.append('-' + factor_name)
                    treatment_params.append(str(factor_value))
                    treatment_params_map[factor_name] = factor_value
                # Add static params (might depend on already set treatment_params)
                treatment_params.extend(self.fill_static_params(run, treatment_params_map))
                # Build execution command
                execution_command = ["python", self.get_script_name(), '-f', filename, '-RUN', str(run)]
                execution_command.extend(treatment_params)
                print 'Command: %s' % execution_command
                # Add command to list
                commands.append(execution_command)
        print 'Total number of commands: %i' % len(commands)
        return commands
class KMandTControllerCB(CommandBuilder):
    '''
    Factor map for a 2k full factorial design that searches for the optimal
    settings of the T and KM controllers in a MKI environment.
    '''

    # Two levels (low, high) per factor for the 2k design.
    __factors = [
        ('THRESHOLD_OVERLOAD', (90, 95)),
        ('THRESHOLD_UNDERLOAD', (60, 80)),
        ('ALPHA', (0.05, 0.1)),
        ('K_VALUE', (50, 200)),
        ('INTERVAL', (60, 900)),
        ('BOTTOM_PERCENTILE', (0.1, 0.4)),
    ]

    def get_factors(self):
        # Access via the class name to resolve the name-mangled attribute.
        return KMandTControllerCB.__factors

    def get_script_name(self):
        return 'src/balancer/simulation.py'

    def fill_static_params(self, run, treatment_params_map):
        # Seed the time-series scrambler with the run index.
        static_params = ['-SCRAMBLER']
        static_params.append(str(run))
        return static_params
class TControllerCB(CommandBuilder):
    '''
    Benchmark design for the T controller: simulations run with its optimal
    settings over a large set of time series in a MKI environment.
    '''

    # Single-level factors: the optimal T controller configuration.
    __factors = [
        ('THRESHOLD_OVERLOAD', (90,)),
        ('THRESHOLD_UNDERLOAD', (25,)),
        ('ALPHA', (0.05,)),
        ('K_VALUE', (170,)),
        ('INTERVAL', (400,)),
    ]

    def get_factors(self):
        return TControllerCB.__factors

    def get_script_name(self):
        return 'src/balancer/simulation.py'

    def fill_static_params(self, run, treatment_params_map):
        # One scrambler seed per run.
        return ['-SCRAMBLER'] + [str(run)]
class KMControllerCB(CommandBuilder):
    '''
    Benchmark design for the KM controller: simulations run with its optimal
    settings over a large set of time series in a MKI environment.
    '''

    # Single-level factors: the optimal KM controller configuration.
    __factors = [
        ('THRESHOLD_OVERLOAD', (95,)),
        ('THRESHOLD_UNDERLOAD', (27,)),
        ('K_VALUE', (65,)),
        ('M_VALUE', (65,)),
        ('INTERVAL', (750,)),
    ]

    def get_factors(self):
        return KMControllerCB.__factors

    def get_script_name(self):
        return 'src/balancer/simulation.py'

    def fill_static_params(self, run, treatment_params_map):
        # One scrambler seed per run.
        return ['-SCRAMBLER'] + [str(run)]
class InitialPlacementCB(CommandBuilder):
    '''
    Design to test whether the choice of initial placement strategy affects
    the average server count and the migrations of different reactive
    controllers in MKI environments.
    '''

    __factors = [
        ('STRATEGY_INITIAL_PLACEMENT', ('round', 'firstFitVector', 'dotProduct', 'ssapv', 'firstFit')),
        ('INITIAL_DOMAINS', (10, 18, 50))
    ]

    def get_factors(self):
        return InitialPlacementCB.__factors

    def get_script_name(self):
        return 'src/balancer/simulation.py'

    def fill_static_params(self, run, treatment_params_map):
        # Seed the time-series scrambler with the run index.
        static_params = ['-SCRAMBLER']
        static_params.append(str(run))
        return static_params
class PlacementLowerBoundSensitivityCB(CommandBuilder):
    '''
    Calculates the lower bound server demand for a set of 2k full
    factorial designed schedule configurations and schedule instances in a MKII
    environment.
    The goal is to find out which schedule configuration factors and/or
    the placement strategy have a significant impact on the competitive value
    calculated by (heuristic peak server demand / optimal peak server demand).
    '''
    __factors = []

    def __init__(self):
        # Bug fix: the SCHEDULE_ID factor used to be appended unconditionally,
        # so instantiating this builder more than once duplicated the factor in
        # the shared class-level list. Populate it at most once.
        already_added = any(factor[0] == 'SCHEDULE_ID' for factor in PlacementLowerBoundSensitivityCB.__factors)
        if not already_added:
            from schedule import schedule_builder
            conf = schedule_builder.ScheduleConfigurationsSensitivity()
            indices = conf.get_all_schedule_ids()
            PlacementLowerBoundSensitivityCB.__factors.append(('SCHEDULE_ID', indices))

    def get_script_name(self):
        return 'src/ipmodels/min_reservation.py'

    def fill_static_params(self, run, treatment_params_map):
        # Determine the schedule's domain size set and pass it along.
        # (Distinct names avoid shadowing the id with the loaded schedule.)
        schedule_id = treatment_params_map['SCHEDULE_ID']
        from schedule import schedule_builder
        schedule = schedule_builder.load_schedule(schedule_id)
        return ['-DOMAIN_SIZES_SET', str(schedule.get_domain_size_set())]

    def get_factors(self):
        return PlacementLowerBoundSensitivityCB.__factors
class PlacementOptimalSensitivityCB(CommandBuilder):
    '''
    Calculates the optimal peak server demand for a set of 2k full
    factorial designed schedule configurations and schedule instances in a MKII
    environment.
    The goal is to find out which schedule configuration factors and/or
    the placement strategy have a significant impact on the competitive value
    calculated by (heuristic peak server demand / optimal peak server demand).
    '''
    __factors = []

    def __init__(self):
        # Bug fix: append the SCHEDULE_ID factor to the shared class-level
        # list only once; repeated instantiation used to duplicate it.
        already_added = any(factor[0] == 'SCHEDULE_ID' for factor in PlacementOptimalSensitivityCB.__factors)
        if not already_added:
            from schedule import schedule_builder
            conf = schedule_builder.ScheduleConfigurationsSensitivity()
            indices = conf.get_all_schedule_ids()
            PlacementOptimalSensitivityCB.__factors.append(('SCHEDULE_ID', indices))

    def get_script_name(self):
        return 'src/ipmodels/align_schedule.py'

    def fill_static_params(self, run, treatment_params_map):
        # Determine the schedule's domain size set and pass it along.
        schedule_id = treatment_params_map['SCHEDULE_ID']
        from schedule import schedule_builder
        schedule = schedule_builder.load_schedule(schedule_id)
        return ['-DOMAIN_SIZES_SET', str(schedule.get_domain_size_set())]

    def get_factors(self):
        return PlacementOptimalSensitivityCB.__factors
class ReallocationPlacementInteractionCB(CommandBuilder):
    '''
    Used to compare different combinations of reallocation AND placement strategies.
    For reallocation, kmcontrol and DSAPP are experimented together with each of the
    following placement strategies: firstFit, worstFit, random, dotProduct
    The goal is to find out which combination is preferable in terms of
    1. migrations triggered
    2. servers used
    We also experiment without using any reallocation strategies. With and without
    reallocation strategies, we can determine which effect a combination of
    reallocation can bring.
    '''
    __factors = [
        ('STRATEGY_PLACEMENT', ('firstFit', 'firstFitDemand', 'bestFit', 'bestFitDemand',
                                'worstFit', 'worstFitDemand',
                                'l2', 'l2Demand', 'random')),
        ('STRATEGY_REALLOCATION', ('dsapp', 'kmcontrol', 'None', 'tcontrol'))
    ]

    def __init__(self):
        # Bug fix: append the SCHEDULE_ID factor to the shared class-level
        # list only once; repeated instantiation used to duplicate it.
        already_added = any(factor[0] == 'SCHEDULE_ID' for factor in ReallocationPlacementInteractionCB.__factors)
        if not already_added:
            from schedule import schedule_builder
            conf = schedule_builder.ScheduleConfigurationsProduction()
            indices = conf.get_all_schedule_ids()
            ReallocationPlacementInteractionCB.__factors.append(('SCHEDULE_ID', indices))

    def fill_static_params(self, run, treatment_params_map):
        # Determine the schedule's domain size set and pass it along.
        schedule_id = treatment_params_map['SCHEDULE_ID']
        from schedule import schedule_builder
        schedule = schedule_builder.load_schedule(schedule_id)
        return ['-DOMAIN_SIZES_SET', str(schedule.get_domain_size_set())]

    def get_script_name(self):
        return 'src/balancer/simulation.py'

    def get_factors(self):
        return ReallocationPlacementInteractionCB.__factors
class PlacementHeuristicSensitivityCB(CommandBuilder):
    '''
    Used to compare different placement strategies (heuristics) for a set of 2k
    full factorial designed schedule configurations and schedule instances in a
    MKII environment.
    The goal is to find out which schedule configuration factors and/or
    the placement strategy have a significant impact on the competitive value
    calculated by (heuristic peak server demand / optimal peak server demand).
    '''
    __factors = [
        ('STRATEGY_PLACEMENT', ('firstFit', 'bestFit', 'dotProduct', 'nextFit', 'harmonic')),
    ]

    def __init__(self):
        # Bug fix: append the SCHEDULE_ID factor to the shared class-level
        # list only once; repeated instantiation used to duplicate it.
        already_added = any(factor[0] == 'SCHEDULE_ID' for factor in PlacementHeuristicSensitivityCB.__factors)
        if not already_added:
            from schedule import schedule_builder
            conf = schedule_builder.ScheduleConfigurationsSensitivity()
            indices = conf.get_all_schedule_ids()
            PlacementHeuristicSensitivityCB.__factors.append(('SCHEDULE_ID', indices))

    def get_script_name(self):
        return 'src/balancer/simulation.py'

    def fill_static_params(self, run, treatment_params_map):
        # Determine the schedule's domain size set and pass it along.
        schedule_id = treatment_params_map['SCHEDULE_ID']
        from schedule import schedule_builder
        schedule = schedule_builder.load_schedule(schedule_id)
        return ['-DOMAIN_SIZES_SET', str(schedule.get_domain_size_set())]

    def get_factors(self):
        return PlacementHeuristicSensitivityCB.__factors
class PlacementOptimalCB(CommandBuilder):
    '''
    Calculates the optimal peak server demand for all schedule instances
    of schedule configurations used to run MKII experiments.
    '''
    __factors = []

    def __init__(self):
        """Register the MKII schedule-instance ids as the SCHEDULE_ID factor."""
        ids = []
        # Four blocks of five schedule instances each (2000-2004, 2100-2104, ...).
        for start in (2000, 2100, 2200, 2300):
            ids.extend(range(start, start + 5))
        # __factors is shared class state: guard against appending a duplicate
        # SCHEDULE_ID entry when the builder is instantiated more than once.
        if not any(name == 'SCHEDULE_ID' for name, _ in PlacementOptimalCB.__factors):
            PlacementOptimalCB.__factors.append(('SCHEDULE_ID', ids))

    def get_script_name(self):
        """Return the path of the IP-model script computing the optimal placement."""
        return 'src/ipmodels/align_schedule.py'

    def fill_static_params(self, run, treatment_params_map):
        """Build static CLI params: the selected schedule's domain-size set."""
        from schedule import schedule_builder
        schedule = schedule_builder.load_schedule(treatment_params_map['SCHEDULE_ID'])
        return ['-DOMAIN_SIZES_SET', str(schedule.get_domain_size_set())]

    def get_factors(self):
        """Return the factor table: schedule ids only."""
        return PlacementOptimalCB.__factors
class PlacementHeuristicCB(CommandBuilder):
    '''
    Simulates the peak server demand of placement heuristics for all schedule
    instances of schedule configurations used to run MKII experiments.
    '''
    __factors = [
        ('STRATEGY_PLACEMENT', ('firstFit', 'bestFit', 'dotProduct'))
    ]

    def __init__(self):
        """Register the MKII schedule-instance ids as the SCHEDULE_ID factor."""
        ids = []
        # Four blocks of five schedule instances each (2000-2004, 2100-2104, ...).
        for start in (2000, 2100, 2200, 2300):
            ids.extend(range(start, start + 5))
        # __factors is shared class state: guard against appending a duplicate
        # SCHEDULE_ID entry when the builder is instantiated more than once.
        if not any(name == 'SCHEDULE_ID' for name, _ in PlacementHeuristicCB.__factors):
            PlacementHeuristicCB.__factors.append(('SCHEDULE_ID', ids))

    def get_script_name(self):
        """Return the path of the simulation entry script."""
        return 'src/balancer/simulation.py'

    def fill_static_params(self, run, treatment_params_map):
        """Build static CLI params: the selected schedule's domain-size set."""
        from schedule import schedule_builder
        schedule = schedule_builder.load_schedule(treatment_params_map['SCHEDULE_ID'])
        return ['-DOMAIN_SIZES_SET', str(schedule.get_domain_size_set())]

    def get_factors(self):
        """Return the factor table: placement strategies x schedule ids."""
        return PlacementHeuristicCB.__factors
def spawn(command):
    """Run `command` as a child process and return its exit status."""
    exit_code = subprocess.call(command)
    return exit_code
def clear_file(filename, extension):
    """Back up and remove an existing results file so a fresh run starts clean.

    Resolves the output path via `configuration`, copies the current file to
    `<file>.bak`, then deletes the original. Silently returns when the file
    does not exist yet (typical on a first run).
    """
    import os
    import shutil
    try:
        filename = configuration.path(filename, extension)
        print('Clearing output file: %s' % filename)
        # Probe that an existing, readable results file is present.
        with open(filename):
            pass
    except Exception:
        # No previous results file (or unresolvable path): nothing to clear.
        # The original used a bare `except: pass` around the whole body; keep
        # the best-effort probe, but stop hiding failures of the backup/remove
        # steps themselves.
        return
    print('Creating backup of current results file')
    shutil.copyfile(filename, '%s.bak' % filename)
    print('Removing existing results file')
    os.remove(filename)
if __name__ == '__main__':
# Read the filename from the command line arguments
filename = 'sloop'
if len(sys.argv) != 2:
| |
<reponame>GuiMarion/DeepJazz
import os
import pickle
import numpy as np
from music21 import corpus, converter, stream, note, duration, interval, instrument
from music21.analysis.floatingKey import FloatingKeyException
from tqdm import tqdm
from .Chords import *
# --- Corpus-encoding constants -------------------------------------------
NUM_VOICES = 3                     # melody, chord roots, chord colors
SUBDIVISION = 4                    # time steps per quarter note
BEAT_SIZE = 4
SOP = 0
BASS = 1
OCTAVE = 12                        # semitones per octave
PACKAGE_DIR = os.path.realpath(os.path.dirname(__file__))
voice_ids_default = list(range(NUM_VOICES))  # melody, chord roots, chord colors
# NOTE: the original redefined SUBDIVISION = 4 a second time here; the
# duplicate assignment has been removed (same value, no behavior change).
SLUR_SYMBOL = '__'                 # marks a held (non-articulated) time step
START_SYMBOL = 'START'
END_SYMBOL = 'END'
def roots_to_input(song, length, index2note, note2index):
    """Encode the chord-root voice of `song` as a list of token indexes.

    Roots not yet present are added to the voice-1 dictionaries in place.
    Each chord emits its root index followed by SLUR_SYMBOL indexes for the
    remaining subdivision steps of its duration.

    :param song: music21 score (passed to getChords)
    :param length: unused; kept for API symmetry with part_to_inputs
    :param index2note: per-voice index->token dicts (voice 1 is mutated)
    :param note2index: per-voice token->index dicts (voice 1 is mutated)
    :return: list of indexes, one per subdivision step
    """
    output = []
    chords = getChords(song)
    # Register any unseen root symbol in the voice-1 dictionaries.
    # (The original also built unused `roots`/`colors` lists; removed.)
    for chord in chords:
        if chord.root not in index2note[1].values():
            new_index = len(index2note[1])
            index2note[1][new_index] = chord.root
            note2index[1][chord.root] = new_index
    # Emit the root index, then slur tokens for the rest of the duration.
    for chord in chords:
        output.append(note2index[1][chord.root])
        for _ in range(int(chord.duration * SUBDIVISION - 1)):
            output.append(note2index[1][SLUR_SYMBOL])
    return output
def colors_to_input(song, length, index2note, note2index):
    """Encode the chord-color (quality) voice of `song` as a list of indexes.

    Colors not yet present are added to the voice-2 dictionaries in place.
    Each chord emits its color index followed by SLUR_SYMBOL indexes for the
    remaining subdivision steps of its duration.

    :param song: music21 score (passed to getChords)
    :param length: unused; kept for API symmetry with part_to_inputs
    :param index2note: per-voice index->token dicts (voice 2 is mutated)
    :param note2index: per-voice token->index dicts (voice 2 is mutated)
    :return: list of indexes, one per subdivision step
    """
    output = []
    chords = getChords(song)
    # Register any unseen color symbol in the voice-2 dictionaries.
    # (The original also built unused `roots`/`colors` lists; removed.)
    for chord in chords:
        if chord.color not in index2note[2].values():
            new_index = len(index2note[2])
            index2note[2][new_index] = chord.color
            note2index[2][chord.color] = new_index
    # Emit the color index, then slur tokens for the rest of the duration.
    for chord in chords:
        output.append(note2index[2][chord.color])
        for _ in range(int(chord.duration * SUBDIVISION - 1)):
            output.append(note2index[2][SLUR_SYMBOL])
    return output
def chorale_to_inputs(chorale, voice_ids, index2notes, note2indexes):
    """
    Encode a music21 score as three index sequences: melody, chord roots,
    chord colors.
    :param chorale: music21 chorale
    :param voice_ids: unused here; kept for a uniform dataset-building API
    :param index2notes: per-voice index->token dicts (may be extended in place)
    :param note2indexes: per-voice token->index dicts (may be extended in place)
    :return: (num_voices, time) matrix of indexes
    """
    # we cannot assume all parts have the same length
    length = int(chorale.duration.quarterLength * SUBDIVISION)  # in 16th notes
    inputs = []
    # This is to separate chords and melody
    # NOTE(review): the return value of partitionByInstrument is discarded;
    # presumably its side effect on `chorale` is what is relied upon -- confirm.
    instrument.partitionByInstrument(chorale)
    # we feed input with the melody first
    # assumes the melody lives in parts[1] after partitioning -- TODO confirm
    inputs.append(part_to_inputs(chorale.parts[1], length, index2notes[0], note2indexes[0]))
    # We feed input with chords
    inputs.append(roots_to_input(chorale, length, index2notes, note2indexes))
    inputs.append(colors_to_input(chorale, length, index2notes, note2indexes))
    output = np.array(inputs)
    # all three voices must have produced sequences of the same length,
    # otherwise np.array yields a ragged (1-D object) array
    assert len(output.shape) == 2
    return output
def standard_note(note_or_rest_string):
    """Map an encoded token back to a music21 Note/Rest object.

    'rest', START_SYMBOL and END_SYMBOL all become rests; SLUR_SYMBOL is
    treated as a rest too, but with a warning since it should have been
    resolved earlier.
    """
    if note_or_rest_string == SLUR_SYMBOL:
        print('Warning: SLUR_SYMBOL used in standard_note')
        return note.Rest()
    if note_or_rest_string in ('rest', START_SYMBOL, END_SYMBOL):
        return note.Rest()
    return note.Note(note_or_rest_string)
def _min_max_midi_pitch(note_strings):
    """
    Return (lowest, highest) MIDI pitch among the given note tokens.
    Non-note tokens (rests, special symbols) count as 128 for the minimum
    and 0 for the maximum, so they never win.
    :param note_strings: iterable of note/rest token strings
    :return: (min_pitch, max_pitch)
    """
    parsed = [standard_note(token) for token in note_strings]
    min_pitch = min((n.pitch.midi if n.isNote else 128) for n in parsed)
    max_pitch = max((n.pitch.midi if n.isNote else 0) for n in parsed)
    return min_pitch, max_pitch
def standard_name(note_or_rest):
    """Encode a music21 Note/Rest as its token name; strings pass through.

    Notes map to their name with octave (e.g. 'C4'), rests to their name.
    """
    if isinstance(note_or_rest, note.Note):
        return note_or_rest.nameWithOctave
    if isinstance(note_or_rest, note.Rest):
        return note_or_rest.name
    if isinstance(note_or_rest, str):
        return note_or_rest
    # Any other type falls through and yields None, as in the original.
    return None
def create_index_dicts(chorale_list, voice_ids=voice_ids_default):
    """
    Returns two lists (index2notes, note2indexes) of size num_voices containing
    dictionaries mapping token <-> index for each voice
    (melody, chord roots, chord colors).
    :param chorale_list: score files, used to determine the melody's range
    :param voice_ids: voices to build dictionaries for
    :return: (index2notes, note2indexes)
    """
    # Special bookkeeping tokens, shared by every voice (in this order).
    special_symbols = ['rest', SLUR_SYMBOL, START_SYMBOL, END_SYMBOL]
    # All feasible note spellings: naturals, flats, sharps.
    note_names = ['A', 'B', 'C', 'D', 'E', 'F', 'G',
                  'A-', 'B-', 'C-', 'D-', 'E-', 'F-', 'G-',
                  'A#', 'B#', 'C#', 'D#', 'E#', 'F#', 'G#']
    voice_ranges = []
    # Voice 0 (melody): fill with the exact tessitura of the dataset.
    melody_range = list(special_symbols)
    dataset_range = getRange(chorale_list)
    for octave in range(int(dataset_range[0][-1]), int(dataset_range[1][-1]) + 1):
        for name in note_names:
            melody_range.append(name + str(octave))
    voice_ranges.append(melody_range)
    # Voice 1: chord roots (octave-less note names).
    # BUGFIX: the original listed 'G_' instead of 'G-' here, so G-flat roots
    # could never be encoded; reuse the correct note_names list instead.
    voice_ranges.append(special_symbols + note_names)
    # Voice 2: chord colors (qualities). NOTE: "9" appears twice, as in the
    # original list; removing it would shift every later index and break
    # previously pickled datasets.
    chord_colors = ['maj', 'min', "min#5", 'dim', '+', 'maj7', "min(maj7)",
                    'min7', '7', "7sus4", "7b5", "7+", 'dim7', 'm7b5', '9',
                    'm9', "min6", '6', 'maj9', "7b9", "7b5b9", "9", "sus49",
                    "#59", "7#5b9", "#5#9", "7#9", "713", "7b5#9", "min11",
                    '11', "7alt", "69", "min69", "9#11", "7#11", "7sus",
                    "7sus43", '13']
    voice_ranges.append(special_symbols + chord_colors)
    # Build the forward and reverse lookup tables for each voice.
    index2notes = []
    note2indexes = []
    for voice_index, _ in enumerate(voice_ids):
        tokens = list(voice_ranges[voice_index])
        index2note = {}
        note2index = {}
        for k, token in enumerate(tokens):
            index2note[k] = token
            note2index[token] = k
        index2notes.append(index2note)
        note2indexes.append(note2index)
    return index2notes, note2indexes
def make_dataset(chorale_list, dataset_name, voice_ids=voice_ids_default,
                 transpose=False, metadatas=None):
    """Parse every score in `chorale_list`, encode it, and pickle the dataset.

    The pickled tuple is
    (X, X_metadatas, voice_ids, index2notes, note2indexes, metadatas).
    Files that fail to encode are skipped (their path is printed).

    :param chorale_list: list of score file paths
    :param dataset_name: output pickle file path
    :param voice_ids: voices to encode
    :param transpose: accepted but not used in this body -- TODO confirm intent
    :param metadatas: optional metadata evaluators applied per chorale
    """
    X = []
    X_metadatas = []
    index2notes, note2indexes = create_index_dicts(chorale_list,
                                                   voice_ids=voice_ids)
    for chorale_file in tqdm(chorale_list):
        try:
            chorale = converter.parse(chorale_file)
            inputs = chorale_to_inputs(chorale, voice_ids=voice_ids, index2notes=index2notes, note2indexes=note2indexes)
            X.append(inputs)
            md = []
            if metadatas:
                for metadata in metadatas:
                    # todo add this
                    if metadata.is_global:
                        pass
                    else:
                        md.append(metadata.evaluate(chorale))
            X_metadatas.append(md)
        except (AssertionError, AttributeError):
            # Skip scores the encoder cannot handle; print which file failed.
            print(chorale_file)
            pass
    dataset = (X, X_metadatas, voice_ids, index2notes, note2indexes, metadatas)
    #metadatas[0].num_values = 16
    #print("PDD",metadatas[0].generate(32))
    pickle.dump(dataset, open(dataset_name, 'wb'), pickle.HIGHEST_PROTOCOL)
    print(str(len(X)) + ' files written in ' + dataset_name)
def compute_min_max_pitches(file_list, voices=None):
    """
    Removes wrong chorales (in place) and computes per-voice pitch extrema.
    :param file_list: list of score paths; files whose requested voice lacks
        notes are removed from this list in place
    :type voices: list containing voices ids (defaults to [0])
    :returns: two numpy arrays (min_p, max_p) of min and max MIDI pitches
        for each voice
    """
    # BUGFIX: the original used the mutable default `voices=[0]`; use the
    # None-sentinel idiom instead (behavior for callers is unchanged).
    if voices is None:
        voices = [0]
    min_p, max_p = [128] * len(voices), [0] * len(voices)
    to_remove = []
    for file_name in file_list:
        choral = converter.parse(file_name)
        for k, voice_id in enumerate(voices):
            try:
                c = choral.parts[voice_id]  # Retain only voice_id voice
                l = list(map(lambda n: n.pitch.midi, c.flat.notes))
                min_p[k] = min(min_p[k], min(l))
                max_p[k] = max(max_p[k], max(l))
            except AttributeError:
                to_remove.append(file_name)
    for file_name in set(to_remove):
        file_list.remove(file_name)
    return np.array(min_p), np.array(max_p)
def filter_file_list(file_list, num_voices=3):
    """
    Only retain chorales whose score has exactly `num_voices` parts.
    Prints one progress line per file.
    """
    kept = []
    for index, file_name in enumerate(file_list):
        score = converter.parse(file_name)
        print(index, file_name, " ", len(score.parts))
        if len(score.parts) == num_voices:
            kept.append(file_name)
    return kept
def pickled_dataset_path(dataset_dir):
    """Map a dataset directory path to its pickle file in DataRepresentations.

    The dataset name is the last non-empty '/'-separated path segment.
    """
    segments = [segment for segment in dataset_dir.split('/') if segment]
    dataset_name = segments[-1]
    return os.path.join(PACKAGE_DIR, 'DataRepresentations', dataset_name + '.pickle')
def part_to_inputs(part, length, index2note, note2index):
    """
    Can modify note2index and index2note!
    Encode one music21 part as `length` subdivision steps of token indexes,
    emitting the SLUR_SYMBOL index for steps where the note is held rather
    than articulated.
    :param part: music21 part (the melody)
    :param length: number of subdivision steps to emit
    :param index2note: index->token dict (extended with a warning if needed)
    :param note2index: token->index dict (extended with a warning if needed)
    :return: list of `length` indexes
    """
    list_notes = part.flat.notes
    list_note_strings = [n.nameWithOctave for n in list_notes]
    # add entries to dictionaries if not present should not be called, just here for debug
    for note_name in list_note_strings:
        if note_name not in index2note.values():
            print("___________")
            print("Illegaly adding entries to indexes, should never append,\ncheck create_index_dicts function. It should be missing tis note : ")
            print(note_name)
            print("___________")
            print()
            new_index = len(index2note)
            index2note.update({new_index: note_name})
            note2index.update({note_name: new_index})
    j = 0  # cursor over note/rest events
    i = 0  # cursor over subdivision time steps
    # t[i] = (token index, articulated flag) for each time step
    t = np.zeros((length, 2))
    is_articulated = True
    list_notes_and_rests = part.flat.notesAndRests
    num_notes = len(list_notes_and_rests)
    # Walk the time steps; advance the event cursor once the next event's
    # offset (in quarter notes) has been reached.
    while i < length:
        if j < num_notes - 1:
            if list_notes_and_rests[j + 1].offset > i / SUBDIVISION:
                # Current event still sounding at step i.
                t[i, :] = [note2index[standard_name(list_notes_and_rests[j])],
                           is_articulated]
                i += 1
                is_articulated = False
            else:
                # Next event starts: mark the following step as articulated.
                j += 1
                is_articulated = True
        else:
            # Last event fills every remaining step.
            t[i, :] = [note2index[standard_name(list_notes_and_rests[j])],
                       is_articulated]
            i += 1
            is_articulated = False
    # Replace non-articulated (held) steps with the slur token's index.
    return list(map(lambda pa: pa[0] if pa[1] else note2index[SLUR_SYMBOL], t))
def initialization(dataset_path=None, metadatas=None,
                   voice_ids=voice_ids_default):
    """Build and pickle the dataset found under `dataset_path`.

    Filters the directory's .mid files down to 2-part scores, then encodes
    and pickles them via make_dataset. Does nothing when `dataset_path`
    is falsy.

    :param dataset_path: directory containing .mid files
    :param metadatas: optional metadata evaluators forwarded to make_dataset
    :param voice_ids: voices to encode
    """
    from glob import glob
    print('Creating dataset')
    if dataset_path:
        # We only keep files that are 2 voices (that's how we defined out dataset)
        # For now we are only able to deal with midi files
        fileList = filter_file_list(
            glob(dataset_path + '/*.mid'),
            num_voices=2)
        pickled_dataset = pickled_dataset_path(dataset_path)
        print(pickled_dataset)
        # remove wrong chorales for teissiture reasons :
        # TODO : Change this function in order to fit the best to the teissiture of Jazz songs
        #min_pitches, max_pitches = compute_min_max_pitches(fileList,
        #                                                   voices=voice_ids)
        make_dataset(fileList, pickled_dataset,
                     voice_ids=voice_ids,
                     transpose=True,
                     metadatas=metadatas)
# This function helps you print a graphical representation of the data. For our project
# we use only two voices: the melody and the chords. So if you want to check that the
# processing has been executed correctly, you should use this one.
# It prints each bar on one line with the two voices, without a newline between them.
# If you want to print each voice separately, use PrintRepresentationSeparetly.
def PrintRepresentation(pickled_dataset):
    """Print an ASCII rendering of a pickled dataset, 16 steps per line.

    For every song: the melody tokens are printed 16 per line, and after
    each group of 16 the corresponding chord-root and chord-color rows for
    those same 16 steps are printed underneath.

    :param pickled_dataset: dataset name (file stem) inside DataRepresentations
    """
    print()
    print("Here is a graphical representation of the data")
    print()
    pickled_dataset = PACKAGE_DIR+"/DataRepresentations/" + pickled_dataset + ".pickle"
    X, X_metadatas, voice_ids, index2notes, note2indexes, metadatas = pickle.load(
        open(pickled_dataset, 'rb'))
    #print("X : ")
    #print(X)
    #print()
    #print("X_metadatas : ")
    #print(X_metadatas)
    #print()
    #print("Voice_ids : ")
    #print(voice_ids)
    #print()
    #print("index2notes : ")
    #print(index2notes)
    #print()
    #print("note2indexes : ")
    #print(note2indexes)
    #print()
    #print("metadatas : ")
    #print(metadatas)
    print()
    print("This is One Song")
    print()
    for elem in X:
        # Compt counts melody tokens emitted on the current line (max 16).
        Compt = 0
        for i in range(len(elem[0])):
            if Compt == 16:
                # A full line of 16 melody tokens was printed: now print the
                # chord roots (voice 1) and colors (voice 2) for steps i-16..i-1.
                Compt = 0
                print()
                for e in range(i-16, i):
                    print(index2notes[1][int(elem[1][e])], end="")
                print()
                for e in range(i-16, i):
                    print(index2notes[2][int(elem[2][e])], end="")
                print()
                print()
            # Melody token; flats are shown as 'b' instead of music21's '-'.
            print(index2notes[0][int(elem[0][i])].replace("-","b"), end="")
            Compt+=1
        print()
        # Print the chord rows for the final (possibly partial) line.
        # NOTE(review): this always takes the last 16 steps, even when the
        # final melody line held fewer than 16 tokens -- confirm intended.
        for e in range(len(elem[0])-16, len(elem[0])):
            print(index2notes[1][int(elem[1][e])], end="")
        print()
        for e in range(len(elem[0])-16, len(elem[0])):
            print(index2notes[2][int(elem[2][e])], end="")
        print()
        if (elem is not X[-1]):
            print()
            print("Another One")
            print()
def PrintRepresentationSeparetly(pickled_dataset):
print()
print("Here is a graphical representation of the data")
print()
pickled_dataset = PACKAGE_DIR+"/DataRepresentations/" + pickled_dataset + ".pickle"
X, X_metadatas, voice_ids, index2notes, note2indexes, metadatas = pickle.load(
| |
1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
mirror
Determines if the axis lines or/and ticks are mirrored
to the opposite side of the plotting area. If True, the
axis lines are mirrored. If "ticks", the axis lines and
ticks are mirrored. If False, mirroring is disable. If
"all", axis lines are mirrored on all shared-axes
subplots. If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears.
rangemode
If "normal", the range is computed in relation to the
extrema of the input data. If "tozero", the range
extends to 0, regardless of the input data. If
"nonnegative", the range is non-negative, regardless of
the input data. Applies only to linear axes.
separatethousands
If "true", even 4-digit integers are separated
showaxeslabels
Sets whether or not this axis is labeled
showbackground
Sets whether or not this axis' wall has a background
color.
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showspikes
Sets whether or not spikes starting from data points to
this axis' wall are shown on hover.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
spikecolor
Sets the color of the spikes.
spikesides
Sets whether or not spikes extending from the
projection data points to this axis' wall boundaries
are shown on hover.
spikethickness
Sets the thickness (in px) of the spikes.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.scene.ya
xis.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.layout.scen
e.yaxis.tickformatstopdefaults), sets the default
property values to use for elements of
layout.scene.yaxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.scene.yaxis.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use layout.scene.yaxis.title.font
instead. Sets this axis' title font. Note that the
title's font used to be customized by the now
deprecated `titlefont` attribute.
type
Sets the axis type. By default, plotly attempts to
determined the axis type by looking into the data of
the traces that referenced the axis in question.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
zeroline
Determines whether or not a line is drawn at along the
0 value of this axis. If True, the zero line is drawn
on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinewidth
Sets the width (in px) of the zero line.
Returns
-------
YAxis
"""
super(YAxis, self).__init__("yaxis")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.scene.YAxis
constructor must be | |
noqa: E501
"""Get contacts in a list # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_contacts_from_list_with_http_info(list_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int list_id: Id of the list (required)
:param datetime modified_since: Filter (urlencoded) the contacts modified after a given UTC date-time (YYYY-MM-DDTHH:mm:ss.SSSZ). Prefer to pass your timezone in date-time format for accurate result.
:param int limit: Number of documents per page
:param int offset: Index of the first document of the page
:return: GetContacts
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['list_id', 'modified_since', 'limit', 'offset'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contacts_from_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'list_id' is set
if ('list_id' not in params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling `get_contacts_from_list`") # noqa: E501
if 'limit' in params and params['limit'] > 500: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_contacts_from_list`, must be a value less than or equal to `500`") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['listId'] = params['list_id'] # noqa: E501
query_params = []
if 'modified_since' in params:
query_params.append(('modifiedSince', params['modified_since'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api-key', 'partner-key'] # noqa: E501
return self.api_client.call_api(
'/contacts/lists/{listId}/contacts', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetContacts', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_folder(self, folder_id, **kwargs):  # noqa: E501
    """Returns a folder's details  # noqa: E501

    Synchronous by default; pass async_req=True to get the request thread
    back instead of the deserialized result.

    >>> thread = api.get_folder(folder_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int folder_id: id of the folder (required)
    :return: GetFolder
             If the method is called asynchronously,
             returns the request thread.
    """
    # Only the deserialized body (or the request thread, when async) is
    # wanted here -- never the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.get_folder_with_http_info(folder_id, **kwargs)  # noqa: E501
def get_folder_with_http_info(self, folder_id, **kwargs):  # noqa: E501
    """Returns a folder's details  # noqa: E501

    Builds and dispatches a GET /contacts/folders/{folderId} request.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_folder_with_http_info(folder_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int folder_id: id of the folder (required)
    :return: GetFolder
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['folder_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() captures exactly {self, folder_id, kwargs, all_params}
    # at this point; the code below relies on that snapshot, so statement
    # order here matters.
    params = locals()
    # Reject unknown keyword arguments; flatten the known ones into params.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_folder" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'folder_id' is set
    if ('folder_id' not in params or
            params['folder_id'] is None):
        raise ValueError("Missing the required parameter `folder_id` when calling `get_folder`")  # noqa: E501

    collection_formats = {}

    # Path placeholders: {folderId} in the endpoint URL.
    path_params = {}
    if 'folder_id' in params:
        path_params['folderId'] = params['folder_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['api-key', 'partner-key']  # noqa: E501

    return self.api_client.call_api(
        '/contacts/folders/{folderId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='GetFolder',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_folder_lists(self, folder_id, **kwargs):  # noqa: E501
    """Get lists in a folder  # noqa: E501

    Synchronous by default; pass async_req=True to get the request thread
    back instead of the deserialized result.

    >>> thread = api.get_folder_lists(folder_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int folder_id: Id of the folder (required)
    :param int limit: Number of documents per page
    :param int offset: Index of the first document of the page
    :return: GetFolderLists
             If the method is called asynchronously,
             returns the request thread.
    """
    # Only the deserialized body (or the request thread, when async) is
    # wanted here -- never the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.get_folder_lists_with_http_info(folder_id, **kwargs)  # noqa: E501
def get_folder_lists_with_http_info(self, folder_id, **kwargs):  # noqa: E501
    """Get lists in a folder  # noqa: E501

    Builds and dispatches a GET /contacts/folders/{folderId}/lists request.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_folder_lists_with_http_info(folder_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int folder_id: Id of the folder (required)
    :param int limit: Number of documents per page (max 50)
    :param int offset: Index of the first document of the page
    :return: GetFolderLists
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['folder_id', 'limit', 'offset']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() captures exactly {self, folder_id, kwargs, all_params}
    # at this point; the code below relies on that snapshot, so statement
    # order here matters.
    params = locals()
    # Reject unknown keyword arguments; flatten the known ones into params.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_folder_lists" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'folder_id' is set
    if ('folder_id' not in params or
            params['folder_id'] is None):
        raise ValueError("Missing the required parameter `folder_id` when calling `get_folder_lists`")  # noqa: E501

    # The API rejects pages larger than 50 documents.
    if 'limit' in params and params['limit'] > 50:  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `get_folder_lists`, must be a value less than or equal to `50`")  # noqa: E501
    collection_formats = {}

    # Path placeholders: {folderId} in the endpoint URL.
    path_params = {}
    if 'folder_id' in params:
        path_params['folderId'] = params['folder_id']  # noqa: E501

    query_params = []
    if 'limit' in params:
        query_params.append(('limit', params['limit']))  # noqa: E501
    if 'offset' in params:
        query_params.append(('offset', params['offset']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['api-key', 'partner-key']  # noqa: E501

    return self.api_client.call_api(
        '/contacts/folders/{folderId}/lists', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='GetFolderLists',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_folders(self, limit, offset, **kwargs): # noqa: E501
"""Get all folders # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_folders(limit, offset, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int limit: Number of documents per page (required)
:param int offset: Index of the first document of the page (required)
:return: GetFolders
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_folders_with_http_info(limit, offset, **kwargs) # noqa: E501
else:
(data) = self.get_folders_with_http_info(limit, offset, **kwargs) # noqa: E501
return data
def get_folders_with_http_info(self, limit, offset, **kwargs): # noqa: E501
"""Get all folders # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_folders_with_http_info(limit, offset, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int limit: Number of documents per page (required)
:param int offset: Index of the first document of the page (required)
:return: GetFolders
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['limit', 'offset'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_folders" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'limit' is set
if ('limit' not in params or
params['limit'] | |
import numpy as np
from .base import OdeSolver, DenseOutput
from .common import (validate_max_step, validate_tol, select_initial_step,
norm, warn_extraneous, validate_first_step)
from . import dop853_coefficients
# Multiply steps computed from asymptotic behaviour of errors by this
# safety factor so the next attempted step is likely to be accepted.
SAFETY = 0.9
MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
MAX_FACTOR = 10  # Maximum allowed increase in a step size.
def rk_step(fun, t, y, f, h, A, B, C, K):
    """Advance an explicit Runge-Kutta method by a single step of size ``h``.

    Evaluates the stages defined by the Butcher tableau (``A``, ``B``, ``C``),
    writing them into ``K`` row by row, and returns the higher-order solution
    together with the derivative at the new point. Notation follows [1]_.

    Parameters
    ----------
    fun : callable
        Right-hand side ``fun(t, y)`` of the ODE system.
    t : float
        Time at the start of the step.
    y : ndarray, shape (n,)
        State at the start of the step.
    f : ndarray, shape (n,)
        Derivative at ``(t, y)``; stored as stage 0.
    h : float
        Step size (signed).
    A : ndarray, shape (n_stages, n_stages)
        Stage-coupling coefficients; entries at and above the main diagonal
        are zero for explicit methods.
    B : ndarray, shape (n_stages,)
        Weights combining the stages into the final prediction.
    C : ndarray, shape (n_stages,)
        Time fractions for the stages; the first entry is always zero.
    K : ndarray, shape (n_stages + 1, n)
        Stage storage, filled in place; the last row receives the derivative
        at the end of the step.

    Returns
    -------
    y_new : ndarray, shape (n,)
        Higher-order solution at ``t + h``.
    f_new : ndarray, shape (n,)
        Derivative ``fun(t + h, y_new)``.

    References
    ----------
    .. [1] Hairer, Norsett, Wanner, "Solving Ordinary Differential
           Equations I: Nonstiff Problems", Sec. II.4.
    """
    K[0] = f
    n_stages = len(B)
    for stage in range(1, n_stages):
        coeffs = A[stage, :stage]
        increment = np.dot(K[:stage].T, coeffs) * h
        K[stage] = fun(t + C[stage] * h, y + increment)
    y_new = y + h * np.dot(K[:-1].T, B)
    f_new = fun(t + h, y_new)
    K[-1] = f_new
    return y_new, f_new
class RungeKutta(OdeSolver):
    """Base class for explicit Runge-Kutta methods.

    Concrete subclasses (e.g. RK23, RK45) must provide a Butcher tableau
    (``A``, ``B``, ``C``), embedded error-estimate weights ``E``, a
    dense-output interpolation matrix ``P``, and the associated orders and
    stage count.
    """
    C = NotImplemented  # nodes: fractions of h at which stages are evaluated
    A = NotImplemented  # stage-coupling coefficients (strictly lower triangular)
    B = NotImplemented  # weights combining stages into the final solution
    E = NotImplemented  # weights of the embedded lower-order error estimate
    P = NotImplemented  # interpolation coefficients for dense output
    order = NotImplemented  # order of the propagating formula
    error_estimator_order = NotImplemented  # order of the embedded estimator
    n_stages = NotImplemented  # number of RK stages
    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, vectorized=False,
                 first_step=None, **extraneous):
        # Unknown keyword arguments only trigger a warning, not an error.
        warn_extraneous(extraneous)
        super(RungeKutta, self).__init__(fun, t0, y0, t_bound, vectorized,
                                         support_complex=True)
        self.y_old = None
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        self.f = self.fun(self.t, self.y)
        if first_step is None:
            # Choose an initial step heuristically from local problem scales.
            self.h_abs = select_initial_step(
                self.fun, self.t, self.y, self.f, self.direction,
                self.error_estimator_order, self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        # One row per stage plus a final row that holds f(t + h, y_new).
        self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype)
        # Step-size controller exponent: local error ~ h**(q + 1) for an
        # estimator of order q.
        self.error_exponent = -1 / (self.error_estimator_order + 1)
        self.h_previous = None
    def _estimate_error(self, K, h):
        """Return the embedded local-error estimate for a step of size h."""
        return np.dot(K.T, self.E) * h
    def _estimate_error_norm(self, K, h, scale):
        """Return the norm of the error estimate weighted by tolerances."""
        return norm(self._estimate_error(K, h) / scale)
    def _step_impl(self):
        """Attempt one adaptive step, shrinking h until the error test passes."""
        t = self.t
        y = self.y
        max_step = self.max_step
        rtol = self.rtol
        atol = self.atol
        # Smallest step that still changes t measurably (~10 ulp of t).
        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
        if self.h_abs > max_step:
            h_abs = max_step
        elif self.h_abs < min_step:
            h_abs = min_step
        else:
            h_abs = self.h_abs
        step_accepted = False
        step_rejected = False
        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP
            h = h_abs * self.direction
            t_new = t + h
            # Clip the step so the integration never passes t_bound.
            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound
                h = t_new - t
                h_abs = np.abs(h)
            y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A,
                                   self.B, self.C, self.K)
            # Per-component error weight: atol + rtol * max(|y|, |y_new|).
            scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
            error_norm = self._estimate_error_norm(self.K, h, scale)
            if error_norm < 1:
                if error_norm == 0:
                    factor = MAX_FACTOR
                else:
                    factor = min(MAX_FACTOR,
                                 SAFETY * error_norm ** self.error_exponent)
                # Never grow the step immediately after a rejection.
                if step_rejected:
                    factor = min(1, factor)
                h_abs *= factor
                step_accepted = True
            else:
                # Error too large: shrink the step and retry.
                h_abs *= max(MIN_FACTOR,
                             SAFETY * error_norm ** self.error_exponent)
                step_rejected = True
        self.h_previous = h
        self.y_old = y
        self.t = t_new
        self.y = y_new
        self.h_abs = h_abs
        self.f = f_new
        return True, None
    def _dense_output_impl(self):
        """Build the dense-output interpolant over the last accepted step."""
        Q = self.K.T.dot(self.P)
        return RkDenseOutput(self.t_old, self.t, self.y_old, Q)
class RK23(RungeKutta):
    """Explicit Runge-Kutta method of order 3(2).
    This uses the Bogacki-Shampine pair of formulas [1]_. The error is controlled
    assuming accuracy of the second-order method, but steps are taken using the
    third-order accurate formula (local extrapolation is done). A cubic Hermite
    polynomial is used for the dense output.
    Can be applied in the complex domain.
    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(t, y)``.
        Here ``t`` is a scalar and there are two options for ndarray ``y``.
        It can either have shape (n,), then ``fun`` must return array_like with
        shape (n,). Or alternatively it can have shape (n, k), then ``fun``
        must return array_like with shape (n, k), i.e. each column
        corresponds to a single column in ``y``. The choice between the two
        options is determined by `vectorized` argument (see below).
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here, `rtol` controls a
        relative accuracy (number of correct digits). But if a component of `y`
        is approximately below `atol`, the error only needs to fall within
        the same `atol` threshold, and the number of correct digits is not
        guaranteed. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    vectorized : bool, optional
        Whether `fun` is implemented in a vectorized fashion. Default is False.
    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number evaluations of the system's right-hand side.
    njev : int
        Number of evaluations of the Jacobian. Is always 0 for this solver as it does not use the Jacobian.
    nlu : int
        Number of LU decompositions. Is always 0 for this solver.
    References
    ----------
    .. [1] <NAME>, <NAME>, "A 3(2) Pair of Runge-Kutta Formulas",
           Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989.
    """
    order = 3
    error_estimator_order = 2
    n_stages = 3
    # Bogacki-Shampine Butcher tableau: nodes C, coupling matrix A and
    # third-order solution weights B.
    C = np.array([0, 1/2, 3/4])
    A = np.array([
        [0, 0, 0],
        [1/2, 0, 0],
        [0, 3/4, 0]
    ])
    B = np.array([2/9, 1/3, 4/9])
    # Embedded second-order error weights, applied to all n_stages + 1 rows
    # of K (the last row is the FSAL derivative at the step's end point).
    E = np.array([5/72, -1/12, -1/9, 1/8])
    # Cubic Hermite interpolation coefficients used for dense output.
    P = np.array([[1, -4 / 3, 5 / 9],
                  [0, 1, -2/3],
                  [0, 4/3, -8/9],
                  [0, -1, 1]])
class RK45(RungeKutta):
"""Explicit Runge-Kutta method of order 5(4).
This uses the Dormand-Prince pair of formulas [1]_. The error is controlled
assuming accuracy of the fourth-order method accuracy, but steps are taken
using the fifth-order accurate formula (local extrapolation is done).
A quartic interpolation polynomial is used for the dense output [2]_.
Can be applied in the complex domain.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
It can either have shape (n,); then ``fun`` must return array_like with
shape (n,). Alternatively it can have shape (n, k); then ``fun``
must return an array_like with shape (n, | |
= "Operating System :: Microsoft"
MSDOS = "Operating System :: Microsoft :: MS-DOS"
        class Windows(object):
            """PyPI trove classifiers for Microsoft Windows variants."""
            Windows = "Operating System :: Microsoft :: Windows"
            Windows_10 = "Operating System :: Microsoft :: Windows :: Windows 10"
            Windows_3_1_or_Earlier = "Operating System :: Microsoft :: Windows :: Windows 3.1 or Earlier"
            Windows_7 = "Operating System :: Microsoft :: Windows :: Windows 7"
            Windows_8 = "Operating System :: Microsoft :: Windows :: Windows 8"
            Windows_8_1 = "Operating System :: Microsoft :: Windows :: Windows 8.1"
            Windows_95_98_2000 = "Operating System :: Microsoft :: Windows :: Windows 95/98/2000"
            Windows_CE = "Operating System :: Microsoft :: Windows :: Windows CE"
            Windows_NT_2000 = "Operating System :: Microsoft :: Windows :: Windows NT/2000"
            Windows_Server_2003 = "Operating System :: Microsoft :: Windows :: Windows Server 2003"
            Windows_Server_2008 = "Operating System :: Microsoft :: Windows :: Windows Server 2008"
            Windows_Vista = "Operating System :: Microsoft :: Windows :: Windows Vista"
            Windows_XP = "Operating System :: Microsoft :: Windows :: Windows XP"
OS_Independent = "Operating System :: OS Independent"
OS_2 = "Operating System :: OS/2"
Other_OS = "Operating System :: Other OS"
PDA_Systems = "Operating System :: PDA Systems"
    class POSIX(object):
        """PyPI trove classifiers for POSIX operating systems."""
        POSIX = "Operating System :: POSIX"
        AIX = "Operating System :: POSIX :: AIX"
        class BSD(object):
            """BSD-family classifiers."""
            BSD = "Operating System :: POSIX :: BSD"
            BSD_OS = "Operating System :: POSIX :: BSD :: BSD/OS"
            FreeBSD = "Operating System :: POSIX :: BSD :: FreeBSD"
            NetBSD = "Operating System :: POSIX :: BSD :: NetBSD"
            OpenBSD = "Operating System :: POSIX :: BSD :: OpenBSD"
        GNU_Hurd = "Operating System :: POSIX :: GNU Hurd"
        HPUX = "Operating System :: POSIX :: HP-UX"
        IRIX = "Operating System :: POSIX :: IRIX"
        Linux = "Operating System :: POSIX :: Linux"
        Other = "Operating System :: POSIX :: Other"
        SCO = "Operating System :: POSIX :: SCO"
        SunOS_Solaris = "Operating System :: POSIX :: SunOS/Solaris"
PalmOS = "Operating System :: PalmOS"
RISC_OS = "Operating System :: RISC OS"
Unix = "Operating System :: Unix"
iOS = "Operating System :: iOS"
class Programming_Language(object):
    """PyPI trove classifiers under the "Programming Language" namespace."""
    APL = "Programming Language :: APL"
    ASP = "Programming Language :: ASP"
    Ada = "Programming Language :: Ada"
    Assembly = "Programming Language :: Assembly"
    Awk = "Programming Language :: Awk"
    Basic = "Programming Language :: Basic"
    C = "Programming Language :: C"
    C_sharp = "Programming Language :: C#"
    C_plus_plus = "Programming Language :: C++"
    Cold_Fusion = "Programming Language :: Cold Fusion"
    Cython = "Programming Language :: Cython"
    Delphi_Kylix = "Programming Language :: Delphi/Kylix"
    Dylan = "Programming Language :: Dylan"
    Eiffel = "Programming Language :: Eiffel"
    EmacsLisp = "Programming Language :: Emacs-Lisp"
    Erlang = "Programming Language :: Erlang"
    Euler = "Programming Language :: Euler"
    Euphoria = "Programming Language :: Euphoria"
    F_sharp = "Programming Language :: F#"
    Forth = "Programming Language :: Forth"
    Fortran = "Programming Language :: Fortran"
    Haskell = "Programming Language :: Haskell"
    Java = "Programming Language :: Java"
    JavaScript = "Programming Language :: JavaScript"
    Kotlin = "Programming Language :: Kotlin"
    Lisp = "Programming Language :: Lisp"
    Logo = "Programming Language :: Logo"
    ML = "Programming Language :: ML"
    Modula = "Programming Language :: Modula"
    OCaml = "Programming Language :: OCaml"
    Object_Pascal = "Programming Language :: Object Pascal"
    Objective_C = "Programming Language :: Objective C"
    Other = "Programming Language :: Other"
    Other_Scripting_Engines = "Programming Language :: Other Scripting Engines"
    PHP = "Programming Language :: PHP"
    PL_SQL = "Programming Language :: PL/SQL"
    PROGRESS = "Programming Language :: PROGRESS"
    Pascal = "Programming Language :: Pascal"
    Perl = "Programming Language :: Perl"
    Pike = "Programming Language :: Pike"
    Pliant = "Programming Language :: Pliant"
    Prolog = "Programming Language :: Prolog"
    class Python(object):
        """Python version/implementation classifiers."""
        Python = "Programming Language :: Python"
        class Two(object):
            # NOTE(review): the value below looks wrong for the "Two" group --
            # the official PyPI classifier is "Programming Language :: Python :: 2";
            # confirm against the canonical trove-classifier list before use.
            Two = "Programming Language :: Python :: 3.2"
            Only = "Programming Language :: Python :: 2 :: Only"
        class Three(object):
            # NOTE(review): "3.3" looks wrong for the group marker -- expected
            # "Programming Language :: Python :: 3"; confirm against the
            # canonical trove-classifier list before use.
            Three = "Programming Language :: Python :: 3.3"
            Only = "Programming Language :: Python :: 3 :: Only"
            Four = "Programming Language :: Python :: 3.4"
            Five = "Programming Language :: Python :: 3.5"
            Six = "Programming Language :: Python :: 3.6"
            Seven = "Programming Language :: Python :: 3.7"
            Zero = "Programming Language :: Python :: 3.0"
            One = "Programming Language :: Python :: 3.1"
            Ten = "Programming Language :: Python :: 3.10"
            Eight = "Programming Language :: Python :: 3.8"
            Nine = "Programming Language :: Python :: 3.9"
        class Implementation(object):
            Implementation = "Programming Language :: Python :: Implementation"
            CPython = "Programming Language :: Python :: Implementation :: CPython"
            IronPython = "Programming Language :: Python :: Implementation :: IronPython"
            Jython = "Programming Language :: Python :: Implementation :: Jython"
            MicroPython = "Programming Language :: Python :: Implementation :: MicroPython"
            PyPy = "Programming Language :: Python :: Implementation :: PyPy"
            Stackless = "Programming Language :: Python :: Implementation :: Stackless"
    R = "Programming Language :: R"
    REBOL = "Programming Language :: REBOL"
    Rexx = "Programming Language :: Rexx"
    Ruby = "Programming Language :: Ruby"
    Rust = "Programming Language :: Rust"
    SQL = "Programming Language :: SQL"
    Scheme = "Programming Language :: Scheme"
    Simula = "Programming Language :: Simula"
    Smalltalk = "Programming Language :: Smalltalk"
    Tcl = "Programming Language :: Tcl"
    Unix_Shell = "Programming Language :: Unix Shell"
    Visual_Basic = "Programming Language :: Visual Basic"
    XBasic = "Programming Language :: XBasic"
    YACC = "Programming Language :: YACC"
    Zope = "Programming Language :: Zope"
class Topic(object):
Adaptive_Technologies = "Topic :: Adaptive Technologies"
Artistic_Software = "Topic :: Artistic Software"
class Communications(object):
Communications = "Topic :: Communications"
BBS = "Topic :: Communications :: BBS"
class Chat(object):
Chat = "Topic :: Communications :: Chat"
ICQ = "Topic :: Communications :: Chat :: ICQ"
Internet_Relay_Chat = "Topic :: Communications :: Chat :: Internet Relay Chat"
Unix_Talk = "Topic :: Communications :: Chat :: Unix Talk"
Conferencing = "Topic :: Communications :: Conferencing"
class Email(object):
Email = "Topic :: Communications :: Email"
Address_Book = "Topic :: Communications :: Email :: Address Book"
Email_Clients_MUA = "Topic :: Communications :: Email :: Email Clients (MUA)"
Filters = "Topic :: Communications :: Email :: Filters"
Mail_Transport_Agents = "Topic :: Communications :: Email :: Mail Transport Agents"
Mailing_List_Servers = "Topic :: Communications :: Email :: Mailing List Servers"
class PostOffice(object):
PostOffice = "Topic :: Communications :: Email :: Post-Office"
IMAP = "Topic :: Communications :: Email :: Post-Office :: IMAP"
POP3 = "Topic :: Communications :: Email :: Post-Office :: POP3"
FIDO = "Topic :: Communications :: FIDO"
Fax = "Topic :: Communications :: Fax"
class File_Sharing(object):
File_Sharing = "Topic :: Communications :: FileIO Sharing"
Gnutella = "Topic :: Communications :: FileIO Sharing :: Gnutella"
Napster = "Topic :: Communications :: FileIO Sharing :: Napster"
Ham_Radio = "Topic :: Communications :: Ham Radio"
Internet_Phone = "Topic :: Communications :: Internet Phone"
Telephony = "Topic :: Communications :: Telephony"
Usenet_News = "Topic :: Communications :: Usenet News"
class Database(object):
Database = "Topic :: Database"
Database_Engines_Servers = "Topic :: Database :: Database Engines/Servers"
FrontEnds = "Topic :: Database :: Front-Ends"
| |
= config.path_star + " --quantMode TranscriptomeSAM GeneCounts --runThreadN " + str(nproc) + " --genomeDir " + path_genome
command = command + " --readFilesIn " + fn + " --outFileNamePrefix " + path_base + folder + "/results_star/" + sample + "_" + gg
if len(star_params) > 0:
command = command + star_params
commands.append(command + sample_checker.replace("#FOLDER", path_base + folder + "/results_star").replace("#SAMPLE", sample))
create_scripts(nchild, commands, path_base, folder, output)
return submit_job_super("star", path_base + folder, wt, str(nproc), q, len(samples), bsub_suffix, nchild, timestamp)
def starfusion(timestamp, path_base, folder, samples, nproc, wt, q, path_star_fusion, star_fusion_params, tg):
    """Write and submit STAR-Fusion (gene-fusion detection) jobs per sample.

    Results are written under <path_base><folder>/results_star-fusion.
    ``tg`` indicates that Trim Galore ran first, in which case trimmed FASTQs
    from results_trimgalore are used instead of the raw inputs.
    Returns the handle produced by submit_job_super.
    """
    output = "results_star-fusion"
    secure_mkdir(path_base + folder, output)
    print "## Identification of gene fusions with star-fusion"
    print "> Writing jobs for Star-Fusion..."
    # Resolve per-job CPU count and job-array splitting for the scheduler.
    nproc, nchild, bsub_suffix = manager.get_bsub_arg(nproc, len(samples))
    commands = list()
    ksamp = sortbysize(samples)
    for sample in ksamp:
        files = samples[sample]
        if not tg:
            fn = files
        else:
            g = path_base + folder + "/results_trimgalore/"
            suf = ""
            # Trim Galore gzips its output; append .gz unless the raw input
            # name already carried it.
            if not files[0].split("/")[-1].endswith(".gz"):
                suf = ".gz"
            # Assumes paired-end data (two FASTQ files per sample) -- TODO confirm.
            fn = [g + files[0].split("/")[-1] + suf, g + files[1].split("/")[-1] + suf]
        prefix = path_base + folder + "/results_star-fusion/" + sample
        call = config.path_starfusion + " --output_dir " + prefix + " --genome_lib_dir " + path_star_fusion + " --left_fq " + fn[0] + " --right_fq " + fn[1] + " --CPU " + str(nproc)
        if len(star_fusion_params) > 0:
            call = call + star_fusion_params
        commands.append(call + sample_checker.replace("#FOLDER", path_base + folder + "/results_star-fusion").replace("#SAMPLE", sample))
    create_scripts(nchild, commands, path_base, folder, output)
    return submit_job_super("star-fusion", path_base + folder, wt, str(nproc), q, len(samples), bsub_suffix, nchild, timestamp)
def picardqc(timestamp, path_base, folder, samples, nproc, wt, q, annots, strand):
nstrand = {" --stranded=no":"NONE", " --stranded=yes":"FIRST_READ_TRANSCRIPTION_STRAND", " --stranded=no":"SECOND_READ_TRANSCRIPTION_STRAND"}
output = "results_picard"
secure_mkdir(path_base + folder, output)
print "## Alignment QC Picard-CollectRnaSeqMetrics"
print "> Writing jobs for Picard QC..."
nproc, nchild, bsub_suffix = manager.get_bsub_arg(nproc, len(samples))
commands = list()
ksamp = sortbysize(samples)
proc_files = os.listdir(path_base + folder + "/results_star/")
for sample in ksamp:
in_file = path_base + folder + "/results_star/" + sample + "_Aligned.out.sam"
if sample + "_Aligned.out.sam" in proc_files:
for i in range(len(config.nannots)):
annot = annots[i]
out_file = in_file.replace(".sam", "." + config.nannots[i] + ".qc").replace("results_star/", "results_picard/").replace("_Aligned.out", "")
call = "java -jar " + config.path_picard + "/CollectRnaSeqMetrics.jar REF_FLAT=" + annot + " STRAND_SPECIFICITY=" + nstrand[strand] + " INPUT=" + in_file + " OUTPUT=" + out_file
if i == (len(config.nannots)-1):
commands.append(call + sample_checker.replace("#FOLDER", path_base + folder + "/results_picard").replace("#SAMPLE", sample))
else:
commands.append(call)
else:
print "Warning: [Picard] STAR output file not found -> " + in_file
create_scripts(nchild, commands, path_base, folder, output)
return submit_job_super("picard", path_base + folder, wt, str(nproc), q, len(samples), bsub_suffix, nchild, timestamp)
def htseq(timestamp, path_base, folder, samples, path_annotation, nproc, wt, q, mode, strand, countmode):
    """Write and submit htseq-count jobs over the STAR SAM alignments.

    ``mode`` labels the counting level ("gene" or anything else, e.g. exon);
    non-gene modes count with ``-i exon_id``. ``strand`` and ``countmode``
    are passed straight through as htseq-count's --stranded / -m options.
    Per-sample tables are written to results_htseq-<mode>/<sample>.tab.
    Returns the handle produced by submit_job_super.
    """
    output = "results_htseq-" + mode
    secure_mkdir(path_base + folder, output)
    print "## HTseq-count"
    print "> Writing jobs for HTseq-count " + mode + " analysis..."
    nproc, nchild, bsub_suffix = manager.get_bsub_arg(nproc, len(samples))
    commands = list()
    ksamp = sortbysize(samples)
    proc_files = os.listdir(path_base + folder + "/results_star/")
    for sample in ksamp:
        in_file = path_base + folder + "/results_star/" + sample + "_Aligned.out.sam"
        if sample + "_Aligned.out.sam" in proc_files:
            outputf= path_base + folder + "/results_htseq-" + mode + "/" + sample + ".tab"
            if mode == "gene":
                ld1 = config.path_htseq + strand + " -m " + countmode + " -q " + in_file + " " + path_annotation
            else:
                # Exon-level counting keys features on the exon_id attribute.
                ld1 = config.path_htseq + strand + " -m " + countmode + " -i exon_id -q " + in_file + " " + path_annotation
            call = ld1 + " > " + outputf
            commands.append(call + sample_checker.replace("#FOLDER", path_base + folder + "/" + output).replace("#SAMPLE", sample))
        else:
            print "Warning: [HTseq-" + mode + "] STAR output file not found -> " + in_file
    create_scripts(nchild, commands, path_base, folder, output)
    return submit_job_super("htseq-" + mode, path_base + folder, wt, str(nproc), q, len(samples), bsub_suffix, nchild, timestamp)
def sam2sortbam(timestamp, path_base, folder, samples, nproc, wt, q):
output = "results_sam2sortbam"
secure_mkdir(path_base + folder, output)
print "## SAM2SORTEDBAM"
print "> Writing jobs for SAM2SORTEDBAM..."
nproc, nchild, bsub_suffix = manager.get_bsub_arg(nproc, len(samples))
commands = list()
ksamp = sortbysize(samples)
proc_files = os.listdir(path_base + folder + "/results_star/")
for sample in ksamp:
in_file = path_base + folder + "/results_star/" + sample + "_Aligned.out.sam"
if sample + "_Aligned.out.sam" in proc_files:
out_file = path_base + folder + "/results_sam2sortbam/" + sample + ".sorted.bam"
com = "java -jar " + config.path_picard + "/AddOrReplaceReadGroups.jar I=" + in_file + " O=" + out_file +" SO=coordinate RGID=id RGLB=library RGPL=ILLUMINA RGPU=machine RGSM=sample 2> " + out_file + ".log"
commands.append(com + sample_checker.replace("#FOLDER", path_base + folder + "/results_sam2sortbam").replace("#SAMPLE", sample))
else:
print "Warning: [SAM2SORTEDBAM] STAR output file not found -> " + in_file
create_scripts(nchild, commands, path_base, folder, output)
return submit_job_super("sam2sortbam", path_base + folder, wt, str(nproc), q, len(samples), bsub_suffix, nchild, timestamp)
def jsplice(timestamp, path_base, folder, samples, nproc, wt, q, genomebuild, pheno, extra_args, strand):
    """Write and submit jSplice differential-splicing jobs.

    Builds the experiment-design table (expdesign.txt) from ``pheno``
    (per-sample "experiment:condition" strings), converts each sample's STAR
    splice-junction file to BED, and queues the final jSplice run.
    Returns the handle produced by submit_job_super.
    """
    output_dir = path_base + folder + '/results_jsplice'
    secure_mkdir(path_base + folder, 'results_jsplice')
    print "## jSPLICE"
    print "> Writing jobs for jSPLICE..."
    # Fixed '1/NA/NA': these helper steps run single-threaded.
    nproc, nchild, bsub_suffix = manager.get_bsub_arg('1/NA/NA', len(samples))
    commands = list()
    ksamp = sortbysize(samples)
    out = open(output_dir + '/expdesign.txt', 'w')
    print >> out, '#exp\tcond\tjxnFile\tbamFile'
    for sample in ksamp:
        sj_file = path_base + folder + '/results_star/' + sample + '_SJ.out.tab' # Junction file created by STAR
        sj_out_file = output_dir + '/' + sample + '.SJ.bed'
        bam_file = path_base + folder + '/results_sam2sortbam/' + sample + '.sorted.bam' # BAM file created by STAR/Picard(AddOrReplaceReadGroups)
        # Only samples with STAR junctions, a sorted BAM, and a well-formed
        # "exp:cond" phenotype entry make it into the design table.
        if os.path.exists(sj_file) and os.path.exists(bam_file) and len(pheno[sample].split(':'))==2:
            command = 'python ' + config.path_jsplice + '/starJxn2bed.py -f ' + sj_file + ' -o '+ sj_out_file
            commands.append(command + sample_checker.replace("#FOLDER", output_dir).replace("#SAMPLE", sample))
            print >> out, '\t'.join([pheno[sample].split(':')[0], pheno[sample].split(':')[1], sj_out_file, bam_file])
        else:
            print "Warning: [JSPLICE] STAR output files not found -> " + sample
    out.close()
    # NOTE(review): '-s' is added only for " --stranded=no" input -- confirm
    # this matches jSplice's flag semantics; it looks inverted at first sight.
    if strand == " --stranded=no":
        extra_args = '-s ' + extra_args
    commands.append('python ' + config.path_jsplice + '/jSplice.py -d ' + output_dir + '/expdesign.txt -o ' + output_dir + ' -a '+ config.path_annotation.replace("#LABEL", genomebuild) + ' ' + extra_args)
    create_scripts(nchild, commands, path_base, folder, 'results_jsplice')
    return submit_job_super("jsplice", path_base + folder, wt, str(nproc), q, len(samples), bsub_suffix, nchild, timestamp)
def picard_IS(timestamp, path_base, folder, samples, nproc, wt, q):
output = "results_picard_IS"
secure_mkdir(path_base + folder, output)
print "## Picard-InsertSize"
print "> Writing jobs for Picard InsertSize..."
nproc, nchild, bsub_suffix = manager.get_bsub_arg(nproc, len(samples))
commands = list()
ksamp = sortbysize(samples)
proc_files = os.listdir(path_base + folder + "/results_sam2sortbam/")
for sample in ksamp:
in_file = path_base + folder + "/results_sam2sortbam/" + sample + ".sorted.bam"
if sample + ".sorted.bam" in proc_files:
for i in range(len(config.nannots)):
out_file = in_file.replace("results_sam2sortbam/", "results_picard_IS/").replace(".sorted.bam", "")
call = "java -jar " + config.path_picard + "/CollectInsertSizeMetrics.jar I="+in_file+" O="+out_file+".txt H="+out_file+".pdf"
commands.append(call + sample_checker.replace("#FOLDER", path_base + folder + "/results_picard_IS").replace("#SAMPLE", sample))
else:
print "Warning: [Picard] Sorted BAM file not found -> " + in_file
create_scripts(nchild, commands, path_base, folder, output)
return submit_job_super("picard_IS", path_base + folder, wt, str(nproc), q, len(samples), bsub_suffix, nchild, timestamp)
def varscan(timestamp, path_base, folder, samples, nproc, wt, q, genome_build, args):
ref = config.path_fasta.replace("#LABEL",genome_build)
output = "results_varscan"
secure_mkdir(path_base + folder, output)
print "## Variang calling with VARSCAN"
print "> Writing jobs for VARSCAN..."
nproc, nchild, bsub_suffix = manager.get_bsub_arg(nproc, len(samples))
commands = list()
ksamp = sortbysize(samples)
proc_files = os.listdir(path_base + folder + "/results_sam2sortbam/")
for sample in ksamp:
in_file = path_base + folder + "/results_sam2sortbam/" + sample + ".sorted.bam"
if sample + ".sorted.bam" in proc_files:
out_file = path_base + folder + "/results_varscan/" + sample + ".vcf"
com = config.path_samtools + " mpileup -B -f " + ref + " " + in_file + " | java -jar " + config.path_varscan + " mpileup2cns " + args + " > " + out_file
commands.append(com + sample_checker.replace("#FOLDER", path_base + folder + "/results_varscan").replace("#SAMPLE", sample))
else:
print "Warning: [VARSCAN] SORTED BAM output file not found -> " + in_file
create_scripts(nchild, commands, path_base, folder, output)
return submit_job_super("varscan", path_base + folder, wt, str(nproc), q, len(samples), bsub_suffix, nchild, timestamp)
def gatk(timestamp, path_base, folder, samples, nproc, wt, q, genome_build, args):
args = args.split("|")
multithread = False
filt = "30"
if len(args) == 2:
if args[0] == "yes":
multithread = True
filt = args[1]
output = "results_gatk"
secure_mkdir(path_base + folder, output)
print "## Variang calling with GATK"
print "> Writing jobs for GATK..."
| |
# Source repository: metodj/FGP-VAE
import argparse
import time
import pickle
import os
import json
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_probability as tfp
from utils import make_checkpoint_folder, pandas_res_saver, plot_mnist, import_rotated_mnist, print_trainable_vars
from VAE_utils import mnistVAE
from FGPVAE_model import FGP, forward_pass_FGPVAE_rotated_mnist, predict_FGPVAE_rotated_mnist, \
extrapolate_experiment_eval_data, latent_samples_FGPVAE
tfd = tfp.distributions
tfk = tfp.math.psd_kernels
def run_experiment_rotated_mnist_FGPVAE(args):
    """
    Build and run the full tensorflow (TF1-style) graph and session for
    FGPVAE experiments on rotated MNIST data.

    For a description of FGPVAE see FGPVAE.tex.

    :param args: parsed command-line arguments (argparse.Namespace); see the
        parser definition at the bottom of this module for the full list.
    :return: None. Metrics, reconstruction images, model weights and latents
        are written to the checkpoint directory when ``args.save`` is set.
    """
    # define some constants
    # NOTE(review): assumes 4050 train and 640 eval images per digit in the
    # rotated-MNIST pickles -- confirm against the data-generation script.
    n = len(args.dataset)
    N_train = n * np.floor(4050 / args.N_t) * args.N_t
    N_eval = n * 640
    assert not (args.object_prior_corr and args.extrapolate_experiment), \
        "When using correlated object priors, can not do extrapolation experiment!"
    assert args.batch_size % args.N_t == 0
    if args.save:
        # Make a folder to save everything
        extra = args.elbo + "_" + str(args.beta)
        chkpnt_dir = make_checkpoint_folder(args.base_dir, args.expid, extra)
        pic_folder = chkpnt_dir + "pics/"
        res_file = chkpnt_dir + "res/ELBO_pandas"
        print("\nCheckpoint Directory:\n" + str(chkpnt_dir) + "\n")
        json.dump(vars(args), open(chkpnt_dir + "/args.json", "wt"))
    # Init plots
    if args.show_pics:
        plt.ion()
    graph = tf.Graph()
    with graph.as_default():
        # ====================== 1) import data ======================
        # for FGPVAE not shuffled data is always used
        ending = args.dataset + "_not_shuffled.p"
        iterator, training_init_op, eval_init_op, test_init_op, train_data_dict, eval_data_dict, test_data_dict, \
        eval_batch_size_placeholder, test_batch_size_placeholder = import_rotated_mnist(args.mnist_data_path,
                                                                                        ending=ending,
                                                                                        batch_size=args.batch_size,
                                                                                        digits=args.dataset,
                                                                                        N_t=args.N_t)
        if args.extrapolate_experiment:
            observed_images_extra, observed_aux_data_extra, \
            test_images_extra, test_aux_data_extra = extrapolate_experiment_eval_data(mnist_path=args.mnist_data_path,
                                                                                      digit=args.dataset, N_t=args.N_t)
        # get the batch
        input_batch = iterator.get_next()
        # ====================== 2) build ELBO graph ======================
        # init VAE object
        VAE = mnistVAE(L=args.L)
        beta = tf.compat.v1.placeholder(dtype=tf.float64, shape=())
        # init GP object
        if args.ov_joint:
            object_vectors_init = pickle.load(open(args.mnist_data_path + 'pca_ov_init{}.p'.format(args.dataset), 'rb'))
        else:
            object_vectors_init = None
        GP = FGP(init_amplitude=1.0, init_length_scale=1.0, GP_joint=args.GP_joint, L_w=args.L_w,
                 object_vectors_init=object_vectors_init, object_prior_corr=args.object_prior_corr,
                 K_obj_normalize=args.object_kernel_normalize)
        # forward pass
        N_t = tf.compat.v1.placeholder(dtype=tf.int64, shape=())
        C_ma_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=())
        lagrange_mult_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=())
        alpha_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=())
        elbo, recon_loss, elbo_kl_part, p_m, p_v, \
        qnet_mu, qnet_var, recon_images, latent_samples, \
        C_ma, lagrange_mult = forward_pass_FGPVAE_rotated_mnist(input_batch,
                                                                beta=beta,
                                                                vae=VAE,
                                                                GP=GP,
                                                                N_t=N_t,
                                                                clipping_qs=args.clip_qs,
                                                                bayes_reg_view=args.bayes_reg_view,
                                                                omit_C_tilde=args.omit_C_tilde,
                                                                C_ma=C_ma_placeholder,
                                                                lagrange_mult=lagrange_mult_placeholder,
                                                                alpha=alpha_placeholder,
                                                                kappa=np.sqrt(args.kappa_squared),
                                                                GECO=args.GECO)
        # prediction
        # TODO: add support for batching for prediction pipeline
        # (tf.where, tf.boolean_mask, tf.gather etc. to select train images belonging to ids in the test batch)
        train_images_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=(None, 28, 28, 1))
        train_aux_data_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=(None, 10))
        test_images_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=(None, 28, 28, 1))
        test_aux_data_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=(None, 10))
        recon_images_test, recon_loss_test = predict_FGPVAE_rotated_mnist(test_images_placeholder,
                                                                          test_aux_data_placeholder,
                                                                          train_images_placeholder,
                                                                          train_aux_data_placeholder,
                                                                          vae=VAE, GP=GP, N_t=args.N_t,
                                                                          clipping_qs=args.clip_qs,
                                                                          bayes_reg_view=args.bayes_reg_view,
                                                                          omit_C_tilde=args.omit_C_tilde)
        if args.save_latents:
            latent_samples_full = latent_samples_FGPVAE(train_images_placeholder, train_aux_data_placeholder,
                                                        vae=VAE, GP=GP, N_t=N_t, clipping_qs=args.clip_qs)
        # ====================== 3) optimizer ops ======================
        global_step = tf.Variable(0, name='global_step', trainable=False)
        lr = tf.compat.v1.placeholder(dtype=tf.float64, shape=())
        optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=lr)
        train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        if args.GECO:
            # NOTE(review): with GECO the sign convention is presumably handled
            # inside the forward pass -- confirm in FGPVAE_model.
            gradients = tf.gradients(elbo, train_vars)
        else:
            # minimizing the negative elbo!
            gradients = tf.gradients(-elbo, train_vars)
        optim_step = optimizer.apply_gradients(grads_and_vars=zip(gradients, train_vars),
                                               global_step=global_step)
        # ====================== 4) Pandas saver ======================
        if args.save:
            res_vars = [global_step,
                        tf.reduce_sum(elbo) / N_eval,
                        tf.reduce_sum(recon_loss) / N_eval,
                        tf.reduce_sum(elbo_kl_part) / N_eval,
                        tf.math.reduce_min(qnet_var),
                        tf.math.reduce_max(qnet_var),
                        tf.math.reduce_min(p_v),
                        tf.math.reduce_max(p_v),
                        tf.math.reduce_min(qnet_mu),
                        tf.math.reduce_max(qnet_mu),
                        tf.math.reduce_min(p_m),
                        tf.math.reduce_max(p_m),
                        latent_samples,
                        qnet_var,
                        C_ma,
                        lagrange_mult]
            res_names = ["step",
                         "ELBO",
                         "recon loss",
                         "KL term",
                         "min qs_var",
                         "max qs_var",
                         "min q_var",
                         "max q_var",
                         'min qs_mean',
                         'max qs_mean',
                         'min q_mean',
                         'max q_mean',
                         'latent_samples',
                         'full qs_var',
                         "C_ma",
                         "lagrange_mult"]
            res_saver = pandas_res_saver(res_file, res_names)
        # ====================== 5) print and init trainable params ======================
        print_trainable_vars(train_vars)
        init_op = tf.global_variables_initializer()
        # ====================== 6) saver and GPU ======================
        if args.save_model_weights:
            saver = tf.compat.v1.train.Saver(max_to_keep=3)
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.ram)
        # Logging / evaluation frequency in epochs.
        if args.object_prior_corr:
            N_print = 170
        else:
            N_print = 10
    # ====================== 7) tf.session ======================
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init_op)
        first_step = True  # switch for initialization of GECO algorithm
        C_ma_ = 0.0
        lagrange_mult_ = 1.0
        start_time = time.time()
        cgen_test_set_MSE = []
        for epoch in range(args.nr_epochs):
            # handcrafted learning rate and beta schedules
            if epoch < args.beta_schedule_switch:
                beta_main = args.beta
                lr_main = args.lr
            else:
                # beta_main = args.beta
                beta_main = args.beta / 10
                lr_main = args.lr / 10
            # 7.1) train for one epoch
            sess.run(training_init_op)
            elbos, losses = [], []
            start_time_epoch = time.time()
            while True:
                try:
                    if first_step:
                        alpha = 0.0
                    else:
                        alpha = args.alpha
                    _, g_s_, elbo_, C_ma_, lagrange_mult_, recon_loss_ = sess.run([optim_step, global_step,
                                                                                   elbo, C_ma, lagrange_mult,
                                                                                   recon_loss],
                                                                                  {beta: beta_main, lr: lr_main,
                                                                                   alpha_placeholder: alpha,
                                                                                   C_ma_placeholder: C_ma_,
                                                                                   lagrange_mult_placeholder: lagrange_mult_,
                                                                                   N_t: args.N_t})
                    elbos.append(elbo_)
                    losses.append(recon_loss_)
                    first_step = False  # switch for initialization of GECO algorithm
                except tf.errors.OutOfRangeError:
                    if (epoch + 1) % N_print == 0:
                        print('Epoch {}, mean ELBO: {}'.format(epoch, np.sum(elbos) / N_train))
                        MSE = np.sum(losses) / N_train
                        print('MSE loss on train set for epoch {} : {}'.format(epoch, MSE))
                        end_time_epoch = time.time()
                        print("Time elapsed for epoch {}: {}".format(epoch, end_time_epoch - start_time_epoch))
                    break
            # 7.2) calculate performance metrics on eval set
            if args.save and (epoch + 1) % N_print == 0:
                losses, elbos = [], []
                sess.run(eval_init_op,
                         {eval_batch_size_placeholder: 240})  # since eval batch_size needs to be divisible by 16
                while True:
                    try:
                        recon_loss_, elbo_ = sess.run([recon_loss, elbo], {beta: beta_main,
                                                                           N_t: 16,
                                                                           alpha_placeholder: args.alpha,
                                                                           C_ma_placeholder: C_ma_,
                                                                           lagrange_mult_placeholder: lagrange_mult_})
                        losses.append(recon_loss_)
                        elbos.append(elbo_)
                    except tf.errors.OutOfRangeError:
                        print('MSE loss on eval set for epoch {} : {}'.format(epoch, np.sum(losses) / N_eval))
                        break
            # 7.3) save diagnostics metrics to Pandas df
            if args.save and (epoch + 1) % N_print == 0:
                sess.run(eval_init_op, {eval_batch_size_placeholder: len(eval_data_dict['images'])})
                new_res = sess.run(res_vars, {beta: args.beta, N_t: 16,
                                              alpha_placeholder: args.alpha,
                                              C_ma_placeholder: C_ma_,
                                              lagrange_mult_placeholder: lagrange_mult_})
                res_saver(new_res, 1)
            # 7.4) calculate loss on test set and visualize reconstructed images
            if (epoch + 1) % N_print == 0:
                # 7.4.1) test set: conditional generation
                n_img = len(args.dataset) * 270 * args.N_t  # number of train images that belong to test ids
                recon_loss_test_, recon_images_test_ = sess.run([recon_loss_test, recon_images_test],
                                                                {train_images_placeholder: train_data_dict[
                                                                                               'images'][:n_img, :],
                                                                 test_images_placeholder: test_data_dict['images'],
                                                                 train_aux_data_placeholder: train_data_dict[
                                                                                                 'aux_data'][:n_img,
                                                                                             :],
                                                                 test_aux_data_placeholder: test_data_dict[
                                                                     'aux_data']})
                cgen_test_set_MSE.append((epoch, recon_loss_test_))
                print(
                    "Conditional generation MSE loss on test set for epoch {}: {}".format(epoch, recon_loss_test_))
                plot_mnist(test_data_dict['images'], recon_images_test_,
                           title="Epoch: {}. CGEN MSE test set:{}".format(epoch + 1, round(recon_loss_test_, 4)))
                if args.show_pics:
                    plt.show()
                    plt.pause(0.01)
                if args.save:
                    plt.savefig(pic_folder + str(g_s_) + "_cgen_test_set.png")
                    with open(pic_folder + "test_metrics.txt", "a") as f:
                        f.write("{},{}\n".format(epoch + 1, round(recon_loss_test_, 4)))
                # 7.4.2) extrapolate experiment (using validation dataset)
                if args.extrapolate_experiment:
                    recon_loss_test_, recon_images_test_extrapolate = sess.run([recon_loss_test, recon_images_test],
                                                                               {
                                                                                   train_images_placeholder: observed_images_extra,
                                                                                   test_images_placeholder: test_images_extra,
                                                                                   train_aux_data_placeholder: observed_aux_data_extra,
                                                                                   test_aux_data_placeholder: test_aux_data_extra})
                    print(
                        "Conditional generation MSE loss for extrapolate experiment for epoch {}: {}".format(epoch,
                                                                                                             recon_loss_test_))
                    plot_mnist(test_images_extra, recon_images_test_extrapolate,
                               title="Epoch: {}. CGEN MSE extrapolate experiment:{}".format(epoch + 1,
                                                                                            round(recon_loss_test_,
                                                                                                  4)))
                    if args.show_pics:
                        plt.show()
                        plt.pause(0.01)
                    if args.save:
                        plt.savefig(pic_folder + str(g_s_) + "_cgen_extra_exp.png")
                        with open(pic_folder + "test_metrics.txt", "a") as f:
                            f.write("{},{}\n".format(epoch + 1, round(recon_loss_test_, 4)))
            # save model weights
            if args.save and args.save_model_weights:
                saver.save(sess, chkpnt_dir + "model", global_step=g_s_)
        # log running time
        end_time = time.time()
        print("Running time for {} epochs: {}".format(args.nr_epochs, round(end_time - start_time, 2)))
        # report best test set cgen MSE achieved throughout training
        best_cgen_MSE = sorted(cgen_test_set_MSE, key=lambda x: x[1])[0]
        print("Best cgen MSE on test set throughout training at epoch {}: {}".format(best_cgen_MSE[0],
                                                                                     best_cgen_MSE[1]))
        # save images from conditional generation
        if args.save:
            with open(chkpnt_dir + '/cgen_images.p', 'wb') as test_pickle:
                pickle.dump(recon_images_test_, test_pickle)
        # save latents
        if args.save_latents:
            latent_samples_full_ = sess.run(latent_samples_full,
                                            {train_images_placeholder: train_data_dict['images'],
                                             train_aux_data_placeholder: train_data_dict['aux_data'],
                                             N_t: args.N_t})
            with open(chkpnt_dir + '/latents_train_full.p', 'wb') as pickle_latents:
                pickle.dump((latent_samples_full_, train_data_dict['aux_data'], train_data_dict['images']),
                            pickle_latents)
if __name__=="__main__":
default_base_dir = os.getcwd()
# =============== parser rotated MNIST data ===============
parser_mnist = argparse.ArgumentParser(description='Train SVGPVAE or FGPVAE for rotated MNIST data.')
parser_mnist.add_argument('--expid', type=str, default="debug_MNIST", help='give this experiment a name')
parser_mnist.add_argument('--base_dir', type=str, default=default_base_dir,
help='folder within a new dir is made for each run')
parser_mnist.add_argument('--mnist_data_path', type=str, default='MNIST data/',
help='Path where rotated MNIST data is stored.')
parser_mnist.add_argument('--batch_size', type=int, default=220)
parser_mnist.add_argument('--nr_epochs', type=int, default=1000)
parser_mnist.add_argument('--beta', type=float, default=0.001)
parser_mnist.add_argument('--nr_inducing_points', type=float, default=5, help="Number of object vectors per angle.")
parser_mnist.add_argument('--save', action="store_true", help='Save model metrics in Pandas df as well as images.')
parser_mnist.add_argument('--GP_joint', action="store_true", help='GP hyperparams joint optimization.')
parser_mnist.add_argument('--ov_joint', action="store_true", help='Object vectors joint optimization.')
parser_mnist.add_argument('--lr', type=float, default=0.001, help='Learning rate for Adam optimizer.')
parser_mnist.add_argument('--not_shuffled', action="store_true", help='Do not shuffle train and test data.')
parser_mnist.add_argument('--save_model_weights', action="store_true",
help='Save model weights. For debug purposes.')
parser_mnist.add_argument('--dataset', type=str, choices=['3', '36', '13679'], default='3')
parser_mnist.add_argument('--show_pics', action="store_true", help='Show images during training.')
parser_mnist.add_argument('--beta_schedule_switch', type=int, default=1000)
parser_mnist.add_argument('--opt_regime', type=str, default=['joint-120'], nargs="+")
parser_mnist.add_argument('--L', type=int, default=16, help="Nr. of latent channels")
parser_mnist.add_argument('--clip_qs', action="store_true", help='Clip variance of inference network.')
parser_mnist.add_argument('--ram', type=float, default=1.0, help='fraction of GPU ram to use')
parser_mnist.add_argument('--test_set_metrics', action='store_true',
help='Calculate metrics on test data. If false, metrics are calculated on eval data.')
parser_mnist.add_argument('--GECO', action='store_true', help='Use GECO algorithm for training.')
parser_mnist.add_argument('--alpha', type=float, | |
from warnings import warn
from collections import OrderedDict
from ..core.util import GuidTracker
from ..core import (
AtlasAttributeDef,
AtlasClassification,
AtlasEntity,
ClassificationTypeDef,
EntityTypeDef
)
from .lineagemixin import LineageMixIn
from . import util as reader_util
class ReaderConfiguration():
    """
    Base configuration used by the Reader class.

    Lets callers customize the delimiter used for multi-valued cells and
    the column-header prefixes (source, target, process) used when parsing
    table and column lineage sheets, plus the header name of the
    column-transformation field.
    """

    def __init__(self, **kwargs):
        super().__init__()
        # Delimiter used to split multi-valued cells (e.g. "a;b;c").
        self.value_separator = kwargs.get("value_separator", ";")
        # Header prefixes for the lineage templates.
        self.source_prefix = kwargs.get("source_prefix", "Source")
        self.target_prefix = kwargs.get("target_prefix", "Target")
        self.process_prefix = kwargs.get("process_prefix", "Process")
        # Header of the column-level transformation field.
        self.column_transformation_name = kwargs.get(
            "column_transformation_name", "transformation")
class Reader(LineageMixIn):
"""
The base Reader with functionality that supports python dicts.
"""
TEMPLATE_HEADERS = {
"FineGrainColumnLineage": [
"Target table", "Target column", "Target classifications",
"Source table", "Source column", "Source classifications",
"transformation"
],
"TablesLineage": [
"Target table", "Target type", "Target classifications",
"Source table", "Source type", "Source classifications",
"Process name", "Process type"
],
"EntityDefs": [
"Entity TypeName", "name", "description",
"isOptional", "isUnique", "defaultValue",
"typeName", "displayName", "valuesMinCount",
"valuesMaxCount", "cardinality", "includeInNotification",
"indexType", "isIndexable"
],
"ClassificationDefs": [
"classificationName", "entityTypes", "description"
],
"BulkEntities": [
"typeName", "name", "qualifiedName"
],
"UpdateLineage": [
"Target typeName", "Target qualifiedName", "Source typeName",
"Source qualifiedName", "Process name", "Process qualifiedName",
"Process typeName"
],
"ColumnMapping": [
"Source qualifiedName", "Source column", "Target qualifiedName",
"Target column", "Process qualifiedName", "Process typeName",
"Process name"
]
}
def _splitField(self, attrib):
return [e for e in attrib.split(self.config.value_separator) if e]
def __init__(self, configuration, guid=-1000):
"""
Creates the base Reader with functionality that supports python dicts.
:param configuration:
A list of dicts containing at least `Entity TypeName` and `name`
:type configuration:
:class:`~pyapacheatlas.readers.reader.ReaderConfiguration`
:param int guid:
A negative integer to use as the starting counter for entities
created by this reader.
"""
super().__init__()
self.config = configuration
self.guidTracker = GuidTracker(guid)
def _organize_attributes(self, row, existing_entities, ignore=[]):
"""
Organize the row entries into a distinct set of attributes and
relationshipAttributes.
:param dict(str,str) row:
A dict representing the input rows.
:param existing_entities:
A list of existing atlas entities that will be used to infer
any relationship attributes.
:type existing_entities:
dict(str, `:class:~pyapacheatlas.core.entity.AtlasEntity`)
:param list(str) ignore:
A set of keys to ignore and omit from the returned dict.
:return:
A dictionary containing 'attributes' and 'relationshipAttributes'
:rtype: dict(str, dict(str,str))
"""
output = {"attributes": {}, "relationshipAttributes": {}, "root":{}}
for column_name, cell_value in row.items():
# Remove the required attributes so they're not double dipping.
if column_name in ignore:
continue
# Remove any cell with a None / Null attribute
elif cell_value is None:
continue
# If the Attribute key starts with [Relationship]
# Move it to the relation
elif column_name.startswith("[Relationship]"):
cleaned_key = column_name.replace("[Relationship]", "").strip()
if cleaned_key == "meanings":
terms = self._splitField(cell_value)
min_reference = [
{"typeName": "AtlasGlossaryTerm",
"uniqueAttributes": {
"qualifiedName": <EMAIL>(t)
}
} for t in terms
]
else:
# Assuming that we can find this in an existing entity
# TODO: Add support for guid:xxx or typeName/uniqueAttributes.qualifiedName
try:
min_reference = existing_entities[cell_value].to_json(minimum=True)
# LIMITATION: We must have already seen the relationship
# attribute to be certain it can be looked up.
except KeyError:
raise KeyError(
f"The entity {cell_value} should be listed before {row['qualifiedName']}."
)
output["relationshipAttributes"].update(
{cleaned_key: min_reference}
)
# TODO: Add support for Business, Custom
elif column_name.startswith("[root]"):
# This is a root level attribute
cleaned_key = column_name.replace("[root]", "").strip()
output_value = cell_value
if self.config.value_separator in cell_value:
# There's a delimiter in here
output_value = self._splitField(cell_value)
# This seems like a poor place to add business logic like this
if cleaned_key == "classifications":
output_value = [output_value] if not isinstance(output_value, list) else output_value
output_value = [AtlasClassification(c).to_json() for c in output_value]
elif cleaned_key == "labels" and not isinstance(output_value, list):
output_value = [output_value]
output["root"].update( {cleaned_key: output_value} )
else:
output["attributes"].update({column_name: cell_value})
return output
def parse_bulk_entities(self, json_rows):
"""
Create an AtlasEntityWithExtInfo consisting of entities and their attributes
for the given json_rows.
:param list(dict(str,object)) json_rows:
A list of dicts containing at least `typeName`, `name`, and `qualifiedName`
that represents the entity to be uploaded.
:return: An AtlasEntityWithExtInfo with entities for the provided rows.
:rtype: dict(str, list(dict))
"""
# For each row,
# Extract the
# Extract any additional attributes
headers_that_arent_attribs = ["typeName", "name", "qualifiedName", "classifications", "owners", "experts"]
existing_entities = OrderedDict()
# TODO: Remove this once deprecation is removed
classification_column_used = False
for row in json_rows:
if ((row["name"] is None) or (row["typeName"] is None) or
(row["qualifiedName"] is None)):
# An empty row snuck in somehow, skip it.
continue
_extracted = self._organize_attributes(
row,
existing_entities,
headers_that_arent_attribs
)
entity = AtlasEntity(
name=row["name"],
typeName=row["typeName"],
qualified_name=row["qualifiedName"],
guid=self.guidTracker.get_guid(),
attributes=_extracted["attributes"],
relationshipAttributes=_extracted["relationshipAttributes"],
**_extracted["root"]
)
# TODO: Remove at 1.0.0 launch
if "classifications" in row:
classification_column_used = True
entity.classifications = reader_util.string_to_classification(
row["classifications"],
sep=self.config.value_separator)
if "experts" in row or "owners" in row and len( row.get("experts", []) + row.get("owners", []) ) > 0:
experts = []
owners = []
if len(row.get("experts", []) or [])>0:
experts = [{"id":e} for e in row.get("experts", "").split(self.config.value_separator) if e != '']
if len(row.get("owners", []) or [])>0:
owners = [{"id":o} for o in row.get("owners", "").split(self.config.value_separator) if o != '']
entity.contacts = {"Expert": experts, "Owner": owners }
existing_entities.update({row["qualifiedName"]: entity})
output = {"entities": [e.to_json()
for e in list(existing_entities.values())]}
# TODO: Remove this once deprecation is removed
if classification_column_used:
warn("Using `classifications` as a field header is deprecated and will be unsupported in the future."+
" Please use `[root] classifications` instead.")
return output
def parse_entity_defs(self, json_rows):
"""
Create an AtlasTypeDef consisting of entityDefs for the
given json_rows. The columns `Entity TypeName` and `Entity superTypes`
are special and map to typeName and superTypes respectively.
Entity TypeName must be repeated for each row that has a relevant
attribute being defined on it. For example, if you plan on including
five attributes for type X, you would need to have five rows and
each row would have to fill in the Entity TypeName column.
superTypes can be specified all in one cell (default delimiter is `;`
and is controlled by the Reader's configuration) or across multiple
cells. If you specify DataSet in one row for type X and hive_table
for type X in a second row, it will result in a superType
of `[DataSet, hive_table]`.
:param list(dict(str,str)) json_rows:
A list of dicts containing at least `Entity TypeName` and `name`
that represents the metadata for a given entity type's
attributeDefs. Extra metadata will be ignored.
:return: An AtlasTypeDef with entityDefs for the provided rows.
:rtype: dict(str, list(dict))
"""
entities = dict()
entities_to_superTypes = dict()
attribute_metadata_seen = set()
output = {"entityDefs": []}
splitter = lambda attrib: [e for e in attrib.split(self.config.value_separator) if e]
# Required attributes
# Get all the attributes it's expecting official camel casing
# with the exception of "Entity TypeName"
for row in json_rows:
try:
entityTypeName = row["Entity TypeName"]
except KeyError:
raise KeyError("Entity TypeName not found in {}".format(row))
_ = row.pop("Entity TypeName")
# If the user wants to add super types, they might be adding
# multiple on each row. They DON'T NEED TO but they might
entitySuperTypes = []
if "Entity superTypes" in row:
superTypes_string = row.pop("Entity superTypes")
# Might return a None or empty string
if superTypes_string:
entitySuperTypes = splitter(superTypes_string)
# Need to add this entity to the superTypes mapping if it doesn't
# already exist
if entityTypeName in entities_to_superTypes:
entities_to_superTypes[entityTypeName].extend(entitySuperTypes)
else:
entities_to_superTypes[entityTypeName] = entitySuperTypes
# Update all seen attribute metadata
columns_in_row = list(row.keys())
attribute_metadata_seen = attribute_metadata_seen.union(
set(columns_in_row))
# Remove any null cells, otherwise the AttributeDefs constructor
# doesn't use the defaults.
for column in columns_in_row:
if row[column] is None:
_ = row.pop(column)
json_attribute_def = AtlasAttributeDef(**row).to_json()
if entityTypeName not in entities:
entities[entityTypeName] = []
entities[entityTypeName].append( json_attribute_def )
# Create the entitydefs
for entityType in entities:
# Handle super types by de-duping, removing Nones / empty str and
# defaulting to ["DataSet"] if no user input super Types
all_super_types = [t for t in set(entities_to_superTypes[entityType]) if t]
if len(all_super_types) == 0:
all_super_types = ["DataSet"]
local_entity_def = EntityTypeDef(
name=entityType,
attributeDefs=entities[entityType],
# Adding this as a default until I figure
# do this from the excel / json readers.
superTypes=all_super_types
).to_json()
output["entityDefs"].append(local_entity_def)
# Extra attribute metadata (e.g. extra columns / json entries)
# are | |
# -*- coding: utf-8 -*-
import os
import sys
import tempfile
import unittest
import warnings
import xml.etree.ElementTree as ET
import pandas as pd
from mock import patch
from dwca.darwincore.utils import qualname as qn
from dwca.descriptors import ArchiveDescriptor, DataFileDescriptor
from dwca.exceptions import RowNotFound, InvalidArchive, NotADataFile
from dwca.files import CSVDataFile
from dwca.read import DwCAReader, GBIFResultsReader
from dwca.rows import CoreRow, ExtensionRow
from .helpers import sample_data_path
class TestPandasIntegration(unittest.TestCase):
    """Unit tests covering the Pandas integration features (pd_read)."""
    # TODO: test weirder archives (encoding, lime termination, ...)

    def test_missing_extension_path(self):
        """An archive whose metafile references a missing extension is invalid."""
        with self.assertRaises(InvalidArchive):
            DwCAReader(sample_data_path('dwca-missing-extension-details'))

    @patch('dwca.vendor._has_pandas', False)
    def test_pd_read_pandas_unavailable(self):
        """pd_read raises ImportError when pandas cannot be imported."""
        with DwCAReader(sample_data_path('dwca-simple-test-archive.zip')) as archive:
            with self.assertRaises(ImportError):
                archive.pd_read('occurrence.txt')

    def test_pd_read_simple_case(self):
        """pd_read returns a correctly-shaped DataFrame with the expected content."""
        with DwCAReader(sample_data_path('dwca-simple-test-archive.zip')) as archive:
            frame = archive.pd_read('occurrence.txt')
            # Type, headers and dimensions.
            self.assertIsInstance(frame, pd.DataFrame)
            header = list(frame.columns.values)
            self.assertEqual(header, ['id', 'basisOfRecord', 'locality', 'family', 'scientificName'])
            self.assertEqual(frame.shape, (2, 5))  # Row/col counts are correct
            # Content, column by column.
            self.assertEqual(list(frame['basisOfRecord'].values), ['Observation', 'Observation'])
            self.assertEqual(list(frame['family'].values), ['Tetraodontidae', 'Osphronemidae'])
            self.assertEqual(list(frame['locality'].values), ['Borneo', 'Mumbai'])
            self.assertEqual(list(frame['scientificName'].values), ['tetraodon fluviatilis', 'betta splendens'])

    def test_pd_read_no_data_files(self):
        """pd_read raises NotADataFile for missing files and non-data files."""
        with DwCAReader(sample_data_path('dwca-simple-test-archive.zip')) as archive:
            for bad_target in ('imaginary_file.txt', 'eml.xml'):
                with self.assertRaises(NotADataFile):
                    archive.pd_read(bad_target)

    def test_pd_read_extensions(self):
        """pd_read also works for extension data files."""
        with DwCAReader(sample_data_path('dwca-2extensions.zip')) as archive:
            descriptions = archive.pd_read('description.txt')
            self.assertIsInstance(descriptions, pd.DataFrame)
            self.assertEqual(descriptions.shape, (3, 4))
            self.assertEqual(list(descriptions['language'].values), ['EN', 'FR', 'EN'])

            vernaculars = archive.pd_read('vernacularname.txt')
            self.assertIsInstance(vernaculars, pd.DataFrame)
            self.assertEqual(vernaculars.shape, (4, 4))
            self.assertEqual(list(vernaculars['countryCode'].values), ['US', 'ZA', 'FI', 'ZA'])

    def test_pd_read_quotedir(self):
        """A field separator appearing inside a quoted field doesn't split the field."""
        with DwCAReader(sample_data_path('dwca-csv-quote-dir')) as archive:
            frame = archive.pd_read('occurrence.txt')
            self.assertEqual(frame.shape, (2, 5))
            self.assertEqual(list(frame['basisOfRecord'].values)[0], 'Observation, something')

    def test_pd_read_default_values(self):
        """Default values declared in the metafile are applied to every row."""
        with DwCAReader(sample_data_path('dwca-test-default.zip')) as archive:
            frame = archive.pd_read('occurrence.txt')
            self.assertIn('country', list(frame.columns.values))
            for country in list(frame['country'].values):
                self.assertEqual(country, 'Belgium')

    def test_pd_read_utf8_eol_ignored(self):
        """Ensure we don't split lines based on the x85 utf8 EOL char.

        (only the EOL string specified in meta.xml should be used).
        """
        with DwCAReader(sample_data_path('dwca-utf8-eol-test.zip')) as archive:
            frame = archive.pd_read('occurrence.txt')
            # Properly split lines yield 64 columns
            # (61 - and probably an IndexError - if errors).
            self.assertEqual(64, frame.shape[1])

    def test_pd_read_simple_csv(self):
        """pd_read works on metafile-less archives containing a single CSV."""
        with DwCAReader(sample_data_path('dwca-simple-csv.zip')) as archive:
            frame = archive.pd_read('0008333-160118175350007.csv')
            # Correct number of rows, and arbitrary data is accessible.
            self.assertEqual(3, frame.shape[0])
            self.assertEqual(list(frame['decimallatitude'].values)[1], -31.98333)
class TestDwCAReader(unittest.TestCase):
# TODO: Move row-oriented tests to another test class
"""Unit tests for DwCAReader class."""
def test_partial_default(self):
with DwCAReader(sample_data_path('dwca-partial-default.zip')) as dwca:
self.assertEqual(dwca.rows[0].data[qn('country')], 'France') # Value comes from data file
self.assertEqual(dwca.rows[1].data[qn('country')], 'Belgium') # Value is field default
def test_core_file_location(self):
with DwCAReader(sample_data_path('dwca-simple-test-archive.zip')) as dwca:
self.assertEqual(dwca.core_file_location, 'occurrence.txt')
with DwCAReader(sample_data_path('dwca-simple-csv.zip')) as dwca:
self.assertEqual(dwca.core_file_location, '0008333-160118175350007.csv')
def test_core_file(self):
with DwCAReader(sample_data_path('dwca-simple-test-archive.zip')) as dwca:
self.assertIsInstance(dwca.core_file, CSVDataFile)
# Quick content check just to be sure
self.assertEqual(dwca.core_file.lines_to_ignore, 1)
def test_extension_file_noext(self):
with DwCAReader(sample_data_path('dwca-simple-test-archive.zip')) as dwca:
self.assertEqual(dwca.extension_files, [])
def test_extension_files(self):
with DwCAReader(sample_data_path('dwca-2extensions.zip')) as dwca:
# Check extension_files is iterable and contains the right type
for ext in dwca.extension_files:
self.assertIsInstance(ext, CSVDataFile)
# Check the length is correct
self.assertEqual(len(dwca.extension_files), 2)
# Check the order of the metafile is respected + quick content check
self.assertEqual(dwca.extension_files[0].file_descriptor.file_location, 'description.txt')
self.assertEqual(dwca.extension_files[1].file_descriptor.file_location, 'vernacularname.txt')
def test_get_descriptor_for(self):
with DwCAReader(sample_data_path('dwca-2extensions.zip')) as dwca:
# We can get a DataFileDescriptor for each data file
self.assertIsInstance(dwca.get_descriptor_for('taxon.txt'), DataFileDescriptor)
self.assertIsInstance(dwca.get_descriptor_for('description.txt'), DataFileDescriptor)
self.assertIsInstance(dwca.get_descriptor_for('vernacularname.txt'), DataFileDescriptor)
# But NotADataFile exception for non-data files
with self.assertRaises(NotADataFile):
dwca.get_descriptor_for('eml.xml')
with self.assertRaises(NotADataFile):
dwca.get_descriptor_for('meta.xml')
# Also NotADataFile for files that don't actually exists
with self.assertRaises(NotADataFile):
dwca.get_descriptor_for('imaginary_file.txt')
# Basic content checks of the descriptors
taxon_descriptor = dwca.get_descriptor_for('taxon.txt')
self.assertEqual(dwca.descriptor.core, taxon_descriptor)
self.assertEqual(taxon_descriptor.file_location, 'taxon.txt')
self.assertEqual(taxon_descriptor.file_encoding, 'utf-8')
self.assertEqual(taxon_descriptor.type, 'http://rs.tdwg.org/dwc/terms/Taxon')
description_descriptor = dwca.get_descriptor_for('description.txt')
self.assertEqual(description_descriptor.file_location, 'description.txt')
self.assertEqual(description_descriptor.file_encoding, 'utf-8')
self.assertEqual(description_descriptor.type, 'http://rs.gbif.org/terms/1.0/Description')
vernacular_descriptor = dwca.get_descriptor_for('vernacularname.txt')
self.assertEqual(vernacular_descriptor.file_location, 'vernacularname.txt')
self.assertEqual(vernacular_descriptor.file_encoding, 'utf-8')
self.assertEqual(vernacular_descriptor.type, 'http://rs.gbif.org/terms/1.0/VernacularName')
# Also check we can get a DataFileDescriptor for a simple Archive (without metafile)
with DwCAReader(sample_data_path('dwca-simple-csv.zip')) as dwca:
self.assertIsInstance(dwca.get_descriptor_for('0008333-160118175350007.csv'), DataFileDescriptor)
def test_open_included_file(self):
"""Ensure DwCAReader.open_included_file work as expected."""
# Let's use it to read the raw core data file:
with DwCAReader(sample_data_path('dwca-simple-dir')) as dwca:
f = dwca.open_included_file('occurrence.txt')
raw_occ = f.read()
self.assertTrue(raw_occ.endswith("'betta' splendens\n"))
# TODO: test more cases: opening mode, exceptions raised, ...
def test_descriptor_references_non_existent_data_field(self):
"""Ensure InvalidArchive is raised when a file descriptor references non-existent field.
This ensure cases like http://dev.gbif.org/issues/browse/PF-2470 (descriptor contains
<field index="234" term="http://rs.gbif.org/terms/1.0/lastCrawled"/>, but has only 234
fields in data file) fail in a visible way (previously, archive just appeared empty).
"""
with DwCAReader(sample_data_path('dwca-malformed-descriptor')) as dwca:
with self.assertRaises(InvalidArchive):
for _ in dwca:
pass
def test_use_extensions(self):
"""Ensure the .use_extensions attribute of DwCAReader works as intended."""
with DwCAReader(sample_data_path('dwca-simple-test-archive.zip')) as dwca:
self.assertFalse(dwca.use_extensions) # Basic archive without extensions
with DwCAReader(sample_data_path('dwca-simple-csv.zip')) as dwca: # Just a CSV file, so no extensions
self.assertFalse(dwca.use_extensions)
with DwCAReader(sample_data_path('dwca-star-test-archive.zip')) as dwca:
self.assertTrue(dwca.use_extensions)
with DwCAReader(sample_data_path('dwca-2extensions.zip')) as dwca:
self.assertTrue(dwca.use_extensions)
with DwCAReader(sample_data_path('dwca-star-test-archive.zip'), extensions_to_ignore="vernacularname.txt") as dwca:
# We ignore the extension, so archive appears without
self.assertFalse(dwca.use_extensions)
def test_default_metadata_filename(self):
"""Ensure that metadata is found by it's default name.
Metadata is named "EML.xml", but no metadata attribute in Metafile.
"""
with DwCAReader(sample_data_path('dwca-default-metadata-filename.zip')) as dwca:
self.assertIsInstance(dwca.metadata, ET.Element)
v = (dwca.metadata.find('dataset')
.find('creator')
.find('individualName')
.find('givenName').text)
self.assertEqual(v, 'Nicolas')
    def test_subdirectory_archive(self):
        """Ensure we support Archives where all the content is under a single directory."""
        # Snapshot the temp dir population: opening the archive should add exactly one
        # temporary extraction directory, and closing the reader should remove it again.
        tmp_dir = tempfile.gettempdir()
        num_files_before = len(os.listdir(tmp_dir))
        with DwCAReader(sample_data_path('dwca-simple-subdir.zip')) as dwca:
            # Ensure we have access to metadata
            self.assertIsInstance(dwca.metadata, ET.Element)
            # And to the rows themselves
            for row in dwca:
                self.assertIsInstance(row, CoreRow)
            rows = list(dwca)
            self.assertEqual('Borneo', rows[0].data[qn('locality')])
            # Measured while the archive is still open, so the extraction dir exists now
            num_files_during = len(os.listdir(tmp_dir))
        # Measured after close, so the extraction dir should be gone again
        num_files_after = len(os.listdir(tmp_dir))
        # Let's also check temporary dir is correctly created and removed.
        self.assertEqual(num_files_before + 1, num_files_during)
        self.assertEqual(num_files_before, num_files_after)
def test_exception_invalid_archives_missing_metadata(self):
"""An exception is raised when referencing a missing metadata file."""
# Sometimes, the archive metafile references a metadata file that's not present in the
# archive. See for example http://dev.gbif.org/issues/browse/PF-2125
with self.assertRaises(InvalidArchive) as cm:
a = DwCAReader(sample_data_path('dwca-invalid-lacks-metadata'))
a.close()
the_exception = cm.exception
expected_message = "eml.xml is referenced in the archive descriptor but missing."
self.assertEqual(str(the_exception), expected_message)
def test_implicit_encoding_metadata(self):
"""If the metadata file doesn't specifies encoding, use UTF-8."""
with DwCAReader(sample_data_path('dwca-simple-dir')) as dwca:
v = (dwca.metadata.find('dataset').find('creator').find('individualName')
.find('surName').text)
self.assertEqual(v, u'Noé')
def test_explicit_encoding_metadata(self):
"""If the metadata file explicitly specifies encoding (<xml ...>), make sure it is used."""
with DwCAReader(sample_data_path('dwca-metadata-windows1252-encoding')) as dwca:
v = (dwca.metadata.find('dataset').find('creator').find('individualName')
.find('surName').text)
self.assertEqual(v, u'Noé') # Is the accent properly interpreted?
def test_exception_invalid_simple_archives(self):
"""Ensure an exception is raised when simple archives can't be interpreted.
When there's no metafile in an archive, this one consists of a single data core file,
and possibly some metadata in EML.xml. If the archive doesn't follow this structure,
python-dwca-reader can't detect the data file and should throw an InvalidArchive exception.
"""
# There's a random file (in addition to data and EML.xml) in this one, so we can't choose
# which file is the datafile.
with self.assertRaises(InvalidArchive):
a = DwCAReader(sample_data_path('dwca-invalid-simple-toomuch.zip'))
a.close()
with self.assertRaises(InvalidArchive):
a = DwCAReader(sample_data_path('dwca-invalid-simple-two.zip'))
a.close()
    def test_default_values_metafile(self):
        """
        Ensure default values are used when optional attributes are absent in metafile.

        Optional attributes tested here: linesTerminatedBy, fieldsTerminatedBy.
        """
        with DwCAReader(sample_data_path('dwca-meta-default-values')) as dwca:
            # Test iterating on rows...
            for row in dwca:
                self.assertIsInstance(row, CoreRow)
            # And verify the values themselves:
            # TODO(review): no value assertions exist yet — this test currently only
            # proves the archive can be iterated with the default terminators.
            # Test also "fieldsenclosedBy"?
def test_simplecsv_archive(self):
"""Ensure the reader works with archives consiting of a single CSV file.
As described in page #2 of http://www.gbif.org/resource/80639, those archives consists
of a single core data file where the first line provides the names of the Darwin Core terms
represented in the published data. That also seems to match quite well the definition of
Simple Darwin Core expressed as text: http://rs.tdwg.org/dwc/terms/simple/index.htm.
"""
with DwCAReader(sample_data_path('dwca-simple-csv.zip')) as dwca:
# Ensure we get the correct number of rows
self.assertEqual(len(dwca.rows), 3)
# Ensure we can access arbitrary data
self.assertEqual(dwca.get_corerow_by_position(1).data['decimallatitude'], '-31.98333')
# Archive descriptor should be None
self.assertIsNone(dwca.descriptor)
# (scientific) metadata should be None
self.assertIsNone(dwca.metadata)
# Let's do the same tests again but with DOS line endings in the data file
with DwCAReader(sample_data_path('dwca-simple-csv-dos.zip')) as dwca:
# Ensure we get the correct number of rows
self.assertEqual(len(dwca.rows), 3)
# Ensure we can access arbitrary data
self.assertEqual(dwca.get_corerow_by_position(1).data['decimallatitude'], '-31.98333')
# Archive descriptor should be None
self.assertIsNone(dwca.descriptor)
# (scientific) metadata should be None
self.assertIsNone(dwca.metadata)
# And with a file where fields are not double quotes-enclosed:
with DwCAReader(sample_data_path('dwca-simple-csv-notenclosed.zip')) as dwca:
# Ensure we get the correct number of rows
self.assertEqual(len(dwca.rows), 3)
# Ensure we can access arbitrary data
self.assertEqual(dwca.get_corerow_by_position(1).data['decimallatitude'], '-31.98333')
# Archive descriptor should be None
self.assertIsNone(dwca.descriptor)
# (scientific) metadata should be None
self.assertIsNone(dwca.metadata)
def test_simplecsv_archive_eml(self):
"""Test Archive without metafile, but containing metadata.
Similar to test_simplecsv_archive, except the archive also contains a Metadata file named
EML.xml. This correspond to the second case on page #2 of
http://www.gbif.org/resource/80639. The metadata file having the "standard name", it should
| |
<gh_stars>10-100
"""Help classes and functions related to geometry."""
import math
from typing import Iterable, List, Optional, Sequence, Union
import numpy as np
from scipy.spatial.transform import Rotation
from morfeus.data import ANGSTROM_TO_BOHR
from morfeus.typing import Array1D, Array2D, ArrayLike1D, ArrayLike2D
from morfeus.utils import get_connectivity_matrix
class Atom:
    """Atom common for morfeus calculations.

    Args:
        element: Atomic number (starting from 1)
        coordinates: Coordinates (Å)
        radius: vdW radius (Å)
        index: Atom index (starting from 1)

    Attributes:
        accessible_mask: Boolean mask for accessible points
        accessible_points: Points accessible to solvent (Å)
        area: Solvent-accessible surface area (Ų)
        cone: Cone tangent to atom
        coordinates: Coordinates (Å)
        coordination_number: Coordination number
        element: Atomic number (1-indexed)
        index: Atom index (1-indexed)
        invisible_mask: Boolean mask for invisible points
        occluded_mask: Boolean mask for occluded points
        occluded_points: Points occluded by other atoms (Å)
        p_values: P values
        point_areas: Point areas (Ų)
        point_volumes: Point volumes (ų)
        point: Points (Å)
        proximal_mask: Boolean mask for proximal points
        radius: vdW radius (Å)
        volume: Volume inside solvent-accessible surface area (ų)
    """

    accessible_mask: Array1D
    accessible_points: Array2D
    area: float
    cone: "Cone"
    coordinates: Array1D
    coordination_number: float
    element: int
    index: int
    invisible_mask: Array1D
    occluded_mask: Array1D
    occluded_points: Array2D
    p_values: Array1D
    point_areas: Array1D
    point_volumes: Array1D
    points: Array2D
    proximal_mask: Array1D
    radius: float
    volume: float

    def __init__(
        self, element: int, coordinates: ArrayLike1D, radius: float, index: int
    ) -> None:
        # Store the defining properties; the remaining attributes listed above are
        # populated later by the various morfeus calculators.
        self.element = element
        self.index = index
        self.radius = radius
        self.coordinates = np.array(coordinates)

    def get_cone(self) -> None:
        """Construct the cone with apex at the origin that is tangent to this atom."""
        distance = np.linalg.norm(self.coordinates)
        axis = self.coordinates / distance
        # The half-angle is set by the tangency condition: sin(alpha) = radius / distance
        half_angle = math.asin(self.radius / distance)
        self.cone = Cone(half_angle, [self], axis)

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self.index!r})"
class Cone:
    """Cone used in cone angle calculations.

    Args:
        angle: Cone angle (rad)
        atoms: Atoms that are tangent to cone (1-indexed)
        normal: Normal vector of cone

    Attributes:
        angle: Cone angle (rad)
        atoms: Atoms that are tangent to cone (1-indexed)
        normal: Normal vector of cone
    """

    angle: float
    atoms: List[Atom]
    normal: Array1D

    def __init__(
        self, angle: float, atoms: Sequence[Atom], normal: ArrayLike1D
    ) -> None:
        self.angle = angle
        self.atoms = list(atoms)
        self.normal = np.array(normal)

    def is_inside(self, atom: Atom) -> bool:
        """Tests if atom lies inside the cone.

        Args:
            atom: Atom to test.

        Returns:
            True if inside, False if outside.
        """
        # Get vertex angle of atom
        beta = atom.cone.angle

        # Calculate angle between cone normal vector and unit vector to atom
        cos = np.dot(atom.cone.normal, self.normal)

        # Take into account numerical problems that sometimes gives a value
        # somewhat above 1
        if 1 - cos > 0 and 1 - cos < 1e-5:
            cos = 1
        angle = math.acos(cos)

        # Check if atom lies inside cone, within numerical reason
        diff = self.angle - (beta + angle)
        if diff > -1e-5:
            return True
        else:
            return False

    def is_inside_points(self, points: ArrayLike2D, method: str = "cross") -> Array1D:
        """Test if points are inside cone of atom.

        Args:
            points: Points to check (Å)
            method: Method for testing: 'angle', 'cross' or 'dot'

        Returns:
            is_inside: Boolean array with points marked as inside

        Raises:
            ValueError: When method not supported
        """
        points = np.array(points)
        if method in ["cross", "dot"]:
            # Calculate radius of cone at distance of each point
            cone_distances = np.dot(points, self.normal)
            cone_radii = np.tan(self.angle) * cone_distances

            # Calculate orthogonal distance of points to cone normal vector
            if method == "cross":
                orth_distances = np.linalg.norm(np.cross(-points, self.normal), axis=1)
            elif method == "dot":
                # BUGFIX: the orthogonal component of p w.r.t. the unit normal n is
                # p - (p·n)n. The previous code computed -p - (p·n)n, which mixes the
                # signs of the two terms (its norm is sqrt(|p|² + 3(p·n)²) instead of
                # sqrt(|p|² - (p·n)²)) and so disagreed with the 'cross' and 'angle'
                # methods for any point with a component along the normal.
                orth_distances = np.linalg.norm(
                    points - np.dot(points, self.normal).reshape(-1, 1) * self.normal,
                    axis=1,
                )

            # Determine if distance is smaller than cone radius.
            inside = orth_distances < cone_radii
        elif method == "angle":
            norm_points = points / np.linalg.norm(points, axis=1).reshape(-1, 1)

            # Calculate angle between cone normal vector and unit vector to
            # atom
            cos = np.dot(norm_points, self.normal)

            # Take into account numerical problems that sometimes gives a value
            # somewhat above 1
            cos[np.logical_and(1 - cos > 0, 1 - cos < 1e-5)] = 1
            angle = np.arccos(cos)

            # Check if atom lies inside cone, within numerical reason
            diff = self.angle - angle
            inside = diff > -1e-5
        else:
            raise ValueError(f"method={method} not supported.")
        is_inside: np.ndarray = inside

        return is_inside

    def __repr__(self) -> str:
        atoms = ", ".join([str(atom.index) for atom in self.atoms])
        return f"{self.__class__.__name__}(Tangent atoms: {atoms})"
class Sphere:
"""Sphere class for creating and holding points on vdW surface.
Args:
center: Coordinates for center (Å)
density: Area per point (Ų) for empty sphere
and volume per point (ų) for filled sphere.
filled: Whether a sphere with internal points should be constructed (works only
with method='projection')
method: Method for generating points: 'fibonacci', 'polar' or 'projection'
radius: Radius (Å)
Attributes:
area: Area (Ų)
center: Coordinates for sphere center (Å)
circumference: Circumference (Å)
density: Density of points (Ų or ų)
points: Points in/on sphere (Å)
radius: Radius (Å)
volume: Volume (ų)
"""
area: float
center: Array1D
circumference: float
density: float
points: Array1D
radius: float
volume: float
def __init__(
self,
center: ArrayLike1D,
radius: float,
density: float = 0.005,
method: str = "fibonacci",
filled: bool = False,
) -> None:
self.center = np.array(center)
self.radius = radius
self.circumference = math.pi * radius * 2
self.area = 4 * radius ** 2 * math.pi
self.volume = 4 * radius ** 3 * math.pi / 3
self.density = density
if method == "polar":
self.points = self._get_points_polar(density=density)
elif method == "projection":
self.points = self._get_points_projected(density=density, filled=filled)
elif method == "fibonacci":
self.points = self._get_points_fibonacci(density=density)
@staticmethod
def _get_cartesian_coordinates(
r: float, theta: ArrayLike1D, phi: ArrayLike1D
) -> Array2D:
"""Converts polar to Cartesian coordinates.
Args:
phi: Phi angles (radians)
r: Radius (Å)
theta: Theta angles (radians)
Returns:
points: Cartesian points (Å)
"""
# Calculate x, y and z coordinates
x = r * np.sin(theta) * np.cos(phi)
y = r * np.sin(theta) * np.sin(phi)
z = r * np.cos(theta)
# Stack coordinates as columns
points: np.ndarray = np.column_stack((x, y, z))
return points
def _get_points_fibonacci(self, density: float) -> Array2D:
"""Construct points on sphere surface by the Fibonacci golden spiral method.
Method vectorized from
https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere
Args:
density: Area per point (Ų)
Returns:
points: Surface points (Å)
"""
# Generate points on unit sphere
rnd = 1
n = int(round((self.area / density)))
offset = 2.0 / n
increment = math.pi * (3.0 - math.sqrt(5.0))
i = np.arange(n)
y = ((i * offset) - 1) + (offset / 2)
r = np.sqrt(1 - np.square(y))
phi = np.mod((i + rnd), n) * increment
x = np.cos(phi) * r
z = np.sin(phi) * r
# Generate points and adjust with radius and center
points = np.column_stack((x, y, z))
points = points * self.radius
points: np.ndarray = points + self.center
return points
def _get_points_polar(self, density: float) -> Array2D:
"""Construct points on sphere by polar coordinate method.
Args:
density: Area per point (Ų)
Returns:
points: Points on sphere (Å)
"""
# Calculate number of points
n = int(round((self.area / density / 2) ** (1 / 2)))
# Set up points along theta and phi
theta = np.linspace(0, math.pi, n)
phi = np.linspace(0, 2 * math.pi, 2 * n)
# Combine together all the possible combinations of theta and phi
combined_theta_phi = np.dstack(np.meshgrid(theta, phi)).reshape(-1, 2)
theta = combined_theta_phi[:, 0]
phi = combined_theta_phi[:, 1]
# Get the Cartesian coordinates
points = self._get_cartesian_coordinates(self.radius, theta, phi)
# Adjust to sphere center
points: np.ndarray = points + self.center
return points
def _get_points_projected(self, density: float, filled: bool = False) -> Array2D:
"""Construct points on sphere surface by projection.
Args:
density: Area per point (Ų) for empty sphere and volume
per point (ų) for filled sphere
filled: Whether to generate internal points
Returns:
points: Array of surface points (Å)
"""
# Calculate number of points from density of empty or filled sphere.
if filled:
n = int(round((self.volume / density * 6 / math.pi) ** (1 / 3)))
else:
n = int(round((self.area / density * 6 / math.pi) ** (1 / 3)))
# Generate points in box
r = self.radius
x = np.linspace(-r, r, n)
y = np.linspace(-r, r, n)
z = np.linspace(-r, r, n)
points = np.stack(np.meshgrid(x, y, z), -1).reshape(-1, 3)
# Remove points outside of | |
does not have a commutative antiderivative, it cannot be
solved by linodesolve.
Returns
=======
Dict
Raises
======
NotImplementedError
When the coefficient matrix does not have a commutative antiderivative
See Also
========
linodesolve: Function for which linodesolve_type gets the information
"""
match = {}
is_non_constant = not _matrix_is_constant(A, t)
is_non_homogeneous = not (b is None or b.is_zero_matrix)
type = "type{}".format(int("{}{}".format(int(is_non_constant), int(is_non_homogeneous)), 2) + 1)
B = None
match.update({"type_of_equation": type, "antiderivative": B})
if is_non_constant:
B, is_commuting = _is_commutative_anti_derivative(A, t)
if not is_commuting:
raise NotImplementedError(filldedent('''
The system does not have a commutative antiderivative, it cannot be solved
by linodesolve.
'''))
match['antiderivative'] = B
match.update(_first_order_type5_6_subs(A, t, b=b))
return match
def _first_order_type5_6_subs(A, t, b=None):
    """Try to reduce a first-order system to type 5/6 via a change of variable.

    Looks for a factorization ``A = f(t) * A_c`` with ``A_c`` constant (via
    ``_factor_matrix``). If the substitution ``t_ = F(t) = Integral(f(t), t)``
    is invertible, the system can be rewritten with constant coefficients in
    the new variable, and the match dict is filled accordingly; otherwise an
    empty dict is returned.
    """
    match = {}
    factor_terms = _factor_matrix(A, t)
    is_homogeneous = b is None or b.is_zero_matrix

    if factor_terms is not None:
        t_ = Symbol("{}_".format(t))
        F_t = integrate(factor_terms[0], t)
        inverse = solveset(Eq(t_, F_t), t)

        # Note: A simple way to check if a function is invertible
        # or not.
        if isinstance(inverse, FiniteSet) and not inverse.has(Piecewise)\
            and len(inverse) == 1:

            A = factor_terms[1]
            if not is_homogeneous:
                # Divide the rhs by the scalar factor and express it in terms of
                # the new variable using the inverse substitution t = F^{-1}(t_).
                b = b / factor_terms[0]
                b = b.subs(t, list(inverse)[0])
            # type5 for homogeneous systems, type6 for non-homogeneous ones
            type = "type{}".format(5 + (not is_homogeneous))
            match.update({'func_coeff': A, 'tau': F_t,
                          't_': t_, 'type_of_equation': type, 'rhs': b})

    return match
def linear_ode_to_matrix(eqs, funcs, t, order):
    r"""
    Convert a linear system of ODEs to matrix form

    Explanation
    ===========

    Express a system of linear ordinary differential equations as a single
    matrix differential equation [1]. For example the system $x' = x + y + 1$
    and $y' = x - y$ can be represented as

    .. math:: A_1 X' = A_0 X + b

    where $A_1$ and $A_0$ are $2 \times 2$ matrices and $b$, $X$ and $X'$ are
    $2 \times 1$ matrices with $X = [x, y]^T$.

    Higher-order systems are represented with additional matrices e.g. a
    second-order system would look like

    .. math:: A_2 X'' = A_1 X' + A_0 X + b

    Examples
    ========

    >>> from sympy import Function, Symbol, Matrix, Eq
    >>> from sympy.solvers.ode.systems import linear_ode_to_matrix
    >>> t = Symbol('t')
    >>> x = Function('x')
    >>> y = Function('y')

    We can create a system of linear ODEs like

    >>> eqs = [
    ...     Eq(x(t).diff(t), x(t) + y(t) + 1),
    ...     Eq(y(t).diff(t), x(t) - y(t)),
    ... ]
    >>> funcs = [x(t), y(t)]
    >>> order = 1 # 1st order system

    Now ``linear_ode_to_matrix`` can represent this as a matrix
    differential equation.

    >>> (A1, A0), b = linear_ode_to_matrix(eqs, funcs, t, order)
    >>> A1
    Matrix([
    [1, 0],
    [0, 1]])
    >>> A0
    Matrix([
    [1, 1],
    [1, -1]])
    >>> b
    Matrix([
    [1],
    [0]])

    The original equations can be recovered from these matrices:

    >>> eqs_mat = Matrix([eq.lhs - eq.rhs for eq in eqs])
    >>> X = Matrix(funcs)
    >>> A1 * X.diff(t) - A0 * X - b == eqs_mat
    True

    If the system of equations has a maximum order greater than the
    order of the system specified, an ODEOrderError exception is raised.

    >>> eqs = [Eq(x(t).diff(t, 2), x(t).diff(t) + x(t)), Eq(y(t).diff(t), y(t) + x(t))]
    >>> linear_ode_to_matrix(eqs, funcs, t, 1)
    Traceback (most recent call last):
    ...
    ODEOrderError: Cannot represent system in 1-order form

    If the system of equations is nonlinear, then ODENonlinearError is
    raised.

    >>> eqs = [Eq(x(t).diff(t), x(t) + y(t)), Eq(y(t).diff(t), y(t)**2 + x(t))]
    >>> linear_ode_to_matrix(eqs, funcs, t, 1)
    Traceback (most recent call last):
    ...
    ODENonlinearError: The system of ODEs is nonlinear.

    Parameters
    ==========

    eqs : list of SymPy expressions or equalities
        The equations as expressions (assumed equal to zero).
    funcs : list of applied functions
        The dependent variables of the system of ODEs.
    t : symbol
        The independent variable.
    order : int
        The order of the system of ODEs.

    Returns
    =======

    The tuple ``(As, b)`` where ``As`` is a tuple of matrices and ``b`` is the
    matrix representing the rhs of the matrix equation.

    Raises
    ======

    ODEOrderError
        When the system of ODEs have an order greater than what was specified
    ODENonlinearError
        When the system of ODEs is nonlinear

    See Also
    ========

    linear_eq_to_matrix: for systems of linear algebraic equations.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Matrix_differential_equation

    """
    from sympy.solvers.solveset import linear_eq_to_matrix

    if any(ode_order(eq, func) > order for eq in eqs for func in funcs):
        msg = "Cannot represent system in {}-order form"
        raise ODEOrderError(msg.format(order))

    As = []

    for o in range(order, -1, -1):
        # Work from the highest derivative down
        funcs_deriv = [func.diff(t, o) for func in funcs]

        # linear_eq_to_matrix expects a proper symbol so substitute e.g.
        # Derivative(x(t), t) for a Dummy.
        rep = {func_deriv: Dummy() for func_deriv in funcs_deriv}
        eqs = [eq.subs(rep) for eq in eqs]
        syms = [rep[func_deriv] for func_deriv in funcs_deriv]

        # Ai is the matrix for X(t).diff(t, o)
        # eqs is minus the remainder of the equations.
        try:
            Ai, b = linear_eq_to_matrix(eqs, syms)
        except NonlinearError:
            raise ODENonlinearError("The system of ODEs is nonlinear.")

        Ai = Ai.applyfunc(expand_mul)

        # Only the highest-order matrix keeps its sign; the lower-order ones are
        # negated because they are moved to the rhs of A_n X^(n) = ... + b.
        As.append(Ai if o == order else -Ai)

        # The remainder b becomes the "equations" for the next (lower) order;
        # at order zero it is the final constant rhs.
        if o:
            eqs = [-eq for eq in b]
        else:
            rhs = b

    return As, rhs
def matrix_exp(A, t):
    r"""
    Matrix exponential $\exp(A*t)$ for the matrix ``A`` and scalar ``t``.

    Explanation
    ===========

    This function returns the $\exp(A*t)$ by doing a simple
    matrix multiplication:

    .. math:: \exp(A*t) = P * expJ * P^{-1}

    where $expJ$ is $\exp(J*t)$. $J$ is the Jordan normal
    form of $A$ and $P$ is matrix such that:

    .. math:: A = P * J * P^{-1}

    The matrix exponential $\exp(A*t)$ appears in the solution of linear
    differential equations. For example if $x$ is a vector and $A$ is a matrix
    then the initial value problem

    .. math:: \frac{dx(t)}{dt} = A \times x(t), x(0) = x0

    has the unique solution

    .. math:: x(t) = \exp(A t) x0

    Examples
    ========

    >>> from sympy import Symbol, Matrix, pprint
    >>> from sympy.solvers.ode.systems import matrix_exp
    >>> t = Symbol('t')

    We will consider a 2x2 matrix for computing the exponential

    >>> A = Matrix([[2, -5], [2, -4]])
    >>> pprint(A)
    [2  -5]
    [     ]
    [2  -4]

    Now, exp(A*t) is given as follows:

    >>> pprint(matrix_exp(A, t))
    [   -t           -t                    -t              ]
    [3*e  *sin(t) + e  *cos(t)         -5*e  *sin(t)       ]
    [                                                      ]
    [         -t                     -t           -t       ]
    [      2*e  *sin(t)         - 3*e  *sin(t) + e  *cos(t)]

    Parameters
    ==========

    A : Matrix
        The matrix $A$ in the expression $\exp(A*t)$
    t : Symbol
        The independent variable

    See Also
    ========

    matrix_exp_jordan_form: For exponential of Jordan normal form

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Jordan_normal_form
    .. [2] https://en.wikipedia.org/wiki/Matrix_exponential

    """
    # Delegate the decomposition; conjugating exp(J*t) back with P gives exp(A*t).
    P, expJ = matrix_exp_jordan_form(A, t)
    return P * expJ * P.inv()
def matrix_exp_jordan_form(A, t):
r"""
Matrix exponential $\exp(A*t)$ for the matrix *A* and scalar *t*.
Explanation
===========
Returns the Jordan form of the $\exp(A*t)$ along with the matrix $P$ such that:
.. math::
\exp(A*t) = P * expJ * P^{-1}
Examples
========
>>> from sympy import Matrix, Symbol
>>> from sympy.solvers.ode.systems import matrix_exp, matrix_exp_jordan_form
>>> t = Symbol('t')
We will consider a 2x2 defective matrix. This shows that our method
works even for defective matrices.
>>> A = Matrix([[1, 1], [0, 1]])
It can be observed that this function gives us the Jordan normal form
and the required invertible matrix P.
>>> P, expJ = matrix_exp_jordan_form(A, t)
Here, it is shown that P and expJ returned by this function is correct
as they satisfy the formula: P * expJ * P_inverse = exp(A*t).
>>> P * expJ * P.inv() == matrix_exp(A, t)
True
Parameters
==========
A : Matrix
The matrix $A$ in the expression $\exp(A*t)$
t : Symbol
The independent variable
References
==========
.. [1] https://en.wikipedia.org/wiki/Defective_matrix
.. [2] https://en.wikipedia.org/wiki/Jordan_matrix
.. [3] https://en.wikipedia.org/wiki/Jordan_normal_form
"""
N, M = A.shape
if N != M:
raise ValueError('Needed square matrix but got shape (%s, %s)' % (N, M))
elif A.has(t):
raise ValueError('Matrix A should not depend on t')
def jordan_chains(A):
'''Chains from Jordan normal form analogous to M.eigenvects().
Returns a dict with eignevalues as keys like:
{e1: [[v111,v112,...], [v121, v122,...]], e2:...}
where vijk is the kth vector in the jth chain for eigenvalue i.
'''
| |
<reponame>alonhirsch/pyTenable
'''
networks
========
The following methods allow for interaction into the Tenable.io
:devportal:`networks <networks>` API endpoints.
Methods available on ``tio.networks``:
.. rst-class:: hide-signature
.. autoclass:: NetworksAPI
:members:
'''
from tenable.io.base import TIOEndpoint, TIOIterator
from tenable.errors import UnexpectedValueError
class NetworksIterator(TIOIterator):
    '''
    The networks iterator provides a scalable way to work through networks
    result sets of any size.  The iterator will walk through each page of data,
    returning one record at a time.  If it reaches the end of a page of
    records, then it will request the next page of information and then
    continue to return records from the next page (and the next, and the next)
    until the counter reaches the total number of records that the API has
    reported.

    Attributes:
        count (int): The current number of records that have been returned
        page (list):
            The current page of data being walked through.  pages will be
            cycled through as the iterator requests more information from the
            API.
        page_count (int): The number of records returned from the current page.
        total (int):
            The total number of records that exist for the current request.
    '''
    # All paging behavior is inherited from TIOIterator; this subclass only
    # names the iterator type for the networks endpoints.
    pass
class NetworksAPI(TIOEndpoint):
'''
This will contain all methods related to networks
'''
def create(self, name, description=None, assets_ttl_days=None):
'''
Creates a new network within Tenable.io
:devportal:`networks: create <networks-create>`
Args:
name (str): The name of the new network.
description (str, optional): Description of the network.
assets_ttl_days (int, optional): The number of days to wait before assets age out.
Assets will be permanently deleted if they are not seen on a scan within the specified number of days.
Minimum value: 90
Maximum value: 365
Returns:
:obj:`dict`:
The resource record of the newly created network.
Examples:
>>> nw = tio.networks.create('Example')
'''
if not description:
description = ''
return self._api.post('networks', json={
'name': self._check('name', name, str),
'description': self._check('description', description, str),
'assets_ttl_days': self._check('assets_ttl_days', assets_ttl_days, int)
}).json()
def delete(self, network_id):
'''
Deletes the specified network.
:devportal:`networks: delete <networks-delete>`
Args:
network_id (str): The UUID of the network to remove.
Examples:
>>> tio.networks.delete('00000000-0000-0000-0000-000000000000')
'''
self._api.delete('networks/{}'.format(self._check('network_id', network_id, 'uuid')))
def details(self, network_id):
'''
Retrieves the details of the specified network.
:devportal:`networks: details <networks-details>`
Args:
network_id (str): The UUID of the network.
Examples:
>>> nw = tio.networks.details('00000000-0000-0000-0000-000000000000')
'''
return self._api.get('networks/{}'.format(
self._check('network_id', network_id, 'uuid'))).json()
def edit(self, network_id, name, description=None, assets_ttl_days=None):
'''
Updates the specified network resource.
:devportal:`networks: update <networks-update>`
Args:
network_id (str): The UUID of the network resource to update.
name (str): The new name of the network resource.
description (str, optional):
The new description of the network resource.
assets_ttl_days (int, optional): The number of days to wait before assets age out.
Assets will be permanently deleted if they are not seen on a scan within the specified number of days.
Minimum value: 90
Maximum value: 365
Returns:
:obj:`dict`:
The updates network resource.
Examples:
>>> nw = tio.networks.edit('00000000-0000-0000-0000-000000000000',
... 'Updated Network', 'Updated Description', 180)
'''
if not description:
description = ''
return self._api.put('networks/{}'.format(self._check('network_id', network_id, 'uuid')),
json={
'name': self._check('name', name, str),
'description': self._check('description', description, str),
'assets_ttl_days': self._check('assets_ttl_days', assets_ttl_days, int)
}).json()
def assign_scanners(self, network_id, *scanner_uuids):
'''
Assigns one or many scanners to a network.
:devportal:`networks: assign-scanner <networks-assign-scanner>`
:devportal:`networks: bulk-assign-scanner <networks-assign-scanner-bulk>`
Args:
network_id (str): The UUID of the network.
*scanner_uuids (str): Scanner UUID(s) to assign to the network.
Examples:
Assign a single scanner:
>>> tio.networks,assign_scanners(
... '00000000-0000-0000-0000-000000000000', # Network UUID
... '00000000-0000-0000-0000-000000000000') # Scanner UUID
Assign multiple scanners:
>>> tio.networks,assign_scanners(
... '00000000-0000-0000-0000-000000000000', # Network UUID
... '00000000-0000-0000-0000-000000000000', # Scanner1 UUID
... '00000000-0000-0000-0000-000000000000') # Scanner2 UUID
'''
if len(scanner_uuids) == 1:
self._api.post('networks/{}/scanners/{}'.format(
self._check('network_id', network_id, 'uuid'),
self._check('scanner_uuid', scanner_uuids[0], 'scanner-uuid')
))
elif len(scanner_uuids) > 1:
self._api.post('networks/{}/scanners'.format(
self._check('network_id', network_id, 'uuid')),
json={'scanner_uuids': [self._check('scanner_uuid', i, 'scanner-uuid')
for i in scanner_uuids]})
else:
raise UnexpectedValueError('No scanner_uuids were supplied.')
def list_scanners(self, network_id):
'''
Retrieves the list of scanners associated to a given network.
:devportal:`networks: list-scanners <networks-list-scanners>`
Args:
network_id (str): The UUID of the network.
Returns:
:obj:`list`:
List of scanner resources associated to this network.
Examples:
>>> network = '00000000-0000-0000-0000-000000000000'
>>> for scanner in tio.networks.list_scanners(network):
... pprint(scanner)
'''
return self._api.get('networks/{}/scanners'.format(
self._check('network_id', network_id, 'uuid'))).json()['scanners']
    def unassigned_scanners(self, network_id):
        '''
        Retrieves the list of scanners that are currently unassigned to the given
        network.  This will include scanners and scanner groups that are
        currently assigned to the default network.

        :devportal:`networks: list-assignable-scanners <networks-list-assignable-scanners>`

        Args:
            network_id (str): The UUID of the network.

        Returns:
            :obj:`list`:
                The list of unassigned scanner resources

        Examples:
            >>> network = '00000000-0000-0000-0000-000000000000'
            >>> for scanner in tio.networks.unassigned_scanners(network):
            ...     pprint(scanner)
        '''
        # The API wraps the list in a 'scanners' envelope; unwrap it for callers.
        return self._api.get('networks/{}/assignable-scanners'.format(
            self._check('network_id', network_id, 'uuid'))).json()['scanners']
def list(self, *filters, **kw):
'''
Get the listing of configured networks from Tenable.io.
:devportal:`networks: list <networks-list>`
Args:
*filters (tuple, optional):
Filters are tuples in the form of ('NAME', 'OPERATOR', 'VALUE').
Multiple filters can be used and will filter down the data being
returned from the API.
Examples:
- ``('name', 'eq', 'example')``
As the filters may change and sortable fields may change over
time, it's highly recommended that you look at the output of
the :py:meth:`tio.networks.network_filters() <FiltersAPI.networks_filters>`
endpoint to get more details.
filter_type (str, optional):
The filter_type operator determines how the filters are combined
together. ``and`` will inform the API that all of the filter
conditions must be met for an access group to be returned,
whereas ``or`` would mean that if any of the conditions are met,
the access group record will be returned.
include_deleted (bool, optional):
Indicates whether deleted network objects should be included in
the response. If left unspecified, the default is ``False``.
limit (int, optional):
The number of records to retrieve. Default is 50
offset (int, optional):
The starting record to retrieve. Default is 0.
sort (tuple, optional):
A tuple of tuples identifying the the field and sort order of
the field.
wildcard (str, optional):
A string to pattern match against all available fields returned.
wildcard_fields (list, optional):
A list of fields to optionally restrict the wild-card matching
to.
Returns:
:obj:`NetworksIterator`:
An iterator that handles the page management of the requested
records.
Examples:
Getting the listing of all agents:
>>> for nw in tio.networks.list():
... pprint(nw)
Retrieving all of the windows agents:
>>> for nw in tio.access_groups.list(('name', 'match', 'win')):
... pprint(nw)
'''
limit = 50
offset = 0
pages = None
query = self._parse_filters(filters,
self._api.filters.networks_filters(), rtype='colon')
# If the offset was set to something other than the default starting
# point of 0, then we will update offset to reflect that.
if 'offset' in kw and self._check('offset', kw['offset'], int):
offset = kw['offset']
# The limit parameter affects how many records at a time we will pull
# from the API. The default in the API is set to 50, however we can
# pull any variable amount.
if 'limit' in kw and self._check('limit', kw['limit'], int):
limit = kw['limit']
# For the sorting fields, we are converting the tuple that has been
# provided to us and converting it into a comma-delimited string with
# each field being represented with its sorting order. e.g. If we are
# presented with the following:
#
# sort=(('field1', 'asc'), ('field2', 'desc'))
#
# we will generate the following string:
#
# sort=field1:asc,field2:desc
#
if 'sort' in kw and self._check('sort', kw['sort'], tuple):
query['sort'] = ','.join(['{}:{}'.format(
self._check('sort_field', i[0], str),
self._check('sort_direction', i[1], str, choices=['asc', 'desc'])
) for i in kw['sort']])
# The filter_type determines how the filters are combined together.
# The default is 'and', however you can always explicitly define 'and'
# or 'or'.
if 'filter_type' in kw and self._check(
'filter_type', kw['filter_type'], str, choices=['and', 'or']):
query['ft'] = kw['filter_type']
# The wild-card filter text refers to how the API will pattern match
# within all fields, or specific fields using the wildcard_fields param.
if 'wildcard' in kw and self._check('wildcard', kw['wildcard'], str):
query['w'] = kw['wildcard']
# The wildcard_fields parameter allows the user to restrict the fields
# that the wild-card pattern match pertains to.
if 'wildcard_fields' in kw and self._check(
'wildcard_fields', kw['wildcard_fields'], list):
query['wf'] = ','.join(kw['wildcard_fields'])
if 'include_deleted' in kw and self._check(
'include_deleted', kw['include_deleted'], bool):
query['includeDeleted'] = kw['include_deleted']
# Return the Iterator.
return NetworksIterator(self._api,
_limit=limit,
_offset=offset,
_pages_total=pages,
_query=query,
_path='networks',
_resource='networks'
| |
# coding=utf-8
# Author: <NAME>
# Date: Nov 16, 2014
#
# Description: Plot DDI Statistics
#
#
# coding=utf-8
from __future__ import division
import numpy as np
import pandas as pd
import scipy.stats
from scipy import stats
# Shorthand for pandas IndexSlice.  NOTE(review): shadows the builtin `slice`.
slice = pd.IndexSlice
# Console/display configuration for interactive inspection of DataFrames.
pd.set_option('display.max_rows', 24)
pd.set_option('display.max_columns', 24)
pd.set_option('display.width', 300)
pd.set_option('display.precision', 2)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
import util
from collections import OrderedDict
import math
def calc_conf_interval(r, **kwargs):
    """Return the 95% Student-t confidence interval of a (mean, std) pair.

    Args:
        r: sequence-like (e.g. pd.Series) whose first element is the sample
            mean and second element is the sample standard deviation.
        **kwargs: must contain 'n_runs', the number of runs the mean/std
            were computed over.

    Returns:
        pd.Series with index ['ci_min', 'ci_max'].
    """
    # BUG FIX: the original read kwargs['n_runs'] into `df` but then used an
    # undefined name `n_runs`, raising NameError on every call.
    n_runs = kwargs['n_runs']
    df = n_runs - 1
    mean = r.iloc[0]
    std = r.iloc[1]
    # Standard error of the mean.
    sigma = std / math.sqrt(n_runs)
    # Pass the confidence level positionally: the keyword was renamed from
    # `alpha` to `confidence` in SciPy 1.9, so positional works everywhere.
    (ci_min, ci_max) = stats.t.interval(0.95, df, loc=mean, scale=sigma)
    return pd.Series([ci_min, ci_max], index=['ci_min', 'ci_max'])
#
# Load CSVs
#
# Users (dfu), co-administrations (dfc) and interactions (dfi) summary frames.
dfu, dfc, dfi = util.dfUsersInteractionsSummary(loadCoAdmin=False)
#dfig = dfi.groupby('id_user').agg({'n_drugs':'sum','n_ij_ddi':'sum','n_coadmin':'sum'})
# Dispensed-drug records: one row per (user, drug label).
dfd = pd.read_csv('results/dd_drugs.csv.gz', header=0, encoding='utf-8',
    names=['id_user','DB_label','count','en_i'],
    dtype={'id_user':np.int64})
# Sanity dumps of each loaded frame.
print '>> dfu'
print dfu.head()
print dfu.shape
print '>> dfc'
print dfc.head()
print dfc.shape
print '>> dfi'
print dfi.head()
print dfi.shape
print '>> dfd'
print dfd.head()
print dfd.shape
# Attach user demographics (gender/age) to each interaction row.
dfiu = pd.merge(dfi, dfu[['gender','age','age_group']], how='left', left_on='id_user', right_index=True)
print '>> dfiu'
print dfiu.head()
print '--- --- ---'
#
# Removed Hormones
#
# Interactions involving female hormones are excluded to test whether the
# female excess of DDIs is driven by hormone therapy.
female_hormones = ['Ethinyl Estradiol','Estradiol','Norethisterone','Levonorgestrel','Estrogens Conj.']
dfiu_nh = dfiu.loc[ (~(dfiu['en_i'].isin(female_hormones)) & ~(dfiu['en_j'].isin(female_hormones)) ) , : ].reset_index(drop=True)
# NOTE(review): assigned from interaction-level rows by index alignment, not
# from a per-user aggregate — confirm this is intended.
dfu['len_ij_ddi_not_hormone'] = dfiu_nh['len_ij_ddi']
# Per-user totals after hormone removal ('id_user' count = number of DDIs).
dfiug_nh = dfiu_nh.groupby('id_user').agg({'len_ij_ddi':'sum','id_user':'count'})
dfu['n_ij_ddi_not_hormone'] = dfiug_nh['id_user']
print dfu.loc[ dfu['len_ij_ddi']>dfu['len_ij_ddi_not_hormone'], : ].head()
#
# Variables
#
# Cohort counts (patients = unique index entries of dfu).
n_user = len( dfu.index.unique() )
n_user_adult = len( dfu.loc[ (dfu['age']>=20), : ].index.unique() )
n_user_male = len( dfu.loc[ (dfu['gender']=='Male'), : ].index.unique() )
n_user_female = len( dfu.loc[ (dfu['gender']=='Female'), : ].index.unique() )
n_user_40p = len( dfu.loc[ ( (dfu['age']>=40) ), :].index.unique() )
# NOTE(review): reported as "65+" below but filters age>=66 — confirm cutoff.
n_user_65p = len( dfu.loc[ ( (dfu['age']>=66) ), :].index.unique() )
# Dispensation / drug / co-administration totals.
n_a = dfu['n_a'].sum()
n_i = dfd.groupby('DB_label').agg({'en_i':'first'}).shape[0]
n_i_inter = len(np.unique(dfi[['db_i','db_j']].values))
n_ij = dfu['n_ij'].sum()
n_ij_ddi = dfu['n_ij_ddi'].sum()
n_ij_ddi_unique = len( dfiu['db_ij'].unique() )
n_ij_ddi_unique_nh = len( dfiu_nh['db_ij'].unique() )
# Patients by exposure level: 2+ drugs, 1+ co-administration, 1+ DDI,
# optionally restricted by gender, age and severity.
n_user_gt2drugs = len( dfu.loc[ (dfu['n_i']>1), : ].index.unique() )
n_user_gt1coadmin = len( dfu.loc[ (dfu['n_ij']>0), : ].index.unique() )
n_user_male_ij = len( dfu.loc[ ((dfu['gender']=='Male') & (dfu['n_ij']>0)), : ].index.unique() )
n_user_female_ij = len( dfu.loc[ ((dfu['gender']=='Female') & (dfu['n_ij']>0)), : ].index.unique() )
n_user_ij_ddi = len( dfu.loc[ (dfu['n_ij_ddi']>0), : ].index.unique() )
n_user_ij_ddi_major = len( dfiu.loc[ (dfiu['severity'].isin(['Major'])) , : ]['id_user'].unique() )
n_user_adult_ij_ddi = len( dfu.loc[ ((dfu['age']>=20) & (dfu['n_ij_ddi']>0)), : ].index.unique() )
n_user_adult_ij_ddi_major = len( dfiu.loc[ ((dfiu['age']>=20) & (dfiu['severity'].isin(['Major']))) , : ]['id_user'].unique() )
n_males_qt1inter = len( dfu.loc[ ((dfu['gender']=='Male') & (dfu['n_ij_ddi']>0)), : ].index.unique() )
n_females_qt1inter = len( dfu.loc[ ((dfu['gender']=='Female') & (dfu['n_ij_ddi']>0)), : ].index.unique() )
n_males_qt1inter_nh = len( dfu.loc[ ((dfu['gender']=='Male') & (dfu['n_ij_ddi_not_hormone']>0)), : ].index.unique() )
n_females_qt1inter_nh = len( dfu.loc[ ((dfu['gender']=='Female') & (dfu['n_ij_ddi_not_hormone']>0)), : ].index.unique() )
n_user_ij_ddi_40p = len( dfu.loc[ ((dfu['n_ij_ddi']>0) & (dfu['age']>=40) ), :].index.unique() )
n_user_ij_ddi_major_65p = len( dfiu.loc[ ((dfiu['age']>=66) & (dfiu['severity'].isin(['Major']))) , : ]['id_user'].unique() )
##
print '--- RRC/RRI direct computation ---'
# Gender shares of the cohort.
p_female = n_user_female/n_user
p_male = n_user_male/n_user
print 'P(u^[F]) = {:,.4f}'.format( (p_female) )
print 'P(u^[M]) = {:,.4f}'.format( (p_male) )
print
# Relative risk of co-administration (RRC) and of interaction (RRI) for
# females relative to males.
RRCF = (n_user_female_ij/n_user_female)/(n_user_male_ij/n_user_male)
RRIF = (n_females_qt1inter/n_user_female)/(n_males_qt1inter/n_user_male)
print 'RRC^[F] = ( |U^[c,F]| / |U^[F]| ) / ( |U^[c,M]| / |U^[M]| ) = ({:,d} / {:,d}) / ({:,d}/{:,d}) = {:,.4f}'.format( n_user_female_ij,n_user_female,n_user_male_ij,n_user_male,RRCF )
print 'RRI^[F] = ( |U^[c,F]| / |U^[F]| ) / ( |U^[c,M]| / |U^[M]| ) = ({:,d} / {:,d}) / ({:,d}/{:,d}) = {:,.4f}'.format( n_females_qt1inter,n_user_female,n_males_qt1inter,n_user_male,RRIF )
#print 'P(u^{i*}) = No Hormones'
#p_iNHf = n_females_qt1inter_nh/n_user
#p_iNHm = n_males_qt1inter_nh/n_user
#print 'P(I*>0|g=F) / P(I*>0|g=F) = {:,.4f} / {:,.4f} = {:,.4f}'.format( (p_iNHf/p_f) , (p_iNHm/p_m) , (p_iNHf/p_f)/(p_iNHm/p_m) )
# Load BNU
# City (Blumenau) population data, used as population-level denominators.
dfBnu = util.BnuData(age_per_gender=False)
city_pop = int(dfBnu['population'].sum())
city_pop_males = int(dfBnu['males'].sum())
city_pop_females = int(dfBnu['females'].sum())
# Columns 4:21 presumably hold the adult (20+) age-group counts — TODO confirm.
city_pop_adults = int(dfBnu.iloc[:,4:21].sum().sum())
# Load Censo
#dfCenso = util.dfCenso(age_per_gender=False)
#
# Overall Statistics
#
# Console report comparing the Pronto cohort against the city population.
print '--- Overall Statistics ---'
print "Blumenau population: {:,d}".format(city_pop)
print "Blumenau Males: {:,d}".format(city_pop_males)
print "Blumenau Females: {:,d}".format(city_pop_females)
print
print "Pronto population: {:,d} ({:.2%} of Blumenau)".format(n_user, n_user/city_pop)
print "Pronto males: {:,d} ({:.2%})".format(n_user_male, n_user_male/n_user)
print "Pronto females: {:,d} ({:.2%})".format(n_user_female, n_user_female/n_user)
print
print "Pronto adults (>=20) {:,d}".format(n_user_adult)
print "Unique drugs: {:,d}".format(n_i)
print "Unique drugs involved in DDI: {:,d}".format(n_i_inter)
print "Drugs intervals dispensed: {:,d}".format(n_a)
print "Co-administrations: {:,d}".format(n_ij)
print "Interactions: {:,d} ({:.2%})".format(n_ij_ddi, n_ij_ddi/n_ij )
print "Unique DDI pairs: {:,d}".format(n_ij_ddi_unique)
print "Unique DDI pairs (not hormones): {:,d}".format(n_ij_ddi_unique_nh)
print "Patients with 2+ drugs dispensed: {:,d} ({:.2%})".format(n_user_gt2drugs, n_user_gt2drugs/n_user)
print
print "Patients with 1+ co-administration: {:,d} ({:.2%})".format(n_user_gt1coadmin, n_user_gt1coadmin/n_user)
print "Male patients with 1+ co-administration: {:,d}, ({:.2%})".format(n_user_male_ij , n_user_male_ij/n_user_gt1coadmin)
print "Female patients with 1+ co-administration: {:,d}, ({:.2%})".format(n_user_female_ij , n_user_female_ij/n_user_gt1coadmin)
print
print "Patients with 1+ DDI: {:,d} ({:.2%} Pronto, {:.2%} Bnu)".format(n_user_ij_ddi, n_user_ij_ddi/n_user, n_user_ij_ddi/city_pop)
print "Male patients with 1+ DDI: {:,d} ({:.2%})".format(n_males_qt1inter, n_males_qt1inter/n_user_ij_ddi)
print "Female patients with 1+ DDI: {:,d} ({:.2%})".format(n_females_qt1inter, n_females_qt1inter/n_user_ij_ddi)
print
print "Adults patients (20+) with 1+ DDI: {:,d} ({:.2%} Pronto Adults, {:.2%} Bnu Adults/{:.2%} Pronto, {:.2%} Bnu)".format(n_user_adult_ij_ddi, n_user_adult_ij_ddi/n_user_adult, n_user_adult_ij_ddi/city_pop_adults, n_user_adult_ij_ddi/n_user, n_user_adult_ij_ddi/city_pop)
print "Adult patients (20+) with 1+ MAJOR DDI: {:,d} ({:.2%} Pronto Adults, {:.2%} Bnu Adults/{:.2%} Pronto, {:.2%} Bnu)".format(n_user_adult_ij_ddi_major, n_user_adult_ij_ddi_major/n_user_adult, n_user_adult_ij_ddi_major/city_pop_adults, n_user_adult_ij_ddi_major/n_user, n_user_adult_ij_ddi_major/city_pop)
print "Elderly patients (40+) with 1+ DDI: {:,d} ({:.2%} of patients with DDI, {:.2%} of 40+ patients)".format(n_user_ij_ddi_40p, n_user_ij_ddi_40p/n_user_ij_ddi, n_user_ij_ddi_40p/n_user_40p)
print "Elderly patients (65+) with 1+ MAJOR DDI: {:,d} ({:.2%} of 65+ patients)".format(n_user_ij_ddi_major_65p, n_user_ij_ddi_major_65p/n_user_65p)
#
# Education Stats (everyone)
#
print 'Education (everyone)'
dfEdu = dfu['education'].value_counts().to_frame()
dfEdu.sort_index(inplace=True)
# prob1/cumsum1 include every education category in the denominator.
dfEdu['prob1'] = dfEdu['education'] / dfEdu['education'].sum()
dfEdu['cumsum1'] = dfEdu['prob1'].cumsum()
#dfEdu = dfEdu.iloc[:-1,:]
# prob2/cumsum2 exclude the last category from the denominator
# (presumably an "unknown"/catch-all bucket — TODO confirm).
dfEdu['prob2'] = dfEdu['education'] / dfEdu.iloc[:-1,0].sum()
dfEdu['cumsum2'] = dfEdu['prob2'].cumsum()
print dfEdu.to_latex(escape=False)
print dfEdu.sum()
#
# Education stats (above 25 y-old)
#
# Same breakdown, restricted to patients aged 25 and over.
print 'Education (>25 yld)'
dfEdu = dfu.loc[ dfu['age']>=25 , 'education'].value_counts().to_frame()
dfEdu.sort_index(inplace=True)
dfEdu['prob1'] = dfEdu['education'] / dfEdu['education'].sum()
dfEdu['cumsum1'] = dfEdu['prob1'].cumsum()
#dfEdu = dfEdu.iloc[:-1,:]
dfEdu['prob2'] = dfEdu['education'] / dfEdu.iloc[:-1,0].sum()
dfEdu['cumsum2'] = dfEdu['prob2'].cumsum()
print dfEdu.to_latex(escape=False)
print dfEdu.sum()
#
# Age (Just Patient age distribution, nothing more)
#
print 'Age (distribution)'
dfAge = dfu['age_group'].value_counts().to_frame().sort_index()
dfAge['prob'] = dfAge['age_group'] / dfAge['age_group'].sum()
dfAge['cumsum'] = dfAge['prob'].cumsum()
print dfAge
print dfAge.sum()
#
# DDI per Severity
#
# Interaction counts and distinct-user counts per DDI severity class.
print '--- DDI per Severity ---'
dfi_s = dfi.groupby('severity').agg({'inter':'count','id_user': pd.Series.nunique})
dfi_s.rename(columns={'inter':'n_ij_ddi','id_user':'users'}, inplace=True) # RENAME id_usuario to users
dfi_s['i_per'] = dfi_s['n_ij_ddi'] / dfi_s['n_ij_ddi'].sum() * 100
dfi_s['u_per-pronto'] = dfi_s['users'] / n_user * 100
dfi_s['u_per-pop'] = dfi_s['users'] / city_pop * 100
#dfi_s = dfi_s.rename(index={'NONE':'None'})
columns = ['n_ij_ddi','i_per','users','u_per-pronto','u_per-pop']
print dfi_s.to_latex(columns=columns)
print dfi_s.sum(axis=0)
## Print summing None and *
# NOTE(review): dfi_s_ = dfi_s is an alias, so the new column below also
# mutates dfi_s.
dfi_s_ = dfi_s
dfi_s_['severity_s'] = pd.Categorical(['Major','Moderate','Minor','None','None'], ordered=True)
dfi_s_ = dfi_s_.groupby('severity_s').agg(sum)
print dfi_s_.to_latex(columns=columns)
## Print only for adult population
dfiu_s = dfiu.loc[ (dfiu['age']>=20), : ].groupby('severity').agg({'id_user':pd.Series.nunique})
dfiu_s.rename(columns={'id_user':'users'}, inplace=True) # RENAME id_usuario to users
dfiu_s['u_per-pronto-adult'] = dfiu_s['users'] / n_user_adult * 100
print dfiu_s.to_latex()
#print dfiu_s.head()
# Same aliasing note applies to dfiu_s_ / dfiu_s.
dfiu_s_ = dfiu_s
dfiu_s_['severity_s'] = pd.Categorical(['Major','Moderate','Minor','None','None'], ordered=True)
dfiu_s_ = dfiu_s_.groupby('severity_s').agg(sum)
print dfiu_s_.to_latex()
## Print summing Major-Moderate and Moderate-Minor
# Collapse adjacent severity classes into combined buckets and re-aggregate.
dfi_ = dfi[['severity','inter','id_user']].copy()
dfi_['severity'] = dfi_['severity'].cat.add_categories(['MajorModerate','ModerateMinor'])
dfi_majmod = dfi_.copy()
dfi_modmin = dfi_.copy()
dfi_majmod.loc[ (dfi_majmod['severity'].isin(['Major','Moderate'])) , 'severity'] = 'MajorModerate'
dfi_modmin.loc[ (dfi_modmin['severity'].isin(['Moderate','Minor'])) , 'severity'] = 'ModerateMinor'
dfi_majmod_s = dfi_majmod.groupby('severity').agg({'inter':'count','id_user':pd.Series.nunique})
dfi_modmin_s = dfi_modmin.groupby('severity').agg({'inter':'count','id_user':pd.Series.nunique})
dfi_majmod_s.rename(columns={'inter':'n_ij_ddi','id_user':'users'}, inplace=True) # RENAME id_usuario to users
dfi_modmin_s.rename(columns={'inter':'n_ij_ddi','id_user':'users'}, inplace=True) # RENAME id_usuario to users
dfi_majmod_s['i_per'] = dfi_majmod_s['n_ij_ddi'] / dfi_majmod_s['n_ij_ddi'].sum()
dfi_majmod_s['u_per-pronto'] = dfi_majmod_s['users'] / n_user * 100
dfi_majmod_s['u_per-pop'] = dfi_majmod_s['users'] / city_pop * 100
dfi_modmin_s['i_per'] = dfi_modmin_s['n_ij_ddi'] / dfi_modmin_s['n_ij_ddi'].sum()
dfi_modmin_s['u_per-pronto'] = dfi_modmin_s['users'] / n_user * 100
dfi_modmin_s['u_per-pop'] = dfi_modmin_s['users'] / city_pop * 100
print dfi_majmod_s.to_latex(columns=columns)
print dfi_modmin_s.to_latex(columns=columns)
## Print summing Major-Moderate and Moderate-Minor only for ADULTs
dfi_ = dfiu.loc[ (dfiu['age']>=20) , ['severity','id_user']].copy()
dfi_['severity'] = dfi_['severity'].cat.add_categories(['MajorModerate','ModerateMinor'])
dfi_majmod = dfi_.copy()
dfi_modmin = dfi_.copy()
dfi_majmod.loc[ (dfi_majmod['severity'].isin(['Major','Moderate'])) , 'severity'] = 'MajorModerate'
dfi_modmin.loc[ (dfi_modmin['severity'].isin(['Moderate','Minor'])) , 'severity'] = 'ModerateMinor'
dfi_majmod_s = dfi_majmod.groupby('severity').agg({'id_user':pd.Series.nunique})
dfi_modmin_s = dfi_modmin.groupby('severity').agg({'id_user':pd.Series.nunique})
dfi_majmod_s.rename(columns={'id_user':'users'}, inplace=True)
dfi_modmin_s.rename(columns={'id_user':'users'}, inplace=True)
dfi_majmod_s['u_per-pronto-adult'] = dfi_majmod_s['users'] / n_user_adult * 100
dfi_modmin_s['u_per-pronto-adult'] = dfi_modmin_s['users'] / n_user_adult * 100
print dfi_majmod_s.to_latex()
print dfi_modmin_s.to_latex()
#
# DDI per Gender
#
# Interaction and distinct-user counts per gender.
print '--- DDI per Gender ---'
dfi_g = dfiu.groupby('gender').agg({'inter':'count','id_user': pd.Series.nunique})
dfi_g.rename(columns={'inter':'n_ij_ddi','id_user':'users'}, inplace=True) # RENAME id_usuario to users
dfi_g['i_per'] = dfi_g['n_ij_ddi'] / dfi_g['n_ij_ddi'].sum()
dfi_g['u_per-pronto'] = dfi_g['users'] / n_user * 100
dfi_g['u_per-pop'] = dfi_g['users'] / city_pop * 100
columns = ['n_ij_ddi','i_per','users','u_per-pronto','u_per-pop']
print dfi_g.to_latex(columns=columns)
print dfi_g.sum(axis=0)
#
# DDI per Age
#
# DDI counts and distinct users with DDI per age group.
print '--- DDI per Age ---'
dfu_y = dfu.loc[ (dfu['n_ij_ddi']>0) , : ].reset_index().groupby('age_group').agg({'n_ij_ddi':'sum','id_user':pd.Series.nunique})
dfu_y.rename(columns={'id_user':'n_u_ddi'}, inplace=True) # RENAME id_usuario to users
dfu_y['i_per'] = dfu_y['n_ij_ddi'] / dfu_y['n_ij_ddi'].sum()
dfu_y['u_per-pronto'] = dfu_y['n_u_ddi'] / n_user * 100
dfu_y['u_per-city'] = dfu_y['n_u_ddi'] / city_pop * 100
print dfu_y[['n_ij_ddi','i_per','n_u_ddi','u_per-pronto','u_per-city']].to_latex(escape=False)
print dfu_y.sum(axis=0)
# Print summing None and *
# Merge the '*' and 'NONE' index labels into a single 'n/a' bucket.
dfu_y = dfu_y.rename(index={'*':'NONE'}).rename(index={'NONE':'n/a'})
print dfu_y.groupby(dfu_y.index).agg(sum)[['n_ij_ddi','i_per','n_u_ddi','u_per-pronto','u_per-city']].to_latex(escape=False)
#
# RRC/RRI per Gender
#
# Switch display to 4 decimal places for the risk-ratio tables.
pd.set_option('display.precision', 4)
pd.set_option('display.float_format', lambda x: '%.4f' % x)
print '--- RRC/RRI per Gender ---'
# u = all users; u^{n2} = 2+ drugs; u^{c} = 1+ co-admin; u^{i} = 1+ DDI.
dfR_g = pd.concat([
    dfu.reset_index().groupby('gender').agg({'id_user':pd.Series.nunique}).rename(columns={'id_user':'u'}),
    dfu.loc[ (dfu['n_i']>=2) , :].reset_index().groupby('gender', sort=False).agg({'id_user':pd.Series.nunique}).rename(columns={'id_user':'u^{n2}'}),
    dfu.loc[ (dfu['n_ij']>0) , :].reset_index().groupby('gender', sort=False).agg({'id_user':pd.Series.nunique}).rename(columns={'id_user':'u^{c}'}),
    dfu.loc[ (dfu['n_ij_ddi']>0) , :].reset_index().groupby('gender', sort=False).agg({'id_user':pd.Series.nunique}).rename(columns={'id_user':'u^{i}'})
    ], axis=1)
dfR_g.to_csv('csv/gender.csv', encoding='utf-8')
# Risk ratios relative to the male row.
dfR_g['RRC^{F}'] = (dfR_g['u^{c}'] / dfR_g['u']) / (dfR_g.loc['Male','u^{c}'] / dfR_g.loc['Male','u'])
dfR_g['RRI^{F}'] = (dfR_g['u^{i}'] / dfR_g['u']) / (dfR_g.loc['Male','u^{i}'] / dfR_g.loc['Male','u'])
print dfR_g.to_latex(escape=False)
#
# RRC/RRI per Severity and Gender
#
print '--- RRC/RRI per Severity & Gender ---'
dfR_gs = dfiu.groupby(['gender','severity']).agg({'id_user':pd.Series.nunique}).rename(columns={'id_user':'u'})
dfR_gs = dfR_gs.unstack(level=0)
# Flatten MultiIndex columns to LaTeX-style labels, e.g. u^{i,F}_{s}.
dfR_gs.columns = ['%s^{i,%s}_{s}' % (i,j[0]) for i,j in dfR_gs.columns.values]
dfR_gs['RRI^{F}_{s}'] = (dfR_gs['u^{i,F}_{s}'] / n_user_female) / ( dfR_gs['u^{i,M}_{s}'] / n_user_male)
print dfR_gs.to_latex(escape=False)
#
# RRC/RRI per Age
#
print '--- RRC/RRI per Age ---'
dfR_y = pd.concat([
    dfu.reset_index().groupby('age_group').agg({'id_user':pd.Series.nunique}).rename(columns={'id_user':'u'}),
    dfu.loc[ (dfu['n_i']>=2) , :].reset_index().groupby('age_group', sort=False).agg({'id_user':pd.Series.nunique}).rename(columns={'id_user':'u^{n2}'}),
    dfu.loc[ (dfu['n_ij']>0) , :].reset_index().groupby('age_group', sort=False).agg({'id_user':pd.Series.nunique}).rename(columns={'id_user':'u^{c}'}),
    dfu.loc[ (dfu['n_ij_ddi']>0) , :].reset_index().groupby('age_group', sort=False).agg({'id_user':pd.Series.nunique}).rename(columns={'id_user':'u^{i}'})
    ], axis=1)
# Make Short Table (Concatenating edges values)
# Rows 0:18 presumably cover age groups up to 89; the tail is collapsed
# into a single '90+' row — TODO confirm row split.
df00_89 = dfR_y.iloc[ 0:18, 0:4 ]
df00_89.index = df00_89.index.add_categories(['90+'])
df90_pl = dfR_y.iloc[ 18: , 0:4 ].sum(axis=0).to_frame(name='90+').T
dfRs_y = pd.concat([df00_89, df90_pl], axis=0)
dfR_y.to_csv('csv/age.csv', encoding='utf-8')
dfRs_y['RC^{y}'] = dfRs_y['u^{c}'] / dfRs_y['u^{n2}']
dfRs_y['RI^{y}'] = dfRs_y['u^{i}'] / dfRs_y['u^{c}']
print dfRs_y.to_latex(escape=False)
#dfRs_y.to_csv('csv/age_short.csv', encoding='utf-8')
#
# RRC/RRI per Age and Gender
#
print '--- RRC/RRI per Age and Gender ---'
# Same u / u^{n2} / u^{c} / u^{i} counts, broken down by (gender, age_group).
dfR_gy_u = dfu.reset_index().groupby(['gender','age_group'], sort=False).agg({'id_user':pd.Series.nunique}).astype(np.int64)
dfR_gy_u.rename(columns={'id_user':'u'}, inplace=True)
dfR_gy_n = dfu.loc[ (dfu['n_i']>=2) , :].reset_index().groupby(['gender','age_group'], sort=False).agg({'id_user':pd.Series.nunique}).astype(np.int64)
dfR_gy_n.rename(columns={'id_user':'u^{n2}'}, inplace=True)
dfR_gy_c = dfu.loc[ (dfu['n_ij']>0) , :].reset_index().groupby(['gender','age_group'], sort=False).agg({'id_user':pd.Series.nunique}).astype(np.int64)
dfR_gy_c.rename(columns={'id_user':'u^{c}'}, inplace=True)
dfR_gy_i = dfu.loc[ (dfu['n_ij_ddi']>=1) , :].reset_index().groupby(['gender','age_group'], sort=False).agg({'id_user':pd.Series.nunique}).astype(np.int64)
dfR_gy_i.rename(columns={'id_user':'u^{i}'}, inplace=True)
# Iterate the four frames gender-by-gender in lockstep and emit one
# short table (with a collapsed '90+' row) per gender.
for (gender,dftmp_u), (_,dftmp_n), (_, dftmp_c), (_,dftmp_i) in zip(dfR_gy_u.groupby(level=0), dfR_gy_n.groupby(level=0), dfR_gy_c.groupby(level=0), dfR_gy_i.groupby(level=0)):
    print gender
    dfR_gy = pd.concat([dftmp_u,dftmp_n,dftmp_c,dftmp_i], axis=1)
    dfR_gy.index = dfR_gy.index.droplevel(level=0)
    # Make Short Table (Concatenating edges values)
    df00_89 = dfR_gy.iloc[ 0:18, 0:4 ]
    df90_pl = dfR_gy.iloc[ 18: , 0:4 ].sum(axis=0).to_frame(name='90+').T
    dfRs_gy = pd.concat([df00_89, df90_pl], axis=0)
    dfR_gy.to_csv('csv/age_%s.csv' % (gender.lower()), encoding='utf-8')
    dfRs_gy['RC^{y}'] = dfRs_gy['u^{c}'] / dfRs_gy['u^{n2}']
    dfRs_gy['RI^{y}'] = dfRs_gy['u^{i}'] / dfRs_gy['u^{c}']
    print dfRs_gy.to_latex(escape=False)
# Statistical Test Males and Females distribution per age are different
ui_gy_m = dfR_gy_i.loc[ slice['Male',:] , 'u^{i}'].values
ui_gy_f = dfR_gy_i.loc[ slice['Female',:] , 'u^{i}'].values
tstat, pvalue = stats.chisquare(ui_gy_f, f_exp=ui_gy_m)
print 'Chi Square the two samples are independent'
print 't-stat: {:.4f}, p-value: {:.4f}'.format(tstat, pvalue)
KS, pvalue = stats.ks_2samp(ui_gy_m, ui_gy_f)
print 'Kolmogorov-Sminov statistic two samples came from the same continuos distribution'
print 't-stat: {:.4f}, p-value: {:.4f}'.format(KS, pvalue)
#
# RRC/RRI per Number of Unique Drugs
#
# Same user-count breakdown, keyed by number of unique drugs (n_i);
# missing cells are zero-filled so all columns are integer counts.
dfR = pd.concat([
    dfu.reset_index().groupby('n_i', sort=False).agg({'id_user':pd.Series.nunique}).rename(columns={'id_user':'u'}),
    dfu.loc[ (dfu['n_i']>=2) , :].reset_index().groupby('n_i', sort=False).agg({'id_user':pd.Series.nunique}).rename(columns={'id_user':'u^{n2}'}),
    dfu.loc[ (dfu['n_ij']>0) , :].reset_index().groupby('n_i', sort=False).agg({'id_user':pd.Series.nunique}).rename(columns={'id_user':'u^{c}'}),
    dfu.loc[ (dfu['n_ij_ddi']>=1) , :].reset_index().groupby('n_i', sort=False).agg({'id_user':pd.Series.nunique}).rename(columns={'id_user':'u^{i}'})
    ], axis=1).fillna(0).astype(np.int64)
# Make | |
= descr[:-2] + ")"
return descr
    def variance_bounds(self, resids: Float64Array, power: float = 2.0) -> Float64Array:
        # The `power` argument is accepted only for signature compatibility
        # with the base class; the bounds are always computed for self.power.
        return super().variance_bounds(resids, self.power)
def _generate_name(self) -> str:
q, power = self.q, self.power
if power == 2.0:
if q == 0:
return "FIARCH"
else:
return "FIGARCH"
elif power == 1.0:
if q == 0:
return "FIAVARCH"
else:
return "FIAVGARCH"
else:
if q == 0:
return "Power FIARCH (power: {0:0.1f})".format(self.power)
else:
return "Power FIGARCH (power: {0:0.1f})".format(self.power)
def bounds(self, resids: Float64Array) -> List[Tuple[float, float]]:
eps_half = np.sqrt(np.finfo(np.float64).eps)
v = np.mean(abs(resids) ** self.power)
bounds = [(0.0, 10.0 * float(v))]
bounds.extend([(0.0, 0.5)] * self.p) # phi
bounds.extend([(0.0, 1.0 - eps_half)]) # d
bounds.extend([(0.0, 1.0 - eps_half)] * self.q) # beta
return bounds
    def constraints(self) -> Tuple[Float64Array, Float64Array]:
        """Linear inequality constraints a @ params >= b for (omega, phi, d, beta).

        The rows of the full matrix encode:
            omega >= 0;  phi >= 0;  phi <= (1 - d) / 2;
            d >= 0;  d <= 1;  beta >= 0;  beta <= d + phi.
        Rows/columns are removed when q == 0 (no beta) or p == 0 (no phi).
        """
        # omega > 0 <- 1
        # 0 <= d <= 1 <- 2
        # 0 <= phi <= (1 - d) / 2 <- 2
        # 0 <= beta <= d + phi <- 2
        a = np.array(
            [
                [1, 0, 0, 0],
                [0, 1, 0, 0],
                [0, -2, -1, 0],
                [0, 0, 1, 0],
                [0, 0, -1, 0],
                [0, 0, 0, 1],
                [0, 1, 1, -1],
            ]
        )
        b = np.array([0, 0, -1, 0, -1, 0, 0])
        if not self.q:
            # No beta: drop its column and the two beta rows.
            a = a[:-2, :-1]
            b = b[:-2]
        if not self.p:
            # Drop column 1 and rows 1 and 2
            a = np.delete(a, (1,), axis=1)
            a = np.delete(a, (1, 2), axis=0)
            b = np.delete(b, (1, 2))
        return a, b
    def compute_variance(
        self,
        parameters: Float64Array,
        resids: Float64Array,
        sigma2: Float64Array,
        backcast: Union[float, Float64Array],
        var_bounds: Float64Array,
    ) -> Float64Array:
        """Fill `sigma2` in place with the model's conditional variances.

        The FIGARCH recursion runs on abs(resids) ** power; the recursion
        output is converted back to the variance scale via ** (2 / power).
        """
        # fresids is abs(resids) ** power
        power = self.power
        fresids = np.abs(resids) ** power
        p, q, truncation = self.p, self.q, self.truncation
        nobs = resids.shape[0]
        rec.figarch_recursion(
            parameters, fresids, sigma2, p, q, nobs, truncation, backcast, var_bounds
        )
        inv_power = 2.0 / power
        sigma2 **= inv_power
        return sigma2
    def backcast_transform(
        self, backcast: Union[float, Float64Array]
    ) -> Union[float, Float64Array]:
        # Base class produces a variance-scale backcast; convert it to the
        # |resid| ** power scale used by the FIGARCH recursion.
        backcast = super().backcast_transform(backcast)
        return np.sqrt(backcast) ** self.power
def backcast(self, resids: Float64Array) -> Union[float, Float64Array]:
power = self.power
tau = min(75, resids.shape[0])
w = 0.94 ** np.arange(tau)
w = w / sum(w)
backcast = float(np.sum((abs(resids[:tau]) ** power) * w))
return backcast
    def simulate(
        self,
        parameters: Union[Sequence[Union[int, float]], ArrayLike1D],
        nobs: int,
        rng: RNGType,
        burn: int = 500,
        initial_value: Union[None, float, Float64Array] = None,
    ) -> Tuple[Float64Array, Float64Array]:
        """Simulate (data, sigma2) from the process.

        Uses the truncated ARCH(oo) representation with `truncation` lagged
        weights; the first `truncation` observations are seeded with
        `initial_value` (or an implied unconditional value) and `burn`
        observations are discarded from the returned arrays.
        """
        parameters = ensure1d(parameters, "parameters", False)
        truncation = self.truncation
        p, q, power = self.p, self.q, self.power
        lam = rec.figarch_weights(parameters[1:], p, q, truncation)
        lam_rev = lam[::-1]
        errors = rng(truncation + nobs + burn)
        if initial_value is None:
            # Imply a starting value from omega and the weight persistence;
            # warn when the process is not covariance stationary.
            persistence = np.sum(lam)
            beta = parameters[-1] if q else 0.0
            initial_value = parameters[0]
            if beta < 1:
                initial_value /= 1 - beta
            if persistence < 1:
                initial_value /= 1 - persistence
            if persistence >= 1.0 or beta >= 1.0:
                warn(initial_value_warning, InitialValueWarning)
        assert initial_value is not None
        # f-prefixed arrays hold the power-transformed quantities.
        sigma2 = np.empty(truncation + nobs + burn)
        data = np.empty(truncation + nobs + burn)
        fsigma = np.empty(truncation + nobs + burn)
        fdata = np.empty(truncation + nobs + burn)
        fsigma[:truncation] = initial_value
        sigma2[:truncation] = initial_value ** (2.0 / power)
        data[:truncation] = np.sqrt(sigma2[:truncation]) * errors[:truncation]
        fdata[:truncation] = abs(data[:truncation]) ** power
        omega = parameters[0]
        beta = parameters[-1] if q else 0
        omega_tilde = omega / (1 - beta)
        # Main recursion on the transformed scale.
        for t in range(truncation, truncation + nobs + burn):
            fsigma[t] = omega_tilde + lam_rev.dot(fdata[t - truncation : t])
            sigma2[t] = fsigma[t] ** (2.0 / power)
            data[t] = errors[t] * np.sqrt(sigma2[t])
            fdata[t] = abs(data[t]) ** power
        return data[truncation + burn :], sigma2[truncation + burn :]
    def starting_values(self, resids: Float64Array) -> Float64Array:
        """Grid-search starting values for (omega, [phi], d, [beta]).

        Builds candidate (d, phi, beta) combinations, implies omega from the
        sample mean of abs(resids) ** power, and returns the candidate with
        the highest Gaussian log-likelihood.
        """
        truncation = self.truncation
        ds = [0.2, 0.5, 0.7]
        # Ratios relative to each parameter's theoretical upper bound.
        phi_ratio = [0.2, 0.5, 0.8] if self.p else [0]
        beta_ratio = [0.1, 0.5, 0.9] if self.q else [0]
        power = self.power
        target = np.mean(abs(resids) ** power)
        scale = np.mean(resids ** 2) / (target ** (2.0 / power))
        target *= scale ** (power / 2)
        all_starting_vals = []
        for d in ds:
            for pr in phi_ratio:
                # phi is bounded by (1 - d) / 2; beta by d + phi.
                phi = (1 - d) / 2 * pr
                for br in beta_ratio:
                    beta = (d + phi) * br
                    temp = [phi, d, beta]
                    lam = rec.figarch_weights(np.array(temp), 1, 1, truncation)
                    omega = (1 - beta) * target * (1 - np.sum(lam))
                    all_starting_vals.append((omega, phi, d, beta))
        # De-duplicate candidates, then drop unused beta/phi columns.
        distinct_svs = set(all_starting_vals)
        starting_vals = np.array(list(distinct_svs))
        if not self.q:
            starting_vals = starting_vals[:, :-1]
        if not self.p:
            starting_vals = np.c_[starting_vals[:, [0]], starting_vals[:, 2:]]
        var_bounds = self.variance_bounds(resids)
        backcast = self.backcast(resids)
        llfs = np.zeros(len(starting_vals))
        for i, sv in enumerate(starting_vals):
            llfs[i] = self._gaussian_loglikelihood(sv, resids, backcast, var_bounds)
        loc = np.argmax(llfs)
        return starting_vals[int(loc)]
def parameter_names(self) -> List[str]:
names = ["omega"]
if self.p:
names += ["phi"]
names += ["d"]
if self.q:
names += ["beta"]
return names
def _check_forecasting_method(
self, method: ForecastingMethod, horizon: int
) -> None:
if horizon == 1:
return
if method == "analytic" and self.power != 2.0:
raise ValueError(
"Analytic forecasts not available for horizon > 1 when power != 2"
)
return
    def _analytic_forecast(
        self,
        parameters: Float64Array,
        resids: Float64Array,
        backcast: Union[float, Float64Array],
        var_bounds: Float64Array,
        start: int,
        horizon: int,
    ) -> VarianceForecast:
        """Closed-form multi-step variance forecasts (requires power == 2).

        Iterates the truncated ARCH(oo) recursion on squared residuals,
        backfilling with `backcast` when fewer than `truncation` lags exist.
        """
        sigma2, forecasts = self._one_step_forecast(
            parameters, resids, backcast, var_bounds, horizon, start
        )
        if horizon == 1:
            return VarianceForecast(forecasts)
        truncation = self.truncation
        p, q = self.p, self.q
        lam = rec.figarch_weights(parameters[1:], p, q, truncation)
        lam_rev = lam[::-1]
        t = resids.shape[0]
        omega = parameters[0]
        beta = parameters[-1] if q else 0.0
        omega_tilde = omega / (1 - beta)
        # Scratch buffer: `truncation` known lags followed by `horizon` steps.
        temp_forecasts = np.empty(truncation + horizon)
        resids2 = resids ** 2
        for i in range(start, t):
            # Number of in-sample squared residuals usable as lags at time i.
            available = i + 1 - max(0, i - truncation + 1)
            temp_forecasts[truncation - available : truncation] = resids2[
                max(0, i - truncation + 1) : i + 1
            ]
            if available < truncation:
                # Pad the oldest lags with the backcast value.
                temp_forecasts[: truncation - available] = backcast
            for h in range(horizon):
                lagged_forecasts = temp_forecasts[h : truncation + h]
                temp_forecasts[truncation + h] = omega_tilde + lam_rev.dot(
                    lagged_forecasts
                )
            forecasts[i, :] = temp_forecasts[truncation:]
        forecasts[:start] = np.nan
        return VarianceForecast(forecasts)
    def _simulation_forecast(
        self,
        parameters: Float64Array,
        resids: Float64Array,
        backcast: Union[float, Float64Array],
        var_bounds: Float64Array,
        start: int,
        horizon: int,
        simulations: int,
        rng: RNGType,
    ) -> VarianceForecast:
        """Simulation-based multi-step variance forecasts.

        For each origin, draws `simulations` shock paths, propagates the
        truncated recursion on the power-transformed scale, and averages the
        simulated variances to obtain the forecast.
        """
        sigma2, forecasts = self._one_step_forecast(
            parameters, resids, backcast, var_bounds, horizon, start
        )
        t = resids.shape[0]
        paths = np.empty((t - start, simulations, horizon))
        shocks = np.empty((t - start, simulations, horizon))
        power = self.power
        truncation = self.truncation
        p, q = self.p, self.q
        lam = rec.figarch_weights(parameters[1:], p, q, truncation)
        lam_rev = lam[::-1]
        # NOTE(review): t was already assigned identically above.
        t = resids.shape[0]
        omega = parameters[0]
        beta = parameters[-1] if q else 0.0
        omega_tilde = omega / (1 - beta)
        # One transformed-scale buffer per simulation path.
        fpath = np.empty((simulations, truncation + horizon))
        fresids = np.abs(resids) ** power
        for i in range(start, t):
            std_shocks = rng((simulations, horizon))
            # In-sample transformed residuals available as lags at origin i.
            available = i + 1 - max(0, i - truncation + 1)
            fpath[:, truncation - available : truncation] = fresids[
                max(0, i + 1 - truncation) : i + 1
            ]
            if available < truncation:
                # Pad the oldest lags with the backcast value.
                fpath[:, : (truncation - available)] = backcast
            for h in range(horizon):
                # 1. Forecast transformed variance
                lagged_forecasts = fpath[:, h : truncation + h]
                temp = omega_tilde + lagged_forecasts.dot(lam_rev)
                # 2. Transform variance
                sigma2 = temp ** (2.0 / power)
                # 3. Simulate new residual
                path_loc = i - start
                shocks[path_loc, :, h] = std_shocks[:, h] * np.sqrt(sigma2)
                paths[path_loc, :, h] = sigma2
                forecasts[path_loc, h] = sigma2.mean()
                # 4. Transform new residual
                fpath[:, truncation + h] = np.abs(shocks[path_loc, :, h]) ** power
        return VarianceForecast(forecasts, paths, shocks)
class APARCH(VolatilityProcess, metaclass=AbstractDocStringInheritor):
r"""
Asymmetric Power ARCH (APARCH) volatility process
Parameters
----------
p : int
Order of the symmetric innovation. Must satisfy p>=o.
o : int
Order of the asymmetric innovation. Must satisfy o<=p.
q : int
Order of the lagged (transformed) conditional variance
delta : float, optional
Value to use for a fixed delta in the APARCH model. If
not provided, the value of delta is jointly estimated
with other model parameters. User provided delta is restricted
to lie in (0.05, 4.0).
common_asym : bool, optional
Restrict all asymmetry terms to share the same asymmetry
parameter. If False (default), then there are no | |
<filename>tests/unit/core/test_environment.py
# Copyright 2014 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import os
import subprocess
import unittest
import mock
from striker.common import utils
from striker.core import environment
import tests
class ExecResultTest(unittest.TestCase):
    """Tests for environment.ExecResult construction and truthiness."""

    # Shared fixture: a command containing spaces plus both quote styles, and
    # the shell-quoted text ExecResult is expected to derive from it.
    CMD = ['arg1', 'arg2 space', 'arg3"double', "arg4'single", 'arg5']
    CMD_TEXT = 'arg1 "arg2 space" "arg3\\"double" "arg4\'single" arg5'

    def _check_fields(self, result, stdout, stderr, return_code):
        # Field assertions common to all of the test_init_* cases.
        self.assertEqual(result.cmd, self.CMD)
        self.assertEqual(result.cmd_text, self.CMD_TEXT)
        self.assertEqual(result.stdout, stdout)
        self.assertEqual(result.stderr, stderr)
        self.assertEqual(result.return_code, return_code)

    def test_init_success(self):
        result = environment.ExecResult(self.CMD, None, None, 0)

        self._check_fields(result, None, None, 0)
        self.assertEqual(str(result), "'%s' succeeded" % self.CMD_TEXT)

    def test_init_stdout(self):
        result = environment.ExecResult(self.CMD, 'output', None, 0)

        self._check_fields(result, 'output', None, 0)
        self.assertEqual(str(result), "'%s' said: output" % self.CMD_TEXT)

    def test_init_stderr(self):
        result = environment.ExecResult(self.CMD, 'output', 'error', 0)

        self._check_fields(result, 'output', 'error', 0)
        self.assertEqual(str(result), "'%s' said: error" % self.CMD_TEXT)

    def test_init_failure(self):
        result = environment.ExecResult(self.CMD, 'output', 'error', 5)

        self._check_fields(result, 'output', 'error', 5)
        self.assertEqual(str(result),
                         "'%s' failed with return code 5" % self.CMD_TEXT)

    def test_true(self):
        # Return code 0 makes the result truthy.
        self.assertTrue(environment.ExecResult(['cmd'], None, None, 0))

    def test_false(self):
        # A non-zero return code makes the result falsy.
        self.assertFalse(environment.ExecResult(['cmd'], None, None, 1))
class EnvironmentTest(unittest.TestCase):
    @mock.patch.dict(os.environ, clear=True, TEST_VAR1='1', TEST_VAR2='2')
    @mock.patch.object(os, 'getcwd', return_value='/some/path')
    @mock.patch.object(environment.Environment, 'chdir')
    def test_init_base(self, mock_chdir, mock_getcwd):
        """Default construction copies os.environ and never calls chdir()."""
        env = environment.Environment('logger')
        self.assertEqual(env, {'TEST_VAR1': '1', 'TEST_VAR2': '2'})
        self.assertEqual(env.logger, 'logger')
        self.assertEqual(env.cwd, '/some/path')
        self.assertEqual(env.venv_home, None)
        self.assertFalse(mock_chdir.called)
    @mock.patch.dict(os.environ, clear=True, TEST_VAR1='1', TEST_VAR2='2')
    @mock.patch.object(os, 'getcwd', return_value='/some/path')
    @mock.patch.object(environment.Environment, 'chdir')
    def test_init_alt(self, mock_chdir, mock_getcwd):
        """Explicit environ/cwd/venv_home are honored and chdir() is called."""
        environ = {
            'TEST_VAR3': '3',
            'TEST_VAR4': '4',
        }
        env = environment.Environment('logger', environ, '/other/path',
                                      '/venv/home')
        self.assertEqual(env, environ)
        self.assertEqual(env.logger, 'logger')
        # cwd still reflects os.getcwd() because chdir is mocked out.
        self.assertEqual(env.cwd, '/some/path')
        self.assertEqual(env.venv_home, '/venv/home')
        mock_chdir.assert_called_once_with('/other/path')
    @mock.patch.dict(os.environ, clear=True, TEST_VAR1='1', TEST_VAR2='2')
    @mock.patch.object(os.path, 'join', tests.fake_join)
    @mock.patch.object(os, 'pathsep', ':')
    @mock.patch.object(os, 'getcwd', return_value='/some/path')
    @mock.patch.object(environment.Environment, 'chdir')
    @mock.patch.object(utils, 'canonicalize_path', return_value='/canon/path')
    @mock.patch.object(utils, 'backoff', return_value=[0])
    @mock.patch.object(subprocess, 'Popen', return_value=mock.Mock(**{
        'returncode': 0,
        'communicate.return_value': (None, None),
    }))
    def test_call_basic(self, mock_Popen, mock_backoff, mock_canonicalize_path,
                        mock_chdir, mock_getcwd):
        """Calling with an argument list runs Popen once in the current cwd."""
        logger = mock.Mock()
        env = environment.Environment(logger)
        result = env(['test', 'one', 'two'])
        self.assertEqual(result.cmd, ['test', 'one', 'two'])
        self.assertEqual(result.stdout, None)
        self.assertEqual(result.stderr, None)
        self.assertEqual(result.return_code, 0)
        self.assertFalse(mock_canonicalize_path.called)
        mock_backoff.assert_called_once_with(1)
        mock_Popen.assert_called_once_with(
            ['test', 'one', 'two'], env=env, cwd='/some/path', close_fds=True)
        logger.assert_has_calls([
            mock.call.debug(
                "Executing command: ['test', 'one', 'two'] (cwd /some/path)"),
        ])
        self.assertEqual(len(logger.method_calls), 1)
    @mock.patch.dict(os.environ, clear=True, TEST_VAR1='1', TEST_VAR2='2')
    @mock.patch.object(os, 'getcwd', return_value='/some/path')
    @mock.patch.object(environment.Environment, 'chdir')
    @mock.patch.object(utils, 'canonicalize_path', return_value='/canon/path')
    @mock.patch.object(utils, 'backoff', return_value=[0])
    @mock.patch.object(subprocess, 'Popen', return_value=mock.Mock(**{
        'returncode': 0,
        'communicate.return_value': (None, None),
    }))
    def test_call_string(self, mock_Popen, mock_backoff,
                         mock_canonicalize_path, mock_chdir, mock_getcwd):
        """A string command is split into a list and a notice is logged."""
        logger = mock.Mock()
        env = environment.Environment(logger)
        result = env("test one two")
        self.assertEqual(result.cmd, ['test', 'one', 'two'])
        self.assertEqual(result.stdout, None)
        self.assertEqual(result.stderr, None)
        self.assertEqual(result.return_code, 0)
        self.assertFalse(mock_canonicalize_path.called)
        mock_backoff.assert_called_once_with(1)
        mock_Popen.assert_called_once_with(
            ['test', 'one', 'two'], env=env, cwd='/some/path', close_fds=True)
        logger.assert_has_calls([
            mock.call.debug(
                "Notice: splitting command string 'test one two'"),
            mock.call.debug(
                "Executing command: ['test', 'one', 'two'] (cwd /some/path)"),
        ])
        self.assertEqual(len(logger.method_calls), 2)
    @mock.patch.dict(os.environ, clear=True, TEST_VAR1='1', TEST_VAR2='2')
    @mock.patch.object(os, 'getcwd', return_value='/some/path')
    @mock.patch.object(environment.Environment, 'chdir')
    @mock.patch.object(utils, 'canonicalize_path', return_value='/canon/path')
    @mock.patch.object(utils, 'backoff', return_value=[0])
    @mock.patch.object(subprocess, 'Popen', return_value=mock.Mock(**{
        'returncode': 0,
        'communicate.return_value': (None, None),
    }))
    def test_call_cwd(self, mock_Popen, mock_backoff, mock_canonicalize_path,
                      mock_chdir, mock_getcwd):
        """An explicit cwd= is canonicalized against the current directory."""
        logger = mock.Mock()
        env = environment.Environment(logger)
        result = env(['test', 'one', 'two'], cwd='/other/path')
        self.assertEqual(result.cmd, ['test', 'one', 'two'])
        self.assertEqual(result.stdout, None)
        self.assertEqual(result.stderr, None)
        self.assertEqual(result.return_code, 0)
        mock_canonicalize_path.assert_called_once_with(
            '/some/path', '/other/path')
        mock_backoff.assert_called_once_with(1)
        mock_Popen.assert_called_once_with(
            ['test', 'one', 'two'], env=env, cwd='/canon/path', close_fds=True)
        logger.assert_has_calls([
            mock.call.debug(
                "Executing command: ['test', 'one', 'two'] (cwd /canon/path)"),
        ])
        self.assertEqual(len(logger.method_calls), 1)
    @mock.patch.dict(os.environ, clear=True, TEST_VAR1='1', TEST_VAR2='2')
    @mock.patch.object(os, 'getcwd', return_value='/some/path')
    @mock.patch.object(environment.Environment, 'chdir')
    @mock.patch.object(utils, 'canonicalize_path', return_value='/canon/path')
    @mock.patch.object(utils, 'backoff', return_value=[0])
    @mock.patch.object(subprocess, 'Popen', return_value=mock.Mock(**{
        'returncode': 0,
        'communicate.return_value': ('output', 'error'),
    }))
    def test_call_capture(self, mock_Popen, mock_backoff,
                          mock_canonicalize_path, mock_chdir, mock_getcwd):
        """capture_output=True pipes stdout/stderr and stores both streams."""
        logger = mock.Mock()
        env = environment.Environment(logger)
        result = env(['test', 'one', 'two'], capture_output=True)
        self.assertEqual(result.cmd, ['test', 'one', 'two'])
        self.assertEqual(result.stdout, 'output')
        self.assertEqual(result.stderr, 'error')
        self.assertEqual(result.return_code, 0)
        self.assertFalse(mock_canonicalize_path.called)
        mock_backoff.assert_called_once_with(1)
        mock_Popen.assert_called_once_with(
            ['test', 'one', 'two'], env=env, cwd='/some/path', close_fds=True,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        logger.assert_has_calls([
            mock.call.debug(
                "Executing command: ['test', 'one', 'two'] (cwd /some/path)"),
        ])
        self.assertEqual(len(logger.method_calls), 1)
@mock.patch.dict(os.environ, clear=True, TEST_VAR1='1', TEST_VAR2='2')
@mock.patch.object(os, 'getcwd', return_value='/some/path')
@mock.patch.object(environment.Environment, 'chdir')
@mock.patch.object(utils, 'canonicalize_path', return_value='/canon/path')
@mock.patch.object(utils, 'backoff', return_value=[0])
@mock.patch.object(subprocess, 'Popen', return_value=mock.Mock(**{
'returncode': 1,
'communicate.return_value': (None, None),
}))
def test_call_failure_raise(self, mock_Popen, mock_backoff,
mock_canonicalize_path, mock_chdir,
mock_getcwd):
logger = mock.Mock()
env = environment.Environment(logger)
try:
result = env(['test', 'one', 'two'])
except environment.ExecResult as exc:
self.assertEqual(exc.cmd, ['test', 'one', 'two'])
self.assertEqual(exc.stdout, None)
self.assertEqual(exc.stderr, None)
self.assertEqual(exc.return_code, 1)
else:
self.fail("Expected ExecResult to be raised")
self.assertFalse(mock_canonicalize_path.called)
mock_backoff.assert_called_once_with(1)
mock_Popen.assert_called_once_with(
['test', 'one', 'two'], env=env, cwd='/some/path', close_fds=True)
logger.assert_has_calls([
mock.call.debug(
"Executing command: ['test', 'one', 'two'] (cwd /some/path)"),
])
self.assertEqual(len(logger.method_calls), 1)
    @mock.patch.dict(os.environ, clear=True, TEST_VAR1='1', TEST_VAR2='2')
    @mock.patch.object(os, 'getcwd', return_value='/some/path')
    @mock.patch.object(environment.Environment, 'chdir')
    @mock.patch.object(utils, 'canonicalize_path', return_value='/canon/path')
    @mock.patch.object(utils, 'backoff', return_value=[0])
    @mock.patch.object(subprocess, 'Popen', return_value=mock.Mock(**{
        'returncode': 1,
        'communicate.return_value': (None, None),
    }))
    def test_call_failure_noraise(self, mock_Popen, mock_backoff,
                                  mock_canonicalize_path, mock_chdir,
                                  mock_getcwd):
        """With do_raise=False a failing command returns its ExecResult."""
        logger = mock.Mock()
        env = environment.Environment(logger)
        result = env(['test', 'one', 'two'], do_raise=False)
        self.assertEqual(result.cmd, ['test', 'one', 'two'])
        self.assertEqual(result.stdout, None)
        self.assertEqual(result.stderr, None)
        self.assertEqual(result.return_code, 1)
        self.assertFalse(mock_canonicalize_path.called)
        mock_backoff.assert_called_once_with(1)
        mock_Popen.assert_called_once_with(
            ['test', 'one', 'two'], env=env, cwd='/some/path', close_fds=True)
        logger.assert_has_calls([
            mock.call.debug(
                "Executing command: ['test', 'one', 'two'] (cwd /some/path)"),
        ])
        self.assertEqual(len(logger.method_calls), 1)
    @mock.patch.dict(os.environ, clear=True, TEST_VAR1='1', TEST_VAR2='2')
    @mock.patch.object(os, 'getcwd', return_value='/some/path')
    @mock.patch.object(environment.Environment, 'chdir')
    @mock.patch.object(utils, 'canonicalize_path', return_value='/canon/path')
    @mock.patch.object(utils, 'backoff', return_value=[0])
    @mock.patch.object(subprocess, 'Popen', return_value=mock.Mock(**{
        'returncode': 0,
        'communicate.return_value': ('output', 'error'),
    }))
    def test_call_retry_success(self, mock_Popen, mock_backoff,
                                mock_canonicalize_path, mock_chdir,
                                mock_getcwd):
        """With retry= set, output is piped, backoff uses the default of 5
        tries, and the retry callback is never consulted on success."""
        logger = mock.Mock()
        env = environment.Environment(logger)
        retry = mock.Mock(return_value=True)
        result = env(['test', 'one', 'two'], retry=retry)
        self.assertEqual(result.cmd, ['test', 'one', 'two'])
        self.assertEqual(result.stdout, 'output')
        self.assertEqual(result.stderr, 'error')
        self.assertEqual(result.return_code, 0)
        self.assertFalse(mock_canonicalize_path.called)
        mock_backoff.assert_called_once_with(5)
        mock_Popen.assert_called_once_with(
            ['test', 'one', 'two'], env=env, cwd='/some/path', close_fds=True,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        logger.assert_has_calls([
            mock.call.debug(
                "Executing command: ['test', 'one', 'two'] (cwd /some/path)"),
        ])
        self.assertEqual(len(logger.method_calls), 1)
        self.assertFalse(retry.called)
    @mock.patch.dict(os.environ, clear=True, TEST_VAR1='1', TEST_VAR2='2')
    @mock.patch.object(os, 'getcwd', return_value='/some/path')
    @mock.patch.object(environment.Environment, 'chdir')
    @mock.patch.object(utils, 'canonicalize_path', return_value='/canon/path')
    @mock.patch.object(utils, 'backoff', return_value=[0])
    @mock.patch.object(subprocess, 'Popen', return_value=mock.Mock(**{
        'returncode': 0,
        'communicate.return_value': (None, None),
    }))
    def test_call_retry_success_badretries(self, mock_Popen, mock_backoff,
                                           mock_canonicalize_path, mock_chdir,
                                           mock_getcwd):
        """A non-positive max_tries disables retrying: backoff falls back to 1
        and stdout/stderr are not piped."""
        logger = mock.Mock()
        env = environment.Environment(logger)
        retry = mock.Mock(return_value=True)
        result = env(['test', 'one', 'two'], retry=retry, max_tries=-1)
        self.assertEqual(result.cmd, ['test', 'one', 'two'])
        self.assertEqual(result.stdout, None)
        self.assertEqual(result.stderr, None)
        self.assertEqual(result.return_code, 0)
        self.assertFalse(mock_canonicalize_path.called)
        mock_backoff.assert_called_once_with(1)
        mock_Popen.assert_called_once_with(
            ['test', 'one', 'two'], env=env, cwd='/some/path', close_fds=True)
        logger.assert_has_calls([
            mock.call.debug(
                "Executing command: ['test', 'one', 'two'] (cwd /some/path)"),
        ])
        self.assertEqual(len(logger.method_calls), 1)
        self.assertFalse(retry.called)
    @mock.patch.dict(os.environ, clear=True, TEST_VAR1='1', TEST_VAR2='2')
    @mock.patch.object(os, 'getcwd', return_value='/some/path')
    @mock.patch.object(environment.Environment, 'chdir')
    @mock.patch.object(utils, 'canonicalize_path', return_value='/canon/path')
    @mock.patch.object(utils, 'backoff', return_value=[0, 1, 2, 3, 4, 5, 6])
    @mock.patch.object(subprocess, 'Popen', return_value=mock.Mock(**{
        'returncode': 0,
        'communicate.return_value': ('output', 'error'),
    }))
    def test_call_retry_withtries(self, mock_Popen, mock_backoff,
                                  mock_canonicalize_path, mock_chdir,
                                  mock_getcwd):
        """Two falsy ExecResults trigger two retries; the third (truthy)
        result is returned and each retry is logged."""
        logger = mock.Mock()
        env = environment.Environment(logger)
        retry = mock.Mock(return_value=True)
        # Falsy results simulate failures; __nonzero__ covers Python 2 and
        # __bool__ covers Python 3 truthiness.
        exec_results = [
            mock.Mock(__nonzero__=mock.Mock(return_value=False),
                      __bool__=mock.Mock(return_value=False)),
            mock.Mock(__nonzero__=mock.Mock(return_value=False),
                      __bool__=mock.Mock(return_value=False)),
            mock.Mock(__nonzero__=mock.Mock(return_value=True),
                      __bool__=mock.Mock(return_value=True)),
        ]
        with mock.patch.object(environment, 'ExecResult',
                               side_effect=exec_results) as mock_ExecResult:
            result = env(['test', 'one', 'two'], retry=retry, max_tries=7)
        self.assertEqual(result, exec_results[-1])
        self.assertFalse(mock_canonicalize_path.called)
        mock_backoff.assert_called_once_with(7)
        mock_Popen.assert_has_calls([
            mock.call(['test', 'one', 'two'], env=env, cwd='/some/path',
                      close_fds=True, stdout=subprocess.PIPE,
                      stderr=subprocess.PIPE),
            mock.call(['test', 'one', 'two'], env=env, cwd='/some/path',
                      close_fds=True, stdout=subprocess.PIPE,
                      stderr=subprocess.PIPE),
            mock.call(['test', 'one', 'two'], env=env, cwd='/some/path',
                      close_fds=True, stdout=subprocess.PIPE,
                      stderr=subprocess.PIPE),
        ])
        self.assertEqual(mock_Popen.call_count, 3)
        mock_ExecResult.assert_has_calls([
            mock.call(['test', 'one', 'two'], 'output', 'error', 0),
            mock.call(['test', 'one', 'two'], 'output', 'error', 0),
            mock.call(['test', 'one', 'two'], 'output', 'error', 0),
        ])
        self.assertEqual(mock_ExecResult.call_count, 3)
        logger.assert_has_calls([
            mock.call.debug(
                "Executing command: ['test', 'one', 'two'] (cwd /some/path)"),
            mock.call.warn('Failure caught; retrying command (try #2)'),
            mock.call.warn('Failure caught; retrying command (try #3)'),
        ])
        self.assertEqual(len(logger.method_calls), 3)
        retry.assert_has_calls([mock.call(res) for res in exec_results[:-1]])
        self.assertEqual(retry.call_count, len(exec_results) - 1)
    @mock.patch.dict(os.environ, clear=True, TEST_VAR1='1', TEST_VAR2='2')
    @mock.patch.object(os, 'getcwd', return_value='/some/path')
    @mock.patch.object(environment.Environment, 'chdir')
    @mock.patch.object(utils, 'canonicalize_path', return_value='/canon/path')
    @mock.patch.object(utils, 'backoff', return_value=[0, 1])
    @mock.patch.object(subprocess, 'Popen', return_value=mock.Mock(**{
        'returncode': 0,
        'communicate.return_value': ('output', 'error'),
    }))
    def test_call_retry_withtries_failure(self, mock_Popen, mock_backoff,
                                          mock_canonicalize_path, mock_chdir,
                                          mock_getcwd):
        """When max_tries is exhausted before a truthy result, the last
        (still falsy) result is returned and the give-up is logged."""
        logger = mock.Mock()
        env = environment.Environment(logger)
        retry = mock.Mock(return_value=True)
        # Falsy results simulate failures; the third (truthy) one is never
        # reached because max_tries=2 allows only two attempts.
        exec_results = [
            mock.Mock(__nonzero__=mock.Mock(return_value=False),
                      __bool__=mock.Mock(return_value=False)),
            mock.Mock(__nonzero__=mock.Mock(return_value=False),
                      __bool__=mock.Mock(return_value=False)),
            mock.Mock(__nonzero__=mock.Mock(return_value=True),
                      __bool__=mock.Mock(return_value=True)),
        ]
        with mock.patch.object(environment, 'ExecResult',
                               side_effect=exec_results) as mock_ExecResult:
            result = env(['test', 'one', 'two'], retry=retry, max_tries=2,
                         do_raise=False)
        self.assertEqual(result, exec_results[-2])
        self.assertFalse(mock_canonicalize_path.called)
        mock_backoff.assert_called_once_with(2)
        mock_Popen.assert_has_calls([
            mock.call(['test', 'one', 'two'], env=env, cwd='/some/path',
                      close_fds=True, stdout=subprocess.PIPE,
                      stderr=subprocess.PIPE),
            mock.call(['test', 'one', 'two'], env=env, cwd='/some/path',
                      close_fds=True, stdout=subprocess.PIPE,
                      stderr=subprocess.PIPE),
        ])
        self.assertEqual(mock_Popen.call_count, 2)
        mock_ExecResult.assert_has_calls([
            mock.call(['test', 'one', 'two'], 'output', 'error', 0),
            mock.call(['test', 'one', 'two'], 'output', 'error', 0),
        ])
        self.assertEqual(mock_ExecResult.call_count, 2)
        logger.assert_has_calls([
            mock.call.debug(
                "Executing command: ['test', 'one', 'two'] (cwd /some/path)"),
            mock.call.warn('Failure caught; retrying command (try #2)'),
            mock.call.warn('Unable to retry: too many attempts'),
        ])
        self.assertEqual(len(logger.method_calls), 3)
        # Only the first retry call is checked explicitly; the call_count
        # assertion below covers the second consultation.
        retry.assert_has_calls([mock.call(res) for res in exec_results[:-2]])
        self.assertEqual(retry.call_count, len(exec_results) - 1)
@mock.patch.dict(os.environ, clear=True, TEST_VAR1='1', TEST_VAR2='2')
@mock.patch.object(os, 'getcwd', return_value='/some/path')
@mock.patch.object(utils, 'canonicalize_path', return_value='/canon/path')
def test_chdir(self, mock_canonicalize_path, mock_getcwd):
with | |
<filename>data/utils.py
import requests
import re
import logging
import pytz
from time import sleep
from bs4 import BeautifulSoup
from django.conf import settings
from collections import deque
from dateutil.parser import parse
from datetime import datetime, timedelta
from .models import (Terminal, Route, Ferry, Sailing, Destination, Status,
SailingEvent, ArrivalTimeEvent, ArrivedEvent, StatusEvent,
FerryEvent, DepartureTimeEvent, DepartedEvent,
PercentFullEvent, CarWaitEvent, OversizeWaitEvent,
InPortEvent, UnderWayEvent, OfflineEvent, HeadingEvent,
DestinationEvent, StoppedEvent, CancelledEvent,
ParkingEvent, CarPercentFullEvent,
OversizePercentFullEvent, Amenity)
from collector.models import (ConditionsRun, DeparturesRun, LocationsRun,
SailingDetailRun, ConditionsRawHTML, DeparturesRawHTML,
LocationsRawHTML)
logger = logging.getLogger(__name__)
timezone = pytz.timezone("America/Vancouver")
def get_local_time(timestamp):
    """Format a naive *timestamp* as HH:MM in the America/Vancouver zone."""
    localized = timezone.localize(timestamp)
    return localized.strftime("%H:%M")
def get_local_day():
    """Return today's date in the America/Vancouver zone as YYYY-MM-DD."""
    now_local = datetime.now().astimezone(timezone)
    return now_local.strftime("%Y-%m-%d")
def median_value(queryset, term):
    """Return the median of field *term* across *queryset*.

    :param queryset: a Django queryset (anything exposing ``count()`` and
                     ``values_list(term, flat=True).order_by(term)``)
    :param term: name of the field to take the median of
    :returns: the middle value for an odd count, or the mean of the two
              middle values for an even count
    """
    count = queryset.count()
    values = queryset.values_list(term, flat=True).order_by(term)
    if count % 2 == 1:
        # Middle element of the sorted values.  The original used
        # int(round(count / 2)), which banker's rounding sends to the wrong
        # index (e.g. count == 3 gave index 2); floor division is correct.
        return values[count // 2]
    else:
        # Mean of the two middle elements.  Under Python 3, count / 2 is a
        # float and using it as a slice index raises TypeError, so use
        # integer division.
        return sum(values[count // 2 - 1:count // 2 + 1]) / 2.0
def get_actual_departures(input_file: str=None) -> bool:
""" Pull data from actualDepartures.asp page on the BC Ferries website.
This will query https://orca.bcferries.com/cc/marqui/actualDepartures.asp
and parse it. This page is an overview of all the routes BC Ferries
provides information for.
:param input_file: optional local file to read from
:type input_file: str
:returns: whether it succeeded or failed
:rtype: bool
"""
# Start a DeparturesRun
run = DeparturesRun()
# Build the URL to pull data from
url = "{}/{}".format(settings.BCF_BASE_URL, "actualDepartures.asp")
if input_file:
# If an input file is given, read from that instead
fp = open(input_file, 'r')
data = fp.read()
else:
try:
# Request page
logger.info("Querying BCF for data...")
response = requests.get(url)
if response.status_code == 200:
logger.debug("Successfully queried BCF for data")
data = response.text
else:
# Something went wrong - log that it went wrong and return
# TODO - fixup status
logger.error("Could not retrieve details from the BC Ferries website: {}".format(response.status_code))
run.status = "Could not retrieve details from the BC Ferries website (non-200 status code)"
run.successful = False
run.save()
return False
except:
# TODO - replace bare except
logger.error("Could not retrieve details from the BC Ferries website.")
run.status = "Could not retrieve details from the BC Ferries website (unknown reason)"
run.successful = False
run.save()
return False
# Data retrieved
run.set_status("Data retrieved from BCF")
raw_html = DeparturesRawHTML(
run=run,
data=data
)
raw_html.save()
# Load data into the BeautifulSoup parser
b = BeautifulSoup(data, 'html.parser')
# Parse out the routes
# Create a deque object containing the routes and sailings
routes = deque(
b.find('td', class_='content').find('table').find_all('table')
)
# Parse out the current date
date = b.find('span', class_='titleSmInv').text
routes_list = []
terminals = {}
# Iterate over the routes
while len(routes) != 0:
# Pop off the next route
route = routes.popleft()
# Parse out the route name and the sailing time
route_name, sailing_time = route.span.decode_contents().split('<br/>')
# Parse out the source, destination, the source code and route code
source, destination = re.search(r'(.*) to (.*)', route_name).groups()
source_code, route_code = re.search(r'#([A-Z]+)(\d+)', route.find_previous_sibling().attrs['name']).groups()
# Parse out the sailing time
sailing_time = re.search("Sailing time: (.*)", sailing_time).groups()[0]
# Some sailings (i.e. those to the Gulf Islands) are variable as BC Ferries doesn't provide live details for
# them. For these, we can't get a duration - so skip them.
if sailing_time != "Variable":
# Sailing time is fixed, so parse out the hours and minutes for the duration
times = re.search(r'(([0-9]+) hours?\s?)?(([0-9]+) minutes)?.*', sailing_time).groups()
# Convert minutes to an integer
if times[3]:
minutes = int(times[3])
else:
minutes = 0
# Convert hours to minutes
if times[1]:
minutes += int(times[1]) * 60
logger.debug("Minutes: {}".format(minutes))
# Add a mapping of terminal name to terminal source code
terminals[source] = source_code
# Pop off the sailings
sailings = routes.popleft()
# Create a dict with the details we've parsed out so far
# TODO - minutes could potentially be unassigned
route_details = {
"source_code": source_code,
"source": source,
"destination": destination,
"route_name": route_name,
"route_code": route_code,
"sailing_time": minutes
}
sailings_list = []
# Iterate over the sailings
for sailing in sailings.find_all('tr')[1:]:
# Parse out the ferry, the scheduled departure time, the actual departure time, the ETA (or arrival time)
# and the current status for this sailing
try:
ferry, scheduled, actual, eta_arrival, status = [td.text.strip() for td in sailing.find_all('td')]
except ValueError as e:
logger.error("Couldn't parse out sailing details: {}".format(e))
logger.error("Tried parsing: {}".format(sailing.find_all('td')))
run.info = sailing.find_all('td')
run.set_status("Failed during sailing parsing", False)
return False
# Append the parsed details as a dict to our sailings list
sailings_list.append({
"ferry": ferry,
"scheduled": scheduled,
"actual": actual,
"eta_or_arrival": eta_arrival,
"status": status
})
route_details.update({"sailings": sailings_list})
routes_list.append(route_details)
# At this point, we've successfully parsed out the data from the HTML
run.set_status("Data parsed")
# Iterate over each route
for route in routes_list:
logger.debug("--- Parsing new route ---")
# Get the source and destination name, and the source code
destination_name = route['destination']
source_name = route['source']
source_code = route['source_code']
# TODO - sailing_time is from above, probably shouldn't be here
logger.debug("Sailing time is '{}'".format(sailing_time))
# Get or create the Terminal object for the source
source_o, created = Terminal.objects.get_or_create(
name=source_name,
short_name=source_code
)
# Log if we found or created a new Terminal object
if created:
logger.info("Created terminal {} for {}".format(
source_code, source_name
))
else:
logger.debug("Found terminal {} for {}".format(
source_o.name, source_o.short_name
))
# See if the destination is a terminal, or just a description
if destination_name not in terminals:
logger.debug("{} not found in terminal list".format(destination_name))
# Create Destination object without an associated terminal
dest_o, created = Destination.objects.get_or_create(
name=destination_name
)
# Log if we found or created a new Destination object
if created:
logger.info("Created destination for {}".format(
destination_name
))
else:
logger.debug("Found destination for {}".format(
dest_o.name
))
else:
# Get or create the Terminal object for the destination
destination_o, created = Terminal.objects.get_or_create(
name=destination_name,
short_name=terminals[destination_name]
)
# Log if we found or created a new Terminal object for the destination
if created:
logger.info("Created terminal {} for {}".format(
destination_name, terminals[destination_name]
))
else:
logger.debug("Found terminal {} for {}".format(
destination_o.name, destination_o.short_name
))
# Create Destination object (different to the actual Terminal
# object for the destination)
dest_o, created = Destination.objects.get_or_create(
name=destination_name,
terminal=destination_o
)
# Log if we found or created a new Destination object
if created:
logger.info("Created destination for {} ({})".format(
destination_name, destination_o
))
else:
logger.debug("Found destination for {} ({})".format(
dest_o.name, dest_o.terminal
))
# Get the route code
route_code = route['route_code']
# Get or create a Route object for the route
route_o, created = Route.objects.get_or_create(
name=route['route_name'],
source=source_o,
destination=dest_o,
route_code=route_code
)
# Log if we found or created a new Route object
if created:
logger.info("Created route {} ({} -> {})".format(
route_o.route_code, route_o.source, route_o.destination
))
else:
logger.debug("Found route {} ({} -> {})".format(
route_o.route_code, route_o.source, route_o.destination
))
if not route_o.duration and route['sailing_time']:
# We didn't previously have a duration for this route, but we do now - so update the Route object with it
logger.debug("Setting sailing time to {}".format(route['sailing_time']))
route_o.duration = route['sailing_time']
route_o.save()
# Iterate over the sailings for this route
for sailing in route['sailings']:
logger.debug(">>>>>> Parsing new sailing")
ferry = sailing['ferry']
scheduled_departure = sailing['scheduled']
actual_departure = sailing['actual']
eta_or_arrival = sailing['eta_or_arrival']
status = sailing['status']
# Get or create a Ferry object for this sailing's ferry
ferry_o, created = Ferry.objects.get_or_create(
name=ferry
)
# Log if we found or created a new Ferry object
if created:
logger.info("Created ferry {}".format(ferry))
else:
logger.debug("Found ferry {}".format(ferry))
# Parse and convert the scheduled departure time
# TODO - this shouldn't reuse the same name - it's ugly
sched = parse("{} {}".format(date, scheduled_departure))
sched = timezone.localize(sched)
if actual_departure:
# This sailing has left, so parse and convert the acual departure time...
actual = parse("{} {}".format(date, actual_departure))
actual = timezone.localize(actual)
# ...and set the sailing to departed
departed = True
else:
# No actual departure time found
logger.debug("No actual departure time for this sailing")
actual = None
departed = False
# Usefully (not), the ETA/arrival time can be '...' (unknown). We need to handle this gracefully
if eta_or_arrival and eta_or_arrival != '...':
if 'ETA' in eta_or_arrival:
# We have an ETA, so parse it out
eta = re.search(r'ETA: (.*)', eta_or_arrival).groups()[0]
eta_or_arrival = parse("{} {}".format(date, eta))
eta_or_arrival = timezone.localize(eta_or_arrival)
logger.debug("ETA for this sailing is {}".format(eta_or_arrival))
# Since we have an ETA, this means this sailing hasn't arrived yet
arrived = False
else:
# No ETA but an arrival time means this sailing has arrived
eta_or_arrival = parse("{} {}".format(date, eta_or_arrival))
eta_or_arrival = | |
* coupling_dict[id2.replace("E","P") + id3.replace("E","P")]
analytic_cov[i * n_bins:(i + 1) * n_bins, j * n_bins:(j + 1) * n_bins] = bin_mat(M, binning_file, lmax)
analytic_cov = np.triu(analytic_cov) + np.tril(analytic_cov.T, -1)
mbb_inv_ab = extract_TTTEEE_mbb(mbb_inv_ab)
mbb_inv_cd = extract_TTTEEE_mbb(mbb_inv_cd)
analytic_cov = np.dot(np.dot(mbb_inv_ab, analytic_cov), mbb_inv_cd.T)
return analytic_cov
def extract_TTTEEE_mbb(mbb_inv):
    """Reduce the inverse spin-0/2 mode coupling matrix to its T/E part.

    The mode coupling matrix is computed for T, E and B, but for now the
    analytic covariance only covers T and E (B has important E->B leakage),
    so we keep the TT, TE, ET and EE diagonal blocks and invert the
    reduced matrix.

    Parameters
    ----------
    mbb_inv: 2d array
        the inverse spin0 and 2 mode coupling matrix
    """
    full = np.linalg.inv(so_mcm.coupling_dict_to_array(mbb_inv))
    nbins = int(full.shape[0] / 9)
    reduced = np.zeros((4 * nbins, 4 * nbins))
    # Positions of the TT, TE, ET and EE blocks inside the 9x9 block layout.
    for dst, src in enumerate((0, 1, 3, 5)):
        rows = slice(dst * nbins, (dst + 1) * nbins)
        cols = slice(src * nbins, (src + 1) * nbins)
        reduced[rows, rows] = full[cols, cols]
    return np.linalg.inv(reduced)
def extract_EEEBBB_mbb(mbb_inv):
    """Reduce the inverse spin-0/2 mode coupling matrix to its E/B part.

    Keeps the EE, EB, BE and BB diagonal blocks together with the EE-BB and
    BB-EE leakage blocks, then inverts the reduced matrix.

    Parameters
    ----------
    mbb_inv: 2d array
        the inverse spin0 and 2 mode coupling matrix
    """
    full = np.linalg.inv(so_mcm.coupling_dict_to_array(mbb_inv))
    nbins = int(full.shape[0] / 9)
    reduced = np.zeros((4 * nbins, 4 * nbins))

    def block(mat, row, col):
        # View of the (row, col) block in a matrix of nbins-sized blocks.
        return mat[row * nbins:(row + 1) * nbins,
                   col * nbins:(col + 1) * nbins]

    # EE, EB, BE and BB occupy positions 5..8 of the full 9x9 block layout.
    for dst, src in enumerate((5, 6, 7, 8)):
        block(reduced, dst, dst)[:] = block(full, src, src)
    # EE-BB and BB-EE leakage blocks.
    block(reduced, 0, 3)[:] = block(full, 5, 8)
    block(reduced, 3, 0)[:] = block(full, 8, 5)
    return np.linalg.inv(reduced)
def cov2corr(cov, remove_diag=True):
    """Go from covariance to correlation matrix.

    Parameters
    ----------
    cov: 2d array
        the covariance matrix
    remove_diag: boolean
        if True (default), subtract the identity so the diagonal is zero
    """
    d = np.sqrt(cov.diagonal())
    # Divide rows and columns by the standard deviations: C_ij / (d_i * d_j)
    corr = ((cov.T / d).T) / d
    if remove_diag:
        corr -= np.identity(cov.shape[0])
    return corr
def selectblock(cov, spectra, n_bins, block="TTTT"):
    """Select a block in a spin0 and 2 covariance matrix.

    Parameters
    ----------
    cov: 2d array
        the covariance matrix
    spectra: list of strings or None
        the arrangement of the different blocks; None means a pure spin-0
        covariance with no blocks to select
    n_bins: int
        the number of bins for each block
    block: string
        the block you want to look at, e.g. "TTTT" or "TEET"

    Returns
    -------
    2d array or None
        the (n_bins, n_bins) sub-matrix, or None when spectra is None
    """
    if spectra is None:
        print("cov mat of spin 0, no block selection needed")
        return
    # Map each pair of spectrum names to its (row, col) offset in cov
    blockindex = {}
    for c1, s1 in enumerate(spectra):
        for c2, s2 in enumerate(spectra):
            blockindex[s1 + s2] = [c1 * n_bins, c2 * n_bins]
    id1, id2 = blockindex[block]
    return cov[id1:id1 + n_bins, id2:id2 + n_bins]
def delta2(a, b):
    """Kronecker delta of two arguments: 1 if a == b, else 0."""
    return 1 if a == b else 0
def delta3(a, b, c):
    """Kronecker delta of three arguments: 1 if all are equal, else 0."""
    return 1 if a == b == c else 0
def delta4(a, b, c, d):
    """Kronecker delta of four arguments: 1 if all are equal, else 0."""
    return 1 if a == b == c == d else 0
def f(a, b, c, d, ns):
    """f combination factor in the covariance computation.

    ns maps an experiment label to its number of splits; a, b, c, d are
    experiment labels.
    """
    numerator = ns[a] * (ns[c] * ns[d] * delta2(a, b)
                         - ns[c] * delta3(a, b, d)
                         - ns[d] * delta3(a, b, c)
                         + delta4(a, b, c, d))
    denominator = ns[c] * ns[d] * (ns[a] - delta2(a, c)) * (ns[b] - delta2(b, d))
    return 1. * numerator / denominator
def g(a, b, c, d, ns):
    """g combination factor in the covariance computation.

    ns maps an experiment label to its number of splits; a, b, c, d are
    experiment labels.
    """
    numerator = ns[a] * (ns[c] * delta2(a, b) * delta2(c, d) - delta4(a, b, c, d))
    denominator = ns[a] * ns[b] * (ns[c] - delta2(a, c)) * (ns[d] - delta2(b, d))
    return 1. * numerator / denominator
def chi(alpha, gamma, beta, eta, ns, Dl, DNl, id="TTTT"):
    """doc not ready yet

    Combines signal (Dl) and noise (DNl) spectra of the four array/field
    labels with the f and g mode-count factors, then applies an
    arithmetic symmetrization.
    """
    # Labels are "experiment_field"; only the experiment part enters f and g
    exp_alpha = alpha.split("_")[0]
    exp_beta = beta.split("_")[0]
    exp_gamma = gamma.split("_")[0]
    exp_eta = eta.split("_")[0]
    RX = id[0] + id[1]
    SY = id[2] + id[3]
    signal = Dl[alpha, gamma, RX] * Dl[beta, eta, SY]
    cross1 = Dl[alpha, gamma, RX] * DNl[beta, eta, SY] * f(exp_beta, exp_eta, exp_alpha, exp_gamma, ns)
    cross2 = Dl[beta, eta, SY] * DNl[alpha, gamma, RX] * f(exp_alpha, exp_gamma, exp_beta, exp_eta, ns)
    noise = g(exp_alpha, exp_gamma, exp_beta, exp_eta, ns) * DNl[alpha, gamma, RX] * DNl[beta, eta, SY]
    return symmetrize(signal + cross1 + cross2 + noise, mode="arithm")
def chi_planck(alpha, gamma, beta, eta, ns, Dl, DNl, id="TTTT"):
    """doc not ready yet

    Planck variant of chi: the Dl and DNl entries involved are first
    symmetrized IN PLACE (arithmetic mean for TE, geometric otherwise),
    then combined exactly as in chi.
    """
    exp_alpha = alpha.split("_")[0]
    exp_beta = beta.split("_")[0]
    exp_gamma = gamma.split("_")[0]
    exp_eta = eta.split("_")[0]
    RX = id[0] + id[1]
    SY = id[2] + id[3]
    # TE spectra get an arithmetic symmetrization, everything else geometric
    mode_RX = "arithm" if RX == "TE" else "geo"
    mode_SY = "arithm" if SY == "TE" else "geo"
    # NOTE: this mutates the Dl and DNl dictionaries passed by the caller
    Dl[alpha, gamma, RX] = symmetrize(Dl[alpha, gamma, RX], mode=mode_RX)
    DNl[alpha, gamma, RX] = symmetrize(DNl[alpha, gamma, RX], mode=mode_RX)
    Dl[beta, eta, SY] = symmetrize(Dl[beta, eta, SY], mode=mode_SY)
    DNl[beta, eta, SY] = symmetrize(DNl[beta, eta, SY], mode=mode_SY)
    signal = Dl[alpha, gamma, RX] * Dl[beta, eta, SY]
    cross1 = Dl[alpha, gamma, RX] * DNl[beta, eta, SY] * f(exp_beta, exp_eta, exp_alpha, exp_gamma, ns)
    cross2 = Dl[beta, eta, SY] * DNl[alpha, gamma, RX] * f(exp_alpha, exp_gamma, exp_beta, exp_eta, ns)
    noise = g(exp_alpha, exp_gamma, exp_beta, exp_eta, ns) * DNl[alpha, gamma, RX] * DNl[beta, eta, SY]
    return symmetrize(signal + cross1 + cross2 + noise, mode="arithm")
def plot_cov_matrix(mat, color_range=None, color="pwhite", file_name=None):
    """plot the covariance matrix at full resolution using pixell plotting routines

    Parameters
    ----------
    mat: 2d array
        the covariance matrix
    color_range: float
        the range of the plot; if None a symmetric range based on the
        largest absolute entry is used
    color: pixell colormap
        the colormap for the plot (have to be pixell compatible)
    file_name: string
        file_name is the name of the png file that will be created, if None the plot
        will be displayed.
    """
    # Work on a copy so the caller's matrix is never modified
    mat_plot = mat.copy()
    try:
        colorize.mpl_setdefault(color)
    except KeyError:
        raise KeyError("Color name must be a pixell color map name {}!".format(
            list(colorize.schemes.keys())))
    if color_range is None:
        # Symmetric range covering both the most positive and most negative entry
        max_range = np.maximum(np.max(mat_plot),np.abs(np.min(mat_plot)))
        # enplot takes the range as a string
        color_range = "%s" % (max_range)
    # We need to revert the order of the array to make the plot it similar to matplotlib imshow
    mat_plot = mat_plot[::-1, ::-1]
    # Wrap the matrix in an enmap with a dummy CAR WCS so enplot can render it
    wcs = enmap.create_wcs(mat_plot.shape, proj="car")
    mat_plot = enmap.enmap(mat_plot,wcs)
    plots = enplot.get_plots(mat_plot,
                             color=color,
                             range=color_range,
                             colorbar=1,
                             grid=0)
    # Write to <file_name>.png when a name is given, otherwise display interactively
    for plot in plots:
        if file_name is not None:
            enplot.write(file_name + ".png", plot)
        else:
            plot.img.show()
#def chi_old(alpha, gamma, beta, eta, ns, ls, Dl, DNl, id="TTTT"):
# """doc not ready yet
# """
# exp_alpha, f_alpha = alpha.split("_")
# exp_beta, f_beta = beta.split("_")
# exp_gamma, f_gamma = gamma.split("_")
# exp_eta, f_eta = eta.split("_")
# RX = id[0] + id[2]
# SY = id[1] + id[3]
# chi = Dl[alpha, gamma, RX] * Dl[beta, eta, SY]
# chi += Dl[alpha, gamma, RX] * DNl[beta, eta, SY] * f(exp_beta, exp_eta, exp_alpha, exp_gamma, ns)
# chi += Dl[beta, eta, SY] * DNl[alpha, gamma, RX] *f(exp_alpha, exp_gamma, exp_beta, exp_eta, ns)
# chi += g(exp_alpha, exp_gamma, exp_beta, exp_eta, ns) * DNl[alpha, gamma, RX] * DNl[beta, eta, SY]
# print ("RX",RX)
# print ("SY",SY)
# print ("ns",ns)
# print (r"f_{%s %s}^{%s %s}"%(exp_beta,exp_eta,exp_alpha,exp_gamma),f(exp_beta,exp_eta,exp_alpha,exp_gamma,ns))
# print (r"f_{%s %s}^{%s %s}"%(exp_alpha,exp_gamma,exp_beta,exp_eta),f(exp_alpha,exp_gamma,exp_beta,exp_eta,ns))
# print (r"g_{%s %s %s %s}"%(exp_alpha,exp_gamma,exp_beta,exp_eta),g(exp_alpha,exp_gamma,exp_beta,exp_eta,ns))
# chi= symmetrize(chi, mode="arithm")
# return chi
# def calc_cov_lensed(noise_uK_arcmin,
# fwhm_arcmin,
# lmin,
# lmax,
# camb_lensed_theory_file,
# camb_unlensed_theory_file,
# output_dir,
# overwrite=False):
# """ Wrapper around lenscov (https://github.com/JulienPeloton/lenscov). heavily borrowed
# from covariance.py compute lensing induced non-gaussian part of covariance matrix
# """
# try:
# import lib_covariances, lib_spectra, misc, util
# except:
# print(
# "[ERROR] failed to load lenscov modules. Make sure that lenscov is properly installed")
# print("[ERROR] Note: lenscov is not yet python3 compatible")
# print("[WARNING] calc_cov_lensed requires MPI to be abled")
# from pspy import so_mpi
# from mpi4py import MPI
# so_mpi.init(True)
# rank, size = so_mpi.rank, so_mpi.size
# ## The available blocks in the code
# blocks = ["TTTT", "EEEE", "BBBB", "EEBB", "TTEE", "TTBB", "TETE", "TTTE", "EETE", "TEBB"]
# ## Initialization of spectra
# cls_unlensed = lib_spectra.get_camb_cls(fname=os.path.abspath(camb_unlensed_theory_file),
# lmax=lmax)
# cls_lensed = lib_spectra.get_camb_cls(fname=os.path.abspath(camb_lensed_theory_file),
# lmax=lmax)
# file_manager = util.file_manager("covariances_CMBxCMB",
# "pspy",
# spec="v1",
# lmax=lmax,
# force_recomputation=overwrite,
# folder=output_dir,
# rank=rank)
# if file_manager.FileExist is True:
# if rank == 0:
# print("Already computed in %s/" % output_dir)
# else:
# cov_order0_tot, cov_order1_tot, cov_order2_tot, junk = lib_covariances.analytic_covariances_CMBxCMB(
# cls_unlensed,
# cls_lensed,
# lmin=lmin,
# blocks=blocks,
# noise_uK_arcmin=noise_uK_arcmin,
# TTcorr=False,
# fwhm_arcmin=fwhm_arcmin,
# MPI=MPI,
# use_corrfunc=True,
# exp="pspy",
# folder_cache=output_dir)
# array_to_save = [cov_order0_tot, cov_order1_tot, cov_order2_tot, blocks]
# if file_manager.FileExist is False and rank == 0:
# file_manager.save_data_on_disk(array_to_save)
# def load_cov_lensed(cov_lensed_file, include_gaussian_part=False):
# """wrapper around lenscov (https://github.com/JulienPeloton/lenscov).
# include_gaussian_part: if False, it returns only lensing induced non-gaussin parts
# """
# | |
import numpy as nm
import string as st
import pylab as pl
from ThreadClasses import *
from Protein_properties_v3 import *
from AminoData import *
import copy as cp
import time
from ReadDate import *
def build_path_matrix_v3(row,col,Structure_rules):
    '''
    Build an empty mpath matrix of (row+1) x col ScoreMatrix cells,
    each cell pre-loaded with the structural rule of its column.
    '''
    base_length = len(Structure_rules)
    mp = []
    for _ in range(row + 1):
        matrix_row = []
        for j in range(col):
            cell = ScoreMatrix()
            # relative (mod) position of this column in the base structure
            jj = (j + 1) % base_length - 1
            rule = Structure_rules[jj]
            # 0:Not a corner, 1:can cut a corner, 3:after corner cut
            # corner cutting is never allowed in the first two columns
            cell.CornerFlag = rule.CornerFlag if j > 1 else 0
            # 0:not a corner, 1:side start, 2:side end, -3:physical corner
            cell.CornerMarker = rule.CornerMarker
            cell.PointInOut = rule.PointInOut
            cell.PosNum = rule.PosNum
            cell.Special = rule.Special
            cell.Turn = rule.Turn
            cell.DiSulDis = rule.DiSulDis
            cell.ResSize = rule.ResSize
            cell.ResSizeSigma = rule.ResSizeSigma
            matrix_row.append(cell)
        mp.append(matrix_row)
    return mp
# End of build_path_matrix_v3
def StartCol(seq1,base_length):
    '''
    Looking for the starting position.
    Note that the column numbering in the matrix starts with 0.
    seq1 is the sequence with the loop and corner cutting marks.
    '''
    # Count the thread positions before the first '|' marker; characters
    # inside a '(' ... ')' loop do not occupy a column and are not counted.
    countFlag = True
    i = 0
    col = 0
    while seq1[i] != '|':
        if countFlag: col += 1
        # Safety stop: a well-formed thread is expected to contain '|'
        # within the first 50 characters
        if i == 50:
            print 'Problem with StartCol'
            break
        if seq1[i] == '(':
            countFlag = False
            # the '(' itself was counted above; undo that
            col -= 1
        if seq1[i] == ')': countFlag = True
        i += 1
    # Convert the counted offset into the starting column index
    col = base_length - col
    return col
def GetThreadScores(seq,seq1,Structure_rules,ScoresP,BaseData):
    '''
    Returns the score of a known thread seq1 (and prints a per-category
    score breakdown, normalized by the sequence length).
    Scores : the position score (as a class variable)
    tot : the total score
    seq : is the sequence without the thread marks
    seq1 : the actual thread, with loop, corners and corner cuts
    '''
    # get the list of amino acid letters
    AAlist = AminoList()
    # list of symbols allowed in a thread
    ThreadMarks = ['|','-','(',')']
    # Dictionary of amino acids and their properties
    aminotable = AminoTable()
    # the size of our scoring matrix
    row = len(seq)
    # basic repeated length in the structure. for LBH-18 6, for LBH-15 5
    base_length = len(Structure_rules)
    # calculate column number according to possible number of corners cut
    col = end_col(row,Structure_rules)
    # initializing a list of lists that will store the best routes in mpath matrix
    mpath = build_path_matrix_v3(row,col,Structure_rules)
    # Check at what column the thread starts
    icol = StartCol(seq1,base_length)
    irow = 0
    # True while we are outside a '(...)' loop section of the thread
    NoLoopFlag = True
    PScore = 0.0 # The side chain score
    i = 0 # Counts the steps in the seq1
    tot = 0.0
    # Score data collection
    # Collected for information on score distribution
    HphobicS = 0
    ChargeS = 0
    PolarS = 0
    SpecialS = 0
    SideChainVolS = 0
    CornerCutS = 0
    TurnVolS = 0
    LoopHydroS = 0
    SideCahinBondsS = 0
    LoopPenalty = 0
    LoopNotOnCornerS = 0
    TotalScoreS = 0
    # Walk the thread character by character, accumulating scores
    for t in seq1:
        if t== '(': NoLoopFlag = False # we've riched a loop
        if NoLoopFlag:
            if t not in ThreadMarks:
                # t is an amino acid on the structured (non-loop) part
                AA = aminotable[t]
                Mpos = mpath[irow][icol]
                # Cysteine uses the disulfide distance as its turn length
                if t == 'C': turnLenght = mpath[irow][icol].DiSulDis
                else: turnLenght = mpath[irow][icol].Turn
                tempS = threadSidescore(t,seq1[0:i],turnLenght,ScoresP) # Side chain bond score
                tot += tempS
                SideCahinBondsS += tempS
                #PScore += ThreadVolumeScore(seq1[0:i],BaseData)*ScoresP.TurnVolPenaly
                # Volume penalty for the turn including the current residue
                tempS = AA_VolumeScore(seq1[0:i+1],BaseData)*ScoresP.TurnVolPenaly
                tot += tempS
                TurnVolS += tempS
                tmpScr,total_scr = PosScore(AA,Mpos,ScoresP) # position score
                HphobicS += tmpScr.hydrophobic_scr
                ChargeS += tmpScr.charge_scr
                PolarS += tmpScr.polar_scr
                SideChainVolS += tmpScr.size_scr
                SpecialS += tmpScr.special_scr
                tot += total_scr
                icol += 1
                # if AA.name in ['H','K','R','D','E','Y','Q','S','N']:
                #print '~~~~~~~~~~~~~~~~~~~~~~~'
                #print AA.name,t
                #print seq1[0:i]
                #print 'side score',threadSidescore(t,seq1[0:i],turnLenght,ScoresP),PScore
                #print 'turn vol',AA_VolumeScore(seq1[0:i+1],BaseData)*ScoresP.TurnVolPenaly
                #print 'pos score',total_scr
                #print 'tot',tot
        if not NoLoopFlag: # we are in a loop
            # Score for loops
            if t in AAlist:
                # print '++++++++++++================='
                AA = aminotable[t]
                tot += AA.loop * ScoresP.SpecialRatio # Adding special score for selected AA
                temp1 = AA.Hydrophobic * ScoresP.HydroRatio * ScoresP.LoopWeight # adding the hydrophobicity score of AA on loop
                tot -= temp1
                temp2 = AA.polar * ScoresP.PolarRatio * ScoresP.LoopWeight # adding the polarity score of AA on loop
                tot += temp2
                temp3 = AA.charge * ScoresP.ChargeRatio * ScoresP.LoopWeight # adding the charge score of AA on loop
                tot += temp3
                LoopHydroS += (temp2 + temp3 - temp1)
        # Every amino acid advances the row, whether on a loop or not
        if t in AAlist: irow += 1
        if t == '-': # Corner cut
            icol += 1 # Continue counting
            tempS = ScoresP.CornerCutPenalty # panelty for corner cutting
            tot -= tempS
            CornerCutS -= tempS
        if t == ')':
            NoLoopFlag = True # End of loop
            temp1 = ScoresP.LoopPenalty / 3.0 # Small panelty for loops
            tot -= temp1
            LoopPenalty -= temp1
            temp2 = (mpath[irow][icol].CornerMarker in [0,2]) * ScoresP.LoopPenalty # apply loop penalty if not in corner
            tot -= temp2
            LoopNotOnCornerS -= temp2
        i += 1
    # print 'tot un normalized',tot,float(row)
    tot += PScore # the total score
    nrows = float(row)
    tot = tot / nrows # Normalizing to the number of amino acids
    # Print the scores
    print 'Total Score: ',tot
    print 'H-phobic Score: ',HphobicS/nrows
    print 'Charge Score: ',ChargeS/nrows
    print 'Polar Score: ',PolarS/nrows
    print 'Special Score: ',SpecialS/nrows
    print 'Side Chain Volume Score: ',SideChainVolS/nrows
    print 'Corner Cut Score: ',CornerCutS/nrows
    print 'Turn Volume Score: ',TurnVolS/nrows
    print 'Loop Hydro Score: ',LoopHydroS/nrows
    print 'Side Cahin Bonds Score: ',SideCahinBondsS/nrows
    print 'Loop Penalty: ',LoopPenalty/nrows
    print 'Loop Not On Corner Score: ',LoopNotOnCornerS/nrows
    # Return the total score
    return tot
def end_col(row,Structure_rules):
    '''
    Number of columns needed in the mpath matrix for a given number of rows.

    It looks like col = row + (base_length - 1) + int(row+1)/int(base_length-1)
    would do the same, at least for the LHBH-18; the explicit loop is kept
    so the program can be extended to other structure types.
    '''
    base_length = len(Structure_rules)
    extra = 0
    for i in range(row):
        # relative (mod) location of this position in the base structure
        jj = (i + 1) % base_length - 1
        after_cut = Structure_rules[jj].CornerFlag > 1  # 3: after corner cut
        not_after_corner = jj > 0  # we are not immediately after a corner
        if after_cut and not_after_corner:
            extra += 1
    return row + (base_length - 1) + extra
# end end column calculation
def result_output_v4(seq,highscore,threadlist1,dScore):
    '''
    Printing results to file file_name_out.txt and to the screen.
    (The file-writing part is currently disabled; only screen output remains.)
    threadlist1 : list of thread objects with .thread and .score attributes
    dScore : score window below the high score used for the reported count
    '''
    # Collection of the best threads
    threadlist = [x.thread for x in threadlist1]
    # Collection of the best scores, sorting and keeping only unique values
    threadlistScore = [x.score for x in threadlist1]
    threadlistScore.sort()
    threadlistScore = uniqueList(threadlistScore)
    # Numebr of threads with scores up to dScore points difference from the highest score
    nThreads = len(threadlist)
    # open file 'file_name_out.txt' for writing (disabled)
    #f_name = file_name
    #f_path = file_path
    #filestr = f_path + 'out_' + f_name + '.txt'
    #f = open(filestr, 'w')
    # setting strings for printing
    highscoreStr = 'Highest score (per amino acid):%.2f' % highscore
    bThreads = 'Numebr of threads with scores up to %g points difference from the highest score: %g' %(dScore,nThreads)
    # print to screen
    #print 'See %s.txt for results \n' % file_name
    print highscoreStr
    print bThreads
    # Presentation of results:
    print 'Collection of those best threads'
    # Walk the unique scores from highest to lowest, printing every thread
    # that matches the current score, then dropping that score
    while threadlistScore != []:
        for thrd in threadlist1:
            if thrd.score == threadlistScore[-1]:
                # Printing all threads
                cThreads = '%.1f :: %s' %(thrd.score, thrd.thread)
                print cThreads
        del(threadlistScore[-1])
    print 'end'
# End resultoutput
def mpathCalc(Structure_rules,seq,BaseData,ScoresP):
    '''
    Build and fill the dynamic-programming score matrix (mpath) for seq.

    Returns (mpath, row, col, highScore) where highScore is the best
    with-loops score found on the last row of mpath.
    '''
    shortl = BaseData.shortl
    longl = BaseData.longl
    # the size of our scoring matrix
    row = len(seq)
    # basic repeated length in the structure
    base_length = len(Structure_rules)
    # extra columns are added to allow corner cutting
    col = end_col(row,Structure_rules)
    # empty matrix that will store the best routes
    mpath = build_path_matrix_v3(row,col,Structure_rules)
    # mpath2 = build_path_matrix_v3(row,col,Structure_rules)
    # Scoring - filling mpath row by row
    for irow in range(row):
        # valid columns for this row lie in [startcol, endcol),
        # accounting for possible loops
        startcol = Start_col(irow,shortl,longl,BaseData.maxLoopFlag)
        endcol = end_col(irow + 1,Structure_rules)  # - (irow == 0)
        for icol in range(startcol,endcol):
            # lowest row of the previous column that can reach this cell
            startrow = Start_row(irow,shortl,longl)
            mpath[irow][icol] = score_v6(irow,icol,startrow,mpath,seq,BaseData,ScoresP)
    # The high score is the best with-loops score on the last row
    lastrow_scores = nm.array([cell.withLoops.Score for cell in mpath[row - 1]])
    highScore = max(lastrow_scores)
    return mpath,row,col,highScore
# End mpathCalc
def PosScore(AA,Mpos,ScoresP):
    '''
    Calculate the location score NOT including the loop scores.

    AA : amino acid record (from AminoTable)
    Mpos : ScoreMatrix cell holding the structural rules of the position
    ScoresP : the score weight parameters

    Returns
    -------
    tmpScr : ScoreFunction with the individual weighted score terms
    total_scr : the sum of the weighted terms
    '''
    tmpScr = ScoreFunction()
    # Hydrophobicity / polarity / charge contributions are signed by whether
    # the residue points into or out of the structure (Mpos.PointInOut)
    tmpScr.hydrophobic_scr = AA.Hydrophobic * Mpos.PointInOut
    tmpScr.polar_scr = -AA.polar * Mpos.PointInOut
    tmpScr.charge_scr = -AA.charge * Mpos.PointInOut
    # Match of the side chain volume against the expected residue size
    tmpScr.size_scr = ProbabilityScore(AA.SideChainVol,Mpos.ResSize,Mpos.ResSizeSigma)
    tmpScr.corner_scr = AA.corner * (Mpos.CornerMarker > 0) # AA.corner = 0 at current setup
    # A residue only carries a special score at some positions; a failed
    # lookup simply means "no special contribution" here.  Catch only
    # lookup-type errors so that genuine bugs are not silently swallowed
    # (the original bare except hid every failure).
    try:
        tmpScr.special_scr = AA.SpecialRes[Mpos.Special]
    except (KeyError, IndexError, TypeError):
        tmpScr.special_scr = 0
    tmpScr,total_scr = SumScore(tmpScr,ScoresP)
    return tmpScr,total_scr
# End PosScore
def SumScore(RawScore,ScoresP):
    '''
    Apply the configured weights to a raw position score.
    Loop scores are not included here.
    Returns (weighted ScoreFunction, total of the weighted terms).
    '''
    weighted = ScoreFunction()
    weighted.hydrophobic_scr = RawScore.hydrophobic_scr * ScoresP.HydroRatio
    weighted.polar_scr = RawScore.polar_scr * ScoresP.PolarRatio
    weighted.charge_scr = RawScore.charge_scr * ScoresP.ChargeRatio
    weighted.size_scr = RawScore.size_scr * ScoresP.SizeRatio
    # AA.corner = 0 at current setup
    weighted.corner_scr = RawScore.corner_scr * ScoresP.CornerRatio
    weighted.special_scr = RawScore.special_scr * ScoresP.SpecialRatio
    # Keep the original two-group summation order (float addition grouping)
    tot = (weighted.hydrophobic_scr + weighted.polar_scr + weighted.charge_scr) + (
        weighted.size_scr + weighted.corner_scr + weighted.special_scr)
    return weighted,tot
# End SumScore
def SideChainScore(AA1,AA2,ScoresP):
'''
Test if side bond is possible:
sideBond :
's' - disulfide bond
'h' - hydrogen bond
returns-
BondScore:
0 no bond
1 we have hydrogen bond
1 * s_factor we have disulfide bond
AA1/2 are amino acids
n1 and n2 are the numbers that indicate if we can have hydrogen bond and give it a value to
bond contribution.
n = -1 when the amino acid (AA) residue has an N donor, short residue.
n = -2 when the AA residue has an O acceptor, short residue.
n = -3 when the AA residue has an N donor, long residue that able to bond across two turns.
n = -5 when | |
# Source repository: thanever/SOC
from numpy import array
def scigrid_2011_01_07_17():
ppc = {"version": '2'}
ppc["baseMVA"] = 100.0
ppc["bus"] = array([
[586, 3, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[589, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[590, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[593, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[594, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[595, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[597, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[598, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[599, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[600, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[601, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[602, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[603, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[607, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[608, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[609, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[610, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[612, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[613, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[614, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[616, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[617, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[618, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[619, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[621, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[623, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[624, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[628, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[629, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[631, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[632, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[637, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[638, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[639, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[640, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[641, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[642, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[643, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[646, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[647, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[650, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[652, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[655, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[657, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[658, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[661, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[662, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[663, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[666, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[668, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[670, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[672, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[675, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[676, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[677, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[678, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[679, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[681, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[683, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[687, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[689, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[691, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[693, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[694, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[695, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[696, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[697, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[698, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[699, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[700, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[701, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[702, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[704, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[705, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[707, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[708, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[711, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[713, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[714, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[716, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[717, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[719, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[721, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[722, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[723, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[724, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[725, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[726, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[727, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[728, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[730, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[731, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[732, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[733, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[735, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[736, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[737, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[738, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[739, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[741, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[742, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[743, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[745, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[746, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[747, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[748, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[749, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[750, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[753, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[758, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[760, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[761, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[762, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[763, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[765, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[767, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[769, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[771, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[772, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[774, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[775, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[776, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[777, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[778, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[781, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[784, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[785, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[787, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[788, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[789, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[790, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[791, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[792, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[793, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[794, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[795, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[796, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[798, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[800, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[801, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[802, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[805, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[806, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[808, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[809, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[811, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[814, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[815, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[816, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[817, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[818, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[821, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[822, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[825, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[826, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[830, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[833, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[834, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[835, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[836, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[837, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[839, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[840, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[841, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[842, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[843, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[844, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[845, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[848, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[849, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[850, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[851, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[852, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[853, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[855, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[856, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[857, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[858, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[859, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[860, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[862, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[863, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[864, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[865, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[866, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[867, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[869, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[870, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[872, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[873, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[874, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[875, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[876, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[877, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[881, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[882, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[883, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[884, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[885, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[886, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[888, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[889, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[890, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[893, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[894, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[895, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[896, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[897, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[898, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[899, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[900, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[902, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[903, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[905, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[906, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[907, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[908, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[909, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[911, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[913, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[914, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[915, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[916, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[917, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[918, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[919, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[920, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[921, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[922, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[923, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[924, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[925, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[928, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[931, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[934, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[935, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[936, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[937, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[939, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[940, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[942, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[943, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[944, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[945, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[946, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[948, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[950, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[952, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[956, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[957, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[958, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[959, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[960, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[963, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[965, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[966, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[967, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[968, 2, 0, 0, 0, 0, 0, 0.999497, 0, 220.0, 0, 1.1, 0.9 ],
[969, 2, 0, 0, 0, 0, 0, 0.999497, 0, 220.0, 0, 1.1, 0.9 ],
[971, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[972, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[973, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[974, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[975, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[976, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[977, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[978, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[980, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[981, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[982, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[983, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[984, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[985, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[986, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[987, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[988, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[990, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[993, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[994, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[995, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[996, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[997, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[998, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[999, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1000, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1001, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1002, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1003, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1006, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1007, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1008, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1010, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1011, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1012, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1014, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1018, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1019, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1023, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1025, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1026, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1027, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1028, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1029, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1030, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1031, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1032, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1033, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1034, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1035, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1036, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1037, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1038, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1039, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1040, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1041, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1042, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1043, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1044, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1046, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1047, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1048, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1049, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1050, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1051, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1052, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1053, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1054, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1055, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1056, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1057, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1058, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1059, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1060, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1061, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1062, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1063, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1064, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1065, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1066, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1067, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1068, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1069, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1070, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1071, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1072, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1073, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1074, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1075, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1076, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1077, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1078, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1079, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1080, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1081, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1082, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1083, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1084, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1085, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1086, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1087, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1088, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1089, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1090, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1091, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1092, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1093, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1094, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1095, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1096, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1097, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1098, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1099, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1100, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1101, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1102, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1103, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1104, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1105, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1106, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1107, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1108, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1109, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1110, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1111, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1112, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1113, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1114, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1115, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1116, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1117, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1118, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1119, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1120, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1121, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1122, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1123, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1124, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1125, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1126, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1127, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1128, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1129, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1130, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1131, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1132, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1133, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1134, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1135, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1136, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1137, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1138, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1139, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1140, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1141, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1142, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1143, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1144, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1145, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1146, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1147, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1148, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1149, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1150, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1151, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1152, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1153, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1154, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1155, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1156, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1157, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1158, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1159, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1160, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1161, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1162, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1163, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1164, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1165, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1166, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1167, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1168, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1169, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1170, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1171, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1172, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1173, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1174, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1175, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1176, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1177, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1178, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1179, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1180, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1181, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1182, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1183, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1184, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1185, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1186, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1187, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1188, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1189, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1190, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1191, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1192, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1193, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1194, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1195, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1196, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1197, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1198, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1199, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1200, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1201, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1202, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1203, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1204, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1205, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1206, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1207, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1208, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1209, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1210, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1211, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1212, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1213, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1214, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1215, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1216, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1217, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1218, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1219, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1220, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1221, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1222, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1223, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1224, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1225, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1226, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1227, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1228, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1229, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1230, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1231, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1232, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1233, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1235, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1236, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1237, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1238, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1239, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1240, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1241, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1242, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1243, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1244, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1245, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1246, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1247, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1248, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1249, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1250, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1251, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1252, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1253, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1254, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1255, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1256, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1257, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1258, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1259, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1260, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1261, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1262, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1263, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1264, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1265, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1266, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1267, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1268, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1269, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1270, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1271, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1272, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1273, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1274, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1275, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1276, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1277, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1278, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1279, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1280, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1281, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1282, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1283, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1284, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1285, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1286, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1287, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1288, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1289, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1290, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1291, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1292, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1293, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1294, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1295, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1296, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1297, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1298, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1299, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1300, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1301, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1302, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1303, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1304, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1305, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1306, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1307, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1308, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1309, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1310, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1311, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1312, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1313, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1314, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1315, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1316, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1317, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1318, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1319, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1320, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1321, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1322, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1323, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1324, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1325, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1326, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1327, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1328, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1329, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1330, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1331, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1332, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1333, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1334, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1335, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1336, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1337, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1338, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1339, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1340, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1341, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1342, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1344, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1345, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1346, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1348, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1349, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1350, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1351, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1352, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1355, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1356, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1357, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1358, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1359, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1360, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1361, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1362, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1363, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1364, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1365, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1366, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1367, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1368, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1369, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1370, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1372, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1373, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1374, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1375, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1376, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1377, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1378, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1379, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1380, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1381, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1382, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1383, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1384, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1385, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1386, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1387, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1388, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1389, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1390, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1391, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1392, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1393, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1394, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1395, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1396, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1397, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1398, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1399, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1400, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1401, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1402, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1403, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1404, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1405, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1406, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1407, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1408, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1409, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1410, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1411, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1412, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1413, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1414, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1415, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1416, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1417, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1418, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1419, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1420, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1421, 2, 0, 0, 0, 0, 0, 0.999497, 0, 220.0, 0, 1.1, 0.9 ],
[1422, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1423, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1424, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1425, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1426, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1427, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1428, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1429, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1430, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1431, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1432, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1433, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1434, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1435, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1436, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1437, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1438, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1439, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1440, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1441, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1442, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1443, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1444, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1445, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1446, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1447, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1448, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1449, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1450, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1451, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1452, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1453, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1454, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1455, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1456, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1457, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1458, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1459, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1460, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1461, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1462, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1463, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1464, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1465, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1466, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1467, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1468, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1469, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1470, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1471, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1472, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1473, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1474, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1475, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1476, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1477, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1479, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1480, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1481, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1482, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1483, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1484, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1485, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1486, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1487, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1488, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1489, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1490, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1491, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1492, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1493, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1494, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1495, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1496, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1497, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1498, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1499, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1500, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1501, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1502, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1503, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1504, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1505, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1506, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1507, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1508, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1510, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1511, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1512, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1513, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1514, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1516, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1517, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1518, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1519, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1, 1, 333.250317, 66.650063, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[2, 1, 0, 0, 0, 0, 0, 1.000014, 0, 380.0, 0, 1.1, 0.9 ],
[3, 1, 58.409817, 11.681963, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[4, 1, 96.056881, 19.211376, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[5, 1, 0, 0, 0, 0, 0, 1.000076, 0, 380.0, 0, 1.1, 0.9 ],
[6, 1, 282.06282, 56.412564, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[7, 1, 212.569411, 42.513882, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[8, 1, 177.862895, 35.572579, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[9, 1, 120.285896, 24.057179, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[10, 1, 0, 0, 0, 0, 0, 0.999274, 0, 380.0, 0, 1.1, 0.9 ],
[11, 1, 105.39095, 21.07819, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[12, 1, 0, 0, 0, 0, 0, 1.000766, 0, 380.0, 0, 1.1, 0.9 ],
[13, 1, 0, 0, 0, 0, 0, 1.000079, 0, 380.0, 0, 1.1, 0.9 ],
[14, 1, 252.056491, 50.411298, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[15, 1, 0, 0, 0, 0, 0, 1.000266, 0, 380.0, 0, 1.1, 0.9 ],
[16, 1, 429.873148, 85.97463, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[17, 1, 101.246419, 20.249284, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[18, 1, 0, 0, 0, 0, 0, 1.001086, 0, 380.0, 0, 1.1, 0.9 ],
[19, 1, 250.141735, 50.028347, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[20, 1, 0, 0, 0, 0, 0, 0.996796, 0, 380.0, 0, 1.1, 0.9 ],
[21, 1, 1075.647828, 215.129566, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[22, 1, 0, 0, 0, 0, 0, 0.99961, 0, 380.0, 0, 1.1, 0.9 ],
[23, 1, 140.838771, 28.167754, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[24, 1, 0, 0, 0, 0, 0, 0.999992, 0, 380.0, 0, 1.1, 0.9 ],
[25, 1, 67.364166, 13.472833, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[26, 1, 0, 0, 0, 0, 0, 1.000227, 0, 380.0, 0, 1.1, 0.9 ],
[27, 1, 82.691379, 16.538276, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[28, 1, 244.328251, 48.86565, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[29, 1, 89.746853, 17.949371, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[30, 1, 0, 0, 0, 0, 0, 0.999365, 0, 380.0, 0, 1.1, 0.9 ],
[31, 1, 176.61949, 35.323898, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[32, 1, 0, 0, 0, 0, 0, 0.999353, 0, 380.0, 0, 1.1, 0.9 ],
[33, 1, 221.44765, 44.28953, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[34, 1, 43.934175, 8.786835, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[35, 1, 2.908675, 0.581735, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[36, 1, 9.630203, 1.926041, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[37, 1, 0, 0, 0, 0, 0, 1.003115, 0, 380.0, 0, 1.1, 0.9 ],
[38, 1, 232.013099, 46.40262, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[39, 1, 75.972337, 15.194467, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[40, 1, 79.355481, 15.871096, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[41, 1, 85.289157, 17.057831, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[42, 1, 0, 0, 0, 0, 0, 1.000839, 0, 380.0, 0, 1.1, 0.9 ],
[43, 1, 130.794766, 26.158953, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[44, 1, 167.33251, 33.466502, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[45, 1, 88.823839, 17.764768, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[46, 1, 0, 0, 0, 0, 0, 1.000239, 0, 380.0, 0, 1.1, 0.9 ],
[47, 1, 386.21318, 77.242636, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[48, 1, 265.47013, 53.094026, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[49, 1, 67.15055, 13.43011, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[50, 1, 97.780264, 19.556053, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[51, 1, 126.71684, 25.343368, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[52, 1, 0, 0, 0, 0, 0, 1.000142, 0, 380.0, 0, 1.1, 0.9 ],
[53, 1, 192.272509, 38.454502, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[54, 1, 97.685629, 19.537126, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[55, 1, 95.801055, 19.160211, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[56, 1, 0, 0, 0, 0, 0, 0.999664, 0, 380.0, 0, 1.1, 0.9 ],
[57, 1, 114.356533, 22.871307, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[58, 1, 261.951033, 52.390207, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[59, 1, 74.814816, 14.962963, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[60, 1, 39.444447, 7.888889, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[61, 1, 0, 0, 0, 0, 0, 0.999989, 0, 380.0, 0, 1.1, 0.9 ],
[62, 1, 300.715757, 60.143151, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[63, 1, 177.509937, 35.501987, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[64, 1, 1883.740163, 376.748033, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[65, 1, 6.276654, 1.255331, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[66, 1, 199.151068, 39.830214, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[67, 1, 427.212589, 85.442518, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[68, 1, 0, 0, 0, 0, 0, 0.998569, 0, 380.0, 0, 1.1, 0.9 ],
[69, 1, 0, 0, 0, 0, 0, 0.999548, 0, 380.0, 0, 1.1, 0.9 ],
[70, 1, 808.188777, 161.637755, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[71, 1, 187.812662, 37.562532, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[72, 1, 307.611368, 61.522274, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[73, 1, 98.47799, 19.695598, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[74, 1, 0, 0, 0, 0, 0, 1.001301, 0, 380.0, 0, 1.1, 0.9 ],
[75, 1, 122.738236, 24.547647, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[76, 1, 118.469327, 23.693865, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[77, 1, 114.745639, 22.949128, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[78, 1, 0, 0, 0, 0, 0, 0.998247, 0, 380.0, 0, 1.1, 0.9 ],
[79, 1, 118.483716, 23.696743, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[80, 1, 125.847988, 25.169598, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[81, 1, 142.06524, 28.413048, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[82, 1, 4.728015, 0.945603, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[83, 1, 316.339042, 63.267808, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[84, 1, 31.141627, 6.228325, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[85, 1, 107.993116, 21.598623, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[86, 1, 0, 0, 0, 0, 0, 0.999996, 0, 380.0, 0, 1.1, 0.9 ],
[87, 1, 0, 0, 0, 0, 0, 1.000106, 0, 380.0, 0, 1.1, 0.9 ],
[88, 1, 87.164757, 17.432951, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[89, 1, 108.141223, 21.628245, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[90, 1, 124.898338, 24.979668, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[91, 1, 43.383464, 8.676693, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[92, 1, 47.346579, 9.469316, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[93, 1, 46.437508, 9.287502, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[94, 1, 0, 0, 0, 0, 0, 1.001084, 0, 380.0, 0, 1.1, 0.9 ],
[95, 1, 0, 0, 0, 0, 0, 1.000731, 0, 380.0, 0, 1.1, 0.9 ],
[96, 1, 0, 0, 0, 0, 0, 0.999999, 0, 380.0, 0, 1.1, 0.9 ],
[97, 1, 6.531089, 1.306218, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[98, 1, 120.080452, 24.01609, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[99, 1, 0, 0, 0, 0, 0, 1.000257, 0, 380.0, 0, 1.1, 0.9 ],
[100, 1, 0, 0, 0, 0, 0, 1.002124, 0, 380.0, 0, 1.1, 0.9 ],
[101, 1, 85.029205, 17.005841, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[102, 1, 164.57799, 32.915598, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[103, 1, 192.423517, 38.484703, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[104, 1, 0, 0, 0, 0, 0, 1.000053, 0, 380.0, 0, 1.1, 0.9 ],
[105, 1, 0, 0, 0, 0, 0, 1.000106, 0, 380.0, 0, 1.1, 0.9 ],
[106, 1, 0, 0, 0, 0, 0, 0.999912, 0, 380.0, 0, 1.1, 0.9 ],
[107, 1, 0, 0, 0, 0, 0, 1.000004, 0, 380.0, 0, 1.1, 0.9 ],
[108, 1, 135.731332, 27.146266, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[109, 1, 54.9553, 10.99106, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[110, 1, 71.334182, 14.266836, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[111, 1, 125.710104, 25.142021, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[112, 1, 63.625159, 12.725032, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[113, 1, 100.296299, 20.05926, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[114, 1, 147.711923, 29.542385, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[115, 1, 95.221193, 19.044239, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[116, 1, 159.339571, 31.867914, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[117, 1, 0, 0, 0, 0, 0, 1.000048, 0, 380.0, 0, 1.1, 0.9 ],
[118, 1, 246.714526, 49.342905, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[119, 1, 47.823406, 9.564681, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[120, 1, 0, 0, 0, 0, 0, 1.000773, 0, 380.0, 0, 1.1, 0.9 ],
[121, 1, 64.944207, 12.988841, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[122, 1, 56.857994, 11.371599, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[123, 1, 0, 0, 0, 0, 0, 1.000044, 0, 380.0, 0, 1.1, 0.9 ],
[124, 1, 0, 0, 0, 0, 0, 1.000002, 0, 380.0, 0, 1.1, 0.9 ],
[125, 1, 0, 0, 0, 0, 0, 0.999399, 0, 380.0, 0, 1.1, 0.9 ],
[126, 1, 298.107875, 59.621575, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[127, 1, 230.468749, 46.09375, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[128, 1, 0, 0, 0, 0, 0, 1.000816, 0, 380.0, 0, 1.1, 0.9 ],
[129, 1, 0, 0, 0, 0, 0, 0.999993, 0, 380.0, 0, 1.1, 0.9 ],
[130, 1, 317.77448, 63.554896, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[131, 1, 70.16433, 14.032866, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[132, 1, 182.697308, 36.539462, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[133, 1, 61.196441, 12.239288, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[134, 1, 60.945842, 12.189168, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[135, 1, 61.026646, 12.205329, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[136, 1, 59.118313, 11.823663, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[137, 1, 47.289209, 9.457842, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[138, 1, 0, 0, 0, 0, 0, 1.00008, 0, 380.0, 0, 1.1, 0.9 ],
[139, 1, 92.634767, 18.526953, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[140, 1, 64.060908, 12.812182, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[141, 1, 75.900869, 15.180174, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[142, 1, 83.518051, 16.70361, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[143, 1, 0, 0, 0, 0, 0, 0.999983, 0, 380.0, 0, 1.1, 0.9 ],
[144, 1, 76.076309, 15.215262, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[145, 1, 221.307996, 44.261599, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[146, 1, 285.307638, 57.061528, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[147, 1, 174.876782, 34.975356, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[148, 1, 246.783244, 49.356649, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[149, 1, 159.099371, 31.819874, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[150, 1, 207.720748, 41.54415, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[151, 1, 48.949077, 9.789815, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[152, 1, 101.613208, 20.322642, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[153, 1, 181.294489, 36.258898, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[154, 1, 186.225417, 37.245083, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[155, 1, 193.97023, 38.794046, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[156, 1, 0, 0, 0, 0, 0, 0.999986, 0, 380.0, 0, 1.1, 0.9 ],
[157, 1, 0, 0, 0, 0, 0, 1.001112, 0, 380.0, 0, 1.1, 0.9 ],
[158, 1, 51.104696, 10.220939, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[159, 1, 0, 0, 0, 0, 0, 1.001047, 0, 380.0, 0, 1.1, 0.9 ],
[160, 1, 0, 0, 0, 0, 0, 1.000005, 0, 380.0, 0, 1.1, 0.9 ],
[161, 1, 158.650815, 31.730163, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[162, 1, 237.135951, 47.42719, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[163, 1, 47.424951, 9.48499, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[164, 1, 47.615675, 9.523135, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[165, 1, 0, 0, 0, 0, 0, 1.000005, 0, 380.0, 0, 1.1, 0.9 ],
[166, 1, 55.670427, 11.134085, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[167, 1, 78.314278, 15.662856, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[168, 1, 53.448495, 10.689699, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[169, 1, 182.96961, 36.593922, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[170, 1, 137.486233, 27.497247, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[171, 1, 117.344448, 23.46889, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[172, 1, 57.589458, 11.517892, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[173, 1, 55.014978, 11.002996, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[174, 1, 82.55777, 16.511554, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[175, 1, 54.978921, 10.995784, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[176, 1, 191.581126, 38.316225, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[177, 1, 31.240094, 6.248019, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[178, 1, 165.455201, 33.09104, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[179, 1, 60.964531, 12.192906, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[180, 1, 53.589383, 10.717877, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[181, 1, 40.447723, 8.089545, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[182, 1, 1.832301, 0.36646, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[183, 1, 548.465245, 109.693049, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[184, 1, 0, 0, 0, 0, 0, 0.999324, 0, 380.0, 0, 1.1, 0.9 ],
[185, 1, 117.286121, 23.457224, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[186, 1, 63.157967, 12.631593, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[187, 1, 36.940979, 7.388196, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[188, 1, 54.978921, 10.995784, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[189, 1, 201.73818, 40.347636, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[190, 1, 266.836487, 53.367297, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[191, 1, 0, 0, 0, 0, 0, 1.000009, 0, 380.0, 0, 1.1, 0.9 ],
[192, 1, 64.262309, 12.852462, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[193, 1, 54.890236, 10.978047, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[194, 1, 37.891609, 7.578322, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[195, 1, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[196, 1, 53.159718, 10.631944, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[197, 1, 84.224517, 16.844903, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[198, 1, 49.839559, 9.967912, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[199, 1, 64.166774, 12.833355, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[200, 1, 54.980197, 10.996039, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[201, 1, 0, 0, 0, 0, 0, 0.999078, 0, 380.0, 0, 1.1, 0.9 ],
[202, 1, 56.339095, 11.267819, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[203, 1, 7.423181, 1.484636, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[204, 1, 217.571945, 43.514389, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[205, 1, 108.795767, 21.759153, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[206, 1, 52.214365, 10.442873, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[207, 1, 155.263032, 31.052606, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[208, 1, 45.71884, 9.143768, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[209, 1, 63.533212, 12.706642, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[210, 1, 72.987769, 14.597554, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[211, 1, 256.49538, 51.299076, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[212, 1, 64.286949, 12.85739, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[213, 1, 301.362847, 60.272569, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[214, 1, 202.778996, 40.555799, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[215, 1, 428.786308, 85.757262, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[216, 1, 144.581054, 28.916211, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[217, 1, 46.328904, 9.265781, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[218, 1, 141.14262, 28.228524, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[219, 1, 226.833392, 45.366678, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[220, 1, 0, 0, 0, 0, 0, 0.999482, 0, 380.0, 0, 1.1, 0.9 ],
[221, 1, 129.397814, 25.879563, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[222, 1, 0.0, 0.0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[223, 1, 128.241243, 25.648249, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[224, 1, 149.1269, 29.82538, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[225, 1, 267.765904, 53.553181, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[226, 1, 93.538904, 18.707781, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[227, 1, 116.530504, 23.306101, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[228, 1, 114.2546, 22.85092, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[229, 1, 252.825942, 50.565188, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[230, 1, 60.642099, 12.12842, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[231, 1, 0, 0, 0, 0, 0, 1.000697, 0, 380.0, 0, 1.1, 0.9 ],
[232, 1, 0, 0, 0, 0, 0, 0.999966, 0, 380.0, 0, 1.1, 0.9 ],
[233, 1, 0, 0, 0, 0, 0, 0.999772, 0, 380.0, 0, 1.1, 0.9 ],
[234, 1, 216.013902, 43.20278, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[235, 1, 70.244842, 14.048968, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[236, 1, 0, 0, 0, 0, 0, 0.999979, 0, 380.0, 0, 1.1, 0.9 ],
[237, 1, 0.581356, 0.116271, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[238, 1, 79.483316, 15.896663, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[239, 1, 109.816168, 21.963234, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[240, 1, 692.699329, 138.539866, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[241, 1, 512.57344, 102.514688, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[242, 1, 186.637265, 37.327453, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[243, 1, 150.579055, 30.115811, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[244, 1, 179.403759, 35.880752, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[245, 1, 0, 0, 0, 0, 0, 1.001312, 0, 380.0, 0, 1.1, 0.9 ],
[246, 1, 0, 0, 0, 0, 0, 1.000291, 0, 380.0, 0, 1.1, 0.9 ],
[247, 1, 35.601662, 7.120332, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[248, 1, 0, 0, 0, 0, 0, 1.000002, 0, 380.0, 0, 1.1, 0.9 ],
[249, 1, 0, 0, 0, 0, 0, 1.000002, 0, 380.0, 0, 1.1, 0.9 ],
[250, 1, 0, 0, 0, 0, 0, 1.000003, 0, 380.0, 0, 1.1, 0.9 ],
[251, 1, 88.35525, 17.67105, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[252, 1, 226.590798, 45.31816, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[253, 1, 99.482007, 19.896401, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[254, 1, 31.762954, 6.352591, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[255, 1, 156.20756, 31.241512, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[256, 1, 179.14289, 35.828578, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[257, 1, 86.458322, 17.291664, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[258, 1, 281.757228, 56.351446, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[259, 1, 0, 0, 0, 0, 0, 0.999227, 0, 380.0, 0, 1.1, 0.9 ],
[260, 1, 175.354631, 35.070926, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[261, 1, 0, 0, 0, 0, 0, 1.001153, 0, 380.0, 0, 1.1, 0.9 ],
[262, 1, 0, 0, 0, 0, 0, 1.001076, 0, 380.0, 0, 1.1, 0.9 ],
[263, 1, 251.545989, 50.309198, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[264, 1, 325.639851, 65.12797, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[265, 1, 0, 0, 0, 0, 0, 1.000003, 0, 380.0, 0, 1.1, 0.9 ],
[266, 1, 156.936716, 31.387343, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[267, 1, 198.490896, 39.698179, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[268, 1, 69.023696, 13.804739, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[269, 1, 55.428616, 11.085723, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[270, 1, 0, 0, 0, 0, 0, 1.000015, 0, 380.0, 0, 1.1, 0.9 ],
[271, 1, 0.0, 0.0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[272, 1, 1.130948, 0.22619, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[273, 1, 154.657659, 30.931532, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[274, 1, 300.634116, 60.126823, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[275, 1, 56.280348, 11.25607, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[276, 1, 219.395103, 43.879021, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[277, 1, 0, 0, 0, 0, 0, 0.998764, 0, 380.0, 0, 1.1, 0.9 ],
[278, 1, 171.273745, 34.254749, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[279, 1, 0, 0, 0, 0, 0, 0.999222, 0, 380.0, 0, 1.1, 0.9 ],
[280, 1, 0, 0, 0, 0, 0, 0.999147, 0, 380.0, 0, 1.1, 0.9 ],
[281, 1, 226.232105, 45.246421, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[282, 1, 319.927233, 63.985447, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[283, 1, 128.240727, 25.648145, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[284, 1, 194.547122, 38.909424, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[285, 1, 86.761191, 17.352238, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[286, 1, 181.837443, 36.367489, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[287, 1, 111.761287, 22.352257, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[288, 1, 71.884082, 14.376816, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[289, 1, 113.052812, 22.610562, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[290, 1, 0, 0, 0, 0, 0, 1.004434, 0, 380.0, 0, 1.1, 0.9 ],
[291, 1, 74.39872, 14.879744, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[292, 1, 146.673667, 29.334733, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[293, 1, 129.26905, 25.85381, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[294, 1, 34.448248, 6.88965, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[295, 1, 72.077734, 14.415547, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[296, 1, 204.628857, 40.925771, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[297, 1, 215.067224, 43.013445, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[298, 1, 113.559769, 22.711954, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[299, 1, 109.981882, 21.996376, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[300, 1, 299.620426, 59.924085, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[301, 1, 0, 0, 0, 0, 0, 0.999166, 0, 380.0, 0, 1.1, 0.9 ],
[302, 1, 252.393556, 50.478711, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[303, 1, 129.63665, 25.92733, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[304, 1, 111.319082, 22.263816, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[305, 1, 0, 0, 0, 0, 0, 0.999589, 0, 380.0, 0, 1.1, 0.9 ],
[306, 1, 0, 0, 0, 0, 0, 1.001594, 0, 380.0, 0, 1.1, 0.9 ],
[307, 1, 132.034777, 26.406955, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[308, 1, 162.781288, 32.556258, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[309, 1, 266.333079, 53.266616, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[310, 1, 0, 0, 0, 0, 0, 1.000071, 0, 380.0, 0, 1.1, 0.9 ],
[311, 1, 226.225708, 45.245142, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[312, 1, 101.739995, 20.347999, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[313, 1, 0, 0, 0, 0, 0, 1.000199, 0, 380.0, 0, 1.1, 0.9 ],
[314, 1, 315.125745, 63.025149, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[315, 1, 0, 0, 0, 0, 0, 1.001442, 0, 380.0, 0, 1.1, 0.9 ],
[316, 1, 123.470364, 24.694073, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[317, 1, 166.248322, 33.249664, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[318, 1, 273.207366, 54.641473, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[319, 1, 9.78738, 1.957476, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[320, 1, 0, 0, 0, 0, 0, 0.999998, 0, 380.0, 0, 1.1, 0.9 ],
[321, 1, 231.524249, 46.30485, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[322, 1, 29.474527, 5.894905, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[323, 1, 3.066574, 0.613315, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[324, 1, 542.096033, 108.419207, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[325, 1, 176.590119, 35.318024, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[326, 1, 14.317379, 2.863476, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[327, 1, 123.21082, 24.642164, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[328, 1, 209.970174, 41.994035, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[329, 1, 315.813859, 63.162772, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[330, 1, 0, 0, 0, 0, 0, 1.001563, 0, 380.0, 0, 1.1, 0.9 ],
[331, 1, 25.074546, 5.014909, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[332, 1, 0, 0, 0, 0, 0, 0.998006, 0, 380.0, 0, 1.1, 0.9 ],
[333, 1, 263.464899, 52.69298, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[334, 1, 0, 0, 0, 0, 0, 0.999569, 0, 380.0, 0, 1.1, 0.9 ],
[335, 1, 268.885806, 53.777161, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[336, 1, 0, 0, 0, 0, 0, 0.997906, 0, 380.0, 0, 1.1, 0.9 ],
[337, 1, 106.954889, 21.390978, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[338, 1, 290.290768, 58.058154, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[339, 1, 179.540825, 35.908165, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[340, 1, 151.798139, 30.359628, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[341, 1, 137.22855, 27.44571, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[342, 1, 238.04638, 47.609276, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[343, 1, 130.595715, 26.119143, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[344, 1, 327.434737, 65.486947, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[345, 1, 358.036991, 71.607398, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[346, 1, 355.439454, 71.087891, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[347, 1, 124.303346, 24.860669, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[348, 1, 324.937134, 64.987427, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[349, 1, 0, 0, 0, 0, 0, 1.000619, 0, 380.0, 0, 1.1, 0.9 ],
[350, 1, 170.466763, 34.093353, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[351, 1, 0, 0, 0, 0, 0, 1.000448, 0, 380.0, 0, 1.1, 0.9 ],
[352, 1, 1128.369672, 225.673934, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[353, 1, 3.392256, 0.678451, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[354, 1, 23.046695, 4.609339, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[355, 1, 0.0, 0.0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[356, 1, 0.0, 0.0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[357, 1, 0.057771, 0.011554, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[358, 1, 0, 0, 0, 0, 0, 1.00101, 0, 380.0, 0, 1.1, 0.9 ],
[359, 1, 3.373032, 0.674606, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[360, 1, 0, 0, 0, 0, 0, 1.000695, 0, 380.0, 0, 1.1, 0.9 ],
[361, 1, 86.32971, 17.265942, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[362, 1, 246.084353, 49.216871, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[363, 1, 362.315919, 72.463184, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[364, 1, 85.483453, 17.096691, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[365, 1, 76.725938, 15.345188, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[366, 1, 152.070565, 30.414113, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[367, 1, 73.504594, 14.700919, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[368, 1, 36.194871, 7.238974, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[369, 1, 29.742539, 5.948508, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[370, 1, 87.562886, 17.512577, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[371, 1, 440.577889, 88.115578, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[372, 1, 255.497448, 51.09949, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[373, 1, 172.409863, 34.481973, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[374, 1, 88.408859, 17.681772, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[375, 1, 290.011753, 58.002351, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[376, 1, 318.088273, 63.617655, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[377, 1, 227.619055, 45.523811, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[378, 1, 227.180935, 45.436187, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[379, 1, 78.299538, 15.659908, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[380, 1, 0, 0, 0, 0, 0, 1.001511, 0, 380.0, 0, 1.1, 0.9 ],
[381, 1, 261.838428, 52.367686, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[382, 1, 0, 0, 0, 0, 0, 1.000132, 0, 380.0, 0, 1.1, 0.9 ],
[383, 1, 0, 0, 0, 0, 0, 0.999174, 0, 380.0, 0, 1.1, 0.9 ],
[384, 1, 92.396277, 18.479255, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[385, 1, 116.622235, 23.324447, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[386, 1, 93.70247, 18.740494, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[387, 1, 190.828907, 38.165781, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[388, 1, 1024.748438, 204.949688, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[389, 1, 0, 0, 0, 0, 0, 0.999909, 0, 380.0, 0, 1.1, 0.9 ],
[390, 1, 84.611082, 16.922216, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[391, 1, 96.379238, 19.275848, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[392, 1, 184.950789, 36.990158, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[393, 1, 230.968932, 46.193786, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[394, 1, 83.072884, 16.614577, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[395, 1, 115.133885, 23.026777, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[396, 1, 81.548152, 16.30963, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[397, 1, 653.926355, 130.785271, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[398, 1, 283.22963, 56.645926, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[399, 1, 120.676451, 24.13529, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[400, 1, 64.294391, 12.858878, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[401, 1, 0, 0, 0, 0, 0, 1.00065, 0, 380.0, 0, 1.1, 0.9 ],
[402, 1, 0, 0, 0, 0, 0, 1.000415, 0, 380.0, 0, 1.1, 0.9 ],
[403, 1, 31.92366, 6.384732, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[404, 1, 112.469031, 22.493806, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[405, 1, 847.905301, 169.58106, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[406, 1, 64.243488, 12.848698, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[407, 1, 127.171393, 25.434279, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[408, 1, 367.708353, 73.541671, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[409, 1, 0, 0, 0, 0, 0, 0.999953, 0, 380.0, 0, 1.1, 0.9 ],
[410, 1, 47.607164, 9.521433, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[411, 1, 45.014524, 9.002905, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[412, 1, 3.161778, 0.632356, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[413, 1, 157.841641, 31.568328, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[414, 1, 13.402463, 2.680493, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[415, 1, 0, 0, 0, 0, 0, 1.000299, 0, 380.0, 0, 1.1, 0.9 ],
[416, 1, 190.865174, 38.173035, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[417, 1, 7.46819, 1.493638, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[418, 1, 155.632582, 31.126516, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[419, 1, 83.184509, 16.636902, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[420, 1, 83.749896, 16.749979, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[421, 1, 120.63959, 24.127918, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[422, 1, 88.384607, 17.676921, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[423, 1, 185.627205, 37.125441, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[424, 1, 13.383243, 2.676649, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[425, 1, 109.910195, 21.982039, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[426, 1, 9.106398, 1.82128, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[427, 1, 76.530318, 15.306064, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[428, 1, 34.313819, 6.862764, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[429, 1, 387.223309, 77.444662, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[430, 1, 206.260538, 41.252108, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[431, 1, 137.929589, 27.585918, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[432, 1, 161.231229, 32.246246, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[433, 1, 82.417106, 16.483421, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[434, 1, 42.893876, 8.578775, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[435, 1, 171.548501, 34.3097, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[436, 1, 91.586866, 18.317373, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[437, 1, 20.857948, 4.17159, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[438, 1, 55.97702, 11.195404, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[439, 1, 104.221976, 20.844395, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[440, 1, 88.07822, 17.615644, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[441, 1, 67.523756, 13.504751, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[442, 1, 89.356787, 17.871357, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[443, 1, 193.733926, 38.746785, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[444, 1, 0, 0, 0, 0, 0, 0.999997, 0, 380.0, 0, 1.1, 0.9 ],
[445, 1, 88.030457, 17.606091, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[446, 1, 40.818933, 8.163787, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[447, 1, 77.604768, 15.520954, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[448, 1, 57.031624, 11.406325, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[449, 1, 287.572758, 57.514552, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[450, 1, 175.980805, 35.196161, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[451, 1, 75.197466, 15.039493, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[452, 1, 0, 0, 0, 0, 0, 0.999998, 0, 380.0, 0, 1.1, 0.9 ],
[453, 1, 50.396892, 10.079378, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[454, 1, 35.160196, 7.032039, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[455, 1, 57.325741, 11.465148, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[456, 1, 57.325741, 11.465148, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[457, 1, 175.803671, 35.160734, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[458, 1, 167.211459, 33.442292, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[459, 1, 203.502566, 40.700513, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[460, 1, 267.4443, 53.48886, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[461, 1, 278.200065, 55.640013, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[462, 1, 85.102842, 17.020568, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[463, 1, 43.607229, 8.721446, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[464, 1, 43.65994, 8.731988, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[465, 1, 70.522736, 14.104547, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[466, 1, 57.25554, 11.451108, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[467, 1, 52.837383, 10.567477, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[468, 1, 86.632423, 17.326485, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[469, 1, 53.684377, 10.736875, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[470, 1, 136.713504, 27.342701, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[471, 1, 134.606773, 26.921355, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[472, 1, 47.081392, 9.416278, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[473, 1, 86.452662, 17.290532, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[474, 1, 44.651896, 8.930379, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[475, 1, 43.819067, 8.763813, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[476, 1, 49.522755, 9.904551, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[477, 1, 79.919015, 15.983803, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[478, 1, 100.392849, 20.07857, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[479, 1, 181.934139, 36.386828, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[480, 1, 79.74503, 15.949006, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[481, 1, 69.254275, 13.850855, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[482, 1, 78.63525, 15.72705, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[483, 1, 66.873517, 13.374703, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[484, 1, 52.425584, 10.485117, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[485, 1, 78.309948, 15.66199, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[486, 1, 720.413269, 144.082654, 0, 0, 0, 0.999497, 0, 220.0, 0, 1.1, 0.9 ],
[487, 1, 182.549392, 36.509878, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[488, 1, 526.007446, 105.201489, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[489, 1, 138.443663, 27.688733, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[490, 1, 43.078505, 8.615701, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[491, 1, 59.233497, 11.846699, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[492, 1, 92.369333, 18.473867, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[493, 1, 119.053014, 23.810603, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[494, 1, 162.712809, 32.542562, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[495, 1, 128.084062, 25.616812, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[496, 1, 9.072408, 1.814482, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[497, 1, 1134.501766, 226.900353, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[498, 1, 53.20711, 10.641422, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[499, 1, 74.268408, 14.853682, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[500, 1, 40.66108, 8.132216, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[501, 1, 68.791536, 13.758307, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[502, 1, 271.505947, 54.301189, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[503, 1, 83.15168, 16.630336, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[504, 1, 54.451626, 10.890325, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[505, 1, 386.21318, 77.242636, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[506, 1, 121.227565, 24.245513, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[507, 1, 115.313069, 23.062614, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[508, 1, 167.639964, 33.527993, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[509, 1, 220.916222, 44.183244, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[510, 1, 139.565975, 27.913195, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[511, 1, 121.74417, 24.348834, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[512, 1, 80.419541, 16.083908, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[513, 1, 44.302585, 8.860517, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[514, 1, 110.264847, 22.052969, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[515, 1, 98.362795, 19.672559, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[516, 1, 110.044821, 22.008964, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[517, 1, 51.690687, 10.338137, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[518, 1, 291.125222, 58.225044, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[519, 1, 28.652052, 5.73041, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[520, 1, 115.679424, 23.135885, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[521, 1, 104.497803, 20.899561, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[522, 1, 89.471865, 17.894373, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[523, 1, 48.161687, 9.632337, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[524, 1, 139.788875, 27.957775, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[525, 1, 166.535898, 33.30718, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[526, 1, 50.490552, 10.09811, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[527, 1, 55.435078, 11.087016, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[528, 1, 120.992242, 24.198448, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[529, 1, 155.094137, 31.018827, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[530, 1, 65.722561, 13.144512, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[531, 1, 66.82248, 13.364496, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[532, 1, 64.137932, 12.827586, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[533, 1, 57.475326, 11.495065, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[534, 1, 158.549115, 31.709823, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[535, 1, 198.493317, 39.698663, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[536, 1, 156.455509, 31.291102, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[537, 1, 52.046301, 10.40926, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[538, 1, 38.906263, 7.781253, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[539, 1, 41.281938, 8.256388, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[540, 1, 37.172572, 7.434514, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[541, 1, 96.01996, 19.203992, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[542, 1, 131.901746, 26.380349, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[543, 1, 72.044085, 14.408817, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[544, 1, 134.183119, 26.836624, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[545, 1, 288.918262, 57.783652, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[546, 1, 144.810196, 28.962039, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[547, 1, 187.176694, 37.435339, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[548, 1, 60.58987, 12.117974, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[549, 1, 51.809519, 10.361904, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[550, 1, 42.751664, 8.550333, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[551, 1, 41.211573, 8.242315, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[552, 1, 204.65203, 40.930406, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[553, 1, 1.415875, 0.283175, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[554, 1, 207.333872, 41.466774, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[555, 1, 78.9965, 15.7993, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[556, 1, 122.210215, 24.442043, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[557, 1, 259.652741, 51.930548, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[558, 1, 153.106496, 30.621299, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[559, 1, 81.941122, 16.388224, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[560, 1, 128.011418, 25.602284, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[561, 1, 70.197725, 14.039545, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[562, 1, 191.774925, 38.354985, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[563, 1, 134.833402, 26.96668, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[564, 1, 266.228926, 53.245785, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[565, 1, 200.882917, 40.176583, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[566, 1, 0.322661, 0.064532, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[567, 1, 326.54419, 65.308838, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[568, 1, 301.974369, 60.394874, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[569, 1, 212.471287, 42.494257, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[570, 1, 331.705939, 66.341188, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[571, 1, 244.227154, 48.845431, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[572, 1, 430.775923, 86.155185, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[573, 1, 125.393223, 25.078645, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[574, 1, 238.921975, 47.784395, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[575, 1, 4.489772, 0.897954, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[576, 1, 290.527519, 58.105504, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[577, 1, 320.276302, 64.05526, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[578, 1, 305.789089, 61.157818, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[579, 1, 111.560205, 22.312041, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[580, 1, 23.225175, 4.645035, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[581, 1, 0.133454, 0.026691, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[582, 1, 84.028801, 16.80576, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[583, 1, 96.377946, 19.275589, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[584, 1, 55.29705, 11.05941, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[585, 1, 96.002482, 19.200496, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ]
])
ppc["gen"] = array([
[586, 272.0, 0, 9999, -9999, 1.0, 100, 1, 272.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[589, 63.1, 0, 9999, -9999, 1.0, 100, 1, 63.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[590, 38.0, 0, 9999, -9999, 1.0, 100, 1, 38.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[593, 11.1, 0, 9999, -9999, 1.0, 100, 1, 11.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[594, 19.0, 0, 9999, -9999, 1.0, 100, 1, 19.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[595, 1053.911309, 0, 9999, -9999, 1.0, 100, 1, 4730.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[597, 95.0, 0, 9999, -9999, 1.0, 100, 1, 95.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[598, 12.0, 0, 9999, -9999, 1.0, 100, 1, 12.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[599, 9.3, 0, 9999, -9999, 1.0, 100, 1, 9.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[600, 16.9, 0, 9999, -9999, 1.0, 100, 1, 16.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[601, 61.5, 0, 9999, -9999, 1.0, 100, 1, 61.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[602, 24.6, 0, 9999, -9999, 1.0, 100, 1, 24.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[603, 737.399503, 0, 9999, -9999, 1.0, 100, 1, 3455.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[607, 1800.0, 0, 9999, -9999, 1.0, 100, 1, 1800.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[608, 24.0, 0, 9999, -9999, 1.0, 100, 1, 24.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[609, 36.4, 0, 9999, -9999, 1.0, 100, 1, 36.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[610, 61.5, 0, 9999, -9999, 1.0, 100, 1, 61.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[612, 30.0, 0, 9999, -9999, 1.0, 100, 1, 30.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[613, 85.0, 0, 9999, -9999, 1.0, 100, 1, 85.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[614, 30.0, 0, 9999, -9999, 1.0, 100, 1, 30.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[616, 29.0, 0, 9999, -9999, 1.0, 100, 1, 29.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[617, 137.0, 0, 9999, -9999, 1.0, 100, 1, 137.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[618, 33.4, 0, 9999, -9999, 1.0, 100, 1, 33.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[619, 118.0, 0, 9999, -9999, 1.0, 100, 1, 118.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[621, 765.0, 0, 9999, -9999, 1.0, 100, 1, 765.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[623, 760.0, 0, 9999, -9999, 1.0, 100, 1, 760.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[624, 27.0, 0, 9999, -9999, 1.0, 100, 1, 27.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[628, 449.0, 0, 9999, -9999, 1.0, 100, 1, 449.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[629, 75.3, 0, 9999, -9999, 1.0, 100, 1, 75.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[631, 79.8, 0, 9999, -9999, 1.0, 100, 1, 79.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[632, 45.1, 0, 9999, -9999, 1.0, 100, 1, 45.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[637, 53.7, 0, 9999, -9999, 1.0, 100, 1, 53.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[638, 128.7, 0, 9999, -9999, 1.0, 100, 1, 128.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[639, 15.8, 0, 9999, -9999, 1.0, 100, 1, 15.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[640, 12.0, 0, 9999, -9999, 1.0, 100, 1, 12.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[641, 12.6, 0, 9999, -9999, 1.0, 100, 1, 12.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[642, 28.9, 0, 9999, -9999, 1.0, 100, 1, 28.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[643, 857.0, 0, 9999, -9999, 1.0, 100, 1, 857.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[646, 103.0, 0, 9999, -9999, 1.0, 100, 1, 103.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[647, 14.0, 0, 9999, -9999, 1.0, 100, 1, 14.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[650, 1324.5, 0, 9999, -9999, 1.0, 100, 1, 1324.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[652, 46.9, 0, 9999, -9999, 1.0, 100, 1, 46.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[655, 61.5, 0, 9999, -9999, 1.0, 100, 1, 61.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[657, 38.0, 0, 9999, -9999, 1.0, 100, 1, 38.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[658, 95.0, 0, 9999, -9999, 1.0, 100, 1, 95.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[661, 32.7, 0, 9999, -9999, 1.0, 100, 1, 32.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[662, 9.2, 0, 9999, -9999, 1.0, 100, 1, 9.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[663, 15.0, 0, 9999, -9999, 1.0, 100, 1, 15.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[666, 28.9, 0, 9999, -9999, 1.0, 100, 1, 28.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[668, 766.0, 0, 9999, -9999, 1.0, 100, 1, 766.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[670, 24.0, 0, 9999, -9999, 1.0, 100, 1, 24.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[672, 33.1, 0, 9999, -9999, 1.0, 100, 1, 33.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[675, 10.6, 0, 9999, -9999, 1.0, 100, 1, 10.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[676, 370.0, 0, 9999, -9999, 1.0, 100, 1, 370.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[677, 13.4, 0, 9999, -9999, 1.0, 100, 1, 13.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[678, 1017.0, 0, 9999, -9999, 1.0, 100, 1, 1017.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[679, 548.435805, 0, 9999, -9999, 1.0, 100, 1, 695.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[681, 40.1, 0, 9999, -9999, 1.0, 100, 1, 40.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[683, 27.5, 0, 9999, -9999, 1.0, 100, 1, 27.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[687, 1329.0, 0, 9999, -9999, 1.0, 100, 1, 1329.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[689, 310.0, 0, 9999, -9999, 1.0, 100, 1, 310.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[691, 26.0, 0, 9999, -9999, 1.0, 100, 1, 26.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[693, 194.0, 0, 9999, -9999, 1.0, 100, 1, 194.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[694, 16.4, 0, 9999, -9999, 1.0, 100, 1, 16.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[695, 14.7, 0, 9999, -9999, 1.0, 100, 1, 14.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[696, 721.0, 0, 9999, -9999, 1.0, 100, 1, 721.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[697, 11.6, 0, 9999, -9999, 1.0, 100, 1, 11.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[698, 24.0, 0, 9999, -9999, 1.0, 100, 1, 24.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[699, 104.6, 0, 9999, -9999, 1.0, 100, 1, 104.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[700, 27.0, 0, 9999, -9999, 1.0, 100, 1, 27.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[701, 47.2, 0, 9999, -9999, 1.0, 100, 1, 47.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[702, 73.4, 0, 9999, -9999, 1.0, 100, 1, 73.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[704, 508.0, 0, 9999, -9999, 1.0, 100, 1, 508.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[705, 17.0, 0, 9999, -9999, 1.0, 100, 1, 17.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[707, 34.0, 0, 9999, -9999, 1.0, 100, 1, 34.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[708, 7.8, 0, 9999, -9999, 1.0, 100, 1, 7.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[711, 100.393639, 0, 9999, -9999, 1.0, 100, 1, 176.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[713, 13.4, 0, 9999, -9999, 1.0, 100, 1, 13.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[714, 15.0, 0, 9999, -9999, 1.0, 100, 1, 15.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[716, 0.1, 0, 9999, -9999, 1.0, 100, 1, 0.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[717, 11.0, 0, 9999, -9999, 1.0, 100, 1, 11.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[719, 1356.941759, 0, 9999, -9999, 1.0, 100, 1, 1958.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[721, 4.0, 0, 9999, -9999, 1.0, 100, 1, 4.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[722, 20.7, 0, 9999, -9999, 1.0, 100, 1, 20.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[723, 19.7, 0, 9999, -9999, 1.0, 100, 1, 19.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[724, 12.1, 0, 9999, -9999, 1.0, 100, 1, 12.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[725, 800.0, 0, 9999, -9999, 1.0, 100, 1, 800.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[726, 126.0, 0, 9999, -9999, 1.0, 100, 1, 126.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[727, 61.5, 0, 9999, -9999, 1.0, 100, 1, 61.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[728, 510.0, 0, 9999, -9999, 1.0, 100, 1, 510.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[730, 633.2, 0, 9999, -9999, 1.0, 100, 1, 633.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[731, 729.355955, 0, 9999, -9999, 1.0, 100, 1, 895.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[732, 14.6, 0, 9999, -9999, 1.0, 100, 1, 14.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[733, 396.6, 0, 9999, -9999, 1.0, 100, 1, 396.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[735, 84.8, 0, 9999, -9999, 1.0, 100, 1, 84.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[736, 32.0, 0, 9999, -9999, 1.0, 100, 1, 32.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[737, 28.0, 0, 9999, -9999, 1.0, 100, 1, 28.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[738, 138.5, 0, 9999, -9999, 1.0, 100, 1, 138.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[739, 59.9, 0, 9999, -9999, 1.0, 100, 1, 59.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[741, 214.0, 0, 9999, -9999, 1.0, 100, 1, 214.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[742, 9.0, 0, 9999, -9999, 1.0, 100, 1, 9.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[743, 233.320633, 0, 9999, -9999, 1.0, 100, 1, 1410.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[745, 42.0, 0, 9999, -9999, 1.0, 100, 1, 42.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[746, 100.0, 0, 9999, -9999, 1.0, 100, 1, 100.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[747, 12.5, 0, 9999, -9999, 1.0, 100, 1, 12.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[748, 110.0, 0, 9999, -9999, 1.0, 100, 1, 110.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[749, 16.0, 0, 9999, -9999, 1.0, 100, 1, 16.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[750, 90.8, 0, 9999, -9999, 1.0, 100, 1, 90.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[753, 104.904813, 0, 9999, -9999, 1.0, 100, 1, 311.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[758, 18.5, 0, 9999, -9999, 1.0, 100, 1, 18.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[760, 337.568547, 0, 9999, -9999, 1.0, 100, 1, 794.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[761, 15.7, 0, 9999, -9999, 1.0, 100, 1, 15.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[762, 1105.0, 0, 9999, -9999, 1.0, 100, 1, 1105.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[763, 20.3, 0, 9999, -9999, 1.0, 100, 1, 20.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[765, 59.0, 0, 9999, -9999, 1.0, 100, 1, 59.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[767, 11.2, 0, 9999, -9999, 1.0, 100, 1, 11.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[769, 43.3, 0, 9999, -9999, 1.0, 100, 1, 43.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[771, 690.0, 0, 9999, -9999, 1.0, 100, 1, 690.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[772, 18.8, 0, 9999, -9999, 1.0, 100, 1, 18.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[774, 33.5, 0, 9999, -9999, 1.0, 100, 1, 33.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[775, 35.331039, 0, 9999, -9999, 1.0, 100, 1, 128.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[776, 56.0, 0, 9999, -9999, 1.0, 100, 1, 56.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[777, 79.0, 0, 9999, -9999, 1.0, 100, 1, 79.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[778, 14.7, 0, 9999, -9999, 1.0, 100, 1, 14.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[781, 985.008953, 0, 9999, -9999, 1.0, 100, 1, 1310.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[784, 812.140567, 0, 9999, -9999, 1.0, 100, 1, 1275.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[785, 3.0, 0, 9999, -9999, 1.0, 100, 1, 3.0, 0.0, 0, 0, 0, 0, 0, 0, 0, | |
<gh_stars>0
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Some portions of this file Copyright (c) 2017, Maxpoint
# and licensed under the BSD license.
#
from bigdl.util.common import *
from zoo.common.utils import callZooFunc
from zoo.util.utils import set_python_home
import warnings
import multiprocessing
import os
import threading
import sys
def init_spark_on_local(cores=2, conf=None, python_location=None, spark_log_level="WARN",
                        redirect_spark_log=True):
    """Create a SparkContext with Analytics Zoo configurations on the local machine.

    :param cores: The number of cores to use for the local SparkContext. Default to be 2.
    :param conf: Extra Spark configuration, as a dict. Default to be None.
    :param python_location: Path of the Python executable to use; if None the
           runner picks one (presumably the current interpreter — confirm in SparkRunner).
    :param spark_log_level: The log level for Spark. Default to be "WARN".
    :param redirect_spark_log: Whether to redirect the Spark log to a local file.
           Default to be True.

    :return: An instance of SparkContext.
    """
    from zoo.util.spark import SparkRunner
    runner = SparkRunner(spark_log_level=spark_log_level,
                         redirect_spark_log=redirect_spark_log)
    # Align PYTHONHOME before the JVM/executors start.
    set_python_home()
    return runner.init_spark_on_local(cores=cores, conf=conf,
                                      python_location=python_location)
def init_spark_on_yarn(hadoop_conf,
                       conda_name,
                       num_executors,
                       executor_cores,
                       executor_memory="2g",
                       driver_cores=4,
                       driver_memory="1g",
                       extra_executor_memory_for_ray=None,
                       extra_python_lib=None,
                       penv_archive=None,
                       additional_archive=None,
                       hadoop_user_name="root",
                       spark_yarn_archive=None,
                       spark_log_level="WARN",
                       redirect_spark_log=True,
                       jars=None,
                       conf=None):
    """Create a SparkContext with Analytics Zoo configurations on YARN.

    All arguments are forwarded to ``zoo.util.spark.SparkRunner.init_spark_on_yarn``.

    :param hadoop_conf: Path to the Hadoop/YARN configuration folder.
    :param conda_name: Name of the conda environment to ship to executors
           (presumably packed into an archive — see SparkRunner for details).
    :param num_executors: The number of Spark executors.
    :param executor_cores: The number of cores per executor.
    :param executor_memory: Memory per executor. Default to be "2g".
    :param driver_cores: The number of cores for the driver. Default to be 4.
    :param driver_memory: Memory for the driver. Default to be "1g".
    :param extra_executor_memory_for_ray: Extra executor memory reserved for Ray.
           Default to be None.
    :param extra_python_lib: Extra Python files/archives to distribute. Default to be None.
    :param penv_archive: A pre-packed python-env archive to use instead of packing
           ``conda_name``. Default to be None.
    :param additional_archive: Additional archive(s) to upload. Default to be None.
    :param hadoop_user_name: User name for the YARN application. Default to be "root".
    :param spark_yarn_archive: Archive containing Spark jars for YARN. Default to be None.
    :param spark_log_level: The log level for Spark. Default to be "WARN".
    :param redirect_spark_log: Whether to redirect the Spark log to a local file.
           Default to be True.
    :param jars: Extra jar(s) to include. Default to be None.
    :param conf: Extra Spark configuration, as a dict. Default to be None.

    :return: An instance of SparkContext.
    """
    from zoo.util.spark import SparkRunner
    runner = SparkRunner(spark_log_level=spark_log_level,
                         redirect_spark_log=redirect_spark_log)
    # Align PYTHONHOME before the JVM/executors start.
    set_python_home()
    sc = runner.init_spark_on_yarn(
        hadoop_conf=hadoop_conf,
        conda_name=conda_name,
        num_executors=num_executors,
        executor_cores=executor_cores,
        executor_memory=executor_memory,
        driver_cores=driver_cores,
        driver_memory=driver_memory,
        extra_executor_memory_for_ray=extra_executor_memory_for_ray,
        extra_python_lib=extra_python_lib,
        penv_archive=penv_archive,
        additional_archive=additional_archive,
        hadoop_user_name=hadoop_user_name,
        spark_yarn_archive=spark_yarn_archive,
        jars=jars,
        conf=conf)
    return sc
def init_spark_standalone(num_executors,
                          executor_cores,
                          executor_memory="2g",
                          driver_cores=4,
                          driver_memory="1g",
                          master=None,
                          extra_executor_memory_for_ray=None,
                          extra_python_lib=None,
                          spark_log_level="WARN",
                          redirect_spark_log=True,
                          conf=None,
                          jars=None,
                          python_location=None,
                          enable_numa_binding=False):
    """Create a SparkContext with Analytics Zoo configurations on a Spark standalone cluster.

    All arguments are forwarded to ``zoo.util.spark.SparkRunner.init_spark_standalone``.

    :param num_executors: The number of Spark executors.
    :param executor_cores: The number of cores per executor.
    :param executor_memory: Memory per executor. Default to be "2g".
    :param driver_cores: The number of cores for the driver. Default to be 4.
    :param driver_memory: Memory for the driver. Default to be "1g".
    :param master: URL of an existing standalone master; when None a local
           standalone cluster is presumably started (see stop_spark_standalone).
    :param extra_executor_memory_for_ray: Extra executor memory reserved for Ray.
           Default to be None.
    :param extra_python_lib: Extra Python files/archives to distribute. Default to be None.
    :param spark_log_level: The log level for Spark. Default to be "WARN".
    :param redirect_spark_log: Whether to redirect the Spark log to a local file.
           Default to be True.
    :param conf: Extra Spark configuration, as a dict. Default to be None.
    :param jars: Extra jar(s) to include. Default to be None.
    :param python_location: Path of the Python executable to use. Default to be None.
    :param enable_numa_binding: Whether to enable NUMA binding. Default to be False.

    :return: An instance of SparkContext.
    """
    from zoo.util.spark import SparkRunner
    runner = SparkRunner(spark_log_level=spark_log_level,
                         redirect_spark_log=redirect_spark_log)
    # Align PYTHONHOME before the JVM/executors start.
    set_python_home()
    sc = runner.init_spark_standalone(
        num_executors=num_executors,
        executor_cores=executor_cores,
        executor_memory=executor_memory,
        driver_cores=driver_cores,
        driver_memory=driver_memory,
        master=master,
        extra_executor_memory_for_ray=extra_executor_memory_for_ray,
        extra_python_lib=extra_python_lib,
        conf=conf,
        jars=jars,
        python_location=python_location,
        enable_numa_binding=enable_numa_binding)
    return sc
def init_spark_on_k8s(master,
                      container_image,
                      num_executors,
                      executor_cores,
                      executor_memory="2g",
                      driver_memory="1g",
                      driver_cores=4,
                      extra_executor_memory_for_ray=None,
                      extra_python_lib=None,
                      spark_log_level="WARN",
                      redirect_spark_log=True,
                      jars=None,
                      conf=None,
                      python_location=None):
    """Create a SparkContext with Analytics Zoo configurations on Kubernetes.

    All arguments are forwarded to ``zoo.util.spark.SparkRunner.init_spark_on_k8s``.
    NOTE(review): unlike the local/yarn/standalone variants this does not call
    set_python_home() — confirm whether that is intentional.

    :param master: URL of the Kubernetes master (the k8s API server).
    :param container_image: The container image to use for the executors.
    :param num_executors: The number of Spark executors.
    :param executor_cores: The number of cores per executor.
    :param executor_memory: Memory per executor. Default to be "2g".
    :param driver_memory: Memory for the driver. Default to be "1g".
    :param driver_cores: The number of cores for the driver. Default to be 4.
    :param extra_executor_memory_for_ray: Extra executor memory reserved for Ray.
           Default to be None.
    :param extra_python_lib: Extra Python files/archives to distribute. Default to be None.
    :param spark_log_level: The log level for Spark. Default to be "WARN".
    :param redirect_spark_log: Whether to redirect the Spark log to a local file.
           Default to be True.
    :param jars: Extra jar(s) to include. Default to be None.
    :param conf: Extra Spark configuration, as a dict. Default to be None.
    :param python_location: Path of the Python executable to use. Default to be None.

    :return: An instance of SparkContext.
    """
    from zoo.util.spark import SparkRunner
    runner = SparkRunner(spark_log_level=spark_log_level,
                         redirect_spark_log=redirect_spark_log)
    sc = runner.init_spark_on_k8s(
        master=master,
        container_image=container_image,
        num_executors=num_executors,
        executor_cores=executor_cores,
        executor_memory=executor_memory,
        driver_memory=driver_memory,
        driver_cores=driver_cores,
        extra_executor_memory_for_ray=extra_executor_memory_for_ray,
        extra_python_lib=extra_python_lib,
        jars=jars,
        conf=conf,
        python_location=python_location)
    return sc
def stop_spark_standalone():
    """Tear down the Spark standalone cluster that init_spark_standalone launched
    when no external master was specified.
    """
    from zoo.util.spark import SparkRunner as _Runner
    _Runner.stop_spark_standalone()
class ZooContextMeta(type):
    """Metaclass for ZooContext.

    Exposes process-wide Analytics Zoo settings (currently only ``log_output``)
    as properties on the ZooContext class itself, so users write
    ``ZooContext.log_output = True`` without instantiating anything.
    """

    # Backing flag for the log_output property below.
    _log_output = False

    @property
    def log_output(cls):
        """
        Whether to redirect Spark driver JVM's stdout and stderr to the current
        python process. This is useful when running Analytics Zoo in jupyter notebook.
        Default to be False. Needs to be set before initializing SparkContext.
        """
        return cls._log_output

    @log_output.setter
    def log_output(cls, value):
        # Redirection hooks into the JVM launch (see init_nncontext), so it is
        # only effective if configured before any SparkContext exists.
        if SparkContext._active_spark_context is not None:
            raise AttributeError("log_output cannot be set after SparkContext is created."
                                 " Please set it before init_nncontext, init_spark_on_local"
                                 " or init_spark_on_yarn")
        assert isinstance(value, bool), "log_output should either be True or False"
        cls._log_output = value
class ZooContext(metaclass=ZooContextMeta):
    """Holder for process-wide Analytics Zoo context settings.

    Settings such as ``ZooContext.log_output`` are exposed as class-level
    properties via ZooContextMeta and must be configured before a
    SparkContext is created (e.g. before calling init_nncontext).
    """
    pass
# The following function copied from
# https://github.com/Valassis-Digital-Media/spylon-kernel/blob/master/
# spylon_kernel/scala_interpreter.py
def _read_stream(fd, fn):
"""Reads bytes from a file descriptor, utf-8 decodes them, and passes them
to the provided callback function on the next IOLoop tick.
Assumes fd.read will block and should be used in a thread.
"""
while True:
# Specify a max read size so the read doesn't block indefinitely
# Using a value less than the typical default max pipe size
# and greater than a single system page.
buff = fd.read(8192)
if buff:
fn(buff.decode('utf-8'))
def init_nncontext(conf=None, spark_log_level="WARN", redirect_spark_log=True):
    """Get or create a SparkContext initialized with Analytics Zoo/BigDL settings.

    :param conf: An instance of SparkConf, or a string in which case it is used
           as the Spark application name instead of a conf. Default to be None.
    :param spark_log_level: The log level for Spark. Default to be "WARN".
    :param redirect_spark_log: Whether to redirect the Spark/BigDL logs to a
           local file. Default to be True.

    :return: An instance of SparkContext.
    """
    # The following code copied and modified from
    # https://github.com/Valassis-Digital-Media/spylon-kernel/blob/master/
    # spylon_kernel/scala_interpreter.py
    if ZooContext.log_output:
        import subprocess
        import pyspark.java_gateway
        spark_jvm_proc = None

        def Popen(*args, **kwargs):
            """Wraps subprocess.Popen to force stdout and stderr from the child process
            to pipe to this process without buffering.
            """
            nonlocal spark_jvm_proc
            # Override these in kwargs to avoid duplicate value errors
            # Set streams to unbuffered so that we read whatever bytes are available
            # when ready, https://docs.python.org/3.6/library/subprocess.html#popen-constructor
            kwargs['bufsize'] = 0
            # Capture everything from stdout for display in the notebook
            kwargs['stdout'] = subprocess.PIPE
            # Optionally capture stderr, otherwise it'll go to the kernel log
            kwargs['stderr'] = subprocess.PIPE
            spark_jvm_proc = subprocess.Popen(*args, **kwargs)
            return spark_jvm_proc

        # Monkey-patch the gateway so the Spark JVM is launched via our
        # capturing Popen and its output can be mirrored below.
        pyspark.java_gateway.Popen = Popen
    if isinstance(conf, six.string_types):
        # A plain string conf is treated as the application name.
        sc = getOrCreateSparkContext(conf=None, appName=conf)
    else:
        sc = getOrCreateSparkContext(conf=conf)
    sc.setLogLevel(spark_log_level)
    if ZooContext.log_output:
        # Mirror the captured JVM stdout/stderr into this process on
        # daemon threads so they don't block interpreter shutdown.
        if spark_jvm_proc.stdout is not None:
            stdout_reader = threading.Thread(target=_read_stream,
                                             daemon=True,
                                             kwargs=dict(
                                                 fd=spark_jvm_proc.stdout,
                                                 fn=sys.stdout.write))
            stdout_reader.start()
        if spark_jvm_proc.stderr is not None:
            stderr_reader = threading.Thread(target=_read_stream,
                                             daemon=True,
                                             kwargs=dict(
                                                 fd=spark_jvm_proc.stderr,
                                                 fn=sys.stderr.write))
            stderr_reader.start()
    check_version()
    if redirect_spark_log:
        redire_spark_logs()
        show_bigdl_info_logs()
    init_engine()
    set_python_home()
    return sc
def getOrCreateSparkContext(conf=None, appName=None):
"""
Get the current active SparkContext or create a new SparkContext.
:param conf: An instance of SparkConf. | |
memory_mi_b(self) -> Optional[pulumi.Input['EC2FleetMemoryMiBRequestArgs']]:
return pulumi.get(self, "memory_mi_b")
    @memory_mi_b.setter
    def memory_mi_b(self, value: Optional[pulumi.Input['EC2FleetMemoryMiBRequestArgs']]):
        # Store the memory (MiB) instance requirement in Pulumi's value table.
        pulumi.set(self, "memory_mi_b", value)
    @property
    @pulumi.getter(name="networkInterfaceCount")
    def network_interface_count(self) -> Optional[pulumi.Input['EC2FleetNetworkInterfaceCountRequestArgs']]:
        """The network-interface-count requirement, or None; maps to the
        ``networkInterfaceCount`` wire property."""
        return pulumi.get(self, "network_interface_count")

    @network_interface_count.setter
    def network_interface_count(self, value: Optional[pulumi.Input['EC2FleetNetworkInterfaceCountRequestArgs']]):
        pulumi.set(self, "network_interface_count", value)
    @property
    @pulumi.getter(name="onDemandMaxPricePercentageOverLowestPrice")
    def on_demand_max_price_percentage_over_lowest_price(self) -> Optional[pulumi.Input[int]]:
        """Optional integer; maps to the ``onDemandMaxPricePercentageOverLowestPrice``
        wire property of the fleet's instance requirements."""
        return pulumi.get(self, "on_demand_max_price_percentage_over_lowest_price")

    @on_demand_max_price_percentage_over_lowest_price.setter
    def on_demand_max_price_percentage_over_lowest_price(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "on_demand_max_price_percentage_over_lowest_price", value)
    @property
    @pulumi.getter(name="requireHibernateSupport")
    def require_hibernate_support(self) -> Optional[pulumi.Input[bool]]:
        """Optional boolean; maps to the ``requireHibernateSupport`` wire property."""
        return pulumi.get(self, "require_hibernate_support")

    @require_hibernate_support.setter
    def require_hibernate_support(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "require_hibernate_support", value)
    @property
    @pulumi.getter(name="spotMaxPricePercentageOverLowestPrice")
    def spot_max_price_percentage_over_lowest_price(self) -> Optional[pulumi.Input[int]]:
        """Optional integer; maps to the ``spotMaxPricePercentageOverLowestPrice``
        wire property of the fleet's instance requirements."""
        return pulumi.get(self, "spot_max_price_percentage_over_lowest_price")

    @spot_max_price_percentage_over_lowest_price.setter
    def spot_max_price_percentage_over_lowest_price(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "spot_max_price_percentage_over_lowest_price", value)
    @property
    @pulumi.getter(name="totalLocalStorageGB")
    def total_local_storage_gb(self) -> Optional[pulumi.Input['EC2FleetTotalLocalStorageGBRequestArgs']]:
        """The total-local-storage (GB) requirement, or None; maps to the
        ``totalLocalStorageGB`` wire property."""
        return pulumi.get(self, "total_local_storage_gb")

    @total_local_storage_gb.setter
    def total_local_storage_gb(self, value: Optional[pulumi.Input['EC2FleetTotalLocalStorageGBRequestArgs']]):
        pulumi.set(self, "total_local_storage_gb", value)
    @property
    @pulumi.getter(name="vCpuCount")
    def v_cpu_count(self) -> Optional[pulumi.Input['EC2FleetVCpuCountRangeRequestArgs']]:
        """The vCPU-count range requirement, or None; maps to the
        ``vCpuCount`` wire property."""
        return pulumi.get(self, "v_cpu_count")

    @v_cpu_count.setter
    def v_cpu_count(self, value: Optional[pulumi.Input['EC2FleetVCpuCountRangeRequestArgs']]):
        pulumi.set(self, "v_cpu_count", value)
@pulumi.input_type
class EC2FleetMaintenanceStrategiesArgs:
    """Maintenance strategies for an EC2 Fleet; currently only an optional
    capacity-rebalance strategy is modeled."""

    def __init__(__self__, *,
                 capacity_rebalance: Optional[pulumi.Input['EC2FleetCapacityRebalanceArgs']] = None):
        """
        :param capacity_rebalance: Optional capacity-rebalance settings;
               only stored when explicitly provided.
        """
        if capacity_rebalance is not None:
            pulumi.set(__self__, "capacity_rebalance", capacity_rebalance)

    @property
    @pulumi.getter(name="capacityRebalance")
    def capacity_rebalance(self) -> Optional[pulumi.Input['EC2FleetCapacityRebalanceArgs']]:
        """The configured capacity-rebalance strategy, or None if unset."""
        return pulumi.get(self, "capacity_rebalance")

    @capacity_rebalance.setter
    def capacity_rebalance(self, value: Optional[pulumi.Input['EC2FleetCapacityRebalanceArgs']]):
        pulumi.set(self, "capacity_rebalance", value)
@pulumi.input_type
class EC2FleetMemoryGiBPerVCpuRequestArgs:
    """Open [min, max] range (GiB of memory per vCPU) for an EC2 Fleet
    instance-requirements request; either bound may be omitted.
    Auto-generated pulumi input type.
    """
    def __init__(__self__, *,
                 max: Optional[pulumi.Input[float]] = None,
                 min: Optional[pulumi.Input[float]] = None):
        # Register only the bounds actually supplied so unset bounds
        # stay absent from the rendered resource inputs.
        if max is not None:
            pulumi.set(__self__, "max", max)
        if min is not None:
            pulumi.set(__self__, "min", min)

    @property
    @pulumi.getter
    def max(self) -> Optional[pulumi.Input[float]]:
        """Upper bound of the range, if set."""
        return pulumi.get(self, "max")

    @max.setter
    def max(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "max", value)

    @property
    @pulumi.getter
    def min(self) -> Optional[pulumi.Input[float]]:
        """Lower bound of the range, if set."""
        return pulumi.get(self, "min")

    @min.setter
    def min(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "min", value)
@pulumi.input_type
class EC2FleetMemoryMiBRequestArgs:
    """Open [min, max] integer range (MiB of memory) for an EC2 Fleet
    instance-requirements request; either bound may be omitted.
    Auto-generated pulumi input type.
    """
    def __init__(__self__, *,
                 max: Optional[pulumi.Input[int]] = None,
                 min: Optional[pulumi.Input[int]] = None):
        # Register only the bounds actually supplied so unset bounds
        # stay absent from the rendered resource inputs.
        if max is not None:
            pulumi.set(__self__, "max", max)
        if min is not None:
            pulumi.set(__self__, "min", min)

    @property
    @pulumi.getter
    def max(self) -> Optional[pulumi.Input[int]]:
        """Upper bound of the range, if set."""
        return pulumi.get(self, "max")

    @max.setter
    def max(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max", value)

    @property
    @pulumi.getter
    def min(self) -> Optional[pulumi.Input[int]]:
        """Lower bound of the range, if set."""
        return pulumi.get(self, "min")

    @min.setter
    def min(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min", value)
@pulumi.input_type
class EC2FleetNetworkInterfaceCountRequestArgs:
    """Open [min, max] integer range (network-interface count) for an EC2
    Fleet instance-requirements request; either bound may be omitted.
    Auto-generated pulumi input type.
    """
    def __init__(__self__, *,
                 max: Optional[pulumi.Input[int]] = None,
                 min: Optional[pulumi.Input[int]] = None):
        # Register only the bounds actually supplied so unset bounds
        # stay absent from the rendered resource inputs.
        if max is not None:
            pulumi.set(__self__, "max", max)
        if min is not None:
            pulumi.set(__self__, "min", min)

    @property
    @pulumi.getter
    def max(self) -> Optional[pulumi.Input[int]]:
        """Upper bound of the range, if set."""
        return pulumi.get(self, "max")

    @max.setter
    def max(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max", value)

    @property
    @pulumi.getter
    def min(self) -> Optional[pulumi.Input[int]]:
        """Lower bound of the range, if set."""
        return pulumi.get(self, "min")

    @min.setter
    def min(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min", value)
@pulumi.input_type
class EC2FleetOnDemandOptionsRequestArgs:
    """On-Demand purchasing options for an EC2 Fleet request.

    All fields are optional; only those supplied at construction time are
    registered in pulumi's input-property table (``pulumi.set``), so unset
    fields stay absent from the rendered resource inputs. Auto-generated
    pulumi input type.
    """
    def __init__(__self__, *,
                 allocation_strategy: Optional[pulumi.Input[str]] = None,
                 capacity_reservation_options: Optional[pulumi.Input['EC2FleetCapacityReservationOptionsRequestArgs']] = None,
                 max_total_price: Optional[pulumi.Input[str]] = None,
                 min_target_capacity: Optional[pulumi.Input[int]] = None,
                 single_availability_zone: Optional[pulumi.Input[bool]] = None,
                 single_instance_type: Optional[pulumi.Input[bool]] = None):
        if allocation_strategy is not None:
            pulumi.set(__self__, "allocation_strategy", allocation_strategy)
        if capacity_reservation_options is not None:
            pulumi.set(__self__, "capacity_reservation_options", capacity_reservation_options)
        if max_total_price is not None:
            pulumi.set(__self__, "max_total_price", max_total_price)
        if min_target_capacity is not None:
            pulumi.set(__self__, "min_target_capacity", min_target_capacity)
        if single_availability_zone is not None:
            pulumi.set(__self__, "single_availability_zone", single_availability_zone)
        if single_instance_type is not None:
            pulumi.set(__self__, "single_instance_type", single_instance_type)

    # Each property maps the snake_case attribute to its camelCase wire
    # name via ``pulumi.getter(name=...)``; getters/setters delegate to
    # pulumi's property table.
    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "allocation_strategy")

    @allocation_strategy.setter
    def allocation_strategy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "allocation_strategy", value)

    @property
    @pulumi.getter(name="capacityReservationOptions")
    def capacity_reservation_options(self) -> Optional[pulumi.Input['EC2FleetCapacityReservationOptionsRequestArgs']]:
        return pulumi.get(self, "capacity_reservation_options")

    @capacity_reservation_options.setter
    def capacity_reservation_options(self, value: Optional[pulumi.Input['EC2FleetCapacityReservationOptionsRequestArgs']]):
        pulumi.set(self, "capacity_reservation_options", value)

    @property
    @pulumi.getter(name="maxTotalPrice")
    def max_total_price(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "max_total_price")

    @max_total_price.setter
    def max_total_price(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_total_price", value)

    @property
    @pulumi.getter(name="minTargetCapacity")
    def min_target_capacity(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "min_target_capacity")

    @min_target_capacity.setter
    def min_target_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_target_capacity", value)

    @property
    @pulumi.getter(name="singleAvailabilityZone")
    def single_availability_zone(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "single_availability_zone")

    @single_availability_zone.setter
    def single_availability_zone(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "single_availability_zone", value)

    @property
    @pulumi.getter(name="singleInstanceType")
    def single_instance_type(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "single_instance_type")

    @single_instance_type.setter
    def single_instance_type(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "single_instance_type", value)
@pulumi.input_type
class EC2FleetPlacementArgs:
    """Instance placement settings for an EC2 Fleet.

    All fields are optional; only those supplied at construction time are
    registered in pulumi's input-property table (``pulumi.set``), so unset
    fields stay absent from the rendered resource inputs. Auto-generated
    pulumi input type.
    """
    def __init__(__self__, *,
                 affinity: Optional[pulumi.Input[str]] = None,
                 availability_zone: Optional[pulumi.Input[str]] = None,
                 group_name: Optional[pulumi.Input[str]] = None,
                 host_id: Optional[pulumi.Input[str]] = None,
                 host_resource_group_arn: Optional[pulumi.Input[str]] = None,
                 partition_number: Optional[pulumi.Input[int]] = None,
                 spread_domain: Optional[pulumi.Input[str]] = None,
                 tenancy: Optional[pulumi.Input[str]] = None):
        if affinity is not None:
            pulumi.set(__self__, "affinity", affinity)
        if availability_zone is not None:
            pulumi.set(__self__, "availability_zone", availability_zone)
        if group_name is not None:
            pulumi.set(__self__, "group_name", group_name)
        if host_id is not None:
            pulumi.set(__self__, "host_id", host_id)
        if host_resource_group_arn is not None:
            pulumi.set(__self__, "host_resource_group_arn", host_resource_group_arn)
        if partition_number is not None:
            pulumi.set(__self__, "partition_number", partition_number)
        if spread_domain is not None:
            pulumi.set(__self__, "spread_domain", spread_domain)
        if tenancy is not None:
            pulumi.set(__self__, "tenancy", tenancy)

    # Properties delegate to pulumi's property table; camelCase wire names
    # are declared via ``pulumi.getter(name=...)`` where they differ from
    # the snake_case attribute.
    @property
    @pulumi.getter
    def affinity(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "affinity")

    @affinity.setter
    def affinity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "affinity", value)

    @property
    @pulumi.getter(name="availabilityZone")
    def availability_zone(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "availability_zone")

    @availability_zone.setter
    def availability_zone(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "availability_zone", value)

    @property
    @pulumi.getter(name="groupName")
    def group_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "group_name")

    @group_name.setter
    def group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_name", value)

    @property
    @pulumi.getter(name="hostId")
    def host_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "host_id")

    @host_id.setter
    def host_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host_id", value)

    @property
    @pulumi.getter(name="hostResourceGroupArn")
    def host_resource_group_arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "host_resource_group_arn")

    @host_resource_group_arn.setter
    def host_resource_group_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host_resource_group_arn", value)

    @property
    @pulumi.getter(name="partitionNumber")
    def partition_number(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "partition_number")

    @partition_number.setter
    def partition_number(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "partition_number", value)

    @property
    @pulumi.getter(name="spreadDomain")
    def spread_domain(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "spread_domain")

    @spread_domain.setter
    def spread_domain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "spread_domain", value)

    @property
    @pulumi.getter
    def tenancy(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "tenancy")

    @tenancy.setter
    def tenancy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenancy", value)
@pulumi.input_type
class EC2FleetSpotOptionsRequestArgs:
    """Spot purchasing options for an EC2 Fleet request.

    All fields are optional; only those supplied at construction time are
    registered in pulumi's input-property table (``pulumi.set``), so unset
    fields stay absent from the rendered resource inputs. Auto-generated
    pulumi input type.
    """
    def __init__(__self__, *,
                 allocation_strategy: Optional[pulumi.Input['EC2FleetSpotOptionsRequestAllocationStrategy']] = None,
                 instance_interruption_behavior: Optional[pulumi.Input['EC2FleetSpotOptionsRequestInstanceInterruptionBehavior']] = None,
                 instance_pools_to_use_count: Optional[pulumi.Input[int]] = None,
                 maintenance_strategies: Optional[pulumi.Input['EC2FleetMaintenanceStrategiesArgs']] = None,
                 max_total_price: Optional[pulumi.Input[str]] = None,
                 min_target_capacity: Optional[pulumi.Input[int]] = None,
                 single_availability_zone: Optional[pulumi.Input[bool]] = None,
                 single_instance_type: Optional[pulumi.Input[bool]] = None):
        if allocation_strategy is not None:
            pulumi.set(__self__, "allocation_strategy", allocation_strategy)
        if instance_interruption_behavior is not None:
            pulumi.set(__self__, "instance_interruption_behavior", instance_interruption_behavior)
        if instance_pools_to_use_count is not None:
            pulumi.set(__self__, "instance_pools_to_use_count", instance_pools_to_use_count)
        if maintenance_strategies is not None:
            pulumi.set(__self__, "maintenance_strategies", maintenance_strategies)
        if max_total_price is not None:
            pulumi.set(__self__, "max_total_price", max_total_price)
        if min_target_capacity is not None:
            pulumi.set(__self__, "min_target_capacity", min_target_capacity)
        if single_availability_zone is not None:
            pulumi.set(__self__, "single_availability_zone", single_availability_zone)
        if single_instance_type is not None:
            pulumi.set(__self__, "single_instance_type", single_instance_type)

    # Properties delegate to pulumi's property table; camelCase wire names
    # are declared via ``pulumi.getter(name=...)``.
    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> Optional[pulumi.Input['EC2FleetSpotOptionsRequestAllocationStrategy']]:
        return pulumi.get(self, "allocation_strategy")

    @allocation_strategy.setter
    def allocation_strategy(self, value: Optional[pulumi.Input['EC2FleetSpotOptionsRequestAllocationStrategy']]):
        pulumi.set(self, "allocation_strategy", value)

    @property
    @pulumi.getter(name="instanceInterruptionBehavior")
    def instance_interruption_behavior(self) -> Optional[pulumi.Input['EC2FleetSpotOptionsRequestInstanceInterruptionBehavior']]:
        return pulumi.get(self, "instance_interruption_behavior")

    @instance_interruption_behavior.setter
    def instance_interruption_behavior(self, value: Optional[pulumi.Input['EC2FleetSpotOptionsRequestInstanceInterruptionBehavior']]):
        pulumi.set(self, "instance_interruption_behavior", value)

    @property
    @pulumi.getter(name="instancePoolsToUseCount")
    def instance_pools_to_use_count(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "instance_pools_to_use_count")

    @instance_pools_to_use_count.setter
    def instance_pools_to_use_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "instance_pools_to_use_count", value)

    @property
    @pulumi.getter(name="maintenanceStrategies")
    def maintenance_strategies(self) -> Optional[pulumi.Input['EC2FleetMaintenanceStrategiesArgs']]:
        return pulumi.get(self, "maintenance_strategies")

    @maintenance_strategies.setter
    def maintenance_strategies(self, value: Optional[pulumi.Input['EC2FleetMaintenanceStrategiesArgs']]):
        pulumi.set(self, "maintenance_strategies", value)

    @property
    @pulumi.getter(name="maxTotalPrice")
    def max_total_price(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "max_total_price")

    @max_total_price.setter
    def max_total_price(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_total_price", value)

    @property
    @pulumi.getter(name="minTargetCapacity")
    def min_target_capacity(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "min_target_capacity")

    @min_target_capacity.setter
    def min_target_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_target_capacity", value)

    @property
    @pulumi.getter(name="singleAvailabilityZone")
    def single_availability_zone(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "single_availability_zone")

    @single_availability_zone.setter
    def single_availability_zone(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "single_availability_zone", value)

    @property
    @pulumi.getter(name="singleInstanceType")
    def single_instance_type(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "single_instance_type")

    @single_instance_type.setter
    def single_instance_type(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "single_instance_type", value)
@pulumi.input_type
class EC2FleetTagSpecificationArgs:
    """Tags to apply to a resource type created by an EC2 Fleet.
    Both fields are optional; only supplied values are registered.
    Auto-generated pulumi input type.
    """
    def __init__(__self__, *,
                 resource_type: Optional[pulumi.Input['EC2FleetTagSpecificationResourceType']] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetTagArgs']]]] = None):
        if resource_type is not None:
            pulumi.set(__self__, "resource_type", resource_type)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="resourceType")
    def resource_type(self) -> Optional[pulumi.Input['EC2FleetTagSpecificationResourceType']]:
        """Resource type the tags apply to (wire name ``resourceType``)."""
        return pulumi.get(self, "resource_type")

    @resource_type.setter
    def resource_type(self, value: Optional[pulumi.Input['EC2FleetTagSpecificationResourceType']]):
        pulumi.set(self, "resource_type", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetTagArgs']]]]:
        """The list of tags, if any."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EC2FleetTagArgs']]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class EC2FleetTagArgs:
    """A single key/value tag for an EC2 Fleet. Both fields are required
    (no ``None`` guard in the constructor). Auto-generated pulumi input type.
    """
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """The tag key."""
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """The tag value."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class EC2FleetTargetCapacitySpecificationRequestArgs:
    """Target capacity specification for an EC2 Fleet request.

    ``total_target_capacity`` is required; the remaining fields are
    optional and registered only when supplied. Auto-generated pulumi
    input type.
    """
    def __init__(__self__, *,
                 total_target_capacity: pulumi.Input[int],
                 default_target_capacity_type: Optional[pulumi.Input['EC2FleetTargetCapacitySpecificationRequestDefaultTargetCapacityType']] = None,
                 on_demand_target_capacity: Optional[pulumi.Input[int]] = None,
                 spot_target_capacity: Optional[pulumi.Input[int]] = None,
                 target_capacity_unit_type: Optional[pulumi.Input['EC2FleetTargetCapacitySpecificationRequestTargetCapacityUnitType']] = None):
        pulumi.set(__self__, "total_target_capacity", total_target_capacity)
        if default_target_capacity_type is not None:
            pulumi.set(__self__, "default_target_capacity_type", default_target_capacity_type)
        if on_demand_target_capacity is not None:
            pulumi.set(__self__, "on_demand_target_capacity", on_demand_target_capacity)
        if spot_target_capacity is not None:
            pulumi.set(__self__, "spot_target_capacity", spot_target_capacity)
        if target_capacity_unit_type is not None:
            pulumi.set(__self__, "target_capacity_unit_type", target_capacity_unit_type)

    # Properties delegate to pulumi's property table; camelCase wire names
    # are declared via ``pulumi.getter(name=...)``.
    @property
    @pulumi.getter(name="totalTargetCapacity")
    def total_target_capacity(self) -> pulumi.Input[int]:
        return pulumi.get(self, "total_target_capacity")

    @total_target_capacity.setter
    def total_target_capacity(self, value: pulumi.Input[int]):
        pulumi.set(self, "total_target_capacity", value)

    @property
    @pulumi.getter(name="defaultTargetCapacityType")
    def default_target_capacity_type(self) -> Optional[pulumi.Input['EC2FleetTargetCapacitySpecificationRequestDefaultTargetCapacityType']]:
        return pulumi.get(self, "default_target_capacity_type")

    @default_target_capacity_type.setter
    def default_target_capacity_type(self, value: Optional[pulumi.Input['EC2FleetTargetCapacitySpecificationRequestDefaultTargetCapacityType']]):
        pulumi.set(self, "default_target_capacity_type", value)

    @property
    @pulumi.getter(name="onDemandTargetCapacity")
    def on_demand_target_capacity(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "on_demand_target_capacity")

    @on_demand_target_capacity.setter
    def on_demand_target_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "on_demand_target_capacity", value)

    @property
    @pulumi.getter(name="spotTargetCapacity")
    def spot_target_capacity(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "spot_target_capacity")

    @spot_target_capacity.setter
    def spot_target_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "spot_target_capacity", value)

    @property
    @pulumi.getter(name="targetCapacityUnitType")
    def target_capacity_unit_type(self) -> Optional[pulumi.Input['EC2FleetTargetCapacitySpecificationRequestTargetCapacityUnitType']]:
        return pulumi.get(self, "target_capacity_unit_type")

    @target_capacity_unit_type.setter
    def target_capacity_unit_type(self, value: Optional[pulumi.Input['EC2FleetTargetCapacitySpecificationRequestTargetCapacityUnitType']]):
        pulumi.set(self, "target_capacity_unit_type", value)
@pulumi.input_type
class EC2FleetTotalLocalStorageGBRequestArgs:
def __init__(__self__, *,
max: Optional[pulumi.Input[float]] = None,
min: Optional[pulumi.Input[float]] = None):
if max is not None:
pulumi.set(__self__, "max", max)
if min is not None:
pulumi.set(__self__, "min", min)
@property
@pulumi.getter
def max(self) -> Optional[pulumi.Input[float]]:
return pulumi.get(self, "max")
@max.setter
def max(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, | |
# -*- coding: utf-8 -*-
# Copyright (c) 2021 <NAME>, zju-ufhb
# This module is part of the WATex core package, which is released under a
# MIT- licence.
"""
.. Synopsis: Module features collects geo-electrical features computed from
    :class:`watex.core.erp.ERP` and :class:`watex.core.ves.VES` and keeps
    them in a new DataFrame for analysis and modeling.
Created on Mon Jul 5 16:51:14 2021
@author: @Daniel03
"""
from __future__ import print_function
import os
import re
import pandas as pd
import json
import numpy as np
import pandas as pd
import xml.etree.ElementTree as ET
from .erp import ERP_collection
from .ves import VES_collection
from .geology import Geology, Borehole
from ..utils.__init__ import savepath as savePath
from ..utils.decorator import writef
import watex.utils.exceptions as Wex
import watex.utils.gis_tools as gisf
# import watex.utils.tricks as wfunc
import watex.utils.gis_tools as gis
from watex.utils._watexlog import watexlog
__docformat__='restructuredtext'
class GeoFeatures:
"""
Features class. Deals with Electrical Resistivity Profiling (ERP),
Vertical Electrical Sounding (VES), Geological (Geol) data and
Borehole data (Boreh). Sets all feature values of the different
investigation sites. Features class is composed of ::
- `erp` class get from :class:`watex.core.erp.ERP_colection`
- `ves` collected from :class:`watex.core.ves.VES_collection
- `geol` obtained from :class:`watex.core.geol.Geology`
- `boreh ` get from :class:`watex.core.boreh.Borehole`
Arguments:
-----------
*features_fn* :str , Path_like
File to geoelectical features files
*ErpColObjs*: object
Collection object from `erp` survey lines.
*vesObjs*: object,
Collection object from vertical electrical sounding (VES)
curves.
*geoObjs*: object,
Collection object from `geol` class.
see :doc:`watex.core.geol.Geology`
*boreholeObjs*: object
Collection of boreholes of all investigation sites.
Refer to :doc:`watex.core.boreh.Borehole`
:Note:
Be sure not to miss any coordinates files. Indeed,
each selected anomaly should have a borehole performed at
that place for supervised learning. That means each selected
anomaly, referenced by location coordinates and `id` on `erp`, must
have its own `ves`, `geol` and `boreh` data.
...
Holds on others optionals infos in ``kwargs`` arguments:
============ ======================== ===================================
Attributes Type Description
============ ======================== ===================================
df pd.core.DataFrame Container of all features composed
of :attr:`~Features.featureLabels`
site_ids array_like ID of each survey locations.
site_names array_like Survey locations names.
gFname str Filename of `features_fn`.
ErpColObjs obj ERP `erp` class object.
vesObjs obj VES `ves` class object.
geoObjs obj Geology `geol` class object.
borehObjs obj Borehole `boreh` class obj.
============ ======================== ===================================
:Note:
For further details about the class objects, please refer to the class
documentation mentioned above.
:Example:
>>> from watex.core.features import GeoFeatures
>>> featurefn ='data/geo_fdata/BagoueDataset2.xlsx'
>>> featObj = GeoFeatures(features_fn=featurefn)
>>> featObj.site_ids
>>> featObj.site_names
>>> featObj.df
"""
readFeafmt ={
".csv":pd.read_csv,
".xlsx":pd.read_excel,
".json":pd.read_json,
".html":pd.read_json,
".sql" : pd.read_sql
}
featureLabels = [
'id',
'east',
"north",
'power',
"magnitude",
"shape",
"type",
"sfi",
'ohmS',
'lwi',
'geol',
'flow'
]
def __init__(self, features_fn =None, ErpColObjs=None , vesObjs=None,
geoObjs=None, boreholeObjs=None, **kwargs):
self._logging = watexlog.get_watex_logger(self.__class__.__name__)
self.features_fn =features_fn
self.ErpColObjs=ErpColObjs
self.vesObjs=vesObjs
self.geoObjs=geoObjs
self.borehObjs=boreholeObjs
self.gFname= None
for key in list(kwargs.keys()):
setattr(self, key, kwargs[key])
self._readFeatures_()
    @property
    def fn (self):
        """ Control the Feature-file extension provide. Usefull to select
        pd.DataFrame construction."""
        # Extension string (e.g. '.csv') recognised by the ``fn`` setter,
        # or '?' when the extension was not a readable format.
        return self._fn
@fn.setter
def fn(self, features_fn) :
""" Get the Features file and seek for pd.Core methods
construction."""
if not os.path.isfile(features_fn):
raise Wex.WATexError_file_handling(
'No file detected. Could read `{0}`,`{1}`,`{2}`,'
'`{3}` and `{4}`files.'.format(*list(self.readFeafmt.keys())))
self.gFname, exT=os.path.splitext(features_fn)
if exT in self.readFeafmt.keys(): self._fn =exT
else: self._fn ='?'
self._df = self.readFeafmt[exT](features_fn)
self.gFname = os.path.basename(self.gFname)
def _readFeatures_(self, features_fn =None, ErpColObjs=None , vesObjs=None,
geoObjs=None, boreholeObjs=None, **kws):
"""
Reading class and attributes populating. Please refer to
:doc:`~.core.geofeatures.Features` for arguments details.
"""
fimp, objsCoun =0,0
for nname, vval in zip(['features_fn' , 'ErpColObjs' , 'vesObjs',
'geoObjs', 'borehObjs'],[features_fn , ErpColObjs , vesObjs,
geoObjs, boreholeObjs]):
if vval is not None:
setattr(self,nname, vval )
if nname !='features_fn':
objsCoun +=1
# call object
for fObjs in ['ErpColObjs' , 'vesObjs',
'geoObjs', 'borehObjs']:
if getattr(self, fObjs) is None :
fimp =1
if self.features_fn is None and fimp ==1:
raise Wex.WATexError_geoFeatures(
'Features file is not given. Please provide specific'
' objects from`erp`, `ves`, `geology` and `borehole` data'
'Call each specific collection class to build each'
' collection features.')
elif self.features_fn is not None :
self.fn = self.features_fn
self.sanitize_fdataset()
try :
self.site_names =np.copy(self.df['id'].to_numpy())
except KeyError:
# force to set id
self.df=self.df.rename(columns = {'name':'id'})
self.site_names =np.copy(self.df['id'].to_numpy())
# self._index_col_id ='id'
if self.utm_flag ==0 :
# convert lat and lon to utm
self._easting = np.zeros_like(self.df['lat'].to_numpy())
self._northing =np.zeros_like (self._easting)
for ii in range(len(self._northing)):
try :
self.utm_zone, utm_easting, utm_northing = gisf.ll_to_utm(
reference_ellipsoid=23,
lat=self.df['lon'].to_numpy()[ii],
lon = self.df['lat'].to_numpy()[ii])
except :
utm_easting, utm_northing, \
self.utm_zone= gisf.project_point_ll2utm(
lat=self.df['lat'].to_numpy()[ii],
lon = self.df['lon'].to_numpy()[ii])
self._easting[ii] = utm_easting
self._northing [ii] = utm_northing
self.df.insert(loc=1, column ='east', value = self._easting)
self.df.insert(loc=2, column='north', value=self._northing)
try :
del self.df['lat']
del self.df['lon']
except :
try :
self.df = self.df.drop(['lat'], axis=1)
self.df = self.df.drop(['lon'], axis=1)
except :
try:
self.df.pop('lat')
self.df.pop('lon')
except:
self._logging.debug(
'No way to remove `lat` and `lon` in features '
"dataFrame. It seems there is no `lat` & `lon`"
" pd.series in your dataFrame.")
#Keep location names
self.df['id']=np.array(['e{0}'.format(id(name.lower()))
for name in self.df['id']])
self.id =np.copy(self.df['id'].to_numpy())
self.id_ = np.array(['e{0:07}'.format(ii+1)
for ii in range(len(self.df['id']))])
# rebuild the dataframe from main features
self.df = pd.concat({
featkey: self.df[featkey]
for featkey in self.featureLabels}, axis =1)
if objsCoun ==4 :
# mean all object is provided corrected
# self.ErpColObjs.fnames
#initit df
temlen= [len(obj) for obj in [self.ErpColObjs.erpdf['id'],
self.borehObjs.borehdf['id'],
self.geoObjs.geoldf['id'],
self.vesObjs.vesdf['id'] ]]
if all(temlen) is False:
raise Wex.WATexError_geoFeatures(
'`ERP`, `VES`, `Geology` and `Borehole` Features must '
'have the same length. You give <{0},{1},{2}, and '
'{3} respectively.'.format(*temlen))
self.df =pd.DataFrame(data = np.array((len(self.ErpColObjs.fnames),
len(self.featureLabels))),
columns = self.featureLabels)
self.id_= self.controlObjId(
erpObjID=self.ErpColObjs.erpdf['id'],
boreObjID=self.borehObjs.borehdf['id'],
geolObjID=self.geoObjs.geoldf['id'],
vesObjsID= self.vesObjs.vesdf['id']
)
self.df =self.merge(self.ErpColObjs.erpdf, #.drop(['id'], axis=1),
self.vesObjs.vesdf['ohmS'],
self.geoObjs.geoldf['geol'],
self.borehObjs.borehdf[['lwi', 'flow']],
right_index=True,
left_index=True)
#self.df.insert(loc=0, column ='id', value = newID)
self.id =self.ErpColObjs.erpdf['id'].to_numpy()
self.df.set_index('id', inplace =True)
self.df =self.df.astype({'east':np.float,
'north':np.float,
'power':np.float,
'magnitude':np.float,
'sfi':np.float,
'ohmS': np.float,
'lwi':np.float,
'flow':np.float
})
# populate site names attributes
for attr_ in self.site_names:
if not hasattr(self, attr_):
setattr(self, attr_, ID()._findFeaturePerSite_(
_givenATTR=attr_,
sns=self.site_names,
df_=self.df,
id_=self.id,
id_cache= self.id_))
    def sanitize_fdataset(self):
        """ Sanitize the feature dataset. Recognize the columns provided
        by the user and reset them according to the canonical feature
        labels in :attr:`~Features.featureLabels`. Also sets
        ``self.utm_flag`` to 1 when an easting column is detected,
        which skips the lat/lon-to-UTM conversion in ``_readFeatures_``."""
        self.utm_flag =0
        # Candidate header spellings (OptsList) paired one-to-one with the
        # canonical feature names (paramsList) they normalise to.
        OptsList, paramsList =[['bore', 'for'],
                               ['x','east'],
                               ['y', 'north'],
                               ['pow', 'puiss', 'pa'],
                               ['magn', 'amp', 'ma'],
                               ['shape', 'form'],
                               ['type'],
                               ['sfi', 'if'],
                               ['lat'],
                               ['lon'],
                               ['lwi', 'wi'],
                               ['ohms', 'surf'],
                               ['geol'],
                               ['flow', 'deb']
                               ], ['id',
                                   'east',
                                   'north',
                                   'power',
                                   'magnitude',
                                   'shape',
                                   'type',
                                   'sfi',
                                   'lat',
                                   'lon',
                                   'lwi',
                                   'ohmS',
                                   'geol',
                                   'flow'
                                   ]
        def getandReplace(optionsList, params, df):
            """
            Function to get params and replace them with the canonical
            feature params.

            :param optionsList:
                User options to qualify the feature headlines.
            :type optionsList: list
            :param params: Exhaustive parameter names.
            :type params: list
            :param df: pd.DataFrame collected from `features_fn`.
            :return: sanitized columns
            :rtype: list
            """
            columns = [c.lower() for c in df.columns]
            for ii, celemnt in enumerate(columns):
                for listOption, param in zip(optionsList, params):
                    for option in listOption:
                        # Special case: French headers containing 'eau'
                        # (water) map to the water-index column 'lwi'.
                        if param =='lwi':
                            if celemnt.find('eau')>=0 :
                                columns[ii]=param
                                break
                        # NOTE(review): the trailing '+' in this pattern
                        # applies only to the option's LAST character
                        # (e.g. '^east+'), so this is effectively a prefix
                        # test; a startswith check may have been intended —
                        # confirm before changing, as matching order matters.
                        if re.match(r'^{0}+'.format(option), celemnt):
                            columns[ii]=param
                            if columns[ii] =='east':
                                # Easting present: data already in UTM.
                                self.utm_flag=1
                            break
            return columns
        new_df_columns= getandReplace(optionsList=OptsList, params=paramsList,
                                      df= self._df)
        # Rebuild the frame with the normalised column names.
        self.df = pd.DataFrame(data=self._df.to_numpy(),
                               columns= new_df_columns)
def from_csv(self, erp_fn):
"""
Method essentially created to read file from csv , collected
| |
should be a list of
[frequency, S11, S12, S13,S14,S21, S22,S23,S24,S31,S32,S33,S34,S41,S42,S43,S44], etc are complex numbers
Designed to use S2P.sparameter_complex and SNP.sparameter_complex"""
# first create 4 separate matrix lists for 16 term correction
s1_matrix_list=[]
s2_matrix_list=[]
s3_matrix_list=[]
s4_matrix_list=[]
# Then populate them with the right values
for index,correction in enumerate(sixteen_term_correction):
[frequency, S11, S12, S13,S14,S21, S22,S23,S24,S31,S32,S33,S34,S41,S42,S43,S44]=correction
s1_matrix_list.append([frequency,np.matrix([[S11,S12],[S21,S22]])])
s2_matrix_list.append([frequency,np.matrix([[S13,S14],[S23,S24]])])
s3_matrix_list.append([frequency,np.matrix([[S31,S32],[S41,S42]])])
s4_matrix_list.append([frequency,np.matrix([[S33,S34],[S43,S44]])])
sparameter_matrix_list=two_port_complex_to_matrix_form(sparameters_complex)
# Apply the correction
sparameter_out=[]
for index,sparameter in enumerate(sparameter_matrix_list):
frequency=sparameter[0]
s_matrix=sparameter[1]
[s11_matrix,s12_matrix,s21_matrix,s22_matrix]=[s1_matrix_list[index][1],s2_matrix_list[index][1],
s3_matrix_list[index][1],s4_matrix_list[index][1]]
corrected_s_matrix=np.linalg.inv(s21_matrix*np.linalg.inv(s_matrix-s11_matrix)*s12_matrix+s22_matrix)
# This flips S12 and S21
sparameter_out.append([frequency,corrected_s_matrix[0,0],corrected_s_matrix[1,0],
corrected_s_matrix[0,1],corrected_s_matrix[1,1]])
return sparameter_out
def uncorrect_sparameters_sixteen_term(sparameters_complex,sixteen_term_correction):
    """Removes the sixteen term correction from sparameters and returns a new
    sparameter list.

    The sparameters should be a list of [frequency, S11, S21, S12, S22] where
    the S terms are complex numbers. The sixteen term correction should be a
    list of [frequency, S11, S12, S13, S14, S21, S22, S23, S24, S31, S32, S33,
    S34, S41, S42, S43, S44] rows of complex numbers. Designed to use
    S2P.sparameter_complex and SNP.sparameter_complex.
    Inverse of correct_sparameters_sixteen_term.

    :raises TypeError: when the two lists differ in length (consistent with
        correct_sparameters_twelve_term).
    """
    if len(sparameters_complex) != len(sixteen_term_correction):
        raise TypeError("sparameters and sixteen term correction must be the same length")
    # Partition each 4x4 correction row into its four 2x2 sub-blocks.
    # np.array/@ replaces the deprecated np.matrix/'*' combination with
    # identical matrix arithmetic.
    s1_blocks = []
    s2_blocks = []
    s3_blocks = []
    s4_blocks = []
    for correction in sixteen_term_correction:
        [frequency, S11, S12, S13, S14, S21, S22, S23, S24,
         S31, S32, S33, S34, S41, S42, S43, S44] = correction
        s1_blocks.append(np.array([[S11, S12], [S21, S22]], dtype=complex))
        s2_blocks.append(np.array([[S13, S14], [S23, S24]], dtype=complex))
        s3_blocks.append(np.array([[S31, S32], [S41, S42]], dtype=complex))
        s4_blocks.append(np.array([[S33, S34], [S43, S44]], dtype=complex))
    sparameter_matrix_list = two_port_complex_to_matrix_form(sparameters_complex)
    # Remove the correction frequency-by-frequency.
    inv = np.linalg.inv
    sparameter_out = []
    for index, sparameter in enumerate(sparameter_matrix_list):
        frequency = sparameter[0]
        s_matrix = np.asarray(sparameter[1])
        s11_block = s1_blocks[index]
        s12_block = s2_blocks[index]
        s21_block = s3_blocks[index]
        s22_block = s4_blocks[index]
        uncorrected = inv(inv(s21_block) @ (inv(s_matrix) - s22_block) @
                          inv(s12_block)) + s11_block
        # This deliberately flips S12 and S21 on output, matching the
        # forward sixteen-term correction routine.
        sparameter_out.append([frequency, uncorrected[0, 0], uncorrected[1, 0],
                               uncorrected[0, 1], uncorrected[1, 1]])
    return sparameter_out
def correct_sparameters_twelve_term(sparameters_complex,twelve_term_correction,reciprocal=True):
    """Applies the twelve term correction to sparameters and returns a new sparameter list.

    The sparameters should be a list of [frequency, S11, S21, S12, S22] where S terms are complex numbers.
    The twelve term correction should be a list of
    [frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr] where Edf, etc are complex numbers.

    Args:
        sparameters_complex (list): rows of [frequency, S11, S21, S12, S22] (measured values).
        twelve_term_correction (list): rows of the 12 error terms, one row per frequency point.
        reciprocal (bool): when True S12 and S21 are both replaced by a phase-continuous
            geometric mean sqrt(S21*S12); when False the individually corrected values are kept.

    Returns:
        list: rows of [frequency, S11, S21, S12, S22] corrected values.

    Raises:
        TypeError: if the two input lists do not have the same number of rows.
    """
    if len(sparameters_complex) != len(twelve_term_correction):
        raise TypeError("s parameter and twelve term correction must be the same length")
    sparameter_out=[]
    phase_last=0.
    for index,row in enumerate(sparameters_complex):
        frequency=row[0]
        # NOTE(review): row[1:] is [S11, S21, S12, S22], so the row-major reshape puts the
        # measured S21 in Sm[0,1] and the measured S12 in Sm[1,0] -- the opposite of the
        # conventional matrix layout. The formulas below rely on this ordering.
        Sm=np.matrix(row[1:]).reshape((2,2))
        [frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr]=twelve_term_correction[index]
        # D is the common denominator of the 12-term de-embedding equations
        D =(1+(Sm[0,0]-Edf)*(Esf/Erf))*(1+(Sm[1,1]-Edr)*(Esr/Err))-(Sm[0,1]*Sm[1,0]*Elf*Elr)/(Etf*Etr)
        S11 =(Sm[0,0]-Edf)/(D*Erf)*(1+(Sm[1,1]-Edr)*(Esr/Err))-(Sm[0,1]*Sm[1,0]*Elf)/(D*Etf*Etr)
        # NOTE(review): S21 subtracts the reverse crosstalk Exr and S12 the forward
        # crosstalk Exf; together with the swapped Sm layout above this is presumably
        # intentional -- confirm against the calibration reference before changing.
        S21 =((Sm[1,0]-Exr)/(D*Etf))*(1+(Sm[1,1]-Edr)*(Esr-Elf)/Err)
        S12 = ((Sm[0,1]-Exf)/(D*Etr))*(1+(Sm[0,0]-Edf)*(Esf-Elr)/Erf)
        S22 = (Sm[1,1]-Edr)/(D*Err)*(1+(Sm[0,0]-Edf)*(Esf/Erf))-(Sm[0,1]*Sm[1,0]*Elr)/(D*Etf*Etr)
        # S12 and S21 are averaged together in a way that keeps the phase continuous
        geometric_mean=cmath.sqrt(S21*S12)
        root_select=1
        phase_new=cmath.phase(geometric_mean)
        # if the phase jumps by more than 90 but less than 270 degrees, pick the other
        # complex square root
        if abs(phase_new-phase_last)>math.pi/2 and abs(phase_new-phase_last)<3*math.pi/2:
            root_select=-1
        mean_S12_S21=root_select*cmath.sqrt(S21*S12)
        if reciprocal:
            sparameter_out.append([frequency,S11,mean_S12_S21,mean_S12_S21,S22])
        else:
            sparameter_out.append([frequency,S11,S21,S12,S22])
        phase_last=cmath.phase(mean_S12_S21)
    return sparameter_out
def uncorrect_sparameters_twelve_term(sparameters_complex,twelve_term_correction,reciprocal=True):
    """Removes the twelve term correction from sparameters and returns a new sparameter list.

    The sparameters should be a list of [frequency, S11, S21, S12, S22] where S terms are complex numbers.
    The twelve term correction should be a list of
    [frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr] where Edf, etc are complex numbers.
    Inverse of correct_sparameters_twelve_term: the input rows are treated as the
    actual (corrected) sparameters Sa and the raw measured values are reconstructed.

    Args:
        sparameters_complex (list): rows of [frequency, S11, S21, S12, S22] (actual values).
        twelve_term_correction (list): rows of the 12 error terms, one row per frequency point.
        reciprocal (bool): when True S12 and S21 are both replaced by a phase-continuous
            geometric mean sqrt(S21*S12); when False the individual values are kept.

    Returns:
        list: rows of [frequency, S11, S21, S12, S22] uncorrected (measured) values.

    Raises:
        TypeError: if the two input lists do not have the same number of rows.
    """
    if len(sparameters_complex) != len(twelve_term_correction):
        raise TypeError("s parameter and twelve term correction must be the same length")
    sparameter_out=[]
    phase_last=0.
    for index,row in enumerate(sparameters_complex):
        frequency=row[0]
        # NOTE(review): row[1:] is [S11, S21, S12, S22], so the row-major reshape puts S21
        # in Sa[0,1] and S12 in Sa[1,0] -- the opposite of the conventional layout. The
        # formulas below depend on this ordering.
        Sa=np.matrix(row[1:]).reshape((2,2))
        [frequency,Edf,Esf,Erf,Exf,Elf,Etf,Edr,Esr,Err,Exr,Elr,Etr]=twelve_term_correction[index]
        # Determinant of the actual S-matrix, used by the embedding equations
        delta=Sa[0,0]*Sa[1,1]-Sa[0,1]*Sa[1,0]
        # NOTE(review): the S11 denominator uses +Esf*Elf*delta while the S21 denominator
        # uses -Esf*Elf*delta -- verify the sign against the reference before changing.
        S11 =Edf+(Erf)*(Sa[0,0]-Elf*delta)/(1-Esf*Sa[0,0]-Elf*Sa[1,1]+Esf*Elf*delta)
        S21 =Etf*(Sa[1,0])/(1-Esf*Sa[0,0]-Elf*Sa[1,1]-Esf*Elf*delta)
        S12 = Etr*(Sa[0,1])/(1-Elr*Sa[0,0]-Esr*Sa[1,1]-Esr*Elr*delta)
        S22 = Edr+Err*(Sa[1,1]-Elr*delta)/(1-Elr*Sa[0,0]-Esr*Sa[1,1]-Esr*Elr*delta)
        # S12 and S21 are averaged together in a way that keeps the phase continuous
        geometric_mean=cmath.sqrt(S21*S12)
        root_select=1
        phase_new=cmath.phase(geometric_mean)
        # if the phase jumps by more than 90 but less than 270 degrees, pick the other
        # complex square root
        if abs(phase_new-phase_last)>math.pi/2 and abs(phase_new-phase_last)<3*math.pi/2:
            root_select=-1
        mean_S12_S21=root_select*cmath.sqrt(S21*S12)
        if reciprocal:
            sparameter_out.append([frequency,S11,mean_S12_S21,mean_S12_S21,S22])
        else:
            sparameter_out.append([frequency,S11,S21,S12,S22])
        phase_last=cmath.phase(mean_S12_S21)
    return sparameter_out
#TODO: Check that this works the way it should
def correct_sparameters(sparameters,correction,**options):
    """Tries to return a corrected set of sparameters given uncorrected sparameters and a correction.

    Accepts a file name, an S2PV1 table, a list of [frequency, S11, S21, S12, S22]
    complex rows, or a list of [frequency, matrix] rows, and returns the corrected
    values in the same form the data was entered (overridable with the output_type
    option). The correction is assumed reciprocal unless reciprocal=False is passed.

    Options:
        reciprocal (bool): forwarded to the twelve term correction (default True).
        output_type (str): 'file', 'S2PV1', 'complex_list' or 'matrix_list';
            None keeps the type derived from the input.
        file_path (str): path used when the output type is 'file'.

    Raises:
        ValueError: when the correction row length is not 9, 13 or 17 terms.
        NotImplementedError: for bare matrix input/output, which is not supported.
    """
    defaults={"reciprocal":True,"output_type":None,"file_path":None}
    correction_options=dict(defaults)
    correction_options.update(options)
    try:
        # Import and condition the sparameters, remembering the input form
        if isinstance(sparameters, str):
            # Assume sparameters is given by file name
            sparameters_table=S2PV1(sparameters)
            sparameters=sparameters_table.sparameter_complex
            output_type='file'
        elif re.search('S2PV1',type(sparameters).__name__):
            # was re.search('S2PV1', type(sparameters)), which raises TypeError
            # because re.search cannot take a class object as the string argument
            output_type='S2PV1'
            sparameters=sparameters.sparameter_complex
        elif isinstance(sparameters, list):
            # check whether it is a list of complex rows or of [frequency, matrix] rows
            # (was type(...) in ['np.array','np.matrix'], which compared a type object
            # against strings and was therefore always False)
            if isinstance(sparameters[0][1], complex):
                output_type='complex_list'
            elif type(sparameters[0][1]).__name__ in ['ndarray','matrix']:
                output_type='matrix_list'
                sparameters=two_port_matrix_to_complex_form(sparameters)
        elif type(sparameters).__name__ in ['ndarray','matrix']:
            output_type='matrix'
            # was a bare `raise` outside an except block (TypeError at runtime)
            raise NotImplementedError("bare matrix input is not supported")
        # Dispatch on the number of terms in a correction ROW; the original compared
        # len(correction) (the number of frequency points) with `is`, which is wrong
        if len(correction[0]) == 13:
            corrected_sparameters=correct_sparameters_twelve_term(sparameters,correction,
                                                                  correction_options["reciprocal"])
        elif len(correction[0]) == 17:
            corrected_sparameters=correct_sparameters_sixteen_term(sparameters,correction)
        elif len(correction[0]) == 9:
            corrected_sparameters=correct_sparameters_eight_term(sparameters,correction)
        else:
            raise ValueError("Unrecognized correction row length: {0}".format(len(correction[0])))
        # Handle the output type using the derived one or the one entered as an option
        if correction_options["output_type"] is not None:
            output_type=correction_options["output_type"]
        if re.match('file',output_type, re.IGNORECASE):
            output_table=S2PV1(correction_options["file_path"],sparameter_complex=corrected_sparameters)
            output_table.save()
            print(("Output was saved as {0}".format(output_table.path)))
        elif re.search("complex",output_type,re.IGNORECASE):
            return corrected_sparameters
        elif re.search("matrix_list",output_type,re.IGNORECASE):
            return two_port_complex_to_matrix_form(corrected_sparameters)
        elif re.search("matrix",output_type,re.IGNORECASE):
            raise NotImplementedError("matrix output is not supported")
    except:
        # Keep the original best-effort diagnostic, then re-raise for the caller
        print("Could not correct sparameters")
        raise
def average_one_port_sparameters(table_list,**options):
    """Returns a table that is the average of the Sparameters in table_list.

    The new table contains every unique frequency value occurring in any of the
    tables. Tables must be in Real-Imaginary or magnitude-angle format; do not
    try to average db-angle format. Works on any table object whose rows are
    stored in the .data attribute and that exposes get_column.
    """
    settings={"frequency_selector":0,"frequency_column_name":"Frequency"}
    settings.update(options)
    selector=settings["frequency_selector"]
    # every distinct frequency that occurs in any table, ascending
    unique_frequencies=sorted(set([f for table in table_list
                                   for f in table.get_column("Frequency")]))
    average_data=[]
    for frequency in unique_frequencies:
        per_table_means=[]
        for table in table_list:
            matching_rows=[row for row in table.data if row[selector]==frequency]
            # average the rows of this table at the current frequency
            per_table_means.append(np.mean(np.array(matching_rows),axis=0))
        # then average across the tables
        average_data.append(np.mean(per_table_means,axis=0).tolist())
    return average_data
def two_port_comparison_plot_with_residuals(two_port_raw,mean_frame,difference_frame):
    """Creates a comparison plot given a TwoPortRawModel object and a pandas.DataFrame mean frame.

    Draws a 3x2 grid of panels (magnitude and phase for S11, S21 and S22). Each
    panel shows the raw measurement (black) against the mean (green squares),
    with the residuals from difference_frame on a twinned red y-axis.
    Calls plt.show() as a side effect and returns the matplotlib figure.
    """
    fig, axes = plt.subplots(nrows=3, ncols=2, sharex='col',figsize=(8,6),dpi=80)
    measurement_date=two_port_raw.metadata["Measurement_Date"]
    ax0,ax1,ax2,ax3,ax4,ax5 = axes.flat
    compare_axes=[ax0,ax1,ax2,ax3,ax4,ax5]
    # each comparison axis gets a twinned y-axis for the residual trace
    diff_axes=[]
    for ax in compare_axes:
        diff_axes.append(ax.twinx())
    column_names=['Frequency','magS11','argS11','magS21','argS21','magS22','argS22']
    # residual traces (red), one column per panel
    for index,ax in enumerate(diff_axes):
        ax.plot(difference_frame['Frequency'].tolist(),difference_frame[column_names[index+1]].tolist(),'r-x')
        ax.set_ylabel('Difference',color='red')
        if re.search('mag',column_names[index+1]):
            # fixed scale so the magnitude residual panels are comparable
            ax.set_ylim(-.02,.02)
    # raw vs mean traces
    for index, ax in enumerate(compare_axes):
        ax.plot(two_port_raw.get_column('Frequency'),two_port_raw.get_column(column_names[index+1]),
                'k-o',label=measurement_date)
        ax.plot(mean_frame['Frequency'].tolist(),mean_frame[column_names[index+1]].tolist(),'gs',label='Mean')
        ax.set_title(column_names[index+1])
        ax.legend(loc=1,fontsize='8')
        if re.search('arg',column_names[index+1]):
            ax.set_ylabel('Phase(Degrees)',color='green')
        elif re.search('mag',column_names[index+1]):
            ax.set_ylabel(r'|${\Gamma} $|',color='green')
    # only the bottom row gets x labels (columns share x)
    ax4.set_xlabel('Frequency(GHz)',color='k')
    ax5.set_xlabel('Frequency(GHz)',color='k')
    fig.subplots_adjust(hspace=0)
    fig.suptitle(two_port_raw.metadata["Device_Id"]+"\n",fontsize=18,fontweight='bold')
    plt.tight_layout()
    plt.show()
    return fig
def two_port_difference_frame(two_port_raw,mean_frame):
    """Creates a difference pandas.DataFrame given a two port raw file and a mean pandas.DataFrame.

    For each raw data row, the mean-frame row whose frequency lies within 0.01 of
    the raw frequency is subtracted element-wise. Raw rows without a frequency
    match are silently skipped.
    """
    difference_list=[]
    for row in two_port_raw.data[:]:
        # .values replaces DataFrame.as_matrix(), which was removed in pandas 1.0
        mean_row=mean_frame[abs(mean_frame["Frequency"]-row[0])<abs(.01)].values
        try:
            mean_row=mean_row[0]
            # raw rows carry two leading bookkeeping columns, hence the i+2 offset
            difference_row=[row[i+2]-mean_row[i] for i in range(1,len(mean_row))]
            difference_row.insert(0,row[0])
            difference_list.append(difference_row)
        except (IndexError, TypeError):
            # no mean row within the frequency tolerance (or malformed raw row):
            # skip, as the original bare except did
            pass
    column_names=['Frequency','magS11','argS11','magS21','argS21','magS22','argS22']
    diff_data_frame=pandas.DataFrame(difference_list,columns=column_names)
    return diff_data_frame
def two_port_mean_frame(device_id,system_id=None,history_data_frame=None):
    """Given a Device_Id and a pandas data frame of the history, creates a mean data frame.

    The history is filtered to the given device (and optionally system), then the
    columns are averaged per unique frequency value.

    Args:
        device_id: value matched against the history's "Device_Id" column.
        system_id: optional value matched against "System_Id".
        history_data_frame (pandas.DataFrame): full measurement history.

    Returns:
        pandas.DataFrame: one row per unique frequency with the mean of each column.
    """
    device_history=history_data_frame[history_data_frame["Device_Id"]==device_id]
    if system_id is not None:
        device_history=device_history[device_history["System_Id"]==system_id]
    column_names=['Frequency','magS11','argS11','magS21','argS21','magS22','argS22']
    mean_array=[]
    for freq in device_history["Frequency"].unique():
        frequency_slice=device_history[device_history["Frequency"]==freq]
        # .values replaces DataFrame.as_matrix(), which was removed in pandas 1.0;
        # the original also took np.mean twice (the second call on a scalar was a no-op)
        mean_array.append([np.mean(frequency_slice[column].values) for column in column_names])
    mean_frame=pandas.DataFrame(mean_array,columns=column_names)
    return mean_frame
def mean_from_history(history_frame,**options):
    """Creates a mean frame from a full history frame (pandas.DataFrame object).

    Options select the column names to output and the values to filter on.

    Options:
        Device_Id, System_Id, Measurement_Timestamp, Connector_Type_Measurement,
        Measurement_Date, Measurement_Time, Direction: when not None, the history
            is filtered to rows where the same-named column equals the value.
        column_names (list): columns to average (default Frequency, magS11, argS11).
        outlier_removal (bool): when True (default), rows whose magS11 lies more
            than 3 standard deviations from the mean are discarded first.

    Returns:
        pandas.DataFrame: one row per unique frequency with the mean of each
        requested column.
    """
    defaults={"Device_Id":None, "System_Id":None,"Measurement_Timestamp":None,
              "Connector_Type_Measurement":None,
              "Measurement_Date":None,"Measurement_Time":None,"Direction":None,
              "column_names":['Frequency','magS11','argS11'],"outlier_removal":True}
    mean_options=dict(defaults)
    mean_options.update(options)
    filters=["Device_Id","System_Id","Measurement_Timestamp","Connector_Type_Measurement",
             "Measurement_Date","Measurement_Time","Direction"]
    temp_frame=history_frame.copy()
    # Apply every filter the caller supplied a value for
    for filter_type in filters:
        if mean_options[filter_type] is not None:
            temp_frame=temp_frame[temp_frame[filter_type]==mean_options[filter_type]]
    if mean_options["outlier_removal"]:
        # 3-sigma clip on magS11 only
        mean_s11=np.mean(temp_frame["magS11"])
        std_s11=np.std(temp_frame["magS11"])
        temp_frame=temp_frame[temp_frame["magS11"]<(mean_s11+3*std_s11)]
        temp_frame=temp_frame[temp_frame["magS11"]>(mean_s11-3*std_s11)]
    mean_array=[]
    for freq in temp_frame["Frequency"].unique():
        frequency_slice=temp_frame[temp_frame["Frequency"]==freq]
        # .values replaces DataFrame.as_matrix(), which was removed in pandas 1.0;
        # the original also took np.mean twice (the second call on a scalar was a no-op)
        mean_array.append([np.mean(frequency_slice[column].values)
                           for column in mean_options["column_names"]])
    mean_frame=pandas.DataFrame(mean_array,columns=mean_options["column_names"])
    return mean_frame
def median_from_history(history_frame,**options):
    """Creates a median frame from a full history frame (pandas.DataFrame object).

    Options select the column names to output and the values to filter on.

    Options:
        Device_Id, System_Id, Measurement_Timestamp, Connector_Type_Measurement,
        Measurement_Date, Measurement_Time, Direction: when not None, the history
            is filtered to rows where the same-named column equals the value.
        column_names (list): columns to reduce (default Frequency, magS11, argS11).
        outlier_removal (bool): when True (default), rows whose magS11 lies more
            than 3 standard deviations from the mean are discarded first.

    Returns:
        pandas.DataFrame: one row per unique frequency with the median of each
        requested column.
    """
    defaults={"Device_Id":None, "System_Id":None,"Measurement_Timestamp":None,
              "Connector_Type_Measurement":None,
              "Measurement_Date":None,"Measurement_Time":None,"Direction":None,
              "column_names":['Frequency','magS11','argS11'],"outlier_removal":True}
    median_options=dict(defaults)
    median_options.update(options)
    filters=["Device_Id","System_Id","Measurement_Timestamp","Connector_Type_Measurement",
             "Measurement_Date","Measurement_Time","Direction"]
    temp_frame=history_frame.copy()
    # Apply every filter the caller supplied a value for
    for filter_type in filters:
        if median_options[filter_type] is not None:
            temp_frame=temp_frame[temp_frame[filter_type]==median_options[filter_type]]
    if median_options["outlier_removal"]:
        # 3-sigma clip on magS11 only (mean-based, matching mean_from_history)
        mean_s11=np.mean(temp_frame["magS11"])
        std_s11=np.std(temp_frame["magS11"])
        temp_frame=temp_frame[temp_frame["magS11"]<(mean_s11+3*std_s11)]
        temp_frame=temp_frame[temp_frame["magS11"]>(mean_s11-3*std_s11)]
    median_array=[]
    for freq in temp_frame["Frequency"].unique():
        frequency_slice=temp_frame[temp_frame["Frequency"]==freq]
        # .values replaces DataFrame.as_matrix(), which was removed in pandas 1.0;
        # the original also took np.median twice (the second call on a scalar was a no-op)
        median_array.append([np.median(frequency_slice[column].values)
                             for column in median_options["column_names"]])
    median_frame=pandas.DataFrame(median_array,columns=median_options["column_names"])
    return median_frame
def raw_difference_frame(raw_model,mean_frame,**options):
    """Creates a difference pandas.DataFrame given a raw NIST model and a mean pandas.DataFrame.

    For each raw data row, the mean-frame row whose frequency lies within 0.01 of
    the raw frequency is subtracted element-wise. Raw rows without a frequency
    match are silently skipped.

    Options:
        column_names (list): column labels for the result; defaults to the
            mean frame's own columns.
    """
    defaults={"column_names":mean_frame.columns.tolist()}
    difference_options=dict(defaults)
    difference_options.update(options)
    difference_list=[]
    for row in raw_model.data[:]:
        # .values replaces DataFrame.as_matrix(), which was removed in pandas 1.0
        mean_row=mean_frame[abs(mean_frame["Frequency"]-row[0])<abs(.01)].values
        try:
            mean_row=mean_row[0]
            # raw rows carry two leading bookkeeping columns, hence the i+2 offset
            difference_row=[row[i+2]-mean_row[i] for i in range(1,len(mean_row))]
            difference_row.insert(0,row[0])
            difference_list.append(difference_row)
        except (IndexError, TypeError):
            # no mean row within the frequency tolerance (or malformed raw row):
            # skip, as the original bare except did
            pass
    difference_data_frame=pandas.DataFrame(difference_list,columns=difference_options["column_names"])
    return difference_data_frame
def return_history_key(calrep_model):
"Returns a key for the history dictionary given a calrep model"
model=calrep_model.__class__.__name__
#print model
if re.search('Calrep|DUT',model):
if re.search('OnePortCalrep',model):
return '1-port calrep'
elif re.search('TwoPortCalrep',model):
return '2-port calrep'
elif re.search('PowerCalrep',model):
if calrep_model.options["column_names"]==POWER_3TERM_COLUMN_NAMES:
return 'power 3term | |
# -*- encoding: utf-8 -*-
"""
Views: Encontramos todas las vistas del sistema de establecimientos.
@author <NAME>
@contact <EMAIL>
<EMAIL>
@camilortte on Twitter
@copyright Copyright 2014-2015, RecomendadorUD
@license GPL
@date 2014-10-10
@status Pre-Alpha
@version 0.215
"""
import datetime
import json
#Django
from django.shortcuts import render,redirect
from django.views.generic.base import View, TemplateView
from django.http import HttpResponse, Http404,HttpResponseRedirect
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_control
from django.forms import DateField
from django.contrib.gis.geos import Polygon, GEOSGeometry
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse_lazy , lazy,reverse
from django.contrib import messages
from django.views.generic import (
DetailView, CreateView , ListView, UpdateView,
DeleteView)
#externals apps
from vanilla import CreateView as CreateViewVanilla
from vanilla import TemplateView as TemplateViewVanilla
from haystack.query import SearchQuerySet
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from notifications import notify
#Internal apps
from apps.account_system.models import User
from apps.recommender_system.models import EstablecimientosRecommender
#Serializers
from .serializers import EstablecimientoSerializer, PaginatedEstablecimientoSerializer
#Models
from .models import (
Establecimiento, Comentario, Imagen, SubCategoria,
Categoria, TiposSolicitud, Solicitud)
#Forms
from .forms import (
ComentarioForm, EstablecimientoForm,
UploadImageForm, CategoriasFilterForm,
SolicitudForm, EstablecimientoTemporalForm)
class DetalleEstablecimientoView(DetailView):
u"""
Se encarga de mostrar todos los datos de un establecimiento (entre
ellos las images), ademas carga todos los forms como comentarios y
rating requeridos para la correcta interacción.
Hereda todo de DetailView
Attributes:
template_name (str): Plantilla que se cargará.
model (Mode): Clase del modelo que se usará.
"""
template_name = "establishment/detail.html"
model= Establecimiento
def get_context_data(self, **kwargs):
u"""
Se agregan contenxtos como las imagenes, los forms de
agregar y eliminar imagenes así como tambien los forms
de agregar y eliminar comentarios.
Tambien se realiza la paginación de comentarios.
Tambien realiza las respectiva validaciones de quien puede,
eliminiar, y agregar.
"""
context = super(DetalleEstablecimientoView, self).get_context_data(**kwargs)
establecimiento = self.object
context['imagenes'] = Imagen.objects.filter(establecimientos=establecimiento)
count=Imagen.objects.filter(establecimientos=establecimiento).count()
if count < settings.MAX_IMAGES_PER_PLACE:
context['imagenes_nulas'] = range(count,settings.MAX_IMAGES_PER_PLACE)
context['establecimiento'] =Establecimiento
if self.request.user.is_authenticated():
context['form_image'] = UploadImageForm
usuario = self.request.user
usuario_comentario=Comentario.objects.filter(author=usuario,post=establecimiento)
#esta vacio puede comentar
if not usuario_comentario:
data = {
'sender':context['object'].id,
'is_public':True
}
context['form'] = ComentarioForm(initial=data)
else:
#No esta vacio no puede comentar
pass
if self.request.user.is_organizacional():
propietario=Establecimiento.objects.filter(administradores=self.request.user,id=establecimiento.id)
if propietario:
#Es propietario del establecimiento
context['propietario']=True
comentarios=Comentario.objects.filter(post=establecimiento,is_public=True)
paginator = Paginator(comentarios, settings.MAX_COMMENTS_PER_PAGE) # Show 10 contacts per page
page = self.request.GET.get('page')
try:
comentarios = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
comentarios = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
comentarios = paginator.page(paginator.num_pages)
context['comentarios'] = comentarios
context['can_upload_image']=self.can_upload_image()
return context
def can_upload_image(self):
"""
Se encarga de validar si es posible subir otra imagen
"""
if self.request.user.is_authenticated():
if self.object.imagen_set.all().count() >= settings.MAX_IMAGES_PER_PLACE:
return False
else:
if self.request.user.is_superuser or self.object.administradores.filter(id=self.request.user.id):
return True
else:
if Imagen.objects.filter(usuarios=self.request.user,establecimientos=self.object).count() >= settings.MAX_UPLOAD_PER_USER:
return False
return True
else:
return False
@method_decorator(cache_control(must_revalidate=True, no_cache=True, no_store=True))
def dispatch(self, *args, **kwargs):
print "Establecimiento consultado"
return super(DetalleEstablecimientoView, self).dispatch(*args, **kwargs)
class JSONMixin(object):
    u"""
    Mixin that renders the view's context (a Django form) as a JSON response
    instead of an HTML template.
    """
    def render_to_response(self, context, **httpresponse_kwargs):
        # Serialize the context to JSON rather than rendering a template
        return self.get_json_response(
            self.convert_context_to_json(context),
            **httpresponse_kwargs
        )

    def get_json_response(self, content, **httpresponse_kwargs):
        # Wrap the JSON payload in an HttpResponse with the JSON content type
        return HttpResponse(
            content,
            content_type='application/json',
            **httpresponse_kwargs
        )

    def convert_context_to_json(self, context):
        u"""
        Serializes a Django form and returns a JSON object with its fields
        and errors.

        Returns:
            str: JSON with 'options', 'fields' and 'errors' keys when the form
            has errors; on success only {'success': true} is returned (the
            serialized fields are intentionally discarded).
        """
        form = context.get('form')
        to_json = {}
        options = context.get('options', {})
        to_json.update(options=options)
        fields = {}
        for field_name, field in form.fields.items():
            # Date values are normalized to the dd.mm.YYYY format
            if isinstance(field, DateField) \
                    and isinstance(form[field_name].value(), datetime.date):
                fields[field_name] = \
                    unicode(form[field_name].value().strftime('%d.%m.%Y'))
            else:
                # `value and unicode(value) or value` keeps falsy values
                # (None, '') as-is and stringifies everything else
                fields[field_name] = \
                    form[field_name].value() \
                    and unicode(form[field_name].value()) \
                    or form[field_name].value()
        to_json.update(fields=fields)
        if form.errors:
            errors = {
                'non_field_errors': form.non_field_errors(),
            }
            fields = {}
            for field_name, text in form.errors.items():
                fields[field_name] = text
            errors.update(fields=fields)
            to_json.update(errors=errors)
        else:
            # NOTE(review): resetting to_json here drops the serialized fields on
            # success -- presumably the client only needs 'success'; confirm
            to_json={}
            context['success'] = True
        to_json.update(success=context.get('success', False))
        print "RETORNA ", json.dumps(to_json)
        return json.dumps(to_json)
class CommentCreateView(JSONMixin, CreateView):
    u"""
    Creates a new comment; JSONMixin makes the response a JSON payload.

    Attributes:
        model (Model): Model class used by the view.
        form_class (Form): Form class used for creation.
    """
    model = Comentario
    form_class = ComentarioForm

    def post(self, request, *args, **kwargs):
        u"""
        Validates the form data and, when valid, creates the comment.
        validate_comment() enforces the one-comment-per-user rule.

        Returns:
            form_valid(...) when the comment is valid, form_invalid(...) otherwise.
        """
        self.object = None
        # we can use get_form this time as we no longer need to set the data property
        form = self.get_form(self.form_class)
        self.establecimiento_id=kwargs['pk']
        self.success_url=reverse('establecimiento_detail_url',kwargs={'pk':self.establecimiento_id})
        # bind the author and target establishment before validating
        form.instance.author = self.request.user
        form.instance.post = Establecimiento.objects.get(id=self.establecimiento_id)
        if form.is_valid() and self.validate_comment():
            return self.form_valid(form)
        else:
            return self.form_invalid(form)

    def form_valid(self, form):
        # Save and answer with the JSON-serialized form (via JSONMixin)
        self.object = form.save()
        messages.success(self.request, u"Comentario creado.")
        return self.render_to_response(self.get_context_data(form=form))

    def validate_comment(self):
        u"""
        Checks that the user has not already commented on this establishment.

        Returns:
            True when the user may comment, False when a previous comment exists.
        """
        comentario=Comentario.objects.filter(author=self.request.user.id,post=self.establecimiento_id)
        print comentario
        if not comentario:
            # no existing comment: commenting is allowed
            return True
        else:
            # a comment already exists: commenting is not allowed
            return False

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(CommentCreateView, self).dispatch(*args, **kwargs)
class EliminarEstablecimiento(DeleteView):
u"""
Clase encargada de eliminar un establecimiento solo por el usuario
propietario
"""
model = Establecimiento
success_url=reverse_lazy('establecimientos_propios_ur')
def get_object(self, queryset=None):
u"""
Validamos que el objeto que se eliminará sea propiedad del
usuario que lo elminará
Returns:
Context si el usuario es quien eliminara su propio establecimiento
Http404 si es un usuario invalido intentnaod eliminar.
"""
establecimiento_id= self.kwargs['pk']
establecimiento=Establecimiento.objects.filter(id=establecimiento_id,administradores=self.request.user.id)
if establecimiento and (self.request.user.is_organizacional or self.request.user.is_superuser ):
context = super(EliminarEstablecimiento, self).get_object(queryset=None)
return context
#De lo contrario
else:
print "No puede elimianr el comentario y esta intentando romper el sistema"
raise Http404
def delete(self, request, *args, **kwargs):
ctx= super(EliminarEstablecimiento, self).delete(request,*args, **kwargs)
messages.success(self.request, u"Establecimiento Eliminado.")
return ctx
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(EliminarEstablecimiento, self).dispatch(*args, **kwargs)
class EliminarComentario(DeleteView):
u"""
Clase para eliminar un comentario, este solo podrá ser eliminado
por el autor, el propietario del establecimiento o un usuario
administrador
Atributes:
model (Model): Modelo que se usará.
"""
model = Comentario
def get_object(self, queryset=None):
u"""
Validamos que el objeto que se eliminará sea propiedad del
usuario que lo elminará
Returns:
Context si el usuario es quien eliminara su propio comentario
Http404 si es un usuario invalido intentnaod eliminar.
"""
establecimiento_id= self.kwargs['establecimiento_id']
comentario_id= self.kwargs['comentario_id']
#obj = super(EliminarComentario, self).get_object()
comentario=Comentario.objects.filter(author=self.request.user.id,post=establecimiento_id,id=comentario_id)
#Si comentario no esta vacio
if ( comentario):
#comentario.delete()
context = {'establecimiento_id':establecimiento_id, 'comentario_id':comentario_id}
return context
#De lo contrario
else:
print "No puede elimianr el comentario y esta intentando romper el sistema"
raise Http404
return {'comentario_id':comentario_id}
def delete(self, request, *args, **kwargs):
u"""
se comprueba que el comentario a eliminar sea eliminado por el propietario del comentario
o por un usuario administrador. Si todo es valido se eliminara.
Returns:
HttpResponseRedirect A el establecimiento que alojó el comentario.
"""
comentario_id = self.kwargs['comentario_id']
establecimiento_id = self.kwargs['establecimiento_id']
if request.user.is_superuser:
comentario=Comentario.objects.get(id=comentario_id)
comentario.delete()
else:
comentario=Comentario.objects.filter(author=request.user,
post=Establecimiento.objects.get(id=establecimiento_id),
id=comentario_id)
#No esta vacio
if comentario:
if comentario[0].author.id==request.user.id:
comentario[0].delete()
messages.success(self.request, u"Comentario Eliminado.")
self.success_url = reverse('establecimiento_detail_url', kwargs={'pk': establecimiento_id})
return HttpResponseRedirect(self.success_url)
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(EliminarComentario, self).dispatch(*args, **kwargs)
class Establecimientoslist(ListView):
    u"""
    Lists the establishments in the system according to the search criteria.

    Attributes:
        paginate_by (int): Number of establishments per page.
        model (Model): Model class.
        template_name (str): Template where the information is rendered.
    """
    paginate_by = 10
    model = Establecimiento
    template_name = "establishment/list.html"

    def get_context_data(self, **kwargs):
        u"""Adds the category filter form to the template context."""
        context = super(Establecimientoslist, self).get_context_data(**kwargs)
        context.update(form_categorias=CategoriasFilterForm)
        return context
class CrearEstablecimiento(CreateViewVanilla):
u"""
Crea un nuevo establecimiento
"""
model= Establecimiento
template_name = "establishment/create.html"
content_type = None
form_class = EstablecimientoForm
success_url = lazy(reverse, str)("home_url") #Esta se modifica en el metodo get_succes_url
def get_success_url(self):
messages.success(self.request, u"Establecimiento creado.")
return reverse_lazy('establecimiento_detail_url',
kwargs={'pk': self.object.id})
def form_invalid(self, form):
return super(CrearEstablecimiento, self).form_invalid(form)
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(CrearEstablecimiento, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
u"""
Se agregan los contexto | |
"""General Kalman filter
Description:
------------
Kalman filter and the modified Bryson-Frazier smoother is covered in the book "Factorization Methods for Discrete
Sequential Estimation" by Bierman :cite:`bierman2006`. However, there are some typos but a corrected version of the
algorithm is listed in :cite:`gibbs2011`.
"""
# External library imports
import numpy as np
import h5py
# Standard library import
import os
# Midgard imports
from midgard.math.unit import Unit
# Where imports
from where.lib import log
from where.lib import config
class KalmanFilter(object):
"""A general Kalman filter
See for instance https://en.wikipedia.org/wiki/Kalman_filter#Details for information about a general Kalman
filter. We use the Modified Bryson-Frazier smoother, which is described at
https://en.wikipedia.org/wiki/Kalman_filter#Modified_Bryson.E2.80.93Frazier_smoother
Notation:
h: Partial derivatives # num_obs x n x 1
x: Predicted state estimate (x-tilde) # num_obs x n x 1
x_hat: Updated state estimate (x-hat) # num_obs x n x 1
sigma: Residual covariance # num_obs
innovation: Measurement residual # num_obs
z: Observed residual # num_obs
r: Observation noise covariance # num_obs
Q: Process noise covariance # dict()
k: Kalman gain # num_obs x n x 1
p: Predicted estimate covariance (p-tilde) # n x n (not stored)
p_hat: Updated estimate covariance (p-hat) # num_obs x n x n
phi: State transition # n x n (not stored)
x_smooth: Smoothed state estimates # num_obs x n x 1
lam: (lambda) # num_obs x n x 1
"""
    def __init__(self, h, z=None, apriori_stdev=None, phi=None, r=None, Q=None, param_names=None):
        """Initialize the Kalman filter

        Args:
            h (Numpy array):             Partial derivatives (num_obs x n x 1)
            z (Numpy array):             Observations (num_obs); zeros when not given
            apriori_stdev (Numpy array): Apriori standard deviation (n); ones when not given
            phi (Numpy array):           State transition (num_obs x n x n); identities when not given
            r (Numpy array):             Observation noise covariance (num_obs); ones when not given
            Q (Dict):                    Process noise, maps epoch -> {(idx1, idx2): noise}; empty when not given
            param_names (List):          Parameter names used to label the covariance output file
        """
        self.h = h
        # num_obs and n (number of state parameters) are taken from the partials
        self.num_obs, self.n, _ = self.h.shape
        self.apriori_stdev = np.ones(self.n) if apriori_stdev is None else apriori_stdev
        self.z = np.zeros((self.num_obs)) if z is None else z
        # Default transition: one n x n identity matrix per observation epoch
        self.phi = np.eye(self.n).repeat(self.num_obs).reshape(self.n, self.n, -1).T if phi is None else phi
        self.r = np.ones((self.num_obs)) if r is None else r
        self.Q = dict() if Q is None else Q
        self.x_hat = np.zeros((self.num_obs, self.n, 1))
        # Formal errors: sqrt of the diagonal of p_hat, filled in by filter()
        self.x_hat_ferr = np.zeros((self.num_obs, self.n))
        self.x_smooth = np.zeros((self.num_obs, self.n, 1))
        self.param_names = param_names if param_names else []
        # Create (truncate) the covariance output file up front and record the
        # parameter labels; per-epoch covariances are written later (see filter())
        self.p_hat_file_path = config.files.path("output_covariance_matrix")
        self.p_hat_file = h5py.File(self.p_hat_file_path, "w")
        self.p_hat_file.attrs["labels"] = ", ".join(self.param_names)
        self.p_hat_file.close()
    def filter(self):
        """Run the Kalman filter forward and backward

        The forward pass stores the updated state (``x_hat``), its formal
        errors (``x_hat_ferr``) and the updated covariance (``p_hat``, written
        to file via ``_set_p_hat``). The backward pass fills ``x_smooth``
        using the Modified Bryson-Frazier recursion.
        """
        # Initialize: apriori state is zero with covariance diag(apriori_stdev**2);
        # lam is the MBF adjoint vector used by the backward pass
        x_tilde = np.zeros((self.n, 1))
        p_tilde = np.diag(self.apriori_stdev ** 2)
        sigma = np.zeros(self.num_obs)
        innovation = np.zeros(self.num_obs)
        k = np.zeros((self.num_obs, self.n, 1))
        lam = np.zeros((self.n, 1))
        # Makes calculations easier to read (and gives a slight speed-up)
        h = self.h
        z = self.z
        phi = self.phi
        r = self.r
        Q = self.Q
        x_hat = self.x_hat
        x_smooth = self.x_smooth
        I = np.eye(self.n)
        # Run filter forward over all observations
        for epoch in range(self.num_obs):
            # Measurement update (one scalar observation per epoch)
            innovation[epoch] = z[epoch] - h[epoch].T @ x_tilde
            sigma[epoch] = (h[epoch].T @ p_tilde @ h[epoch]) + r[epoch]
            k[epoch] = p_tilde @ h[epoch] / sigma[epoch]
            x_hat[epoch] = x_tilde + k[epoch] * innovation[epoch]
            p_hat = (I - k[epoch] @ h[epoch].T) @ p_tilde
            # Time update: propagate state and covariance to the next epoch
            x_tilde = phi[epoch] @ x_hat[epoch]
            p_tilde = phi[epoch] @ p_hat @ phi[epoch].T
            # Add process noise only to the (i, j) entries listed for this epoch
            for (idx1, idx2), noise in Q.get(epoch, {}).items():
                p_tilde[idx1, idx2] += noise
            self._set_p_hat(epoch, p_hat)
            # Formal error is the square root of the covariance diagonal
            self.x_hat_ferr[epoch, :] = np.sqrt(np.diagonal(p_hat))
        # Run smoother backwards over all observations
        for epoch in range(self.num_obs - 1, -1, -1):
            # TODO smooth covariance matrix
            p_hat = self._get_p_hat(epoch)
            x_smooth[epoch] = x_hat[epoch] + p_hat.T @ lam
            # NOTE(review): at epoch 0 ``phi[epoch - 1]`` wraps around to phi[-1].
            # The resulting lam is never used (the loop ends), so this looks
            # harmless, but confirm against the MBF reference.
            lam = (
                phi[epoch - 1].T @ h[epoch] * innovation[epoch] / sigma[epoch]
                + phi[epoch - 1].T @ (I - k[epoch] @ h[epoch].T).T @ lam
            )
def update_dataset(self, dset, param_names, normal_idx, num_unknowns):
"""Update the given dataset with results from the filtering
Args:
dset (Dataset): The dataset.
param_names (List): Strings with names of parameters. Used to form field names.
normal_idx (Slice): Slice denoting which parameters should be used for the normal equations.
num_unknowns (Int): Number of unknowns.
"""
# Update dataset with state and estimation fields and calculate new residuals
self._add_fields(dset, param_names)
dset.residual[:] = dset.est - (dset.obs - dset.calc)
num_unknowns += dset.meta.get("num_clock_coeff", 0)
# Calculate normal equations, and add statistics about estimation to dataset
N, b = self._normal_equations(normal_idx, dset.num_obs - 1)
g = self.x_hat[dset.num_obs - 1, normal_idx, :]
deg_freedom = dset.num_obs - num_unknowns
v = dset.residual[:, None]
P = np.diag(1 / self.r[: dset.num_obs])
sq_sum_residuals = np.asscalar(v.T @ P @ v)
sq_sum_omc_terms = np.asscalar(2 * b.T @ g - g.T @ N @ g)
variance_factor = sq_sum_residuals / deg_freedom if deg_freedom != 0 else np.inf
log.info(f"Variance factor = {variance_factor:.4f}, degrees of freedom = {deg_freedom:d}")
# Report and set analysis status if there are too few degrees of freedom
if deg_freedom < 1:
log.error(f"Degrees of freedom is {deg_freedom} < 1. Estimate fewer parameters")
if dset.meta.get("analysis_status") == "unchecked":
dset.meta["analysis_status"] = "too few degrees of freedom"
else:
if dset.meta.get("analysis_status") == "too few degrees of freedom":
dset.meta["analysis_status"] = "unchecked"
# Report and set analysis status if there are too few stations
# TODO: if vlbi_site_pos in state_vector and num_stations < 3
estimate_site_pos = np.char.startswith(np.array(param_names, dtype=str), "vlbi_site_pos").any()
if len(dset.unique("station")) < 3 and estimate_site_pos:
log.warn(f"Too few stations {len(dset.unique('station'))} < 3. Do not estimate station positions.")
# if dset.meta.get("analysis_status") == "unchecked":
# dset.meta["analysis_status"] = "needs custom state vector"
elif len(dset.unique("station")) < 3 and estimate_site_pos:
if dset.meta.get("analysis_status") == "needs custom state vector":
dset.meta["analysis_status"] = "unchecked"
# Update config
cfg_vars = dset.vars.copy()
cfg_vars.pop("rundate")
with config.update_tech_config(dset.analysis["rundate"], cfg_vars.pop("pipeline"), **cfg_vars) as cfg:
cfg.update("analysis_status", "status", dset.meta.get("analysis_status", ""), source=__file__)
# Add information to dset.meta
dset.meta.add("number of observations", dset.num_obs, section="statistics")
dset.meta.add("number of unknowns", num_unknowns, section="statistics")
dset.meta.add("square sum of residuals", sq_sum_residuals, section="statistics")
dset.meta.add("degrees of freedom", deg_freedom, section="statistics")
dset.meta.add("variance factor", variance_factor, section="statistics")
dset.meta.add("weighted square sum of o-c", sq_sum_residuals + sq_sum_omc_terms, section="statistics")
dset.meta.add("matrix", N.tolist(), section="normal equation")
dset.meta.add("vector", b[:, 0].tolist(), section="normal equation")
dset.meta.add("names", param_names[normal_idx], section="normal equation")
dset.meta.add(
"unit", [config.tech[f.split("-")[0]].unit.str for f in param_names[normal_idx]], section="normal equation"
)
def cleanup(self):
if not config.tech.keep_covariance_file.bool:
os.remove(self.p_hat_file_path)
def _add_fields(self, dset, param_names):
"""Add fields to the given dataset
Adds fields for state vectors and estimate vectors for each parameter. Parameters with names ending with an
underscore, `_`, are not added to the dataset.
Args:
dset (Dataset): The dataset.
param_names (List): Strings with names of parameters. Used to form field names.
"""
# Delete values from previous iterations
if "state" in dset.fields:
del dset.state
if "estimate" in dset.fields:
del dset.estimate
for idx, param_name in enumerate(param_names):
if param_name.endswith("_"):
continue
# State vectors
fieldname = f"state.{param_name}"
fieldname_sigma = fieldname + "_sigma"
value = self.x_smooth[: dset.num_obs, idx, 0]
value_sigma = np.sqrt(self.x_hat_ferr[: dset.num_obs, idx])
# Convert values to the display unit. It corresponds to "meter per <unit of partial>"
partial_unit = dset.unit("partial.{}".format(param_name))
to_unit = dset.meta["display_units"][param_name]
from_unit = f"meter/({partial_unit[0]})"
factor = Unit(from_unit, to_unit)
dset.meta.add(param_name, factor, section="display_factors")
dset.add_float(fieldname, val=value * factor, unit=to_unit, write_level="operational")
# Convert values to the display unit. It corresponds to "meter per <unit of partial>"
partial_unit = dset.unit("partial.{}".format(param_name))
to_unit = dset.meta["display_units"][param_name]
from_unit = f"meter/({partial_unit[0]})"
factor = Unit(from_unit, to_unit)
dset.meta.add(param_name, factor, section="display_factors")
dset.add_float(fieldname_sigma, val=value_sigma * factor, unit=to_unit, write_level="operational")
# Estimate vectors
fieldname = f"estimate.{param_name}"
value = self.h[: dset.num_obs, idx, 0] * self.x_smooth[: dset.num_obs, idx, 0]
dset.add_float(fieldname, val=value, unit="meter", write_level="analysis")
value = (self.x_smooth.transpose(0, 2, 1) @ self.h)[: dset.num_obs, 0, 0]
fieldname = "est"
if fieldname in dset.fields:
dset[fieldname][:] = value
else:
dset.add_float(fieldname, val=value, unit="meter", write_level="operational")
def _normal_equations(self, normal_idx, last_obs):
"""Calculate normal equations corresponding to the filter results
Args:
normal_idx (Slice): A slice denoting which columns should be used for the normal equations.
Returns:
Tuple of Numpy arrays: Normal matrix (n x n) and Normal vector (n x 1).
"""
p_hat_last = self._get_p_hat(last_obs)
p_tilde_0 = np.diag(self.apriori_stdev ** 2)
pg = p_hat_last[normal_idx, normal_idx]
cg = p_tilde_0[normal_idx, normal_idx]
g = self.x_hat[last_obs, normal_idx, :]
pg_inv = np.linalg.inv(pg)
cg_inv = np.linalg.inv(cg)
N = pg_inv - cg_inv
b = pg_inv @ g
# test
if False:
stat_idx = slice(normal_idx.stop, self.n, None)
R = np.diag(self.r[: last_obs + 1])
H_L = self.h[: last_obs + 1, stat_idx, 0]
c_L = p_tilde_0[stat_idx, stat_idx]
R_tilde = H_L @ c_L @ H_L.T + R
R_tilde_inv = np.linalg.inv(R_tilde)
H_g = self.h[: last_obs + 1, normal_idx, 0]
NN = | |
term length back to saved values
self.__send('term length {0}\n'.format(self.term_len))
self.__wait_for_string(self.prompt)
if self.term_width:
# Set term width back to saved values
self.__send('term width {0}\n'.format(self.term_width))
self.__wait_for_string(self.prompt)
elif self.os == "ASA":
self.screen.Send("terminal pager {0}\n".format(self.term_len))
self.prompt = None
self.logger.debug("<END> Deleting learned Prompt.")
self.hostname = None
self.logger.debug("<END> Deleting learned Hostname.")
# Delete the detected OS
self.os = None
self.logger.debug("<END> Deleting Discovered OS.")
# Return SecureCRT Synchronous and IgnoreEscape values back to defaults, if needed.
if self.session_set_sync:
self.screen.Synchronous = False
self.screen.IgnoreEscape = False
self.session_set_sync = False
self.logger.debug("<END> Unset Synchronous and IgnoreEscape")
# Unlock the tab to return control to the user
try:
self.session.Unlock()
except Exception:
pass
def __enter_enable(self, enable_pass, prompt=False):
"""
A function that will attempt to enter enable mode, if we aren't in enable mode when the method is called.
:param enable_pass: The enable password to use for the connected device.
:type enable_pass: str
"""
if self.prompt[-1] == "#":
self.logger.debug("<__enter_enable> Already in enable -- Moving on.")
elif self.prompt[-1] == ">":
if not enable_pass and prompt:
enable_pass = self.script.prompt_window("Please enter enable password.", "Enter Enable PW",
hide_input=True)
if enable_pass:
self.logger.debug("<__enter_enable> Not in enable. Attempting to elevate privilege.")
self.__send("enable\n")
result = self.__wait_for_strings(["% No", "assword", ">"])
if result == 1:
self.logger.debug("<__enter_enable> Enable password not set.")
try:
self.session.Unlock()
except Exception:
pass
raise InteractionError("Unable to enter Enable mode. No password set.")
if result == 2:
self.screen.Send("{0}\n".format(enable_pass))
self.__wait_for_string("#")
self.prompt = self.__get_prompt()
else:
self.logger.debug("<__enter_enable> Failed to detect password prompt after issuing 'enable'.")
try:
self.session.Unlock()
except Exception:
pass
raise InteractionError("Unable to enter Enable mode.")
else:
self.logger.debug("<__enter_enable> Not in enable mode and no enable password given. Cannot proceed.")
raise InteractionError("Not in enable mode and no enable password given. Cannot proceed.")
else:
self.logger.debug("<__enter_enable> Unable to recognize Cisco style prompt.")
raise InteractionError("Unable to recognize Cisco style prompt")
def __get_prompt(self):
"""
Discovers the prompt of the remote device and returns it.
"""
self.logger.debug("<GET PROMPT> Attempting to discover device prompt.")
result = ''
attempts = 0
while result == '' and attempts < 3:
test_string = "\n!&%\b\b\b"
timeout_seconds = 2
self.screen.Send(test_string)
result = self.screen.ReadString("!&%", timeout_seconds)
attempts += 1
self.logger.debug("<CONNECT> Attempt {0}: Prompt result = {1}".format(attempts, result))
prompt = result.strip(u"\r\n\b ")
if prompt == '':
self.logger.debug("<GET PROMPT> Prompt discovery failed. Raising exception.")
raise InteractionError("Unable to discover device prompt")
self.logger.debug("<GET PROMPT> Discovered prompt as '{0}'.".format(prompt))
return prompt
def __get_network_os(self):
"""
Discovers Network OS type so that scripts can make decisions based on the information, such as sending a
different version of a command for a particular OS.
"""
send_cmd = "show version | i Cisco"
raw_version = self.__get_output(send_cmd)
self.logger.debug("<GET OS> show version output: {0}".format(raw_version))
lower_version = raw_version.lower()
if "cisco ios xe" in lower_version:
version = "IOS"
elif "cisco ios software" in lower_version or "cisco internetwork operating system" in lower_version:
version = "IOS"
elif "cisco nexus operating system" in lower_version:
version = "NXOS"
elif "cisco adaptive security appliance" in lower_version:
version = "ASA"
elif "cisco ios xr software" in lower_version:
version = "IOS-XR"
else:
self.logger.debug("<GET OS> Error detecting OS. Raising Exception.")
raise InteractionError("Unknown or Unsupported device OS.")
return version
def __get_term_info(self):
"""
Returns the current terminal length and width, by capturing the output from the relevant commands.
:return: A 2-tuple containing the terminal length and the terminal width
"""
re_num_exp = r'\d+'
re_num = re.compile(re_num_exp)
if self.os == "IOS" or self.os == "NXOS":
result = self.__get_output("show terminal | i Length")
term_info = result.split(',')
re_length = re_num.search(term_info[0])
if re_length:
length = re_length.group(0)
else:
length = None
re_width = re_num.search(term_info[1])
if re_width:
width = re_width.group(0)
else:
width = None
return length, width
elif self.os == "ASA":
pager = self.__get_output("show pager")
re_length = re_num.search(pager)
if re_length:
length = re_length.group(0)
else:
length = None
term_info = self.__get_output("show terminal")
re_width = re_num.search(term_info[1])
if re_width:
width = re_width.group(0)
else:
width = None
return length, width
else:
return None, None
def __get_output(self, command):
"""
A function that issues a command to the current session and returns the output as a string variable.
*** NOTE *** This is a private method because it should only be used when it is guaranteed that the output
will be small (less than 1000 lines), or else SecureCRT can bog down and crash. "get_command_output()" is safer
because it avoids the problem by writing the output to a file before reading it back into a variable.
:param command: Command string that should be sent to the device
:type command: str
Variable holding the result of issuing the above command.
"""
# Send command
self.__send(command.strip() + '\n')
# Capture the output until we get our prompt back and write it to the file
result = self.screen.ReadString(self.prompt)
return result.strip('\r\n')
def write_output_to_file(self, command, filename, prompt_to_create=True):
"""
Send the supplied command to the remote device and writes the output to a file.
This function was written specifically to write output line by line because storing large outputs into a
variable will cause SecureCRT to bog down until it freezes. A good example is a large "show tech" output.
This method can handle any length of output
:param command: The command to be sent to the device
:type command: str
:param filename: A string with the absolute path to the filename to be written.
:type filename: str
"""
self.logger.debug("<WRITE_FILE> Call to write_output_to_file with command: {0}, filename: {0}"
.format(command, filename))
self.script.validate_dir(os.path.dirname(filename), prompt_to_create=prompt_to_create)
self.logger.debug("<WRITE_FILE> Using filename: {0}".format(filename))
# RegEx to match the whitespace and backspace commands after --More-- prompt
exp_more = r' [\b]+[ ]+[\b]+(?P<line>.*)'
re_more = re.compile(exp_more)
# The 3 different types of lines we want to match (MatchIndex) and treat differently
if self.os == "IOS" or self.os == "NXOS":
matches = ["\r\n", '--More--', self.prompt]
elif self.os == "ASA":
matches = ["\r\n", '<--- More --->', self.prompt]
else:
matches = ["\r\n", '--More--', self.prompt]
# Write the output to the specified file
try:
# Need the 'b' in mode 'wb', or else Windows systems add extra blank lines.
with open(filename, 'wb') as newfile:
self.__send(command + "\n")
# Loop to capture every line of the command. If we get CRLF (first entry in our "endings" list), then
# write that line to the file. If we get our prompt back (which won't have CRLF), break the loop b/c we
# found the end of the output.
while True:
nextline = self.screen.ReadString(matches, 30)
# If the match was the 1st index in the endings list -> \r\n
if self.screen.MatchIndex == 1:
# Strip newlines from front and back of line.
nextline = nextline.strip('\r\n')
# If there is something left, write it.
if nextline != "":
# Check for backspace and spaces after --More-- prompt and strip them out if needed.
regex = re_more.match(nextline)
if regex:
nextline = regex.group('line')
# Strip line endings from line. Also re-encode line as ASCII
# and ignore the character if it can't be done (rare error on
# Nexus)
newfile.write(nextline.strip('\r\n').encode('ascii', 'ignore') + "\r\n")
self.logger.debug("<WRITE_FILE> Writing Line: {0}".format(nextline.strip('\r\n')
.encode('ascii', 'ignore')))
elif self.screen.MatchIndex == 2:
# If we get a --More-- send a space character
self.screen.Send(" ")
elif self.screen.MatchIndex == 3:
# We got our prompt, so break the loop
break
else:
raise InteractionError("Timeout trying to capture output")
except IOError, err:
error_str = "IO Error for:\n{0}\n\n{1}".format(filename, err)
self.script.message_box(error_str, "IO Error", ICON_STOP)
def get_command_output(self, command):
"""
Captures the output from the provided command and saves the results in a variable.
** NOTE ** Assigning the output directly to a variable causes problems with SecureCRT for long outputs. It
will gradually get slower and slower until the program freezes and crashes. The workaround is to
save the output directly to a file (line by line), and then read it back into a variable. This is the
procedure that this method uses.
Keyword Arguments:
:param command: Command string that should be sent to the device
:type command: str
:return: The result from issuing the above command.
:rtype: str
"""
self.logger.debug("<GET OUTPUT> Running get_command_output with input '{0}'".format(command))
# Create a temporary filename
temp_filename = self.create_output_filename("{0}-temp".format(command))
self.logger.debug("<GET OUTPUT> | |
"""
Lagrangian Multiphase Plume
===========================
This module contains the numerical solution for the `bent_plume_model` module.
Some of the general tools for handling the multiphase components are
contained in `dispersed_phases`.
"""
# <NAME>, November 2014, Texas A&M University <<EMAIL>>.
from __future__ import (absolute_import, division, print_function)
from tamoc import seawater
from tamoc import dispersed_phases
import numpy as np
from scipy import integrate
from copy import deepcopy
def derivs(t, q, q0_local, q1_local, profile, p, particles):
    """
    Calculate the derivatives for the system of ODEs for a Lagrangian plume

    Calculates the right-hand-side of the system of ODEs for a Lagrangian
    plume integral model.  The continuous phase model matches very closely
    the model of Lee and Cheung (1990), with adaptations for the shear
    entrainment following Jirka (2004).  Multiphase extensions following the
    strategy in Socolofsky et al. (2008) with adaptation to Lagrangian plume
    models by Johansen (2000, 2003) and Yapa and Zheng (1997).

    Parameters
    ----------
    t : float
        Current value for the independent variable (time in s).
    q : ndarray
        Current value for the plume state space vector.
    q0_local : `bent_plume_model.LagElement`
        Object containing the numerical solution at the previous time step
    q1_local : `bent_plume_model.LagElement`
        Object containing the numerical solution at the current time step
    profile : `ambient.Profile` object
        The ambient CTD object used by the simulation.
    p : `ModelParams` object
        Object containing the fixed model parameters for the bent
        plume model.
    particles : list of `Particle` objects
        List of `bent_plume_model.Particle` objects containing the dispersed
        phase local conditions and behavior.

    Returns
    -------
    qp : ndarray
        A vector of the derivatives of the plume state space.

    See Also
    --------
    calculate

    """
    # Set up the output from the function to have the right size and shape
    qp = np.zeros(q.shape)

    # Update the local Lagrangian element properties
    q1_local.update(t, q, profile, p, particles)

    # Get the entrainment flux (mass per unit time) into the element
    md = entrainment(q0_local, q1_local, p)

    # Get the dispersed phase tracking variables
    (fe, up, dtp_dt) = track_particles(q0_local, q1_local, md, particles)

    # Conservation of Mass
    qp[0] = md

    # Conservation of salt and heat (entrained ambient water)
    qp[1] = md * q1_local.Sa
    qp[2] = md * seawater.cp() * q1_local.Ta

    # Conservation of continuous phase momentum.  Note that z is positive
    # down (depth).
    qp[3] = md * q1_local.ua
    qp[4] = md * q1_local.va
    qp[5] = - p.g / (p.gamma * p.rho_r) * (q1_local.Fb + q1_local.M *
            (q1_local.rho_a - q1_local.rho)) + md * q1_local.wa

    # Constant h/V thickness-to-velocity ratio
    qp[6] = 0.

    # Lagrangian plume element advection (x, y, z) and s along the centerline
    # trajectory
    qp[7] = q1_local.u
    qp[8] = q1_local.v
    qp[9] = q1_local.w
    qp[10] = q1_local.V

    # Conservation equations for each dispersed phase start at index 11;
    # each particle occupies a contiguous slab of the state space
    idx = 11

    # Track the mass dissolving into the continuous phase per unit time
    dm = np.zeros(q1_local.nchems)

    # Compute mass and heat transfer for each particle
    for i in range(len(particles)):
        # Only simulate particles inside the plume
        if particles[i].integrate:
            # Dissolution and Biodegradation
            if particles[i].particle.issoluble:
                # Dissolution mass transfer for each particle component
                dm_pc = - particles[i].A * particles[i].nbe * \
                    particles[i].beta * (particles[i].Cs -
                    q1_local.c_chems) * dtp_dt[i]

                # Update continuous phase temperature with heat of
                # solution
                qp[2] += np.sum(dm_pc * \
                    particles[i].particle.neg_dH_solR \
                    * p.Ru / particles[i].particle.M)

                # Biodegradation for each particle component
                dm_pb = -particles[i].k_bio * particles[i].m * \
                    particles[i].nbe * dtp_dt[i]

                # Conservation of mass for dissolution and biodegradation
                qp[idx:idx+q1_local.nchems] = dm_pc + dm_pb

                # Update position in state space
                idx += q1_local.nchems
            else:
                # No dissolution for insoluble particles
                dm_pc = np.zeros(q1_local.nchems)

                # Biodegradation for insoluble particles
                dm_pb = -particles[i].k_bio * particles[i].m * \
                    particles[i].nbe * dtp_dt[i]
                qp[idx] = dm_pb
                idx += 1

            # Update the total mass dissolved
            dm += dm_pc

            # Heat transfer between the particle and the ambient
            qp[idx] = - particles[i].A * particles[i].nbe * \
                particles[i].rho_p * particles[i].cp * \
                particles[i].beta_T * (particles[i].T - \
                q1_local.T) * dtp_dt[i]

            # Heat loss due to mass loss
            qp[idx] += np.sum(dm_pc + dm_pb) * particles[i].cp * \
                particles[i].T

            # Take the heat leaving the particle and put it in the continuous
            # phase fluid
            qp[2] -= qp[idx]
            idx += 1

            # Particle age
            qp[idx] = dtp_dt[i]
            idx += 1

            # Follow the particles in the local coordinate system (l,n,m)
            # relative to the plume centerline
            qp[idx] = 0.
            idx += 1
            qp[idx] = (up[i,1] - fe * q[idx]) * dtp_dt[i]
            idx += 1
            qp[idx] = (up[i,2] - fe * q[idx]) * dtp_dt[i]
            idx += 1
        else:
            # Skip over this particle's slab of the state space (nc component
            # masses plus heat, age and three position variables)
            idx += particles[i].particle.nc + 5

    # Conservation equations for the dissolved constituents in the plume:
    # ambient input by entrainment, dissolution source, biodegradation sink
    qp[idx:idx+q1_local.nchems] = md / q1_local.rho_a * q1_local.ca_chems \
        - dm - q1_local.k_bio * q1_local.cpe
    idx += q1_local.nchems

    # Conservation equation for the passive tracers in the plume
    qp[idx:] = md / q1_local.rho_a * q1_local.ca_tracers

    # Return the slopes
    return qp
def calculate(t0, q0, q0_local, profile, p, particles, derivs, dt_max,
sd_max):
"""
Integrate an the Lagrangian plume solution
Compute the solution tracking along the centerline of the plume until
the plume reaches the water surface, reaches a neutral buoyancy level
within the intrusion layer, or propagates a given maximum number of
nozzle diameters downstream.
Parameters
----------
t0 : float
Initial time (s)
q0 : ndarray
Initial values of the state space vector
q0_local : `bent_plume_model.LagElement`
Object containing the numerical solution at the initial condition
profile : `ambient.Profile` object
The ambient CTD object used by the simulation.
p : `ModelParams` object
Object containing the fixed model parameters for the bent
plume model.
particles : list of `Particle` objects
List of `bent_plume_model.Particle` objects containing the dispersed
phase local conditions and behavior.
derivs : function handle
Pointer to the function where the derivatives of the ODE system are
stored. Should be `lmp.derivs`.
dt_max : float
Maximum step size to use in the simulation (s). The ODE solver
in `calculate` is set up with adaptive step size integration, so
this value determines the largest step size in the output data, but
not the numerical stability of the calculation.
sd_max : float
Maximum number of nozzle diameters to compute along the plume
centerline (s/D)_max. This is the only stop criteria that is user-
selectable.
Returns
-------
t : ndarray
Vector of times when the plume solution is obtained (s).
y : ndarray
Matrix of the plume state space solutions. Each row corresponds to
a time in `t`.
See Also
--------
derivs, bent_plume_mode.Model
"""
# Create an integrator object: use "vode" with "backward
# differentitaion formula" for stiff ODEs
r = integrate.ode(derivs).set_integrator('vode', method='bdf', atol=1.e-6,
rtol=1e-3, order=5, max_step=dt_max)
# Push the initial state space to the integrator object
r.set_initial_value(q0, t0)
# Make a copy of the q1_local object needed to evaluate the entrainment
q1_local = deepcopy(q0_local)
q0_hold = deepcopy(q1_local)
# Create vectors (using the list data type) to store the solution
t = [t0]
q = [q0]
# Integrate a finite number of time steps
k = 0
psteps = 30.
stop = False
neutral_counter = 0
top_counter = 0
while r.successful() and not stop:
# Print progress to the screen
if np.remainder(np.float(k), psteps) == 0.:
print(' Distance: %g (m), time: %g (s), k: %d' % \
(q[-1][10], t[-1], k))
# Perform one step of the integration
r.set_f_params(q0_local, q1_local, profile, p, particles)
r.integrate(t[-1] + dt_max, step=True)
q1_local.update(r.t, r.y, profile, p, particles)
# Correct the temperature
r = correct_temperature(r, particles)
# Remove particle solution for particles outside the plume
r = correct_particle_tracking(r, particles)
# Store the results
t.append(r.t)
q.append(r.y)
# Update the Lagrangian elements for the next time step
q0_local = q0_hold
q0_hold = deepcopy(q1_local)
# Check if the | |
<filename>osmchadjango/supervise/tests/test_views.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import xml.etree.ElementTree as ET
from django.urls import reverse
from django.contrib.gis.geos import MultiPolygon, Polygon, Point, LineString
from rest_framework.test import APITestCase
from social_django.models import UserSocialAuth
from ...changeset.tests.modelfactories import (
ChangesetFactory, HarmfulChangesetFactory, GoodChangesetFactory,
SuspicionReasonsFactory, TagFactory, UserWhitelistFactory
)
from ...users.models import User
from ..models import AreaOfInterest, BlacklistedUser
class TestAoIListView(APITestCase):
    """Listing Areas of Interest: authentication, ordering and per-user filtering."""

    def setUp(self):
        # Two AoI geometries, far apart from each other
        self.m_polygon = MultiPolygon(
            Polygon(((0, 0), (0, 1), (1, 1), (0, 0))),
            Polygon(((1, 1), (1, 2), (2, 2), (1, 1)))
        )
        self.m_polygon_2 = MultiPolygon(
            Polygon(((30, 30), (30, 31), (31, 31), (30, 30))),
            Polygon(((31, 31), (31, 32), (32, 32), (31, 31)))
        )
        # Two users, each with an openstreetmap social auth entry
        self.user = User.objects.create_user(
            username='test_user',
            email='<EMAIL>',
            password='password'
        )
        UserSocialAuth.objects.create(
            user=self.user,
            provider='openstreetmap',
            uid='123123',
        )
        self.user_2 = User.objects.create_user(
            username='test',
            email='<EMAIL>',
            password='password'
        )
        UserSocialAuth.objects.create(
            user=self.user_2,
            provider='openstreetmap',
            uid='42344',
        )
        # Three AoIs: two owned by self.user, one by self.user_2. The creation
        # order matters for the date-ordering assertions below.
        self.area = AreaOfInterest.objects.create(
            name='Best place in the world',
            user=self.user,
            geometry=self.m_polygon,
            filters={
                'editor': 'Potlatch 2',
                'harmful': 'False',
                'geometry': self.m_polygon.geojson
            },
        )
        self.area_2 = AreaOfInterest.objects.create(
            name='Another AOI',
            user=self.user,
            filters={'geometry': self.m_polygon_2.geojson},
            geometry=self.m_polygon_2
        )
        self.area_3 = AreaOfInterest.objects.create(
            user=self.user_2,
            name='Harmful edits',
            filters={'harmful': 'False'},
        )
        self.url = reverse('supervise:aoi-list-create')

    def test_list_view_unauthenticated(self):
        # Anonymous users may not list AoIs
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 401)

    def test_list_view(self):
        # A logged-in user sees only their own AoIs
        self.client.login(username=self.user.username, password='password')
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        features = response.data.get('results').get('features')
        self.assertEqual(len(features), 2)

    def test_ordering(self):
        self.client.login(username=self.user.username, password='password')
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        # The default ordering is -date, so the newest AoI comes first
        default_order = response.data.get('results').get('features')
        self.assertEqual(default_order[0]['properties']['name'], 'Another AOI')
        # Ordering by date puts the oldest AoI first
        by_date = self.client.get(self.url, {'order_by': 'date'})
        oldest_first = by_date.data.get('results').get('features')
        self.assertEqual(oldest_first[0]['properties']['name'], 'Best place in the world')
        # Ordering by -name sorts the names in reverse alphabetical order
        by_name = self.client.get(self.url, {'order_by': '-name'})
        reverse_alpha = by_name.data.get('results').get('features')
        self.assertEqual(reverse_alpha[0]['properties']['name'], 'Best place in the world')

    def test_list_view_with_user_2(self):
        # user_2 owns a single AoI, and its filters are returned verbatim
        self.client.login(username=self.user_2.username, password='password')
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        features = response.data.get('results').get('features')
        self.assertEqual(len(features), 1)
        self.assertEqual(features[0]['properties']['name'], 'Harmful edits')
        self.assertEqual(features[0]['properties']['filters'], {'harmful': 'False'})
class TestAoICreateView(APITestCase):
    """Creating Areas of Interest: auth, geometry/bbox handling and validation."""

    def setUp(self):
        self.polygon = Polygon([[2, 0], [5, 0], [5, 2], [2, 2], [2, 0]])
        self.user = User.objects.create_user(
            username='test_user',
            email='<EMAIL>',
            password='password'
        )
        UserSocialAuth.objects.create(
            user=self.user,
            provider='openstreetmap',
            uid='123123',
        )
        self.url = reverse('supervise:aoi-list-create')
        # BUG FIX: the AoI names below must match the lookups in the tests
        # (AreaOfInterest.objects.get(name='Golfo da Guiné')); they had been
        # replaced by a '<NAME>' placeholder, which made the tests fail.
        self.data = {
            'name': 'Golfo da Guiné',
            'filters': {
                'is_suspect': 'True',
                'geometry': self.polygon.geojson
            },
        }
        self.data_bbox = {
            'name': 'Golfo da Guiné',
            'filters': {
                'is_suspect': 'True',
                'in_bbox': '2,0,5,2'
            },
        }
        self.without_geo_aoi = {
            'name': 'Unchecked suspect changesets',
            'filters': {
                'is_suspect': 'True',
                'checked': 'False'
            },
        }

    def test_create_AOI_unauthenticated(self):
        # Anonymous users may not create AoIs
        response = self.client.post(self.url, self.data)
        self.assertEqual(response.status_code, 401)
        self.assertEqual(AreaOfInterest.objects.count(), 0)

    def test_create_AOI(self):
        self.client.login(username=self.user.username, password='password')
        response = self.client.post(self.url, self.data)
        self.assertEqual(response.status_code, 201)
        self.assertEqual(AreaOfInterest.objects.count(), 1)
        aoi = AreaOfInterest.objects.get(name='Golfo da Guiné')
        self.assertEqual(aoi.user, self.user)
        self.assertEqual(aoi.filters, self.data.get('filters'))
        self.assertIsInstance(aoi.geometry, Polygon)
        self.assertTrue(
            aoi.geometry.intersects(
                Polygon([[2, 0], [5, 0], [5, 2], [2, 2], [2, 0]])
            )
        )

    def test_create_without_geometry_and_bbox(self):
        # An AoI may be defined by non-spatial filters alone
        self.client.login(username=self.user.username, password='password')
        response = self.client.post(self.url, self.without_geo_aoi)
        self.assertEqual(response.status_code, 201)
        self.assertEqual(AreaOfInterest.objects.count(), 1)
        aoi = AreaOfInterest.objects.get(name='Unchecked suspect changesets')
        self.assertEqual(aoi.user, self.user)
        self.assertEqual(aoi.filters, self.without_geo_aoi.get('filters'))

    def test_create_with_bbox(self):
        # An 'in_bbox' filter should produce a geometry covering the bbox
        self.client.login(username=self.user.username, password='password')
        response = self.client.post(self.url, self.data_bbox)
        self.assertEqual(response.status_code, 201)
        self.assertEqual(AreaOfInterest.objects.count(), 1)
        aoi = AreaOfInterest.objects.get(name='Golfo da Guiné')
        self.assertEqual(aoi.user, self.user)
        self.assertEqual(aoi.filters, self.data_bbox.get('filters'))
        self.assertIsInstance(aoi.geometry, Polygon)
        self.assertTrue(
            aoi.geometry.intersects(
                Polygon([[2, 0], [5, 0], [5, 2], [2, 2], [2, 0]])
            )
        )

    def test_validation(self):
        self.client.login(username=self.user.username, password='password')
        # An AoI without filters is rejected
        response = self.client.post(self.url, {'name': 'Empty AoI'})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(AreaOfInterest.objects.count(), 0)
        # test validation of unique name of AoI for each user
        response = self.client.post(self.url, self.data)
        self.assertEqual(response.status_code, 201)
        response = self.client.post(self.url, self.data)
        self.assertEqual(response.status_code, 400)

    def test_auto_user_field(self):
        # The AoI owner is taken from the session, not from the payload
        user_2 = User.objects.create_user(
            username='test',
            email='<EMAIL>',
            password='password'
        )
        UserSocialAuth.objects.create(
            user=user_2,
            provider='openstreetmap',
            uid='4444',
        )
        self.client.login(username=self.user.username, password='password')
        response = self.client.post(self.url, self.data)
        self.assertEqual(response.status_code, 201)
        aoi = AreaOfInterest.objects.get(name='Golfo da Guiné')
        self.assertEqual(aoi.user, self.user)
class TestAoIDetailAPIViews(APITestCase):
    """Tests for the AoI detail endpoint: retrieve, update (PUT/PATCH)
    and delete, including permission checks for anonymous users and
    non-owners.
    """

    def setUp(self):
        # Fixture geometry: two triangles combined into a MultiPolygon.
        self.m_polygon = MultiPolygon(
            Polygon(((0, 0), (0, 1), (1, 1), (0, 0))),
            Polygon(((1, 1), (1, 2), (2, 2), (1, 1)))
        )
        # Owner of the AoI under test.
        self.user = User.objects.create_user(
            username='test_user',
            email='<EMAIL>',
            password='password'
        )
        UserSocialAuth.objects.create(
            user=self.user,
            provider='openstreetmap',
            uid='123123',
        )
        self.aoi = AreaOfInterest.objects.create(
            name='Best place in the world',
            user=self.user,
            geometry=self.m_polygon,
            filters={
                'editor': 'Potlatch 2',
                'harmful': 'False',
                'users': 'test',
                'uids': '234,43',
                'checked_by': 'qa_user',
                'geometry': self.m_polygon.geojson
            },
        )
        # Update payload; its geometry is disjoint from the original one.
        self.m_polygon_2 = MultiPolygon(
            Polygon([[2, 0], [5, 0], [5, 2], [2, 2], [2, 0]])
        )
        self.data = {
            'filters': {
                'is_suspect': 'True',
                'geometry': self.m_polygon_2.geojson,
            },
            'name': '<NAME>'
        }

    def test_unauthenticated(self):
        """Anonymous requests to the detail endpoint receive 401."""
        response = self.client.get(
            reverse('supervise:aoi-detail', args=[self.aoi.pk])
        )
        self.assertEqual(response.status_code, 401)

    def test_retrieve_detail_authenticated(self):
        """The detail payload is GeoJSON exposing name, filters, geometry
        type, id and changesets_url — but never the user field.
        """
        self.client.login(username=self.user.username, password='password')
        response = self.client.get(
            reverse('supervise:aoi-detail', args=[self.aoi.pk])
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.data['properties']['name'],
            'Best place in the world'
        )
        self.assertEqual(
            response.data['properties']['filters'],
            {
                'editor': 'Potlatch 2',
                'harmful': 'False',
                'users': 'test',
                'uids': '234,43',
                'checked_by': 'qa_user',
                'geometry': self.m_polygon.geojson
            }
        )
        self.assertEqual(
            response.data['geometry']['type'],
            'MultiPolygon'
        )
        self.assertIn(
            'id',
            response.data.keys()
        )
        # The owner must not be leaked in the serialized payload.
        self.assertNotIn(
            'user',
            response.data.keys()
        )
        self.assertEqual(
            response.data['properties']['changesets_url'],
            reverse('supervise:aoi-list-changesets', args=[self.aoi.pk])
        )

    def test_update_aoi_unauthenticated(self):
        """Unauthenticated users can not update AoI"""
        response = self.client.put(
            reverse('supervise:aoi-detail', args=[self.aoi.pk]),
            self.data
        )
        self.assertEqual(response.status_code, 401)
        self.aoi.refresh_from_db()
        self.assertEqual(self.aoi.name, 'Best place in the world')
        # PATCH must be rejected just like PUT.
        response = self.client.patch(
            reverse('supervise:aoi-detail', args=[self.aoi.pk]),
            self.data
        )
        self.assertEqual(response.status_code, 401)
        self.aoi.refresh_from_db()
        self.assertEqual(self.aoi.name, 'Best place in the world')

    def test_delete_aoi_unauthenticated(self):
        """Unauthenticated users can not delete AoI"""
        response = self.client.delete(
            reverse('supervise:aoi-detail', args=[self.aoi.pk])
        )
        self.assertEqual(response.status_code, 401)
        self.assertEqual(AreaOfInterest.objects.count(), 1)

    def test_update_aoi_of_another_user(self):
        """A user can not update AoI of another user."""
        user = User.objects.create_user(
            username='test_2',
            email='<EMAIL>',
            password='password'
        )
        self.client.login(username=user.username, password='password')
        response = self.client.put(
            reverse('supervise:aoi-detail', args=[self.aoi.pk]),
            self.data
        )
        self.assertEqual(response.status_code, 403)
        self.aoi.refresh_from_db()
        self.assertEqual(self.aoi.name, 'Best place in the world')
        # PATCH by a non-owner is forbidden as well.
        response = self.client.patch(
            reverse('supervise:aoi-detail', args=[self.aoi.pk]),
            self.data
        )
        self.assertEqual(response.status_code, 403)
        self.aoi.refresh_from_db()
        self.assertEqual(self.aoi.name, 'Best place in the world')

    def test_delete_aoi_of_another_user(self):
        """A user can not delete AoI of another user."""
        user = User.objects.create_user(
            username='test_2',
            email='<EMAIL>',
            password='password'
        )
        self.client.login(username=user.username, password='password')
        response = self.client.delete(
            reverse('supervise:aoi-detail', args=[self.aoi.pk])
        )
        self.assertEqual(response.status_code, 403)
        self.assertEqual(AreaOfInterest.objects.count(), 1)

    def test_update_with_aoi_owner_user(self):
        """User can update his/her AoI"""
        self.client.login(username=self.user.username, password='password')
        response = self.client.put(
            reverse('supervise:aoi-detail', args=[self.aoi.pk]),
            self.data
        )
        self.assertEqual(response.status_code, 200)
        self.aoi.refresh_from_db()
        self.assertEqual(self.aoi.name, '<NAME>')
        self.assertEqual(self.aoi.filters, self.data.get('filters'))
        self.assertTrue(
            self.aoi.geometry.intersects(
                Polygon(((4, 0), (5, 0), (5, 1), (4, 0)))
            )
        )

    def test_put_update_with_bbox(self):
        """'in_bbox' field must populate the geometry field with a Polygon"""
        data = {
            'filters': {
                'is_suspect': 'True',
                'in_bbox': '4,0,5,1'
            },
            'name': '<NAME>'
        }
        self.client.login(username=self.user.username, password='password')
        response = self.client.put(
            reverse('supervise:aoi-detail', args=[self.aoi.pk]),
            data
        )
        self.assertEqual(response.status_code, 200)
        self.aoi.refresh_from_db()
        self.assertEqual(self.aoi.name, '<NAME>')
        self.assertEqual(self.aoi.filters, data.get('filters'))
        self.assertTrue(
            self.aoi.geometry.intersects(
                Polygon(((4, 0), (5, 0), (5, 1), (4, 0)))
            )
        )
        self.assertIsInstance(self.aoi.geometry, Polygon)

    def test_put_empty_geometry(self):
        """If the AoI receives a filter without geometry and in_bbox information,
        the geometry field will be updated to None."""
        data = {
            'filters': {
                'is_suspect': 'True',
            },
            'name': '<NAME>'
        }
        self.client.login(username=self.user.username, password='password')
        response = self.client.put(
            reverse('supervise:aoi-detail', args=[self.aoi.pk]),
            data
        )
        self.assertEqual(response.status_code, 200)
        self.aoi.refresh_from_db()
        self.assertEqual(self.aoi.name, '<NAME>')
        self.assertEqual(self.aoi.filters, data.get('filters'))
        self.assertIsNone(self.aoi.geometry)

    def test_patch_empty_geometry(self):
        """If the AoI receives a filter without geometry and in_bbox information,
        the geometry field will be updated to None."""
        data = {
            'filters': {
                'is_suspect': 'True',
            },
            'name': '<NAME>'
        }
        self.client.login(username=self.user.username, password='password')
        response = self.client.patch(
            reverse('supervise:aoi-detail', args=[self.aoi.pk]),
            data
        )
        self.assertEqual(response.status_code, 200)
        self.aoi.refresh_from_db()
        self.assertEqual(self.aoi.name, '<NAME>')
        self.assertEqual(self.aoi.filters, data.get('filters'))
        self.assertIsNone(self.aoi.geometry)

    def test_patch_update_with_bbox(self):
        """'in_bbox' field must populate the geometry field with a Polygon"""
        data = {
            'filters': {
                'is_suspect': 'True',
                'in_bbox': '4,0,5,1'
            }
        }
        self.client.login(username=self.user.username, password='password')
        response = self.client.patch(
            reverse('supervise:aoi-detail', args=[self.aoi.pk]),
            data
        )
        self.assertEqual(response.status_code, 200)
        self.aoi.refresh_from_db()
        self.assertEqual(self.aoi.filters, data.get('filters'))
        self.assertIsInstance(self.aoi.geometry, Polygon)
        self.assertTrue(
            self.aoi.geometry.intersects(
                Polygon(((4, 0), (5, 0), (5, 1), (4, 0)))
            )
        )

    def test_update_with_line_and_point(self):
        """The geometry field must receive any geometry type."""
        point = Point((0.5, 0.5))
        data = {
            'filters': {
                'geometry': point.geojson,
            },
            'name': '<NAME>'
        }
        self.client.login(username=self.user.username, password='password')
        response = self.client.put(
            reverse('supervise:aoi-detail', args=[self.aoi.pk]),
            data
        )
        self.assertEqual(response.status_code, 200)
        self.aoi.refresh_from_db()
        self.assertIsInstance(self.aoi.geometry, Point)
        # A LineString geometry must be accepted as well.
        line = LineString(((0.5, 0.5), (1, 1)))
        data = {
            'filters': {
                'geometry': line.geojson,
            },
            'name': '<NAME>'
        }
        response = self.client.put(
            reverse('supervise:aoi-detail', args=[self.aoi.pk]),
            data
        )
        self.assertEqual(response.status_code, 200)
        self.aoi.refresh_from_db()
        self.assertIsInstance(self.aoi.geometry, LineString)

    def test_validation(self):
        """An update without filters and geometry must be rejected."""
        self.client.login(username=self.user.username, password='password')
        response = self.client.put(
            reverse('supervise:aoi-detail', args=[self.aoi.pk]),
            self.data
        )
        self.assertEqual(response.status_code, 200)
        # The user is not allowed to leave both the filters and the
        # geometry fields empty.
        response = self.client.put(
            reverse('supervise:aoi-detail', args=[self.aoi.pk]),
            {'name': '<NAME>'}
        )
        self.assertEqual(response.status_code, 400)
        self.aoi.refresh_from_db()
        self.assertIsNotNone(self.aoi.filters)
        self.assertIsNotNone(self.aoi.geometry)

    def test_delete_with_aoi_owner_user(self):
        """The owner can delete their own AoI (204, record removed)."""
        self.client.login(username=self.user.username, password='password')
        response = self.client.delete(
            reverse('supervise:aoi-detail', args=[self.aoi.pk])
        )
        self.assertEqual(response.status_code, 204)
        self.assertEqual(AreaOfInterest.objects.count(), 0)
class TestAoIChangesetListView(APITestCase):
    def setUp(self):
        """Create a user owning an AoI whose filters select non-harmful
        'Potlatch 2' changesets inside a two-triangle MultiPolygon.
        """
        self.m_polygon = MultiPolygon(
            Polygon(((0, 0), (0, 1), (1, 1), (0, 0))),
            Polygon(((1, 1), (1, 2), (2, 2), (1, 1)))
        )
        self.user = User.objects.create_user(
            username='test_user',
            email='<EMAIL>',
            password='password'
        )
        UserSocialAuth.objects.create(
            user=self.user,
            provider='openstreetmap',
            uid='123123',
        )
        self.aoi = AreaOfInterest.objects.create(
            name='Best place in the world',
            user=self.user,
            geometry=self.m_polygon,
            filters={
                'editor': 'Potlatch 2',
                'harmful': 'False',
                'geometry': self.m_polygon.geojson
            },
        )
def test_authenticated_aoi_list_changesets_view(self):
ChangesetFactory(bbox=Polygon(((10, 10), (10, 11), (11, 11), (10, 10))))
ChangesetFactory(
editor='JOSM 1.5',
harmful=False,
bbox=Polygon(((0, 0), (0, 0.5), (0.7, 0.5), (0, 0))),
)
ChangesetFactory.create_batch(
51,
harmful=False,
bbox=Polygon(((0, 0), (0, 0.5), (0.7, 0.5), (0, 0))),
)
self.client.login(username=self.user.username, password='password')
response = self.client.get(
reverse('supervise:aoi-list-changesets', args=[self.aoi.pk])
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['count'], 51)
self.assertEqual(len(response.data['features']), 50)
self.assertIn('features', response.data.keys())
self.assertIn('geometry', response.data['features'][0].keys())
self.assertIn('properties', response.data['features'][0].keys())
self.assertIn('check_user', response.data['features'][0]['properties'])
self.assertIn('user', response.data['features'][0]['properties'])
self.assertIn('uid', response.data['features'][0]['properties'])
def test_unauthenticated_aoi_list_changesets_view(self):
response = self.client.get(
reverse('supervise:aoi-list-changesets', args=[self.aoi.pk])
)
self.assertEqual(response.status_code, 401)
def test_aoi_with_in_bbox_filter(self):
aoi_with_in_bbox = AreaOfInterest.objects.create(
name='Another place in the | |
'''
Created on Jun 9, 2014
@author: dbhage
English Celex Test Class
'''
import unittest
from celex.phonology.english_celex import EnglishCelex
from test import TEST_CELEX_PATH
class EnglishCelexTest(unittest.TestCase):
def setUp(self):
'''
Set values for tests.
'''
self.expected_prefix = 'e'
self.expected_language = "english"
self.expected_word_index = 1
self.expected_phon_index = 6
self.invalid_path = "invalid_path/"
self.cls = EnglishCelex
self.expected_dict_v0 = {'abilities': ["@-'bI-l@-tIz"],
'abdications': ['"{b-dI-\'k1-SHz'],
'abbreviation': ['@-"bri-vI-\'1-SH'],
'abatement': ["@-'b1t-m@nt"],
'abbreviate': ["@-'bri-vI-1t"],
'abettor': ["@-'bE-t@R"],
'abducts': ["@b-'dVkts"],
'abbes': ['{-b1z'],
'abhorred': ["@b-'h$d"],
'abbey': ['{-bI'],
'abhorrent': ["@b-'hQ-r@nt"],
'abiding': ["@-'b2-dIN"],
'abeyant': ["@-'b1-@nt"],
'abbe': ['{-b1'],
'abashed': ["@-'b{St"],
'abandons': ["@-'b{n-d@nz"],
'abashes': ["@-'b{-SIz"],
'abduction': ["{b-'dVk-SH"],
'abacus': ['{-b@-k@s'],
'abhorrence': ["@b-'hQ-r@ns"],
'abed': ["@-'bEd"],
'abaft': ["@-'b#ft"],
'abattoirs': ['{-b@-tw#z'],
'abbreviated': ["@-'bri-vI-1-tId"],
'abetting': ["@-'bE-tIN"],
'abbreviates': ["@-'bri-vI-1ts"],
'ability': ["@-'bI-l@-tI"],
'abdication': ['"{b-dI-\'k1-SH'],
'abc': ['"1-bi-\'si'],
'abetted': ["@-'bE-tId"],
'abide': ["@-'b2d"],
'abided by': ["@-'b2-dId-b2"],
'abides': ["@-'b2dz"],
'abhorring': ["@b-'h$-rIN"],
'abbots': ['{-b@ts'],
'aberration': ['"{-b@-\'r1-SH'],
'abdicated': ['{b-dI-k1-tId'],
'abbess': ['{-bEs'],
'abdicates': ['{b-dI-k1ts'],
'abbreviating': ["@-'bri-vI-1-tIN"],
'abasement': ["@-'b1s-m@nt"],
'abets': ["@-'bEts"],
'abet': ["@-'bEt"],
'abbesses': ['{-bE-sIz'],
'abeyance': ["@-'b1-@ns"],
'abattoir': ['{-b@-tw#R'],
'abash': ["@-'b{S"],
'aberrant': ["{-'bE-r@nt"],
'abase': ["@-'b1s"],
'aback': ["@-'b{k"],
'abaci': ['{-b@-s2'],
'abdominal': ["{b-'dQ-mI-nP"],
'abbreviations': ['@-"bri-vI-\'1-SHz'],
'abate': ["@-'b1t"],
'abbeys': ['{-bIz'],
'abating': ["@-'b1-tIN"],
'abiding by': ["@-'b2-dIN-b2"],
'abductions': ["{b-'dVk-SHz"],
'abates': ["@-'b1ts"],
'abated': ["@-'b1-tId"],
'abduct': ["@b-'dVkt"],
"abc's": ['"1-bi-\'siz'],
'abasing': ["@-'b1-sIN"],
'abandoned': ["@-'b{n-d@nd"],
'abeam': ["@-'bim"],
'abdicating': ['{b-dI-k1-tIN'],
'abacuses': ['{-b@-k@-sIz'],
'aas': ['"1-\'1z'],
'abandon': ["@-'b{n-d@n"],
'abducted': ["@b-'dVk-tId"],
'aa': ['"1-\'1'],
'abandonment': ["@-'b{n-d@n-m@nt"],
'abashing': ["@-'b{-SIN"],
'abducting': ["@b-'dVk-tIN"],
'abbot': ['{-b@t'],
'abcs': ['"1-bi-\'siz'],
'abhors': ["@b-'h$z"],
'abdomens': ['{b-d@-mEnz'],
'abased': ["@-'b1st"],
'abide by': ["@-'b2d-b2"],
'abases': ["@-'b1-sIz"],
'abidance by': ['@-\'b2-d@ns-"b2'],
'abdicate': ['{b-dI-k1t'],
'abandoning': ["@-'b{n-d@-nIN"],
'abdomen': ['{b-d@-mEn'],
'abides by': ["@-'b2dz-b2"],
'abided': ["@-'b2-dId"],
'abettors': ["@-'bE-t@z"],
'aberrations': ['"{-b@-\'r1-SHz'],
'a': ['1'],
'abhor': ["@b-'h$R"]}
self.expected_dict_v1 = {'abilities': ['@', "'bI", 'l@', 'tIz'],
'abdications': ['"{b', 'dI', "'k1", 'SHz'],
'abbreviation': ['@', '"bri', 'vI', "'1", 'SH'],
'abatement': ['@', "'b1t", 'm@nt'],
'abbreviate': ['@', "'bri", 'vI', '1t'],
'abettor': ['@', "'bE", 't@R'],
'abducts': ['@b', "'dVkts"],
'abbes': ['{', 'b1z'],
'abhorred': ['@b', "'h$d"],
'abbey': ['{', 'bI'],
'abhorrent': ['@b', "'hQ", 'r@nt'],
'abiding': ['@', "'b2", 'dIN'],
'abeyant': ['@', "'b1", '@nt'],
'abbe': ['{', 'b1'],
'abashed': ['@', "'b{St"],
'abandons': ['@', "'b{n", 'd@nz'],
'abashes': ['@', "'b{", 'SIz'],
'abduction': ['{b', "'dVk", 'SH'],
'abacus': ['{', 'b@', 'k@s'],
'abhorrence': ['@b', "'hQ", 'r@ns'],
'abed': ['@', "'bEd"],
'abaft': ['@', "'b#ft"],
'abattoirs': ['{', 'b@', 'tw#z'],
'abbreviated': ['@', "'bri", 'vI', '1', 'tId'],
'abetting': ['@', "'bE", 'tIN'],
'abbreviates': ['@', "'bri", 'vI', '1ts'],
'ability': ['@', "'bI", 'l@', 'tI'],
'abdication': ['"{b', 'dI', "'k1", 'SH'],
'abc': ['"1', 'bi', "'si"],
'abetted': ['@', "'bE", 'tId'],
'abide': ['@', "'b2d"],
'abided by': ['@', "'b2", 'dId', 'b2'],
'abides': ['@', "'b2dz"],
'abhorring': ['@b', "'h$", 'rIN'],
'abbots': ['{', 'b@ts'],
'aberration': ['"{', 'b@', "'r1", 'SH'],
'abdicated': ['{b', 'dI', 'k1', 'tId'],
'abbess': ['{', 'bEs'],
'abdicates': ['{b', 'dI', 'k1ts'],
'abbreviating': ['@', "'bri", 'vI', '1', 'tIN'],
'abasement': ['@', "'b1s", 'm@nt'],
'abets': ['@', "'bEts"],
'abet': ['@', "'bEt"],
'abbesses': ['{', 'bE', 'sIz'],
'abeyance': ['@', "'b1", '@ns'],
'abattoir': ['{', 'b@', 'tw#R'],
'abash': ['@', "'b{S"],
'aberrant': ['{', "'bE", 'r@nt'],
'abase': ['@', "'b1s"],
'aback': ['@', "'b{k"],
'abaci': ['{', 'b@', 's2'],
'abdominal': ['{b', "'dQ", 'mI', 'nP'],
'abbreviations': ['@', '"bri', 'vI', "'1", 'SHz'],
'abate': ['@', "'b1t"],
'abbeys': ['{', 'bIz'],
'abating': ['@', "'b1", 'tIN'],
'abiding by': ['@', "'b2", 'dIN', 'b2'],
'abductions': ['{b', "'dVk", 'SHz'],
'abates': ['@', "'b1ts"],
'abated': ['@', "'b1", 'tId'],
'abduct': ['@b', "'dVkt"],
"abc's": ['"1', 'bi', "'siz"],
'abasing': ['@', "'b1", 'sIN'],
'abandoned': ['@', "'b{n", 'd@nd'],
'abeam': ['@', "'bim"],
'abdicating': ['{b', 'dI', 'k1', 'tIN'],
'abacuses': ['{', 'b@', 'k@', 'sIz'],
'aas': ['"1', "'1z"],
'abandon': ['@', "'b{n", 'd@n'],
'abducted': ['@b', "'dVk", 'tId'],
'aa': ['"1', "'1"],
'abandonment': ['@', "'b{n", 'd@n', 'm@nt'],
'abashing': ['@', "'b{", 'SIN'],
'abducting': ['@b', "'dVk", 'tIN'],
'abbot': ['{', 'b@t'],
'abcs': ['"1', 'bi', "'siz"],
'abhors': ['@b', "'h$z"],
'abdomens': ['{b', 'd@', 'mEnz'],
'abased': ['@', "'b1st"],
'abide by': ['@', "'b2d", 'b2'],
'abases': ['@', "'b1", 'sIz'],
'abidance by': ['@', "'b2", 'd@ns', '"b2'],
'abdicate': ['{b', 'dI', 'k1t'],
'abandoning': ['@', "'b{n", 'd@', 'nIN'],
'abdomen': ['{b', 'd@', 'mEn'],
'abides by': ['@', "'b2dz", 'b2'],
'abided': ['@', "'b2", 'dId'],
'abettors': ['@', "'bE", 't@z'],
'aberrations': ['"{', 'b@', "'r1", 'SHz'],
'a': ['1'],
'abhor': ['@b', "'h$R"]}
self.expected_dict_v2 = {'abilities': ['@', 'b', 'I', 'l', '@', 't', 'I', 'z'],
'abdications': ['{', 'b', 'd', 'I', 'k', '1', 'S', 'H', 'z'],
'abbreviation': ['@', 'b', 'r', 'i', 'v', 'I', '1', 'S', 'H'],
'abatement': ['@', 'b', '1', 't', 'm', '@', 'n', 't'],
'abbreviate': ['@', 'b', 'r', 'i', 'v', 'I', '1', 't'],
'abettor': ['@', 'b', 'E', 't', '@', 'R'],
'abducts': ['@', 'b', 'd', 'V', 'k', 't', 's'],
'abbes': ['{', 'b', '1', 'z'],
'abhorred': ['@', 'b', 'h', '$', 'd'],
'abbey': ['{', 'b', 'I'],
'abhorrent': ['@', 'b', 'h', 'Q', 'r', '@', 'n', 't'],
'abiding': ['@', 'b', '2', 'd', 'I', 'N'],
'abeyant': ['@', 'b', '1', '@', 'n', 't'],
'abbe': ['{', 'b', '1'],
'abashed': ['@', 'b', '{', 'S', 't'],
'abandons': ['@', 'b', '{', 'n', 'd', '@', 'n', 'z'],
'abashes': ['@', 'b', '{', 'S', 'I', 'z'],
'abduction': ['{', 'b', 'd', 'V', 'k', 'S', 'H'],
'abacus': ['{', 'b', '@', 'k', '@', 's'],
'abhorrence': ['@', 'b', 'h', 'Q', 'r', '@', 'n', 's'],
'abed': ['@', 'b', 'E', 'd'],
'abaft': ['@', 'b', '#', 'f', 't'],
'abattoirs': ['{', 'b', '@', 't', 'w', '#', 'z'],
'abbreviated': ['@', 'b', 'r', 'i', 'v', 'I', '1', 't', 'I', 'd'],
'abetting': ['@', 'b', 'E', 't', 'I', 'N'],
'abbreviates': ['@', 'b', 'r', 'i', 'v', 'I', '1', 't', 's'],
'ability': ['@', 'b', 'I', 'l', '@', 't', 'I'],
'abdication': ['{', 'b', 'd', 'I', 'k', '1', 'S', 'H'],
'abc': ['1', 'b', 'i', 's', 'i'],
'abetted': ['@', 'b', 'E', 't', 'I', 'd'],
'abide': ['@', 'b', '2', 'd'],
'abided by': ['@', 'b', '2', 'd', 'I', 'd', 'b', '2'],
'abides': ['@', 'b', '2', 'd', 'z'],
'abhorring': ['@', 'b', 'h', '$', 'r', 'I', 'N'],
'abbots': ['{', 'b', '@', 't', 's'],
'aberration': ['{', 'b', '@', 'r', '1', 'S', 'H'],
'abdicated': ['{', 'b', 'd', 'I', 'k', '1', 't', 'I', 'd'],
'abbess': ['{', 'b', 'E', 's'],
'abdicates': ['{', 'b', 'd', 'I', 'k', '1', 't', 's'],
'abbreviating': ['@', 'b', 'r', 'i', 'v', 'I', '1', 't', 'I', 'N'],
'abasement': ['@', 'b', '1', 's', 'm', '@', 'n', 't'],
'abets': ['@', 'b', 'E', 't', 's'],
'abet': ['@', 'b', 'E', 't'],
'abbesses': ['{', 'b', 'E', 's', 'I', 'z'],
'abeyance': ['@', 'b', '1', '@', 'n', 's'],
'abattoir': ['{', 'b', '@', 't', 'w', '#', 'R'],
'abash': ['@', 'b', '{', 'S'],
'aberrant': ['{', 'b', 'E', 'r', '@', 'n', 't'],
'abase': ['@', 'b', '1', 's'],
'aback': ['@', 'b', '{', 'k'],
'abaci': ['{', 'b', '@', 's', '2'],
'abdominal': ['{', 'b', 'd', 'Q', 'm', 'I', 'n', 'P'],
'abbreviations': ['@', 'b', 'r', 'i', 'v', 'I', '1', 'S', 'H', 'z'],
'abate': ['@', 'b', '1', 't'],
'abbeys': ['{', 'b', 'I', 'z'],
'abating': ['@', 'b', '1', 't', 'I', 'N'],
'abiding by': ['@', 'b', '2', 'd', 'I', 'N', 'b', '2'],
'abductions': ['{', 'b', 'd', 'V', 'k', 'S', 'H', 'z'],
'abates': ['@', 'b', '1', 't', 's'],
'abated': ['@', 'b', '1', 't', 'I', 'd'],
'abduct': ['@', 'b', 'd', 'V', 'k', 't'],
"abc's": ['1', 'b', 'i', 's', 'i', 'z'],
'abasing': ['@', 'b', '1', 's', 'I', 'N'],
'abandoned': ['@', 'b', '{', 'n', 'd', '@', 'n', 'd'],
'abeam': ['@', 'b', 'i', 'm'],
'abdicating': ['{', 'b', 'd', 'I', 'k', '1', 't', 'I', 'N'],
'abacuses': ['{', 'b', '@', 'k', '@', 's', 'I', 'z'],
'aas': ['1', '1', 'z'],
'abandon': ['@', 'b', '{', 'n', 'd', '@', 'n'],
'abducted': ['@', 'b', 'd', 'V', 'k', 't', 'I', 'd'],
'aa': ['1', '1'],
'abandonment': ['@', 'b', '{', 'n', 'd', '@', 'n', 'm', '@', 'n', 't'],
'abashing': ['@', 'b', '{', 'S', 'I', 'N'],
'abducting': ['@', 'b', 'd', 'V', 'k', 't', 'I', 'N'],
'abbot': ['{', 'b', '@', 't'],
'abcs': ['1', 'b', 'i', 's', 'i', 'z'],
'abhors': ['@', 'b', 'h', '$', 'z'],
'abdomens': ['{', 'b', 'd', '@', 'm', 'E', 'n', 'z'],
'abased': ['@', 'b', '1', 's', 't'],
'abide by': ['@', | |
<reponame>Razmo99/SimproAPI<filename>SimproAPI/Trackables.py
import logging
from .Sessions import Sessions
import json
import requests
import itertools
import concurrent.futures
logger = logging.getLogger(__name__)
logger.debug('Importing Module : '+__name__)
class Trackables(object):
"""Class containing methods to find Trackable Plants and Equipment"""
    def __init__(self,server,token):
        """Open a Simpro API session against *server* authenticated with *token*."""
        self.simpro_session=Sessions(server,token)
    def __enter__(self):
        """Allow use as a context manager; returns this instance."""
        return self
def __exit__(self,exec_types,exec_val,exc_tb):
self.simpro_session.session.close()
def split_iterable(self,iterable, size):
"""Splits an iterable into specified sizes.
Arguments:
iterable {iterable object} -- Objec that will be chunked
size {int} -- size of the chunk
Yields:
Chunked section of iterable
Source:
https://alexwlchan.net/2018/12/iterating-in-fixed-size-chunks/
"""
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, size))
if not chunk:
break
yield chunk
    def get_companies(self,company_id,custom_field_names,concurrently=False):
        """Find all trackable equipment in one or more Simpro companies.

        Arguments:
            company_id {list} -- IDs of the companies to search
            custom_field_names {list} -- custom field names to match against
            concurrently {bool} -- when True, fetch equipment via a process
                pool (get_equipment_concurrent) instead of sequentially

        Yields:
            {dict} -- one result per company that has matches:
                {
                    'id': <company ID>,
                    'trackable_plants': [{
                        'id': <plant type ID>,
                        'custom_fields': [{'id': ..., 'name': ...}],
                        'trackable_plant': [{
                            'id': <equipment ID>,
                            'custom_fields': [{'id': ..., 'name': ..., 'value': ...}]
                        }]
                    }]
                }
        """
        # Iterate over the provided company IDs
        for company in company_id:
            # Per-company result accumulator
            result={
                'id':company,
                'trackable_plants':[]
            }
            logger.debug('Getting trackable equipment for company: '+str(company))
            # Plant types carrying at least one of the wanted custom fields
            trackable_plant_types=self.get_plant_types(
                company,
                custom_field_names
            )
            # Iterate over the trackable plant types
            for trackable_plant_type in trackable_plant_types:
                logger.debug('Getting trackable equipment for plant: '+str(trackable_plant_type['id']))
                if concurrently:
                    # This method uses multiprocessing
                    trackable_plants=self.get_equipment_concurrent(
                        company,
                        trackable_plant_type['id'],
                        # IDs of the custom fields we want to retrieve
                        [custom_fields['id'] for custom_fields in trackable_plant_type['custom_fields']]
                    )
                else:
                    trackable_plants=self.get_equipment(
                        company,
                        trackable_plant_type['id'],
                        # IDs of the custom fields we want to retrieve
                        [custom_fields['id'] for custom_fields in trackable_plant_type['custom_fields']]
                    )
                # Collect the trackable equipment of this plant type
                trackable_plant_results=[]
                for trackable_plant in trackable_plants:
                    logger.debug('Getting trackable custom fields for equipment ID: '+str(trackable_plant['id']))
                    # Append plants to the plant_type
                    trackable_plant_results.append(trackable_plant)
                # Attach the equipment list to its plant type entry
                trackable_plant_type['trackable_plant']=trackable_plant_results
                result['trackable_plants'].append(trackable_plant_type)
            # Only companies with at least one trackable plant type are yielded
            if result['trackable_plants']:
                # NOTE(review): this log reuses trackable_plant_type from the
                # last loop iteration; safe because this branch implies the
                # loop ran at least once, but worth confirming intent.
                logger.debug('Successfully found specified custom_field_names: {company_id: '+str(company)+' plant_type_id: '+str(trackable_plant_type['id'])+'}')
                yield result
            else:
                # NOTE(review): log message is missing its closing '}'.
                logger.debug('Failed to find specified custom_field_names: {company_id: '+str(company))
    def get_plant_types(self,company_id,custom_field_names):
        """Find all trackable plant types in a Simpro company.

        A plant type is "trackable" when it defines at least one custom
        field whose name appears in *custom_field_names*.

        Arguments:
            company_id {int} -- ID of the company to search
            custom_field_names {list} -- custom field names to find the IDs of

        Yields:
            {dict} -- {'id': <plant type ID>,
                       'custom_fields': [{'id': ..., 'name': ...}]}
        """
        # Get the IDs of every plant type in the company
        plant_types=self.simpro_session.plant_type_get_all(
            company_id,
            {'columns':'ID'}
        )
        # Iterate over the retrieved plant types
        logger.debug('Getting trackable plant types for company_id: '+ str(company_id))
        for plant_type in plant_types.json():
            # Get all the custom fields defined on this plant type
            plant_custom_fields=self.simpro_session.plant_type_custom_fields_get_all(
                company_id,
                plant_type['ID']
            )
            results = {
                'id':plant_type['ID'],
                'custom_fields':[]
            }
            # Keep only the custom fields whose names were requested
            for plant_custom_field in plant_custom_fields.json():
                for custom_field_name in custom_field_names:
                    if plant_custom_field.get('Name') == custom_field_name:
                        results['custom_fields'].append({
                            'id':plant_custom_field['ID'],
                            'name':plant_custom_field['Name']
                        })
            # Yield only plant types with at least one matching field
            if results['custom_fields']:
                logger.debug('Successfully Found specified custom_field_names in: {company_id: '+str(company_id)+' plant_type_id: '+str(plant_type['ID'])+'}')
                yield results
            else:
                logger.debug('Failed to find specified custom_field_names in: {company_id: '+str(company_id)+' plant_type_id: '+str(plant_type['ID'])+'}')
    def get_equipment(self,company_id,plant_type_id,custom_field_ids):
        """Find all trackable equipment of one Simpro plant type.

        Arguments:
            company_id {int} -- ID of the company to search
            plant_type_id {int} -- ID of the plant type to search
            custom_field_ids {list} -- custom field IDs whose values to fetch

        Yields:
            {dict} -- {'id': <equipment ID>,
                       'custom_fields': [{'id': ..., 'name': ..., 'value': ...}]}
        """
        logger.debug('Getting trackable equipment from Plant id: '+ str(plant_type_id))
        # The API returns the equipment of the plant type in pages
        plants_and_equipment=self.simpro_session.plants_and_equipment_get_all(
            company_id,
            plant_type_id,
            {'columns':'ID'}
        )
        for pages in plants_and_equipment:
            # Iterate over the equipment in each page
            for equipment in pages.json():
                # Values of the requested custom fields for this equipment
                custom_fields_results=[]
                for custom_field_id in custom_field_ids:
                    # One API round trip per (equipment, custom field) pair
                    custom_field = self.simpro_session.plants_and_equipment_custom_fields_get_specific(
                        company_id,
                        plant_type_id,
                        equipment['ID'],
                        custom_field_id
                    )
                    # Decoded response body
                    json_cf=custom_field.json()
                    custom_fields_results.append({
                        'id':json_cf['CustomField']['ID'],
                        'name':json_cf['CustomField']['Name'],
                        'value':json_cf['Value']})
                # Yield only equipment with at least one fetched field
                if custom_fields_results:
                    logger.debug('Successfully found custom_field_ids: {company_id: '+str(company_id)+' plant_type_id: '+str(plant_type_id)+' plant_id: '+str(equipment['ID'])+'}')
                    results={
                        'id':equipment['ID'],
                        'custom_fields':custom_fields_results
                    }
                    yield results
                else:
                    logger.debug('Failed to find custom_field_ids: {company_id: '+str(company_id)+' plant_type_id: '+str(plant_type_id)+' plant_id: '+str(equipment['ID'])+'}')
def get_equipment_chunks(self,plant_ids,company_id,plant_type_id,custom_field_ids):
"""Gets equipment based on provided list of plant_ids
Arguments:
plant_ids {list} -- list of ID's to lookup [{'ID': 123},...]
company_id {int} -- company to look under
plant_type_id {int} -- plant_type to look under
custom_field_ids {list} -- custom field ids to lookup/return
returns:
{list} -- [{
id: #ID of the equipment
custom_fields:[{ #list of custom fields
id:
name:
value:
}]
}]
"""
results=[]
for plant_id in plant_ids:
custom_fields_results=[]
#Iterate over the list of custom field ids
for custom_field_id in custom_field_ids:
#Retreive the specified custom field
custom_field = self.simpro_session.plants_and_equipment_custom_fields_get_specific(
company_id,
plant_type_id,
plant_id['ID'],
custom_field_id
)
#Just a json ref of the retreived data
json_cf=custom_field.json()
#Add an entry to the results list
custom_fields_results.append({
'id':json_cf['CustomField']['ID'],
'name':json_cf['CustomField']['Name'],
'value':json_cf['Value']})
#If their are results return them
if custom_fields_results:
output={
'id':plant_id['ID'],
'custom_fields':custom_fields_results
}
results.append(output)
logger.debug('Successfully found custom_field_ids: {company_id: '+str(company_id)+' plant_type_id: '+str(plant_type_id)+' plant_id: '+str(plant_id['ID'])+'}')
else:
logger.debug('Failed to find custom_field_ids: {company_id: '+str(company_id)+' plant_type_id: '+str(plant_type_id)+' plant_id: '+str(plant_id['ID'])+'}')
if results:
return results
def get_equipment_concurrent(self,company_id,plant_type_id,custom_field_ids,max_workers=None,chunk_size=None):
""" Gets equipment using the concurrent futures module.
Arguments:
company_id {int} -- company id
plant_type_id {int} -- plant type id
custom_field_ids {list} -- custom field ids to return E.G. [13,25]
max_workers {int} -- Number of works to spawn, more then 4 causes connection errors
chunk_size {int} -- Size of the plant chunks passed to the spawned workers, leave none for even split.
Yields:
{list} -- [{
id: #ID of the equipment
custom_fields:[{ #list of custom fields
id:
name:
value:
}]
}]
"""
#Get all plants under a plant type
plants_and_equipment=self.simpro_session.plants_and_equipment_get_all(
company_id,
plant_type_id,
{'columns':'ID'}
)
#Place all plant ID's into one list
plant_ids=[]
[plant_ids.extend(i.json()) for i in plants_and_equipment]
#Check for optional variables
max_workers=4 if not max_workers else max_workers
chunk_size=len(plant_ids)//max_workers if not chunk_size else chunk_size
#List to hold results
results=[]
with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
logger.debug('Starting concurrent futures chunk_size:'+str(chunk_size)+' max_workers:'+str(max_workers))
#Split the list of plant ids into chunks
x=self.split_iterable(plant_ids,chunk_size)
#For each chunk create a future object to be proccesses
futures=[executor.submit(
self.get_equipment_chunks,
i,
company_id,
plant_type_id,
custom_field_ids) for i in list(x)]
#Wait for the futures to be completed and extend thhe results list
for future in concurrent.futures.as_completed(futures):
results.extend(future.result())
logger.debug('Finished concurrent futures: Total input IDs: '+str(len(plant_ids))+' Total results: '+str(len(results)))
return results
def compare_equipment(self, company_id,plant_type_id,plant_data,match_data,match_serial_field,match_return_fields,simpro_serial_custom_field,simpro_return_custom_fields):
"""compare trackable data against another source return what's specififed
Arguments:
company_id {integer} -- ID of the company_id
plant_type_id {integer} -- ID of the plant_type_id
plant_data {dictionary} -- contains information on the plant to compare against:
{dictonary} -- {
id: #ID of the equipment
custom_fields:[{
id:{integer},
name:{string},
value:}]}
match_data {list} -- List containing a dictionary's to iterate against simpro data
[
{key:value}
]
match_serial_field {string} -- key in match_data to match against.
match_return_fields {list} -- Keys in match_date that you want returned
simpro_serial_custom_field {string} -- Name of the custom field to compare against
simpro_return_custom_fields {list} -- Custom fields to return when a match is found
Yields:
{dictonary} -- {
company_id:'',
plant_type_id:'',
plant_id:'',
custom_fields:[{
id:
name:
value:
}],
match_returned_custom_fields:{
key:value of 'match_return_fields'
}
}
"""
#Iterate over the plants in the data input
for plant in plant_data:
#Iterate over the custom fields in each plant
for custom_field in plant['custom_fields']:
#Get the index for the match_date and store it
match_serial_field_index=None
for i,x in enumerate(match_data):
if x[match_serial_field]==custom_field['value']:
match_serial_field_index=i
#If the name of the custom field and match serial are the same and a match index is present, continue.
if custom_field['name'] == simpro_serial_custom_field and match_serial_field_index:
#Setup of the | |
<reponame>mehrdad-shokri/mne-python<filename>tutorials/source-modeling/plot_background_freesurfer_mne.py<gh_stars>1-10
"""
.. _tut-freesurfer-mne:
=================================
How MNE uses FreeSurfer's outputs
=================================
This tutorial explains how MRI coordinate frames are handled in MNE-Python,
and how MNE-Python integrates with FreeSurfer for handling MRI data and
source space data in general.
.. contents::
:local:
As usual we'll start by importing the necessary packages; for this tutorial
that includes :mod:`nibabel` to handle loading the MRI images (MNE-Python also
uses :mod:`nibabel` under the hood). We'll also use a special :mod:`Matplotlib
<matplotlib.patheffects>` function for adding outlines to text, so that text is
readable on top of an MRI image.
"""
import os
import numpy as np
import nibabel
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
import mne
from mne.transforms import apply_trans
from mne.io.constants import FIFF
###############################################################################
# MRI coordinate frames
# =====================
#
# Let's start out by looking at the ``sample`` subject MRI. Following standard
# FreeSurfer convention, we look at :file:`T1.mgz`, which gets created from the
# original MRI :file:`sample/mri/orig/001.mgz` when you run the FreeSurfer
# command `recon-all <https://surfer.nmr.mgh.harvard.edu/fswiki/recon-all>`_.
# Here we use :mod:`nibabel` to load the T1 image, and the resulting object's
# :meth:`~nibabel.spatialimages.SpatialImage.orthoview` method to view it.
# Locate the MNE "sample" dataset (downloaded on first use) and load the
# FreeSurfer-conformed T1 for the ``sample`` subject.
data_path = mne.datasets.sample.data_path()
subjects_dir = os.path.join(data_path, 'subjects')
subject = 'sample'
t1_fname = os.path.join(subjects_dir, subject, 'mri', 'T1.mgz')
t1 = nibabel.load(t1_fname)  # nibabel MGH image object
t1.orthoview()  # open nibabel's interactive orthogonal-slice viewer
###############################################################################
# Notice that the axes in the
# :meth:`~nibabel.spatialimages.SpatialImage.orthoview` figure are labeled
# L-R, S-I, and P-A. These reflect the standard RAS (right-anterior-superior)
# coordinate system that is widely used in MRI imaging. If you are unfamiliar
# with RAS coordinates, see the excellent nibabel tutorial
# :doc:`nibabel:coordinate_systems`.
#
# Nibabel already takes care of some coordinate frame transformations under the
# hood, so let's do it manually so we understand what is happening. First let's
# get our data as a 3D array and note that it's already a standard size:
# Materialize the voxel intensities from the image's array proxy.
data = np.asarray(t1.dataobj)
print(data.shape)  # conformed FreeSurfer volumes have a standard cubic shape
###############################################################################
# These data are voxel intensity values. Here they are unsigned integers in the
# range 0-255, though in general they can be floating point values. A value
# ``data[i, j, k]`` at a given index triplet ``(i, j, k)`` corresponds to some
# real-world physical location ``(x, y, z)`` in space. To get its physical
# location, first we have to choose what coordinate frame we're going to use.
#
# For example, we could choose a geographical coordinate
# frame, with origin at the center of the earth, Z axis through the north
# pole, X axis through the prime meridian (zero degrees longitude), and Y axis
# orthogonal to these forming a right-handed coordinate system. This would not
# be a very useful choice for defining the physical locations of the voxels
# during the MRI acquisition for analysis, but you could nonetheless figure out
# the transformation that related the ``(i, j, k)`` to this coordinate frame.
#
# Instead, each scanner defines a more practical, native coordinate system that
# it uses during acquisition, usually related to the physical orientation of
# the scanner itself and/or the subject within it. During acquisition the
# relationship between the voxel indices ``(i, j, k)`` and the physical
# location ``(x, y, z)`` in the *scanner's native coordinate frame* is saved in
# the image's *affine transformation*.
#
# .. sidebar:: Under the hood
#
# ``mne.transforms.apply_trans`` effectively does a matrix multiplication
# (i.e., :func:`numpy.dot`), with a little extra work to handle the shape
# mismatch (the affine has shape ``(4, 4)`` because it includes a
# *translation*, which is applied separately).
#
# We can use :mod:`nibabel` to examine this transformation, keeping in mind
# that it processes everything in units of millimeters, unlike MNE where things
# are always in SI units (meters).
#
# This allows us to take an arbitrary voxel or slice of data and know where it
# is in the scanner's native physical space ``(x, y, z)`` (in mm) by applying
# the affine transformation to the voxel coordinates.
print(t1.affine)  # 4x4 voxel -> scanner-RAS transform, in millimeters
vox = np.array([122, 119, 102])  # an arbitrary example voxel (i, j, k)
xyz_ras = apply_trans(t1.affine, vox)
print('Our voxel has real-world coordinates {}, {}, {} (mm)'
      .format(*np.round(xyz_ras, 3)))
###############################################################################
# If you have a point ``(x, y, z)`` in scanner-native RAS space and you want
# the corresponding voxel number, you can get it using the inverse of the
# affine. This involves some rounding, so it's possible to end up off by one
# voxel if you're not careful:
ras_coords_mm = np.array([1, -17, -18])
inv_affine = np.linalg.inv(t1.affine)
# Rounding snaps the continuous RAS point back onto the voxel grid; this
# is where the possible off-by-one mentioned in the text comes from.
i_, j_, k_ = np.round(apply_trans(inv_affine, ras_coords_mm)).astype(int)
print('Our real-world coordinates correspond to voxel ({}, {}, {})'
      .format(i_, j_, k_))
###############################################################################
# Let's write a short function to visualize where our voxel lies in an
# image, and annotate it in RAS space (rounded to the nearest millimeter):
def imshow_mri(data, img, vox, xyz, suptitle):
    """Show an MRI slice with a voxel annotated.

    Parameters
    ----------
    data : ndarray
        3D voxel array; the first axis is sliced at ``vox[0]``.
    img : nibabel spatial image
        Image whose affine supplies the axis orientation codes.
    vox : array-like of int
        The ``(i, j, k)`` voxel indices to annotate.
    xyz : dict
        Mapping of label -> ``(x, y, z)`` coordinates (in mm) to print
        on the image.
    suptitle : str
        Figure title.

    Returns
    -------
    fig : matplotlib.figure.Figure
        The created figure.
    """
    i, j, k = vox
    fig, ax = plt.subplots(1, figsize=(6, 6))
    codes = nibabel.orientations.aff2axcodes(img.affine)
    # Figure out the title based on the code of this axis
    # (fixed typo: 'Saggital' -> 'Sagittal' in the R entry)
    ori_slice = dict(P='Coronal', A='Coronal',
                     I='Axial', S='Axial',
                     L='Sagittal', R='Sagittal')
    ori_names = dict(P='posterior', A='anterior',
                     I='inferior', S='superior',
                     L='left', R='right')
    title = ori_slice[codes[0]]
    ax.imshow(data[i], vmin=10, vmax=120, cmap='gray', origin='lower')
    ax.axvline(k, color='y')
    ax.axhline(j, color='y')
    for kind, coords in xyz.items():
        annotation = ('{}: {}, {}, {} mm'
                      .format(kind, *np.round(coords).astype(int)))
        text = ax.text(k, j, annotation, va='baseline', ha='right',
                       color=(1, 1, 0.7))
        # Black outline keeps the yellow text readable on any background.
        text.set_path_effects([
            path_effects.Stroke(linewidth=2, foreground='black'),
            path_effects.Normal()])
    # reorient view so that RAS is always rightward and upward
    x_order = -1 if codes[2] in 'LIP' else 1
    y_order = -1 if codes[1] in 'LIP' else 1
    ax.set(xlim=[0, data.shape[2] - 1][::x_order],
           ylim=[0, data.shape[1] - 1][::y_order],
           xlabel=f'k ({ori_names[codes[2]]}+)',
           ylabel=f'j ({ori_names[codes[1]]}+)',
           title=f'{title} view: i={i} ({ori_names[codes[0]]}+)')
    fig.suptitle(suptitle)
    fig.subplots_adjust(0.1, 0.1, 0.95, 0.85)
    return fig
# Annotate the chosen voxel on the T1 slice, labeled in scanner RAS (mm).
imshow_mri(data, t1, vox, {'Scanner RAS': xyz_ras}, 'MRI slice')
###############################################################################
# Notice that the axis scales (``i``, ``j``, and ``k``) are still in voxels
# (ranging from 0-255); it's only the annotation text that we've translated
# into real-world RAS in millimeters.
#
#
# "MRI coordinates" in MNE-Python: FreeSurfer surface RAS
# -------------------------------------------------------
#
# While :mod:`nibabel` uses **scanner RAS** ``(x, y, z)`` coordinates,
# FreeSurfer uses a slightly different coordinate frame: **MRI surface RAS**.
# The transform from voxels to the FreeSurfer MRI surface RAS coordinate frame
# is known in the `FreeSurfer documentation
# <https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems>`_ as ``Torig``,
# and in nibabel as :meth:`vox2ras_tkr
# <nibabel.freesurfer.mghformat.MGHHeader.get_vox2ras_tkr>`. This
# transformation sets the center of its coordinate frame in the middle of the
# conformed volume dimensions (``N / 2.``) with the axes oriented along the
# axes of the volume itself. For more information, see
# :ref:`coordinate_systems`.
#
# Since MNE-Python uses FreeSurfer extensively for surface computations (e.g.,
# white matter, inner/outer skull meshes), internally MNE-Python uses the
# FreeSurfer surface RAS coordinate system (not the :mod:`nibabel` scanner RAS
# system) for as many computations as possible, such as all source space
# and BEM mesh vertex definitions.
#
# Whenever you see "MRI coordinates" or "MRI coords" in MNE-Python's
# documentation, you should assume that we are talking about the
# "FreeSurfer MRI surface RAS" coordinate frame!
#
# We can do similar computations as before to convert the given voxel indices
# into FreeSurfer MRI coordinates (i.e., what we call "MRI coordinates" or
# "surface RAS" everywhere else in MNE), just like we did above to convert
# voxel indices to *scanner* RAS:
# "Torig": voxel -> FreeSurfer surface RAS transform, from the MGH header.
Torig = t1.header.get_vox2ras_tkr()
print(t1.affine)
print(Torig)
xyz_mri = apply_trans(Torig, vox)
imshow_mri(data, t1, vox, dict(MRI=xyz_mri), 'MRI slice')
###############################################################################
# Knowing these relationships and being mindful about transformations, we
# can get from a point in any given space to any other space. Let's start out
# by plotting the Nasion on a sagittal MRI slice:
fiducials = mne.coreg.get_mni_fiducials(subject, subjects_dir=subjects_dir)
# Keep only the nasion digitization point from the fiducial list.
nasion_mri = [d for d in fiducials if d['ident'] == FIFF.FIFFV_POINT_NASION][0]
print(nasion_mri)  # note it's in Freesurfer MRI coords
###############################################################################
# When we print the nasion, it displays as a ``DigPoint`` and shows its
# coordinates in millimeters, but beware that the underlying data is
# :ref:`actually stored in meters <units>`,
# so before transforming and plotting we'll convert to millimeters:
nasion_mri = nasion_mri['r'] * 1000  # meters → millimeters
# Invert Torig to go from surface RAS (mm) back to voxel indices.
nasion_vox = np.round(
    apply_trans(np.linalg.inv(Torig), nasion_mri)).astype(int)
imshow_mri(data, t1, nasion_vox, dict(MRI=nasion_mri),
           'Nasion estimated from MRI transform')
###############################################################################
# We can also take the digitization point from the MEG data, which is in the
# "head" coordinate frame.
#
# Let's look at the nasion in the head coordinate frame:
info = mne.io.read_info(
    os.path.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif'))
# The nasion is the cardinal digitization point carrying the NASION ident.
nasion_head = [d for d in info['dig'] if
               d['kind'] == FIFF.FIFFV_POINT_CARDINAL and
               d['ident'] == FIFF.FIFFV_POINT_NASION][0]
print(nasion_head)  # note it's in "head" coordinates
###############################################################################
# .. sidebar:: Head coordinate frame
#
# The head coordinate frame in MNE is the "Neuromag" head coordinate
# frame. The origin is given by the intersection between a line connecting
# the LPA and RPA and the line orthogonal to it that runs through the
# nasion. | |
"""Generate coil geometries.
This module provides functions to generate various coil geometries
that can be used in conjunction with the eppy module to calculate eddy
currents in flat plates.
"""
import numpy as np
import numpy.typing as npt
# ----------------------------------------------------------------------
# User defined types
#
# Alias for float64 NumPy arrays. ``np.float_`` was removed in NumPy 2.0;
# ``np.float64`` is the exact equivalent and works on NumPy 1.x too.
ArrayFloat = npt.NDArray[np.float64]
def straight_wire(start: ArrayFloat, end: ArrayFloat,
                  n: int=40) -> tuple[ArrayFloat, ArrayFloat]:
    """Discretize a straight wire into small line segments.

    Parameters
    ----------
    start : ndarray(dtype=float, dim=1)
        Coordinate of start point (x, y, z).
    end : ndarray(dtype=float, dim=1)
        Coordinate of end point (x, y, z).
    n : int, defaults to 40
        Number of line segments.

    Returns
    -------
    R : ndarray(dtype=float, dim=2)
        Array of position vectors for each small line segment.
    dl : ndarray(dtype=float, dim=2)
        Array of line segment vectors.
    """
    # A single straight wire is just two points joined by one line entity.
    endpoints = np.array([start, end])
    connectivity = np.array([[0, 1]])
    seg_len = np.linalg.norm(end - start)/n
    return coil_segments(endpoints, seg_len, lines=connectivity)
def circular_coil(center: ArrayFloat, radius: float, plane: str="XY",
                  n: int=40) -> tuple[ArrayFloat, ArrayFloat]:
    """Return position vectors and line segments for circular coil.

    Parameters
    ----------
    center : ndarray(dtype=float, dim=1)
        Coordinate of the center (x, y, z).
    radius : float
        Radius of the circular coil.
    plane : {'XY', 'YZ'}, defaults to 'XY'
        Plane in which the circular coil is defined.
    n : int, defaults to 40
        Number of line segments.

    Returns
    -------
    R : ndarray(dtype=float, dim=2)
        Array of position vectors for each small line segment.
    dl : ndarray(dtype=float, dim=2)
        Array of line segment vectors.

    Raises
    ------
    ValueError
        If ``plane`` is not one of 'XY' or 'YZ'.
    """
    # Define the circle by three points 90 degrees apart; the current runs
    # from P[0] towards P[1].
    P = np.zeros((3, 3))
    if plane == "XY":
        P[0] = center + np.array([radius, 0, 0])
        P[1] = center + np.array([0, radius, 0])
        P[2] = center - np.array([radius, 0, 0])
    elif plane == "YZ":
        P[0] = center + np.array([0, radius, 0])
        P[1] = center + np.array([0, 0, radius])
        P[2] = center - np.array([0, radius, 0])
    else:
        # Previously an unknown plane silently left P all-zero, producing a
        # degenerate "circle" of coincident points at the origin.
        raise ValueError(f"plane must be 'XY' or 'YZ', got {plane!r}")
    esize = 2*np.pi*radius/n
    circle = np.array([0, 1, 2])
    circle = circle[None, :]
    R, dl = coil_segments(P, esize, circles=circle)
    return R, dl
def pancake(center: ArrayFloat, r_in: float, r_out: float,
            turns: int, n: int=24) -> tuple[ArrayFloat, ArrayFloat]:
    """Discretize a flat (pancake) spiral coil into line segments.

    Parameters
    ----------
    center : ndarray(dtype=float, dim=1)
        Coordinate of the center (x, y, z).
    r_in : float
        Inner radius.
    r_out : float
        Outer radius.
    turns : int
        Number of windings.
    n : int, defaults to 24
        Number of line segments per winding.

    Returns
    -------
    R : ndarray(dtype=float, dim=2)
        Array of position vectors for each small line segment.
    dl : ndarray(dtype=float, dim=2)
        Array of line segment vectors.
    """
    # Total sweep angle over all windings; height 0 gives a flat spiral.
    sweep = 2*np.pi*turns
    n_total = turns*n
    # Spiral length ~ turns * pi * (r_out + r_in), i.e. the average
    # circumference times the number of turns.
    elem_size = turns*np.pi*(r_out+r_in)/n_total
    return spiral_segments(center, r_in, r_out, sweep, 0.0, elem_size)
def helical(center: ArrayFloat, radius: float, h: float, turns: int,
            plane: str="XY", n: int=40) -> tuple[ArrayFloat, ArrayFloat]:
    """Discretize a helical (solenoid) coil into line segments.

    Parameters
    ----------
    center : ndarray(dtype=float, dim=1)
        Coordinate of the center (x, y, z).
    radius : float
        Coil radius.
    h : float
        Coil length.
    turns : float or int
        Number of windings.
    plane : {'XY', 'YZ'}, defaults to 'XY'
        Plane by which direction (normal) of the coil is defined.
    n : int, defaults to 40
        Number of line segments per winding.

    Returns
    -------
    R : ndarray(dtype=float, dim=2)
        Array of position vectors for each small line segment.
    dl : ndarray(dtype=float, dim=2)
        Array of line segment vectors.
    """
    sweep = 2*np.pi*turns
    n_total = turns*n
    # NOTE(review): this works out to pi*radius/n per element, i.e. half
    # the circumference-based 2*pi*radius/n one might expect per winding;
    # confirm whether the doubled resolution is intentional.
    elem_size = turns*np.pi*radius/n_total
    R, dl = spiral_segments(center, radius, radius, sweep, h, elem_size)
    if plane == "YZ":
        # Re-orient so the coil axis lies along X instead of Z.
        axis = np.array([1.0, 0.0, 0.0])
        R, dl = tilt_and_rotate_coil(R, dl, center, axis, 0.0)
    return R, dl
def hairpin(center: ArrayFloat, length: float, width: float,
            plane: str="XY", n: int=40) -> tuple[ArrayFloat, ArrayFloat]:
    """Return position vectors and line segments for hairpin coil.

    The hairpin consists of two straight legs of ``length`` separated by
    ``width``, closed at both ends by semicircular arcs of radius
    ``width/2``.

    Parameters
    ----------
    center : ndarray(dtype=float, dim=1)
        Coordinate of the center (x, y, z).
    length : float
        Length of coil.
    width : float
        Width of coil (separation between the two straight legs).
    plane : {'XY', 'YZ'}, defaults to 'XY'
        Plane in which the hairpin coil is defined.
    n : int, defaults to 40
        Number of line segments.

    Returns
    -------
    R : ndarray(dtype=float, dim=2)
        Array of position vectors for each small line segment.
    dl : ndarray(dtype=float, dim=2)
        Array of line segment vectors.

    Raises
    ------
    ValueError
        If ``plane`` is not one of 'XY' or 'YZ'.
    """
    P = np.zeros((6, 3))
    if plane == "XY":
        P[0] = center + np.array([-length/2, width/2, 0])
        P[1] = center + np.array([length/2, width/2, 0])
        P[2] = center + np.array([length/2 + width/2, 0, 0])
        P[3] = center + np.array([length/2, -width/2, 0])
        P[4] = center + np.array([-length/2, -width/2, 0])
        P[5] = center + np.array([-length/2 - width/2, 0, 0])
    elif plane == "YZ":
        P[0] = center + np.array([0, -length/2, width/2])
        P[1] = center + np.array([0, length/2, width/2])
        P[2] = center + np.array([0, length/2 + width/2, 0])
        P[3] = center + np.array([0, length/2, -width/2])
        P[4] = center + np.array([0, -length/2, -width/2])
        P[5] = center + np.array([0, -length/2 - width/2, 0])
    else:
        raise ValueError(f"plane must be 'XY' or 'YZ', got {plane!r}")
    lines = np.array([[0, 1], [3, 4]])
    arcs = np.array([[1, 2, 3],
                     [4, 5, 0]])
    # Total wire length = two straight legs plus a full circle of diameter
    # ``width`` (the two end arcs together). The previous expression
    # pi*(width/2)**2 was the circle's *area*, not its circumference, so
    # the element size (and hence segment count) was wrong.
    L = length*2 + np.pi*width
    esize = L/n
    R, dl = coil_segments(P, esize, lines=lines, arcs=arcs)
    return R, dl
def coil_segments(points: ArrayFloat, esize: float,
                  **kw: npt.NDArray[np.int_]) -> tuple[ArrayFloat, ArrayFloat]:
    """Return position vectors and line segments for a coil.

    Parameters
    ----------
    points : ndarray(dtype=float, dim=2)
        List of coordinates (x, y, z).
    esize : float
        Desired element length.
    **kw : keyword arguments
        See below.

    Keyword arguments
    -----------------
    **lines : ndarray(dtype=int, dim=2), optional
        Connectivity matrix (N, 2) with the start and end point of a
        line on each row.
    **circles : ndarray(dtype=int, dim=2), optional
        Array with N circle definitions (N, 3). Each circle is defined
        by three points on its radius, with the current in the
        direction from P1 to P2. The first element of i^th circle [i, 0]
        corresponds to P1, the second element to P2 and the third
        element to P3 of that particular circle.
    **arcs : ndarray(dtype=int, dim=2), optional
        Array with N arc definitions (N, 3). Each arc is defined by
        three points on its radius, with the current running from P1
        via P2 to P3. The first element of i^th arc [i, 0] corresponds
        to P1, the second element to P2 and the third element to P3 of
        that particular arc.

    Returns
    -------
    R : ndarray(dtype=float, dim=2)
        Array of position vectors for each small line segment.
    dl : ndarray(dtype=float, dim=2)
        Array of line segment vectors.
    """
    # Accumulate segments from every entity into (N, 3) arrays.
    R = np.empty((0, 3), float)
    dl = np.empty((0, 3), float)
    # dict.get already returns None for a missing key, so the previous
    # ``kw.get(x) if x in kw else None`` double lookup was redundant.
    lines = kw.get("lines")
    if lines is not None:
        for line in lines:
            dR, ddl = line_segments(points[line[0]], points[line[1]], esize)
            R = np.vstack((R, dR))
            dl = np.vstack((dl, ddl))
    circles = kw.get("circles")
    if circles is not None:
        for circle in circles:
            dR, ddl = circle_segments_3p(points[circle[0]], points[circle[1]],
                                         points[circle[2]], esize)
            R = np.vstack((R, dR))
            dl = np.vstack((dl, ddl))
    arcs = kw.get("arcs")
    if arcs is not None:
        for arc in arcs:
            dR, ddl = circle_segments_3p(points[arc[0]], points[arc[1]],
                                         points[arc[2]], esize, is_arc=True)
            R = np.vstack((R, dR))
            dl = np.vstack((dl, ddl))
    return R, dl
def line_segments(p1: ArrayFloat, p2: ArrayFloat,
                  esize: float) -> tuple[ArrayFloat, ArrayFloat]:
    """Return midpoints and segment vectors for a straight wire.

    The wire from ``p1`` to ``p2`` is split into equal elements whose
    length is as close to ``esize`` as possible while tiling the wire
    exactly.

    Parameters
    ----------
    p1 : ndarray(dtype=float, dim=1)
        Coordinates of start point (x, y, z).
    p2 : ndarray(dtype=float, dim=1)
        Coordinates of end point (x, y, z).
    esize : float
        Desired element length.

    Returns
    -------
    R : ndarray(float, dim=2)
        Array with position vectors for all line segments.
    dl : ndarray(float dim=2)
        Array of line segment length vectors.
    """
    total_length = np.linalg.norm(p2-p1)
    # Round the element count up, then shrink the element size so the
    # elements cover the wire exactly.
    n_elems = int(np.ceil(total_length/esize))
    actual_size = total_length/n_elems
    nodes = np.linspace(p1, p2, n_elems+1)
    midpoints = (nodes[:-1, :] + nodes[1:, :])/2
    # Every element shares the same direction vector along the wire.
    segment = actual_size*(p2-p1)/total_length
    return midpoints, np.tile(segment, (n_elems, 1))
def circle_segments_3p(p1: ArrayFloat, p2: ArrayFloat, p3: ArrayFloat,
esize: float,
is_arc: bool=False) -> tuple[ArrayFloat, ArrayFloat]:
"""Return position vectors and line segments for an arc.
The arc or circle is defined three points in three dimensions. The
current is defined to run from p1 via p2 to p3.
Parameters
----------
p1 : ndarray(dtype=float, dim=1)
Coordinates of the first point (x, y, z)
p2 : | |
self.__set_parameters_0_with_http_info(bt_locator, **kwargs) # noqa: E501
return data
def set_templates(self, bt_locator, **kwargs): # noqa: E501
"""set_templates # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_templates(bt_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param BuildTypes body:
:param bool optimize_settings:
:param str fields:
:return: BuildTypes
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__set_templates_with_http_info(bt_locator, **kwargs) # noqa: E501
else:
(data) = self.__set_templates_with_http_info(bt_locator, **kwargs) # noqa: E501
return data
def set_vcs_labeling_options(self, bt_locator, **kwargs): # noqa: E501
"""set_vcs_labeling_options # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_vcs_labeling_options(bt_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param VcsLabeling body:
:return: VcsLabeling
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__set_vcs_labeling_options_with_http_info(bt_locator, **kwargs) # noqa: E501
else:
(data) = self.__set_vcs_labeling_options_with_http_info(bt_locator, **kwargs) # noqa: E501
return data
def update_vcs_root_entry(self, bt_locator, vcs_root_locator, **kwargs): # noqa: E501
"""update_vcs_root_entry # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_vcs_root_entry(bt_locator, vcs_root_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str vcs_root_locator: (required)
:param VcsRootEntry body:
:param str fields:
:return: VcsRootEntry
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__update_vcs_root_entry_with_http_info(bt_locator, vcs_root_locator, **kwargs) # noqa: E501
else:
(data) = self.__update_vcs_root_entry_with_http_info(bt_locator, vcs_root_locator, **kwargs) # noqa: E501
return data
def update_vcs_root_entry_checkout_rules(self, bt_locator, vcs_root_locator, **kwargs): # noqa: E501
"""update_vcs_root_entry_checkout_rules # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_vcs_root_entry_checkout_rules(bt_locator, vcs_root_locator, async_req=True)
>>> result = thread.get()
:param async_req: bool
:param str bt_locator: (required)
:param str vcs_root_locator: (required)
:param str body:
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.__update_vcs_root_entry_checkout_rules_with_http_info(bt_locator, vcs_root_locator, **kwargs) # noqa: E501
else:
(data) = self.__update_vcs_root_entry_checkout_rules_with_http_info(bt_locator, vcs_root_locator, **kwargs) # noqa: E501
return data
    def __add_agent_requirement_with_http_info(self, bt_locator, **kwargs):  # noqa: E501
        """add_agent_requirement  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.__add_agent_requirement_with_http_info(bt_locator, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str bt_locator: (required)
        :param str fields:
        :param AgentRequirement body:
        :return: AgentRequirement
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Parameters this endpoint accepts, plus the framework-level ones.
        all_params = ['bt_locator', 'fields', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not understand,
        # then fold the accepted kwargs into the flat params dict.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_agent_requirement" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'bt_locator' is set
        if ('bt_locator' not in params or
                params['bt_locator'] is None):
            raise ValueError("Missing the required parameter `bt_locator` when calling `add_agent_requirement`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'bt_locator' in params:
            # Accept either a TeamCityObject wrapper or a raw locator string.
            if isinstance(params['bt_locator'], TeamCityObject):
                path_params['btLocator'] = params['bt_locator'].locator_id
            else:
                path_params['btLocator'] = params['bt_locator']  # noqa: E501

        query_params = []
        if 'fields' in params:
            query_params.append(('fields', params['fields']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # Authentication setting
        auth_settings = []  # noqa: E501

        # Delegate the HTTP POST to the shared API client.
        return self.api_client.call_api(
            '/app/rest/buildTypes/{btLocator}/agent-requirements', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='AgentRequirement',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def __add_artifact_dep_with_http_info(self, bt_locator, **kwargs):  # noqa: E501
        """add_artifact_dep  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.__add_artifact_dep_with_http_info(bt_locator, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str bt_locator: (required)
        :param str fields:
        :param ArtifactDependency body:
        :return: ArtifactDependency
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Parameters this endpoint accepts, plus the framework-level ones.
        all_params = ['bt_locator', 'fields', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not understand,
        # then fold the accepted kwargs into the flat params dict.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_artifact_dep" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'bt_locator' is set
        if ('bt_locator' not in params or
                params['bt_locator'] is None):
            raise ValueError("Missing the required parameter `bt_locator` when calling `add_artifact_dep`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'bt_locator' in params:
            # Accept either a TeamCityObject wrapper or a raw locator string.
            if isinstance(params['bt_locator'], TeamCityObject):
                path_params['btLocator'] = params['bt_locator'].locator_id
            else:
                path_params['btLocator'] = params['bt_locator']  # noqa: E501

        query_params = []
        if 'fields' in params:
            query_params.append(('fields', params['fields']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # Authentication setting
        auth_settings = []  # noqa: E501

        # Delegate the HTTP POST to the shared API client.
        return self.api_client.call_api(
            '/app/rest/buildTypes/{btLocator}/artifact-dependencies', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ArtifactDependency',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def __add_build_type_with_http_info(self, **kwargs):  # noqa: E501
        """add_build_type  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.__add_build_type_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param BuildType body:
        :param str fields:
        :return: BuildType
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Parameters this endpoint accepts, plus the framework-level ones.
        # Note: this endpoint has no required parameters.
        all_params = ['body', 'fields']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not understand,
        # then fold the accepted kwargs into the flat params dict.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_build_type" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'fields' in params:
            query_params.append(('fields', params['fields']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # Authentication setting
        auth_settings = []  # noqa: E501

        # Delegate the HTTP POST to the shared API client.
        return self.api_client.call_api(
            '/app/rest/buildTypes', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='BuildType',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def __add_feature_with_http_info(self, bt_locator, **kwargs):  # noqa: E501
        """add_feature  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.__add_feature_with_http_info(bt_locator, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str bt_locator: (required)
        :param str fields:
        :param Feature body:
        :return: Feature
                 If the method is called asynchronously,
                 returns the request thread.
        """

        # Parameters this endpoint accepts, plus the framework-level ones.
        all_params = ['bt_locator', 'fields', 'body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any keyword argument the endpoint does not understand,
        # then fold the accepted kwargs into the flat params dict.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_feature" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'bt_locator' is set
        if ('bt_locator' not in params or
                params['bt_locator'] is None):
            raise ValueError("Missing the required parameter `bt_locator` when calling `add_feature`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'bt_locator' in params:
            # Accept either a TeamCityObject wrapper or a raw locator string.
            if isinstance(params['bt_locator'], TeamCityObject):
                path_params['btLocator'] = params['bt_locator'].locator_id
            else:
                path_params['btLocator'] = params['bt_locator']  # noqa: E501

        query_params = []
        if 'fields' in params:
            query_params.append(('fields', params['fields']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # Authentication setting
        auth_settings = []  # noqa: E501

        # Delegate the HTTP POST to the shared API client.
        return self.api_client.call_api(
            '/app/rest/buildTypes/{btLocator}/features', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Feature',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def __add_feature_parameter_with_http_info(self, bt_locator, feature_id, parameter_name, **kwargs): # noqa: E501
"""add_feature_parameter # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__add_feature_parameter_with_http_info(bt_locator, feature_id, parameter_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str bt_locator: (required)
:param str feature_id: (required)
:param str parameter_name: (required)
:param str body:
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bt_locator', 'feature_id', 'parameter_name', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
| |
<reponame>mercuree/BoilerPy<filename>boilerpy/filters.py
#!/usr/bin/env python
""" generated source for module MarkEverythingContentFilter """
#
# * boilerpipe
# *
# * Copyright (c) 2009 <NAME>
# *
# * The author licenses this file to You under the Apache License, Version 2.0
# * (the "License"); you may not use this file except in compliance with
# * the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
#
# -----------------------------------------------------------------------
# FILTER MANIFEST
# -----------------------------------------------------------------------
# --------------------- Simple Filters: -----------------------
# MarkEverythingContentFilter - Marks all blocks as content.
# InvertedFilter - Reverts the "isContent" flag for all TextBlocks
# BoilerplateBlockFilter - Removes TextBlocks which have explicitly been marked as "not content".
# MinWordsFilter - Keeps only those content blocks which contain at least k words.
# MinClauseWordsFilter - Keeps only blocks that have at least one segment fragment ("clause") with at least k words
# SplitParagraphBlocksFilter - Splits TextBlocks at paragraph boundaries
# SurroundingToContentFilter
# LabelToBoilerplateFilter - Marks all blocks that contain a given label as "boilerplate".
# LabelToContentFilter - Marks all blocks that contain a given label as "content".
#
# --------------------- Heuristic Filters: -----------------------
# SimpleBlockFusionProcessor - Merges two subsequent blocks if their text densities are equal.
# ContentFusion
# LabelFusion - Fuses adjacent blocks if their labels are equal.
# BlockProximityFusion - Fuses adjacent blocks if their distance (in blocks) does not exceed a certain limit.
# KeepLargestBlockFilter - Keeps the largest {@link TextBlock} only (by the number of words)
# ExpandTitleToContentFilter - Marks all TextBlocks "content" which are between the headline and the part that has already been marked content, if they are marked MIGHT_BE_CONTENT
# ArticleMetadataFilter
# AddPrecedingLabelsFilter - Adds the labels of the preceding block to the current block, optionally adding a prefix.
# DocumentTitleMatchClassifier - Marks TextBlocks which contain parts of the HTML TITLE tag
#
# --------------------- English-trained Heuristic Filters: -----------------------
# MinFulltextWordsFilter - Keeps only those content blocks which contain at least k full-text words
# KeepLargestFulltextBlockFilter - Keeps the largest TextBlock only (by the number of words)
# IgnoreBlocksAfterContentFilter - Marks all blocks as "non-content" that occur after blocks that have been marked INDICATES_END_OF_TEXT
# IgnoreBlocksAfterContentFromEndFilter - like above
# TerminatingBlocksFinder - Finds blocks which are potentially indicating the end of an article text and marks them with INDICATES_END_OF_TEXT
# NumWordsRulesClassifier - Classifies TextBlocks as content/not-content through rules that have been determined using the C4.8 machine learning algorithm
# DensityRulesClassifier - Classifies TextBlocks as content/not-content through rules that have been determined using the C4.8 machine learning algorithm
# CanolaFilter - A full-text extractor trained on krdwrd Canola
import re
from . import document
from .document import DefaultLabels
# Boilerpipe abstract interface
class BoilerpipeFilter(object):
    """Abstract base class for all boilerpipe filters.

    A filter inspects and/or mutates the TextBlocks of a document and
    reports whether it changed anything.
    """

    def process(self, doc):
        """Run the filter on *doc*; subclasses return True when the
        document was modified."""
        pass

    def subtractBlocks(self, blockArr, blocksToRemove):
        """Return *blockArr* minus *blocksToRemove* in a single pass.

        Assumes the blocks to remove appear in *blockArr* in the same
        relative order, which makes a linear merge-style scan possible.
        Returns *blockArr* itself (not a copy) when there is nothing to
        remove.
        """
        if not blocksToRemove:
            return blockArr
        kept = []
        pending = 0  # index into blocksToRemove of the next block to drop
        for pos, candidate in enumerate(blockArr):
            if candidate == blocksToRemove[pending]:
                # drop this block and advance to the next removal target
                pending += 1
                if pending == len(blocksToRemove):
                    # nothing left to remove: keep the remainder verbatim
                    kept.extend(blockArr[pos + 1:])
                    break
            else:
                kept.append(candidate)
        return kept
# chain together multiple filters in sequence
class FilterChain(BoilerpipeFilter):
    """Composite filter that applies a sequence of filters in order."""

    def __init__(self, filterArr):
        super(FilterChain, self).__init__()
        self.filterArr = filterArr

    def process(self, doc):
        """Run every filter over *doc* (all of them, unconditionally) and
        report True when at least one reported a change."""
        # Materialize the list first so no filter is short-circuited away.
        results = [flt.process(doc) for flt in self.filterArr]
        return any(results)
# -----------------------------------------------------------------------
# SIMPLE FILTERS
# -----------------------------------------------------------------------
#
# * Marks all blocks as content.
# *
# * @author <NAME>
#
class MarkEverythingContentFilter(BoilerpipeFilter):
    """Marks every text block in the document as content."""

    def process(self, doc):
        """Flag all blocks as content; True when any flag was flipped."""
        touched = False
        for block in doc.getTextBlocks():
            if block.isContent():
                continue
            block.setIsContent(True)
            touched = True
        return touched
#
# * Reverts the "isContent" flag for all {@link TextBlock}s
# *
# * @author <NAME>
#
class InvertedFilter(BoilerpipeFilter):
    """Inverts the "isContent" flag of every text block."""

    def process(self, doc):
        """Flip every content flag; True unless the document is empty."""
        blocks = doc.getTextBlocks()
        if not blocks:
            return False
        for block in blocks:
            block.setIsContent(not block.isContent())
        return True
#
# * Removes {@link TextBlock}s which have explicitly been marked as "not content".
# *
# * @author <NAME>
#
class BoilerplateBlockFilter(BoilerpipeFilter):
    """Removes text blocks explicitly marked as non-content."""

    def process(self, doc):
        """Keep only content blocks; True when anything was dropped."""
        before = doc.getTextBlocks()
        survivors = [block for block in before if block.isContent()]
        doc.setTextBlocks(survivors)
        # A change happened iff at least one block was filtered out.
        return len(survivors) != len(before)
#
# * Keeps only those content blocks which contain at least <em>k</em> words.
# *
# * @author <NAME>
#
class MinWordsFilter(BoilerpipeFilter):
    """Demotes content blocks containing fewer than *minWords* words."""

    def __init__(self, minWords):
        super(MinWordsFilter, self).__init__()
        self.minWords = minWords

    def process(self, doc):
        """Clear the content flag on too-short content blocks; True when
        at least one block was demoted."""
        demoted = False
        for block in doc.getTextBlocks():
            if block.isContent() and block.getNumWords() < self.minWords:
                block.setIsContent(False)
                demoted = True
        return demoted
#
# * Keeps only blocks that have at least one segment fragment ("clause") with at
# * least <em>k</em> words (default: 5).
# *
# * NOTE: You might consider using the {@link SplitParagraphBlocksFilter}
# * upstream.
# *
# * @author <NAME>
# * @see SplitParagraphBlocksFilter
#
class MinClauseWordsFilter(BoilerpipeFilter):
    """Keeps only blocks that have at least one clause with at least
    *minWords* words (default 5).

    A "clause" is a text fragment terminated by one of , . : ; ! ?.
    Consider running SplitParagraphBlocksFilter upstream.
    """

    # Matches a run of clause delimiters followed by whitespace or end-of-text.
    PAT_CLAUSE_DELIMITER = re.compile(r"\b[\,\.\:\;\!\?]+(?:\s+|\Z)", re.UNICODE)
    # BUG FIX: the pattern was compiled from the non-raw string "\s+", an
    # invalid escape sequence that raises SyntaxWarning on modern Python.
    PAT_WHITESPACE = re.compile(r"\s+")

    def __init__(self, minWords=5, acceptClausesWithoutDelimiter=False):
        super(MinClauseWordsFilter, self).__init__()
        self.minWords = minWords
        # When True, trailing text without a closing delimiter also counts.
        self.acceptClausesWithoutDelimiter = acceptClausesWithoutDelimiter

    def process(self, doc):
        """Demote content blocks that lack an acceptable clause; return
        True when at least one block was demoted."""
        changes = False
        for tb in doc.getTextBlocks():
            if not tb.isContent():
                continue
            fragments = self.PAT_CLAUSE_DELIMITER.split(tb.getText())
            # Every fragment except the last ended with a delimiter.
            hasClause = any(self.isClauseAccepted(fragment)
                            for fragment in fragments[:-1])
            # Clauses should normally end with a delimiter, so the trailing
            # fragment is only considered when explicitly allowed.
            if not hasClause and self.acceptClausesWithoutDelimiter:
                hasClause = self.isClauseAccepted(fragments[-1])
            if not hasClause:
                tb.setIsContent(False)
                changes = True
        return changes

    def isClauseAccepted(self, text):
        """True when *text* contains at least *minWords* words, with the
        word count approximated as whitespace runs + 1."""
        words = 1 + len(self.PAT_WHITESPACE.findall(text))
        return words >= self.minWords
#
# * Splits TextBlocks at paragraph boundaries.
# *
# * NOTE: This is not fully supported (i.e., it will break highlighting support
# * via #getContainedTextElements()), but this one probably is necessary for some other
# * filters.
# *
# * @author <NAME>
# * @see MinClauseWordsFilter
#
class SplitParagraphBlocksFilter(BoilerpipeFilter):
    """Splits text blocks at paragraph boundaries (runs of newlines).

    NOTE: this breaks highlighting support via getContainedTextElements(),
    but some downstream filters (e.g. MinClauseWordsFilter) need it.
    """

    def process(self, doc):
        """Replace multi-paragraph blocks by one block per paragraph;
        True when any block was split."""
        changed = False
        rebuilt = []
        for original in doc.getTextBlocks():
            paragraphs = re.split(r"[\n\r]+", original.getText())
            if len(paragraphs) < 2:
                # Single paragraph: keep the existing block untouched.
                rebuilt.append(original)
                continue
            # Each paragraph inherits the parent's flag and labels.
            content_flag = original.isContent()
            labels = original.getLabels()
            for paragraph in paragraphs:
                piece = document.TextBlock(paragraph)
                piece.setIsContent(content_flag)
                piece.addLabels(labels)
                rebuilt.append(piece)
            changed = True
        if changed:
            doc.setTextBlocks(rebuilt)
        return changed
class SurroundingToContentFilter(BoilerpipeFilter):
    """Promotes a non-content block to content when both of its neighbours
    are content and the block satisfies *condition*.

    The default condition accepts link-free blocks with more than 6 words.
    """

    def __init__(self, condition=lambda tb: tb.getLinkDensity() == 0 and tb.getNumWords() > 6):
        super(SurroundingToContentFilter, self).__init__()
        self.cond = condition

    def process(self, doc):
        """Scan interior blocks and promote the qualifying ones; True when
        anything was promoted."""
        blocks = doc.getTextBlocks()
        total = len(blocks)
        changed = False
        pos = 1
        while pos < total - 1:
            middle = blocks[pos]
            promotable = (not middle.isContent()
                          and blocks[pos - 1].isContent()
                          and blocks[pos + 1].isContent()
                          and self.cond(middle))
            if promotable:
                middle.setIsContent(True)
                changed = True
                # Skip the following block: it served as the content anchor.
                # (The reference Java implementation advances by 2 on every
                # iteration; this port only does so after a promotion.)
                pos += 2
            else:
                pos += 1
        return changed
#
# * Marks all blocks that contain a given label as "boilerplate".
# *
# * @author <NAME>
#
class LabelToBoilerplateFilter(BoilerpipeFilter):
    """Marks every block carrying one of the given labels as boilerplate."""

    def __init__(self, *labels):
        super(LabelToBoilerplateFilter, self).__init__()
        self.labels = labels

    def process(self, doc):
        """Demote labelled content blocks; True when anything changed."""
        demoted = False
        for block in doc.getTextBlocks():
            if not block.isContent():
                continue
            if any(block.hasLabel(lbl) for lbl in self.labels):
                block.setIsContent(False)
                demoted = True
        return demoted
#
# * Marks all blocks that contain a given label as "content".
# *
# * @author <NAME>
#
class LabelToContentFilter(BoilerpipeFilter):
    """Marks every block carrying one of the given labels as content."""

    def __init__(self, *labels):
        super(LabelToContentFilter, self).__init__()
        self.labels = labels

    def process(self, doc):
        """Promote labelled non-content blocks; True when anything changed."""
        promoted = False
        for block in doc.getTextBlocks():
            if block.isContent():
                continue
            if any(block.hasLabel(lbl) for lbl in self.labels):
                block.setIsContent(True)
                promoted = True
        return promoted
# -----------------------------------------------------------------------
# | |
import itertools
import json
import sys
import time
import traceback
import urllib
import hypixel
import asyncio
from discord.ext import commands
from utils.config import Config
from utils.logger import log
from utils import checks
from utils.tools import *
# Module-level bootstrap: load the bot configuration and register the
# Hypixel API key with the hypixel client library at import time.
config = Config()
# setKeys takes a list of keys; only the single configured key is registered
# here (presumably the library can rotate through several — not shown here).
key = [config._hypixelKey] #no push perms anyway, & not my api key
hypixel.setKeys(key)
class Hypixel(commands.Cog):
    def __init__(self, bot):
        """Store a reference to the bot instance this cog is attached to."""
        self.bot = bot
@commands.command(aliases=["hinfo"])
async def hypixelinfo(self, ctx, username: str):
try:
player = hypixel.Player(username)
embed = discord.Embed(description=None)
flogin = player.JSON['firstLogin']
if sys.platform == "windows":
cflogin = datetime.fromtimestamp(flogin / 1000.0).strftime('%A, %B %#d, %Y at %#I:%M %p %Z')
elif sys.platform == "linux":
cflogin = datetime.fromtimestamp(flogin / 1000.0).strftime('%A, %B %-d, %Y at %-I:%M %p %Z')
if ctx.me.color is not None:
embed.color = ctx.me.color
try:
guildname = hypixel.Guild(player.getGuildID()).JSON['name']
except Exception: #shut up code convention i dont care
guildname = "They aren't in a gang."
try:
lmv = player.JSON['mcVersionRp'] #deprecated? constantly doesn't work
except KeyError:
lmv = "They haven't played Minecraft in years, I guess."
try:
llogin = player.JSON['lastLogin']
if sys.platform == "windows":
cllogin = datetime.fromtimestamp(llogin / 1000.0).strftime('%A, %B %#d, %Y at %#I:%M %p %Z')
elif sys.platform == "linux":
cllogin = datetime.fromtimestamp(llogin / 1000.0).strftime('%A, %B %-d, %Y at %-I:%M %p %Z')
except KeyError:
cllogin = "They hid their last login. Figure it out yourself."
plevel = player.getLevel()
cplevel = '{:,.0f}'.format(plevel)
perlevel = (plevel-int(plevel))*100
perlevel2 = '{:,.1f}'.format(perlevel)
embed.title = f"{player.getName()}'s Hypixel Stats"
embed.set_thumbnail(url=f"https://crafatar.com/avatars/{player.UUID}?size=64")
embed.add_field(name="Rank", value=f"{player.getRank()['rank']}")
embed.add_field(name="Level", value=f"{cplevel}\n({perlevel2}% to {int(cplevel) + 1})")
embed.add_field(name="Guild Name", value=f"{guildname}")
embed.add_field(name="First Login", value=f"{cflogin}")
embed.add_field(name="Last Login", value=f"{cllogin}")
embed.add_field(name="Last Minecraft Version played", value=f"{lmv}")
if sys.platform == "windows":
embed.set_footer(
text=f"Requested by: {ctx.message.author} / {datetime.fromtimestamp(time.time()).strftime('%A, %B %#d, %Y at %#I:%M %p %Z')}",
icon_url=ctx.message.author.avatar_url)
elif sys.platform == "linux":
embed.set_footer(
text=f"Requested by: {ctx.message.author} / {datetime.fromtimestamp(time.time()).strftime('%A, %B %-d, %Y at %-I:%M %p %Z')}",
icon_url=ctx.message.author.avatar_url)
await ctx.send(embed=embed)
except hypixel.PlayerNotFoundException:
await ctx.send("Player not found! Try another UUID or username.")
except Exception:
await ctx.send(traceback.print_exc())
@commands.command(aliases=['ginfo', 'hginfo', 'hg'])
async def hguildinfo(self, ctx, *, dirtygname: str):
try:
gname = dirtygname.replace(" ", "%20")
link = f"https://api.hypixel.net/findGuild?key={config._hypixelKey}&byName={gname}" #raw key
r = requests.get(link)
gid = r.json()['guild']
guild = hypixel.Guild(gid)
playercount = len(guild.JSON['members'])
def t5exp():
try:
explist = []
todaysdate = str(datetime.now().date().isoformat())
if 5 <= len(guild.JSON['members']):
smallerone = len(guild.JSON['members'])
else:
smallerone = 5
for w in range(0, len(guild.JSON['members'])):
try:
if guild.JSON['members'][w]['expHistory'][todaysdate] == 0:
continue
else:
ass = guild.JSON['members'][w]['expHistory'][todaysdate]
ass2 = guild.JSON['members'][w]['uuid']
ass3 = [ass2, ass]
explist.append(ass3)
except KeyError:
continue
explist.sort(key=lambda x: x[1], reverse = True)
top5 = list(itertools.islice(explist, smallerone))
if len(guild.JSON['members']) == 4:
uuid1 = hypixel.Player(top5[0][0]).JSON['displayname']
uuid2 = hypixel.Player(top5[1][0]).JSON['displayname']
uuid3 = hypixel.Player(top5[2][0]).JSON['displayname']
uuid4 = hypixel.Player(top5[3][0]).JSON['displayname']
return f"#1 - {uuid1}: {top5[0][1]}\n#2 - {uuid2}: {top5[1][1]}\n#3 - {uuid3}: {top5[2][1]}\n#4 - {uuid4}: {top5[3][1]}"
if len(guild.JSON['members']) == 3:
uuid1 = hypixel.Player(top5[0][0]).JSON['displayname']
uuid2 = hypixel.Player(top5[1][0]).JSON['displayname']
uuid3 = hypixel.Player(top5[2][0]).JSON['displayname']
return f"#1 - {uuid1}: {top5[0][1]}\n#2 - {uuid2}: {top5[1][1]}\n#3 - {uuid3}: {top5[2][1]}"
if len(guild.JSON['members']) == 2:
uuid1 = hypixel.Player(top5[0][0]).JSON['displayname']
uuid2 = hypixel.Player(top5[1][0]).JSON['displayname']
return f"#1 - {uuid1}: {top5[0][1]}\n#2 - {uuid2}: {top5[1][1]}"
if len(guild.JSON['members']) == 1:
uuid1 = hypixel.Player(top5[0][0]).JSON['displayname']
return f"#1 - {uuid1}: {top5[0][1]}"
else:
uuid1 = hypixel.Player(top5[0][0]).JSON['displayname']
uuid2 = hypixel.Player(top5[1][0]).JSON['displayname']
uuid3 = hypixel.Player(top5[2][0]).JSON['displayname']
uuid4 = hypixel.Player(top5[3][0]).JSON['displayname']
uuid5 = hypixel.Player(top5[4][0]).JSON['displayname']
return f"#1 - {uuid1}: {top5[0][1]}\n#2 - {uuid2}: {top5[1][1]}\n#3 - {uuid3}: {top5[2][1]}\n#4 - {uuid4}: {top5[3][1]}\n#5 - {uuid5}: {top5[4][1]}"
except IndexError:
return "No history can be displayed."
except Exception:
traceback.print_exc()
try:
embed = discord.Embed(description=f"{guild.JSON['description']}")
except KeyError:
embed = discord.Embed()
try:
embed.title = f"[{guild.JSON['tag']}] - {guild.JSON['name']} ({playercount} members)"
except KeyError:
embed.title = f"{guild.JSON['name']} - ({playercount} members)"
try:
if guild.JSON['tagColor'] == "YELLOW":
embed.color = discord.Color.from_rgb(255, 247, 13)
elif guild.JSON['tagColor'] == "DARK_GREEN":
embed.color = discord.Color.from_rgb(0, 138, 21)
elif guild.JSON['tagColor'] == "DARK_AQUA":
embed.color = discord.Color.from_rgb(12, 176, 194)
elif guild.JSON['tagColor'] == "GOLD":
embed.color = discord.Color.from_rgb(227, 202, 14)
else:
embed.color = discord.Color.from_rgb(173, 173, 173)
except KeyError:
embed.color = discord.Color.from_rgb(173, 173, 173)
embed.add_field(name='Created', value=f"{datetime.fromtimestamp(guild.JSON['created'] / 1000.0).strftime('%A, %B %-d, %Y at %-I:%M %p %Z')}")
embed.add_field(name='Coins', value=f"{guild.JSON['coins']}")
embed.add_field(name='Experience', value=f"{guild.JSON['exp']}")
embed.add_field(name='Top 5 Gained Exp', value=t5exp())
try:
for s in range(0, len(guild.JSON['preferredGames'])):
#except that arrays start at one @kurt
embed.add_field(name=f'Preferred Games #{s}', value=f"{guild.JSON['preferredGames'][s]}")
except KeyError:
pass
try:
embed.add_field(name='Guild Tag Color', value=f"{guild.JSON['tagColor']}")
except KeyError:
embed.add_field(name='Guild Tag Color', value="GRAY")
if sys.platform == "windows":
embed.set_footer(
text=f"Requested by: {ctx.message.author} / {datetime.fromtimestamp(time.time()).strftime('%A, %B %#d, %Y at %#I:%M %p %Z')}",
icon_url=ctx.message.author.avatar_url)
elif sys.platform == "linux":
embed.set_footer(
text=f"Requested by: {ctx.message.author} / {datetime.fromtimestamp(time.time()).strftime('%A, %B %-d, %Y at %-I:%M %p %Z')}",
icon_url=ctx.message.author.avatar_url)
await ctx.send(embed=embed)
except hypixel.GuildIDNotValid:
await ctx.send("Guild not found! Are you sure you typed it correctly?")
except Exception:
await ctx.send(traceback.format_exc())
@commands.command(aliases=['bedwars', 'binfo', 'bwinfo'])
async def hbedwars(self, ctx, username: str):
try:
player = hypixel.Player(username)
embed = discord.Embed(description=f"Level {player.JSON['achievements']['bedwars_level']}")
embed.title = f"{player.getName()}'s Bedwars Stats"
embed.set_thumbnail(url=f"https://crafatar.com/avatars/{player.UUID}?size=64")
if ctx.me.color is not None:
embed.color = ctx.me.color
try:
embed.add_field(name='Beds Broken', value="{:,}".format(player.JSON['achievements']['bedwars_beds']))
except KeyError:
embed.add_field(name='Beds Broken', value="None. They're innocent, your honor.")
embed.add_field(name='Coins', value=f"{player.JSON['stats']['Bedwars']['coins']}")
embed.add_field(name='Winstreak', value=f"{player.JSON['stats']['Bedwars']['winstreak']}")
embed.add_field(name='Wins', value="{:,}".format(player.JSON['achievements']['bedwars_wins']))
embed.add_field(name='Losses', value="{:,}".format(player.JSON['stats']['Bedwars']['losses_bedwars']))
embed.add_field(name='Kills', value=f"{player.JSON['stats']['Bedwars']['kills_bedwars']}")
embed.add_field(name='Final Kills', value=f"{player.JSON['stats']['Bedwars']['final_kills_bedwars']}")
embed.add_field(name='Deaths', value=f"{player.JSON['stats']['Bedwars']['deaths_bedwars']}")
embed.add_field(name='Final Deaths', value=f"{player.JSON['stats']['Bedwars']['final_deaths_bedwars']}")
embed.add_field(name='Emeralds collected',
value=f"{player.JSON['stats']['Bedwars']['emerald_resources_collected_bedwars']}")
embed.add_field(name='Diamonds collected',
value=f"{player.JSON['stats']['Bedwars']['diamond_resources_collected_bedwars']}")
embed.add_field(name='Gold collected',
value=f"{player.JSON['stats']['Bedwars']['gold_resources_collected_bedwars']}")
embed.add_field(name='Iron collected',
value=f"{player.JSON['stats']['Bedwars']['iron_resources_collected_bedwars']}")
wdr = int(player.JSON['achievements']['bedwars_wins'])/int(player.JSON['stats']['Bedwars']['losses_bedwars'])
kdr = int(player.JSON['stats']['Bedwars']['kills_bedwars'])/int(player.JSON['stats']['Bedwars']['deaths_bedwars'])
fkdr = int(player.JSON['stats']['Bedwars']['final_kills_bedwars']) / int(player.JSON['stats']['Bedwars']['final_deaths_bedwars'])
awdr = '{:,.2f}'.format(wdr)
akdr = '{:,.2f}'.format(kdr)
afkdr = '{:,.2f}'.format(fkdr)
embed.add_field(name='Win/Loss Ratio', value=f"{awdr}")
embed.add_field(name='Kill/Death Ratio', value=f"{akdr}")
embed.add_field(name='Final Kill/Final Death Ratio', value=f"{afkdr}")
if sys.platform == "windows":
embed.set_footer(
text=f"Requested by: {ctx.message.author} / {datetime.fromtimestamp(time.time()).strftime('%A, %B %#d, %Y at %#I:%M %p %Z')}",
icon_url=ctx.message.author.avatar_url)
elif sys.platform == "linux":
embed.set_footer(
text=f"Requested by: {ctx.message.author} / {datetime.fromtimestamp(time.time()).strftime('%A, %B %-d, %Y at %-I:%M %p %Z')}",
icon_url=ctx.message.author.avatar_url)
await ctx.send(embed=embed)
except hypixel.PlayerNotFoundException:
await ctx.send("Player not found! Try another UUID or username.")
except KeyError:
await ctx.send("This user has never played Bed Wars before.")
except Exception:
await ctx.send(traceback.print_exc())
@commands.command(aliases=['skywars', 'sinfo', 'swinfo'])
async def hskywars(self, ctx, username: str):
try:
player = hypixel.Player(username)
embed = discord.Embed(description=None)
embed.title = f"{player.getName()}'s Skywars Stats"
embed.set_thumbnail(url=f"https://crafatar.com/avatars/{player.UUID}?size=64")
if ctx.me.color is not None:
embed.color = ctx.me.color
embed.add_field(name="Coins",
value=f"{player.JSON['stats']['SkyWars']['coins']}")
embed.add_field(name="Kills (Solo)", value=f"{player.JSON['achievements']['skywars_kills_solo']}")
embed.add_field(name="Kills (Teams)",
value=f"{player.JSON['achievements']['skywars_kills_team']}")
embed.add_field(name="Wins (Solo)",
value=f"{player.JSON['achievements']['skywars_wins_solo']}")
embed.add_field(name="Wins (Teams)",
value=f"{player.JSON['achievements']['skywars_wins_team']}")
embed.add_field(name="Kills (Solo)",
value=f"{player.JSON['achievements']['skywars_kills_solo']}")
embed.add_field(name="Deaths",
value=f"{player.JSON['stats']['SkyWars']['deaths']}")
embed.add_field(name="Games Played",
value=f"{player.JSON['stats']['SkyWars']['games']}")
try:
embed.add_field(name="Lucky Blocks Wins", value=f"{player.JSON['stats']['SkyWars']['lab_win_lucky_blocks_lab']}")
except KeyError:
embed.add_field(name="Lucky Blowck Wins", value="They have not won in LUCKY BLOWCKS")
wdr = (int(player.JSON['achievements']['skywars_wins_solo'])+int(player.JSON['achievements']['skywars_wins_team']))/(int(player.JSON['stats']['SkyWars']['deaths']))
kdr = (int(player.JSON['achievements']['skywars_kills_solo'])+int(player.JSON['achievements']['skywars_kills_team']))/(int(player.JSON['stats']['SkyWars']['deaths']))
awdr = '{:,.2f}'.format(wdr)
akdr = '{:,.2f}'.format(kdr)
embed.add_field(name='Win/Loss Ratio (Overall)', value=f"{awdr}")
embed.add_field(name='Kill/Death Ratio (Overall)', value=f"{akdr}")
if sys.platform == "windows":
embed.set_footer(
text=f"Requested by: {ctx.message.author} / {datetime.fromtimestamp(time.time()).strftime('%A, %B %#d, %Y at %#I:%M %p %Z')}",
icon_url=ctx.message.author.avatar_url)
elif sys.platform == "linux":
embed.set_footer(
text=f"Requested by: {ctx.message.author} / {datetime.fromtimestamp(time.time()).strftime('%A, %B %-d, %Y at %-I:%M %p %Z')}",
icon_url=ctx.message.author.avatar_url)
await ctx.send(embed=embed)
except hypixel.PlayerNotFoundException:
await ctx.send("Player not found! Try another UUID or username.")
except KeyError:
await ctx.send("This user has never played Skywars before.")
except Exception:
await ctx.send(traceback.print_exc())
@commands.command(aliases=['playercount'])
async def hpc(self, ctx):
try:
pc = hypixel.getJSON('playercount')['playerCount']
queue = hypixel.getJSON('gameCounts')['games']['QUEUE']['players']
if queue == 0:
embed = discord.Embed(description=f"Total people online - {pc} players")
else:
embed = discord.Embed(description=f"Total people online - {pc} players ({queue} in queue)")
embed.title = "Hypixel Player Count"
if ctx.me.color is not None:
embed.color = ctx.me.color
embed.add_field(name="Skyblock", value=f"{hypixel.getJSON('gameCounts')['games']['SKYBLOCK']['players']}")
embed.add_field(name="Bed Wars", value=f"{hypixel.getJSON('gameCounts')['games']['BEDWARS']['players']}")
embed.add_field(name="AFK", value=f"{hypixel.getJSON('gameCounts')['games']['IDLE']['players']}")
embed.add_field(name="Skywars", value=f"{hypixel.getJSON('gameCounts')['games']['SKYWARS']['players']}")
embed.add_field(name="Housing", value=f"{hypixel.getJSON('gameCounts')['games']['HOUSING']['players']}")
embed.add_field(name="Duels", value=f"{hypixel.getJSON('gameCounts')['games']['DUELS']['players']}")
embed.add_field(name="Arcade Games", value=f"{hypixel.getJSON('gameCounts')['games']['ARCADE']['players']}")
embed.add_field(name="Murder Mystery", value=f"{hypixel.getJSON('gameCounts')['games']['MURDER_MYSTERY']['players']}")
embed.add_field(name="Build Battle", value=f"{hypixel.getJSON('gameCounts')['games']['BUILD_BATTLE']['players']}")
embed.add_field(name="The Pit", value=f"{hypixel.getJSON('gameCounts')['games']['PIT']['players']}")
embed.add_field(name="Prototype Games", value=f"{hypixel.getJSON('gameCounts')['games']['PROTOTYPE']['players']}")
embed.add_field(name="TNT Games", value=f"{hypixel.getJSON('gameCounts')['games']['TNTGAMES']['players']}")
embed.add_field(name="UHC", value=f"{hypixel.getJSON('gameCounts')['games']['UHC']['players']}")
embed.add_field(name="Classic Games", value=f"{hypixel.getJSON('gameCounts')['games']['LEGACY']['players']}")
embed.add_field(name="Mega Walls", value=f"{hypixel.getJSON('gameCounts')['games']['WALLS3']['players']}")
embed.add_field(name="Main Lobby", value=f"{hypixel.getJSON('gameCounts')['games']['MAIN_LOBBY']['players']}")
embed.add_field(name="Survival Games", value=f"{hypixel.getJSON('gameCounts')['games']['SURVIVAL_GAMES']['players']}")
embed.add_field(name="Stuck in Limbo", value=f"{hypixel.getJSON('gameCounts')['games']['LIMBO']['players']}")
embed.add_field(name="Cops and Crims", value=f"{hypixel.getJSON('gameCounts')['games']['MCGO']['players']}")
embed.add_field(name="Warlords", value=f"{hypixel.getJSON('gameCounts')['games']['BATTLEGROUND']['players']}")
embed.add_field(name="Super Smash™", value=f"{hypixel.getJSON('gameCounts')['games']['SUPER_SMASH']['players']}")
embed.add_field(name="Speed UHC", value=f"{hypixel.getJSON('gameCounts')['games']['SPEED_UHC']['players']}")
embed.add_field(name="Turbo Kart Racer", value=f"{hypixel.getJSON('gameCounts')['games']['LEGACY']['modes']['GINGERBREAD']}")
embed.add_field(name="Rich people doing replays", value=f"{hypixel.getJSON('gameCounts')['games']['REPLAY']['players']}")
if sys.platform == "windows":
embed.set_footer(
text=f"Requested by: {ctx.message.author} / {datetime.fromtimestamp(time.time()).strftime('%A, %B %#d, %Y at %#I:%M %p %Z')}",
icon_url=ctx.message.author.avatar_url)
elif sys.platform == "linux":
embed.set_footer(
text=f"Requested by: {ctx.message.author} / {datetime.fromtimestamp(time.time()).strftime('%A, %B %-d, %Y at %-I:%M %p %Z')}",
icon_url=ctx.message.author.avatar_url)
await ctx.send(embed=embed)
except Exception:
await ctx.send(traceback.print_exc())
@commands.command(aliases=['duelsinfo', 'dinfo', 'hdinfo'])
async def hduels(self, ctx, username: str):
try:
player = hypixel.Player(username)
embed = discord.Embed(description=f"They've played {player.JSON['stats']['Duels']['games_played_duels']} times.")
embed.title = f"{username}'s Duels Stats"
embed.set_thumbnail(url=f"https://crafatar.com/avatars/{player.UUID}?size=64")
embed.add_field(name="Coins", value=f"{player.JSON['stats']['Duels']['coins']}")
embed.add_field(name="Wins", value=f"{player.JSON['stats']['Duels']['wins']}")
embed.add_field(name="Losses", value=f"{player.JSON['stats']['Duels']['losses']}")
embed.add_field(name="Deaths", value=f"{player.JSON['stats']['Duels']['deaths']}")
try:
embed.add_field(name="Kills", value=f"{player.JSON['stats']['Duels']['kills']}")
except KeyError:
embed.add_field(name="Kills", value="0")
try:
embed.add_field(name="Cosmetic Title", value=f"{player.JSON['stats']['Duels']['active_cosmetictitle']}")
except KeyError:
pass
embed.add_field(name="Goals Hit", value=f"{player.JSON['stats']['Duels']['goals']} times")
embed.add_field(name="Bow Shots", value=f"{player.JSON['stats']['Duels']['bow_shots']}")
embed.add_field(name="Bow Hits", value=f"{player.JSON['stats']['Duels']['bow_hits']}")
wdr = int(player.JSON['stats']['Duels']['wins'])/int(player.JSON['stats']['Duels']['losses'])
try:
kdr = int(player.JSON['stats']['Duels']['kills'])/int(player.JSON['stats']['Duels']['deaths'])
except KeyError:
kdr = int(0/int(player.JSON['stats']['Duels']['deaths']))
awdr = '{:,.2f}'.format(wdr)
akdr = '{:,.2f}'.format(kdr)
if akdr == "0.00":
akdr = "0"
else:
pass
embed.add_field(name='Win/Loss Ratio', value=f"{awdr}")
embed.add_field(name='Kill/Death Ratio', value=f"{akdr}")
if sys.platform == "windows":
embed.set_footer(
text=f"Requested by: {ctx.message.author} / {datetime.fromtimestamp(time.time()).strftime('%A, %B %#d, %Y at %#I:%M %p %Z')}",
icon_url=ctx.message.author.avatar_url)
elif sys.platform == "linux":
embed.set_footer(
text=f"Requested | |
and
associated name servers
show cache : show cache status
add acl : add acl entry
clear acl <id> : clear the content of this acl
del acl : delete acl entry
get acl : report the patterns matching a sample for an ACL
show acl [id] : report available acls or dump an acl's contents
add map : add map entry
clear map <id> : clear the content of this map
del map : delete map entry
get map : report the keys and values matching a sample for a map
set map : modify map entry
show map [id] : report available maps or dump a map's contents
trace <module> [cmd [args...]] : manage live tracing
show trace [<module>] : show live tracing state
show threads : show some threads debugging information
show pools : report information about the memory pools usage
show events [<sink>] : show event sink state
show profiling : show CPU profiling options
set profiling : enable/disable CPU profiling
[root@centos7 ~]#echo "show info" | socat stdio /var/lib/haproxy/haproxy.sock
Name: HAProxy
Version: 2.1.3
Release_date: 2020/02/12
Nbthread: 4
Nbproc: 1
Process_num: 1
Pid: 2279
Uptime: 0d 0h46m07s
Uptime_sec: 2767
Memmax_MB: 0
PoolAlloc_MB: 0
PoolUsed_MB: 0
PoolFailed: 0
Ulimit-n: 200041
Maxsock: 200041
Maxconn: 100000
Hard_maxconn: 100000
CurrConns: 0
CumConns: 1
CumReq: 1
MaxSslConns: 0
CurrSslConns: 0
CumSslConns: 0
Maxpipes: 0
PipesUsed: 0
PipesFree: 0
ConnRate: 0
ConnRateLimit: 0
MaxConnRate: 0
SessRate: 0
SessRateLimit: 0
MaxSessRate: 0
SslRate: 0
SslRateLimit: 0
MaxSslRate: 0
SslFrontendKeyRate: 0
SslFrontendMaxKeyRate: 0
SslFrontendSessionReuse_pct: 0
SslBackendKeyRate: 0
SslBackendMaxKeyRate: 0
SslCacheLookups: 0
SslCacheMisses: 0
CompressBpsIn: 0
CompressBpsOut: 0
CompressBpsRateLim: 0
ZlibMemUsage: 0
MaxZlibMemUsage: 0
Tasks: 19
Run_queue: 1
Idle_pct: 100
node: centos7.wangxiaochun.<EMAIL>
Stopping: 0
Jobs: 7
Unstoppable Jobs: 0
Listeners: 6
ActivePeers: 0
ConnectedPeers: 0
DroppedLogs: 0
BusyPolling: 0
FailedResolutions: 0
TotalBytesOut: 0
BytesOutRate: 0
DebugCommandsIssued: 0
[root@centos7 ~]#cat /etc/haproxy/haproxy.cfg
......
listen magedu-test-80
bind :81,:82
mode http
server web1 10.0.0.17:80 check inter 3000 fall 3 rise 5
server web2 10.0.0.27:80 check weight 3
......
[root@centos7 ~]#echo "show servers state" | socat stdio /var/lib/haproxy/haproxy.sock
1
# be_id be_name srv_id srv_name srv_addr srv_op_state srv_admin_state srv_uweight srv_iweight srv_time_since_last_change srv_check_status srv_check_result srv_check_health srv_check_state srv_agent_state bk_f_forced_id srv_f_forced_id srv_fqdn srv_port srvrecord
2 magedu-test-80 1 web1 10.0.0.17 2 0 2 1 812 6 3 7 6 0 0 0 - 80 -
2 magedu-test-80 2 web2 10.0.0.27 2 0 2 3 812 6 3 4 6 0 0 0 - 80 -
4 web_port 1 web1 127.0.0.1 0 0 1 1 810 8 2 0 6 0 0 0 - 8080 -
[root@centos7 ~]#echo "get weight magedu-test-80/web2" | socat stdio /var/lib/haproxy/haproxy.sock
3 (initial 3)
#修改weight,注意只针对单进程有效
[root@centos7 ~]#echo "set weight magedu-test-80/web2 2" | socat stdio /var/lib/haproxy/haproxy.sock
[root@centos7 ~]#echo "get weight magedu-test-80/web2" | socat stdio /var/lib/haproxy/haproxy.sock
2 (initial 3)
#将后端服务器禁用,注意只针对单进程有效
[root@centos7 ~]#echo "disable server magedu-test-80/web2" | socat stdio /var/lib/haproxy/haproxy.sock
#将后端服务器软下线,即weight设为0
[root@centos7 ~]#echo "set weight magedu-test-80/web1 0" | socat stdio /var/lib/haproxy/haproxy.sock
#将后端服务器禁用,针对多进程
[root@centos7 ~]#vim /etc/haproxy/haproxy.cfg
......
stats socket /var/lib/haproxy/haproxy1.sock mode 600 level admin process 1
stats socket /var/lib/haproxy/haproxy2.sock mode 600 level admin process 2 nbproc 2
.....
[root@centos7 ~]#echo "disable server magedu-test-80/web2" | socat stdio /var/lib/haproxy/haproxy1.sock
[root@centos7 ~]#echo "disable server magedu-test-80/web2" | socat stdio /var/lib/haproxy/haproxy2.sock
[root@haproxy ~]#for i in {1..2};do echo "set weight magedu-test-80/web$i 10" | socat stdio /var/lib/haproxy/haproxy$i.sock;done
#如果静态算法,如:static-rr,可以更改weight为0或1,但不支持动态更改weight为其它值,否则会提示下面信息
[root@centos7 ~]#echo "set weight magedu-test-80/web1 0" | socat stdio /var/lib/haproxy/haproxy.sock
[root@centos7 ~]#echo "set weight magedu-test-80/web1 1" | socat stdio /var/lib/haproxy/haproxy.sock
[root@centos7 ~]#echo "set weight magedu-test-80/web1 2" | socat stdio /var/lib/haproxy/haproxy.sock
Backend is using a static LB algorithm and only accepts weights '0%' and '100%'.
'''
static-rr
static-rr:基于权重的轮询调度,不支持权重的运行时利用socat进行动态调整及后端服务器慢启动,其后端主机数量没有限制,相当于LVS中的 wrr
'''
listen web_host
bind 10.0.0.7:80,:8801-8810,10.0.0.7:9001-9010
mode http
log global
balance static-rr
server web1 10.0.0.17:80 weight 1 check inter 3000 fall 2 rise 5
server web2 10.0.0.27:80 weight 2 check inter 3000 fall 2 rise 5
'''
first
first:根据服务器在列表中的位置,自上而下进行调度,但是其只会当第一台服务器的连接数达到上限,新请求才会分配给下一台服务,因此会忽略服务器的权重设置,此方式使用较少
'''
listen web_host
bind 10.0.0.7:80,:8801-8810,10.0.0.7:9001-9010
mode http
log global
balance first
server web1 10.0.0.17:80 maxconn 2 weight 1 check inter 3000 fall 2 rise 5
server web2 10.0.0.27:80 weight 1 check inter 3000 fall 2 rise 5
'''
测试访问效果
'''
#同时运行下面命令,观察结果
# while true;do curl http://10.0.0.7/index.html ; sleep 0.1;done
'''
动态算法
动态算法:基于后端服务器状态进行调度适当调整,优先调度至当前负载较低的服务器,且权重可以在haproxy运行时动态调整无需重启。
roundrobin
roundrobin:基于权重的轮询动态调度算法,支持权重的运行时调整,不同于lvs中的rr轮训模式,HAProxy中的roundrobin支持慢启动(新加的服务器会逐渐增加转发数),其每个后端backend中最多支持4095个real server,支持对real server权重动态调整,roundrobin为默认调度算法
'''
listen web_host
bind 10.0.0.7:80,:8801-8810,10.0.0.7:9001-9010
mode http
log global
balance roundrobin
server web1 10.0.0.17:80 weight 1 check inter 3000 fall 2 rise 5
server web2 10.0.0.27:80 weight 2 check inter 3000 fall 2 rise 5
'''
支持动态调整权重:
'''
# echo "get weight web_host/web1" | socat stdio /var/lib/haproxy/haproxy.sock
1 (initial 1)
# echo "set weight web_host/web1 3" | socat stdio /var/lib/haproxy/haproxy.sock
# echo "get weight web_host/web1" | socat stdio /var/lib/haproxy/haproxy.sock
3 (initial 1)
'''
leastconn
leastconn加权的最少连接的动态,支持权重的运行时调整和慢启动,即当前后端服务器连接最少的优先调度(新客户端连接),比较适合长连接的场景使用,比如:MySQL等场景。
'''
listen web_host
bind 10.0.0.7:80,:8801-8810,10.0.0.7:9001-9010
mode http
log global
balance leastconn
server web1 10.0.0.17:80 weight 1 check inter 3000 fall 2 rise 5
server web2 10.0.0.27:80 weight 1 check inter 3000 fall 2 rise 5
'''
random
在1.9版本开始增加一个叫做random的负载平衡算法,其基于随机数作为一致性hash的key,随机负载平衡对于大型服务器场或经常添加或删除服务器非常有用,支持weight的动态调整,weight较大的主机有更大概率获取新请求
random配置实例
'''
listen web_host
bind 10.0.0.7:80,:8801-8810,10.0.0.7:9001-9010
mode http
log global
balance random
server web1 10.0.0.17:80 weight 1 check inter 3000 fall 2 rise 5
server web2 10.0.0.27:80 weight 1 check inter 3000 fall 2 rise 5
'''
其他算法
其它算法即可作为静态算法,又可以通过选项成为动态算法
source
源地址hash,基于用户源地址hash并将请求转发到后端服务器,后续同一个源地址请求将被转发至同一个后端web服务器。此方式当后端服务器数据量发生变化时,会导致很多用户的请求转发至新的后端服务器,默认为静态方式,但是可以通过hash-type支持的选项更改
这个算法一般是在不插入Cookie的TCP模式下使用,也可给拒绝会话cookie的客户提供最好的会话粘性,适用于session会话保持但不支持cookie和缓存的场景
源地址有两种转发客户端请求到后端服务器的服务器选取计算方式,分别是取模法和一致性hash
map-base取模法
map-based:取模法,对source地址进行hash计算,再基于服务器总权重的取模,最终结果决定将此请求转发至对应的后端服务器。此方法是静态的,即不支持在线调整权重,不支持慢启动,可实现对后端服务器均衡调度。缺点是当服务器的总权重发生变化时,即有服务器上线或下线,都会因总权重发生变化而导致调度结果整体改变,hash-type 指定的默认值为此算法
'''
所谓取模运算,就是计算两个数相除之后的余数,10%7=3, 7%4=3
map-based算法:基于权重取模,hash(source_ip)%所有后端服务器相加的总权重
'''
取模法配置示例:
'''
listen web_host
bind 10.0.0.7:80,:8801-8810,10.0.0.7:9001-9010
mode tcp
log global
balance source
hash-type map-based
server web1 10.0.0.17:80 weight 1 check inter 3000 fall 2 rise 3
server web2 10.0.0.27:80 weight 1 check inter 3000 fall 2 rise 3
[root@haproxy ~]#echo "set weight web_host/10.0.0.27 10" | socat stdio /var/lib/haproxy/haproxy.sock
Backend is using a static LB algorithm and only accepts weights '0%' and '100%'.
[root@haproxy ~]#echo "set weight web_host/10.0.0.27 0" | socat stdio /var/lib/haproxy/haproxy.sock
[root@haproxy conf.d]#echo "get weight web_host/10.0.0.27" | socat stdio /var/lib/haproxy/haproxy.sock
0 (initial 1)
'''
一致性hash
一致性哈希:当服务器的总权重发生变化时,对调度结果的影响是局部的,不会引起大的变动,即 hash(o) mod n。该hash算法是动态的,支持使用 socat 等工具进行在线权重调整,支持慢启动
算法:
'''
1、key1=hash(source_ip)%(2^32) [0---4294967295]
2、keyA=hash(后端服务器虚拟ip)%(2^32)
3、将key1和keyA都放在hash环上,将用户请求调度到离key1最近的keyA对应的后端服务器
'''
hash环偏斜问题
'''
增加虚拟服务器IP数量,比如:一个后端服务器根据权重为1生成1000个虚拟IP,再hash。而后端服务器权重为2则生成2000个虚拟IP,再hash,最终在hash环上生成3000个节点,从而解决hash环偏斜问题
'''
hash对象
Hash对象到后端服务器的映射关系:
Haproxy-调度算法详解插图
一致性hash示意图
后端服务器在线与离线的调度方式
Haproxy-调度算法详解插图(1)
Haproxy-调度算法详解插图(2)
一致性hash配置示例
'''
listen web_host
bind 10.0.0.7:80,:8801-8810,10.0.0.7:9001-9010
mode tcp
log global
balance source
hash-type consistent
server web1 10.0.0.17:80 weight 1 check inter 3000 fall 2 rise 5
server web2 10.0.0.27:80 weight 1 check inter 3000 fall 2 rise 5
'''
uri
基于对用户请求的URI的左半部分或整个uri做hash,再将hash结果对总权重进行取模后,根据最终结果将请求转发到后端指定服务器,适用于后端是缓存服务器场景,默认是静态,也可以通过hash-type指定map-based和consistent,来定义使用取模法还是一致性hash。
注意:此算法是应用层,所有只支持 mode http ,不支持 mode tcp
'''
<scheme>://<user>:<password>@<host>:<port>/<path>;<params>?<query>#<frag>
左半部分:/<path>;<params>
整个uri:/<path>;<params>?<query>#<frag>
'''
Haproxy-调度算法详解插图(3)
uri 取模法配置示例
'''
listen web_host
bind 10.0.0.7:80,:8801-8810,10.0.0.7:9001-9010
mode http
log global
balance uri
server web1 10.0.0.17:80 weight 1 check inter 3000 fall 2 rise 5
server web2 10.0.0.27:80 weight 1 check inter 3000 fall 2 rise 5
'''
uri 一致性hash配置示例
'''
listen web_host
bind 10.0.0.7:80,:8801-8810,10.0.0.7:9001-9010
mode http
log global
balance uri
hash-type consistent
server web1 10.0.0.17:80 weight 1 check inter 3000 fall 2 rise 5
server web2 10.0.0.27:80 weight 1 check inter 3000 fall 2 rise 5
'''
访问测试
访问不同的uri,确认可以将用户同样的请求转发至相同的服务器
'''
# curl http://10.0.0.7/test1.html
# curl http://10.0.0.7/test2.html
'''
url_param
url_param对用户请求的url中的 params 部分中的一个参数key对应的value值作hash计算,并对服务器总权重取模以后派发至某挑出的服务器;通常用于追踪用户,以确保来自同一个用户的请求始终发往同一个real server,如果没有key,则按roundrobin算法调度
'''
假设:
url = http://www.magedu.com/foo/bar/index.php?key=value
则:
host = "www.magedu.com"
url_param = "key=value"
'''
url_param取模法配置示例
'''
listen web_host
bind 10.0.0.7:80,:8801-8810,10.0.0.7:9001-9010
mode http
log global
balance url_param userid #url_param hash
server web1 10.0.0.17:80 weight 1 check inter 3000 fall 2 rise 5
server web2 10.0.0.27:80 weight 1 check inter 3000 fall 2 rise 5
'''
url_param一致性hash配置示例
'''
listen web_host
bind 10.0.0.7:80,:8801-8810,10.0.0.7:9001-9010
mode http
log global
balance url_param userid #对url_param的值取hash
hash-type consistent
server web1 10.0.0.17:80 weight 1 check inter 3000 fall 2 rise 5
server web2 10.0.0.27:80 weight 1 check inter 3000 fall 2 rise 5
'''
测试访问
'''
# curl http://10.0.0.7/index.html?userid=<NAME_ID>
# curl "http://10.0.0.7/index.html?userid=<NAME_ID>&typeid=<TYPE_ID>"
'''
hdr
针对用户每个http头部(header)请求中的指定信息做hash,此处由 name 指定的http首部将会被取出并做hash计算,然后由服务器总权重取模以后派发至某挑出的服务器,如无有效的值,则会使用默认的轮询调度。
hdr取模法配置示例
'''
listen web_host
bind 10.0.0.7:80,:8801-8810,10.0.0.7:9001-9010
mode http
log global
balance hdr(User-Agent)
#balance hdr(host)
server web1 10.0.0.17:80 weight 1 check inter 3000 fall 2 rise 5
server web2 10.0.0.27:80 weight 1 check inter 3000 fall 2 rise 5
'''
一致性hash配置示例
'''
listen web_host
bind 10.0.0.7:80,:8801-8810,10.0.0.7:9001-9010
mode http
log global
balance hdr(User-Agent)
hash-type consistent
server web1 10.0.0.17:80 weight 1 check inter 3000 fall 2 rise 5
server web2 10.0.0.27:80 weight 1 check inter 3000 fall 2 rise 5
'''
测试访问
'''
[root@centos6 ~]#curl -v http://10.0.0.7/index.html
[root@centos6 ~]#curl -vA 'firefox' http://10.0.0.7/index.html
[root@centos6 ~]#curl -vA 'chrome' http://10.0.0.7/index.html
'''
rdp-cookie
rdp-cookie对远windows远程桌面的负载,使用cookie保持会话,默认是静态,也可以通过hash-type指定map-based和consistent,来定义使用取模法还是一致性hash。
rdp-cookie取模法配置示例
'''
listen RDP
bind 10.0.0.7:3389
balance rdp-cookie
mode tcp
server rdp0 10.0.0.17:3389 check fall 3 rise 5 inter 2000 weight 1
'''
rdp-cookie一致性hash配置示例
'''
[root@haproxy ~]#cat /etc/haproxy/conf.d/windows_rdp.cfg
listen magedu_RDP_3389
bind 172.16.0.100:3389
balance rdp-cookie
hash-type consistent
mode tcp
server rdp0 10.0.0.200:3389 check fall 3 rise 5 inter 2000 weight 1
[root@haproxy ~]#hostname -I
10.0.0.100 172.16.0.100
'''
Haproxy-调度算法详解插图(4)
Haproxy-调度算法详解插图(5)
基于iptables实现RDP协议转发
必须开启ip转发功能:
'''
net.ipv4.ip_forward = 1
'''
'''
[root@centos8 ~]#sysctl -w net.ipv4.ip_forward=1
#客户端和Windows在不同网段需要下面命令
[root@centos8 ~]#iptables -t nat -A PREROUTING -d 172.16.0.100 -p tcp --dport 3389 -j DNAT --to-destination 10.0.0.200:3389
#客户端和Windows在同一网段需要再执行下面命令
[root@centos8 ~]#iptables -t nat -A PREROUTING -d 10.0.0.8 -p tcp --dport 3389 -j DNAT --to-destination 10.0.0.1:3389
[root@centos8 ~]#iptables -t nat -A POSTROUTING -s 10.0.0.0/24 -j SNAT --to-source 10.0.0.8
'''
在windows 可以看到以下信息
Haproxy-调度算法详解插图(6)
算法总结
'''
static-rr--------->tcp/http 静态
first------------->tcp/http 静态
roundrobin-------->tcp/http 动态
leastconn--------->tcp/http 动态
random------------>tcp/http 动态
以下静态和动态取决于hash_type是否consistent
source------------>tcp/http
Uri--------------->http
url_param--------->http
hdr--------------->http
rdp-cookie-------->tcp
'''
各算法使用场景
'''
first #使用较少
static-rr #做了session共享的web集群
roundrobin
random
leastconn #数据库
source #基于客户端公网IP的会话保持
Uri--------------->http #缓存服务器,CDN服务商,蓝汛、百度、阿里云、腾讯
url_param--------->http
hdr #基于客户端请求报文头部做下一步处理
rdp-cookie #很少使用
'''
------------------------------
haproxy - 基于 cookie 的会话保持
高级功能及配置
介绍HAProxy高级配置及实用案例
基于cookie的会话保持
cookie value:为当前server指定cookie值,实现基于cookie的会话黏性,相对于基于 source 地址 hash 调度算法对客户端的粒度更精准,但同时也加大了haproxy负载,目前此模式使用较少, 已经被session共享服务器代替
注意:不支持 tcp mode,使用 http mode
配置选项
'''
cookie name [ rewrite | insert | prefix ][ indirect ] [ nocache ][ postonly ] [ preserve ][ httponly ] [ secure ][ domain ]* [ maxidle <idle> ][ maxlife ]
name: #cookie 的key名称,用于实现持久连接
insert: #插入新的cookie,默认不插入cookie
indirect: #如果客户端已经有cookie,则不会再发送cookie信息
nocache: #当client和hapoxy之间有缓存服务器(如:CDN)时,不允许中间缓存器缓存cookie,因为这会导致很多经过同一个CDN的请求都发送到同一台后端服务器
'''
配置示例
'''
listen web_port
bind 10.0.0.7:80
balance roundrobin
mode http #不支持 tcp mode
log global
cookie WEBSRV insert nocache indirect
server web1 10.0.0.17:80 check inter 3000 fall 2 rise 5 cookie web1
server web2 10.0.0.27:80 check inter 3000 fall 2 rise 5 cookie web2
'''
验证cookie信息
浏览器验证:
haproxy-基于cookie的会话保持插图
haproxy-基于cookie的会话保持插图(1)
通过命令行验证:
'''
[root@centos6 ~]#curl -i 10.0.0.7
HTTP/1.1 200 OK
date: Thu, 02 Apr 2020 02:26:08 GMT
server: Apache/2.4.6 (CentOS)
last-modified: Thu, 02 Apr 2020 01:44:28 GMT
etag: "a-5a244f0fd5175"
accept-ranges: bytes
content-length: 10
content-type: | |
]
if ( socket . ntohs ( oOo00Oo0o00oo ) == LISP_AFI_NAME ) :
packet = packet [ OO00OO : : ]
packet , self . hostname = lisp_decode_dist_name ( packet )
if 13 - 13: i11iIiiIii * O0 . OoooooooOO % I1Ii111 + I1ii11iIi11i + OOooOOo
if 45 - 45: oO0o % i11iIiiIii / Ii1I / IiII % Ii1I - Ii1I
return ( iIiiII11 )
if 73 - 73: I1ii11iIi11i * I1ii11iIi11i / II111iiii % iII111i
if 74 - 74: OoO0O00 / I1ii11iIi11i - ooOoO0o * i1IIi + I1ii11iIi11i . I11i
if 13 - 13: iII111i + o0oOOo0O0Ooo / iII111i - Ii1I - iII111i
if 34 - 34: IiII . OOooOOo + OOooOOo - OoooooooOO * I1Ii111
if 72 - 72: iIii1I11I1II1 % i1IIi / OoO0O00 / I1IiiI - II111iiii - I1Ii111
oOo0ooO0O0oo = "HHBBHHH"
OO00OO = struct . calcsize ( oOo0ooO0O0oo )
if ( len ( packet ) < OO00OO ) : return ( None )
if 43 - 43: o0oOOo0O0Ooo - Oo0Ooo - I1ii11iIi11i / II111iiii + I1IiiI / I1ii11iIi11i
oOo00Oo0o00oo , I1I111 , OOo000OOoOO , O0ooO , ii1iII1i1iiIi , o0o0OoOo000O , Iii11i1 = struct . unpack ( oOo0ooO0O0oo , packet [ : OO00OO ] )
if 34 - 34: Oo0Ooo
if 21 - 21: I1IiiI / I1IiiI % I1Ii111 - OoOoOO00 % OoOoOO00 - II111iiii
if ( socket . ntohs ( oOo00Oo0o00oo ) != LISP_AFI_LCAF ) : return ( None )
if 97 - 97: oO0o
self . ms_port = socket . ntohs ( o0o0OoOo000O )
self . etr_port = socket . ntohs ( Iii11i1 )
packet = packet [ OO00OO : : ]
if 98 - 98: I1Ii111 * I1IiiI + iIii1I11I1II1
if 75 - 75: oO0o
if 50 - 50: oO0o / Oo0Ooo
if 32 - 32: OoO0O00 % oO0o * I1ii11iIi11i + I11i / I1Ii111
oOo0ooO0O0oo = "H"
OO00OO = struct . calcsize ( oOo0ooO0O0oo )
if ( len ( packet ) < OO00OO ) : return ( None )
if 5 - 5: o0oOOo0O0Ooo + iII111i / OoooooooOO + Ii1I . OoOoOO00 / oO0o
if 18 - 18: II111iiii . o0oOOo0O0Ooo
if 75 - 75: OoooooooOO - Oo0Ooo
if 56 - 56: II111iiii - i11iIiiIii - oO0o . o0oOOo0O0Ooo
oOo00Oo0o00oo = struct . unpack ( oOo0ooO0O0oo , packet [ : OO00OO ] ) [ 0 ]
packet = packet [ OO00OO : : ]
if ( oOo00Oo0o00oo != 0 ) :
self . global_etr_rloc . afi = socket . ntohs ( oOo00Oo0o00oo )
packet = self . global_etr_rloc . unpack_address ( packet )
if ( packet == None ) : return ( None )
self . global_etr_rloc . mask_len = self . global_etr_rloc . host_mask_len ( )
if 4 - 4: i1IIi
if 91 - 91: IiII . OoO0O00 * Ii1I / o0oOOo0O0Ooo
if 41 - 41: I1IiiI . OoO0O00 / i1IIi . Oo0Ooo . oO0o
if 44 - 44: iII111i * I11i + i11iIiiIii + i1IIi / IiII * II111iiii
if 58 - 58: OOooOOo
if 72 - 72: OoO0O00 + OOooOOo - Oo0Ooo % ooOoO0o . IiII
if ( len ( packet ) < OO00OO ) : return ( iIiiII11 )
if 95 - 95: iII111i % OOooOOo - IiII - OoOoOO00 % o0oOOo0O0Ooo * O0
oOo00Oo0o00oo = struct . unpack ( oOo0ooO0O0oo , packet [ : OO00OO ] ) [ 0 ]
packet = packet [ OO00OO : : ]
if ( oOo00Oo0o00oo != 0 ) :
self . global_ms_rloc . afi = socket . ntohs ( oOo00Oo0o00oo )
packet = self . global_ms_rloc . unpack_address ( packet )
if ( packet == None ) : return ( iIiiII11 )
self . global_ms_rloc . mask_len = self . global_ms_rloc . host_mask_len ( )
if 16 - 16: I1Ii111 / Oo0Ooo
if 48 - 48: Oo0Ooo / oO0o + iII111i % iII111i
if 9 - 9: I1ii11iIi11i - o0oOOo0O0Ooo . Oo0Ooo + I1ii11iIi11i . OOooOOo
if 30 - 30: OoooooooOO - iIii1I11I1II1 / oO0o * Ii1I / Ii1I
if 52 - 52: OoOoOO00 - OoO0O00 + I1IiiI + IiII
if ( len ( packet ) < OO00OO ) : return ( iIiiII11 )
if 49 - 49: oO0o / I11i - oO0o
oOo00Oo0o00oo = struct . unpack ( oOo0ooO0O0oo , packet [ : OO00OO ] ) [ 0 ]
packet = packet [ OO00OO : : ]
if ( oOo00Oo0o00oo != 0 ) :
self . private_etr_rloc . afi = socket . ntohs ( oOo00Oo0o00oo )
packet = self . private_etr_rloc . unpack_address ( packet )
if ( packet == None ) : return ( iIiiII11 )
self . private_etr_rloc . mask_len = self . private_etr_rloc . host_mask_len ( )
if 31 - 31: OoOoOO00 + I1IiiI + I1ii11iIi11i + I11i * II111iiii % oO0o
if 90 - 90: OOooOOo * iIii1I11I1II1 / i1IIi
if 60 - 60: OOooOOo * I1Ii111 . oO0o
if 47 - 47: oO0o % OOooOOo / OOooOOo % OoOoOO00 % I1Ii111 / OoOoOO00
if 51 - 51: I1IiiI . I11i - OoOoOO00
if 10 - 10: Oo0Ooo * OOooOOo / IiII . o0oOOo0O0Ooo
while ( len ( packet ) >= OO00OO ) :
oOo00Oo0o00oo = struct . unpack ( oOo0ooO0O0oo , packet [ : OO00OO ] ) [ 0 ]
packet = packet [ OO00OO : : ]
if ( oOo00Oo0o00oo == 0 ) : continue
iI11I1I = lisp_address ( socket . ntohs ( oOo00Oo0o00oo ) , "" , 0 , 0 )
packet = iI11I1I . unpack_address ( packet )
if ( packet == None ) : return ( iIiiII11 )
iI11I1I . mask_len = iI11I1I . host_mask_len ( )
self . rtr_list . append ( iI11I1I )
if 97 - 97: Ii1I . Ii1I % iII111i
return ( iIiiII11 )
if 49 - 49: Oo0Ooo % OOooOOo - OoooooooOO + IiII
if 54 - 54: iIii1I11I1II1 - OoooooooOO / I11i / oO0o % I1IiiI + OoOoOO00
if 26 - 26: OoO0O00 * II111iiii % OOooOOo * iII111i + iII111i
class lisp_nat_info():
    """
    Record for a NAT-traversal client: the translated address, hostname,
    and port learned from an Info-Request, plus the time it was created.
    """

    def __init__(self, addr_str, hostname, port):
        self.address = addr_str
        self.hostname = hostname
        self.port = port
        # Timestamp of creation; timed_out() measures staleness from here.
        self.uptime = lisp_get_timestamp()

    def timed_out(self):
        # Entry is considered stale once two Info intervals have elapsed
        # without being refreshed.
        elapsed = time.time() - self.uptime
        return (elapsed >= (LISP_INFO_INTERVAL * 2))
if 83 - 83: O0
if 35 - 35: i11iIiiIii - I11i . OoOoOO00 * II111iiii % i11iIiiIii
if 55 - 55: o0oOOo0O0Ooo / O0 / OoooooooOO * Oo0Ooo % iII111i
class lisp_info_source():
    """
    State kept for the source of a LISP Info-Request: the IPv4 RLOC it
    arrived from, the source port, and the nonce used to match replies.
    """

    def __init__(self, hostname, addr_str, port):
        # Source RLOC is always stored as a /32 IPv4 address.
        self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
        self.port = port
        self.uptime = lisp_get_timestamp()
        self.nonce = None
        self.hostname = hostname
        self.no_timeout = False

    def cache_address_for_info_source(self):
        # Key combines address and hostname so the same RLOC seen under
        # different hostnames yields distinct cache entries.
        cache_key = self.address.print_address_no_iid() + self.hostname
        lisp_info_sources_by_address[cache_key] = self

    def cache_nonce_for_info_source(self, nonce):
        # Remember the nonce so the matching Info-Reply can find us.
        self.nonce = nonce
        lisp_info_sources_by_nonce[nonce] = self
if 81 - 81: oO0o % OoooooooOO . I1Ii111 - OoOoOO00 / I1IiiI
if 62 - 62: I1Ii111 * I11i / I11i
if 42 - 42: ooOoO0o * ooOoO0o / Ii1I / OOooOOo * OOooOOo
if 92 - 92: Oo0Ooo / iII111i - OoooooooOO - o0oOOo0O0Ooo % ooOoO0o
if 35 | |
<reponame>linwukang/hue
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import logging
import mimetypes
import operator
import os
import parquet
import posixpath
import re
import stat as stat_module
import urllib
from urlparse import urlparse
from bz2 import decompress
from datetime import datetime
from cStringIO import StringIO
from gzip import GzipFile
from django.contrib.auth.models import User, Group
from django.core.paginator import EmptyPage, Paginator, Page, InvalidPage
from django.urls import reverse
from django.template.defaultfilters import stringformat, filesizeformat
from django.http import Http404, StreamingHttpResponse, HttpResponseNotModified, HttpResponseForbidden, HttpResponse, HttpResponseRedirect
from django.views.decorators.http import require_http_methods
from django.views.static import was_modified_since
from django.shortcuts import redirect
from django.utils.functional import curry
from django.utils.http import http_date
from django.utils.html import escape
from django.utils.translation import ugettext as _
from aws.s3.s3fs import S3FileSystemException
from avro import datafile, io
from desktop import appmanager
from desktop.lib import i18n
from desktop.lib.conf import coerce_bool
from desktop.lib.django_util import render, format_preserving_redirect
from desktop.lib.django_util import JsonResponse
from desktop.lib.export_csvxls import file_reader
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.fs import splitpath
from desktop.lib.i18n import smart_str
from desktop.lib.paths import SAFE_CHARACTERS_URI, SAFE_CHARACTERS_URI_COMPONENTS
from desktop.lib.tasks.compress_files.compress_utils import compress_files_in_hdfs
from desktop.lib.tasks.extract_archive.extract_utils import extract_archive_in_hdfs
from desktop.views import serve_403_error
from hadoop.core_site import get_trash_interval
from hadoop.fs.hadoopfs import Hdfs
from hadoop.fs.exceptions import WebHdfsException
from hadoop.fs.fsutils import do_overwrite_save
from filebrowser.conf import ENABLE_EXTRACT_UPLOADED_ARCHIVE, MAX_SNAPPY_DECOMPRESSION_SIZE, SHOW_DOWNLOAD_BUTTON, SHOW_UPLOAD_BUTTON, REDIRECT_DOWNLOAD
from filebrowser.lib.archives import archive_factory
from filebrowser.lib.rwx import filetype, rwx
from filebrowser.lib import xxd
from filebrowser.forms import RenameForm, UploadFileForm, UploadArchiveForm, MkDirForm, EditorForm, TouchForm,\
RenameFormSet, RmTreeFormSet, ChmodFormSet, ChownFormSet, CopyFormSet, RestoreFormSet,\
TrashPurgeForm, SetReplicationFactorForm
from desktop.auth.backend import is_admin
# Default chunk size used when streaming file contents to the client.
DEFAULT_CHUNK_SIZE_BYTES = 1024 * 4 # 4KB
# Largest chunk a single read request may ask for.
MAX_CHUNK_SIZE_BYTES = 1024 * 1024 # 1MB

# Defaults for "xxd"-style output.
# Sentences refer to groups of bytes printed together, within a line.
BYTES_PER_LINE = 16
BYTES_PER_SENTENCE = 2

# The maximum size the file editor will allow you to edit
MAX_FILEEDITOR_SIZE = 256 * 1024

# MIME types eligible for inline display in the browser (used together with
# the exceptions pattern below).
INLINE_DISPLAY_MIMETYPE = re.compile('video/|image/|audio/|application/pdf|application/msword|application/excel|'
                                     'application/vnd\.ms|'
                                     'application/vnd\.openxmlformats')
# Types matched above that must nevertheless NOT be displayed inline.
# NOTE(review): presumably SVG is excluded because it can embed script —
# confirm against the security rationale in the commit history.
INLINE_DISPLAY_MIMETYPE_EXCEPTIONS = re.compile('image/svg\+xml')

# Module-level logger for the filebrowser views.
logger = logging.getLogger(__name__)
class ParquetOptions(object):
    """Options controlling how a Parquet file is rendered for display.

    Attributes:
        col: optional single column to show (None means all columns).
        format: output format name; `format` mirrors the parquet tool's
            flag name, hence the builtin shadowing.
        no_headers: when True, omit the header row from the output.
        limit: maximum number of rows to emit; -1 means no limit.
    """

    def __init__(self, col=None, format='json', no_headers=True, limit=-1):
        self.col, self.format = col, format
        self.no_headers, self.limit = no_headers, limit
def index(request):
    """Filebrowser landing page.

    Shows the user's home directory when it exists; otherwise falls back
    to the filesystem root.
    """
    path = request.user.get_home_directory()
    try:
        home_exists = request.fs.isdir(path)
    except Exception:
        # Filesystem may be unreachable; keep the home path and let
        # view() surface any error.
        home_exists = True
    if not home_exists:
        path = '/'
    return view(request, path)
def download(request, path):
    """
    Downloads a file.

    This is inspired by django.views.static.serve.
    ?disposition={attachment, inline}
    """
    # Paths may arrive percent-encoded from the URL route; decode once.
    decoded_path = urllib.unquote(path)
    if path != decoded_path:
        path = decoded_path
    # Site-wide switch: downloads can be disabled entirely by config.
    if not SHOW_DOWNLOAD_BUTTON.get():
        return serve_403_error(request)
    if not request.fs.exists(path):
        raise Http404(_("File not found: %(path)s.") % {'path': escape(path)})
    if not request.fs.isfile(path):
        raise PopupException(_("'%(path)s' is not a file.") % {'path': path})

    content_type = mimetypes.guess_type(path)[0] or 'application/octet-stream'
    stats = request.fs.stats(path)
    mtime = stats['mtime']
    size = stats['size']
    # Honor If-Modified-Since so unchanged files short-circuit with a 304.
    if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'), mtime, size):
        return HttpResponseNotModified()
    # TODO(philip): Ideally a with statement would protect from leaks, but tricky to do here.
    fh = request.fs.open(path)

    # Verify read permissions on file first: probe one byte so a 403 is
    # reported cleanly before any response is streamed.
    try:
        request.fs.read(path, offset=0, length=1)
    except WebHdfsException, e:
        if e.code == 403:
            raise PopupException(_('User %s is not authorized to download file at path "%s"') %
                                 (request.user.username, path))
        else:
            raise PopupException(_('Failed to download file at path "%s": %s') % (path, e))

    # When the backing store exposes a direct read URL (e.g. object
    # stores), redirect instead of proxying the bytes through Hue.
    if REDIRECT_DOWNLOAD.get() and hasattr(fh, 'read_url'):
        response = HttpResponseRedirect(fh.read_url())
        setattr(response, 'redirect_override', True)
    else:
        response = StreamingHttpResponse(file_reader(fh), content_type=content_type)

    response["Last-Modified"] = http_date(stats['mtime'])
    response["Content-Length"] = stats['size']
    # Inline display is only allowed for whitelisted MIME types; everything
    # else is forced to download as an attachment.
    response['Content-Disposition'] = request.GET.get('disposition', 'attachment; filename="' + stats['name'] + '"') if _can_inline_display(path) else 'attachment'

    # Audit record consumed by Hue's access-logging layer.
    request.audit = {
        'operation': 'DOWNLOAD',
        'operationText': 'User %s downloaded file %s with size: %d bytes' % (request.user.username, path, stats['size']),
        'allowed': True
    }

    return response
def view(request, path):
    """Dispatches viewing of a path to either index() or fileview(), depending on type."""
    # Paths may arrive percent-encoded from the URL route; decode once.
    decoded_path = urllib.unquote(path)
    if path != decoded_path:
        path = decoded_path

    # default_to_home is set in bootstrap.js
    if 'default_to_home' in request.GET:
        home_dir_path = request.user.get_home_directory()
        if request.fs.isdir(home_dir_path):
            return format_preserving_redirect(request, '/filebrowser/view=' + urllib.quote(home_dir_path.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS))

    # default_to_home is set in bootstrap.js
    if 'default_to_trash' in request.GET:
        # Prefer the trash folder under the user's home; fall back to the
        # filesystem-level trash path for this path's scheme.
        home_trash_path = _home_trash_path(request.fs, request.user, path)
        if request.fs.isdir(home_trash_path):
            return format_preserving_redirect(request, '/filebrowser/view=' + urllib.quote(home_trash_path.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS))
        trash_path = request.fs.trash_path(path)
        if request.fs.isdir(trash_path):
            return format_preserving_redirect(request, '/filebrowser/view=' + urllib.quote(trash_path.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS))

    try:
        # Directories get the paged listing; files get the display view.
        stats = request.fs.stats(path)
        if stats.isDir:
            return listdir_paged(request, path)
        else:
            return display(request, path)
    except S3FileSystemException, e:
        msg = _("S3 filesystem exception.")
        if request.is_ajax():
            exception = {
                'error': smart_str(e)
            }
            return JsonResponse(exception)
        else:
            raise PopupException(msg, detail=e)
    except (IOError, WebHdfsException), e:
        msg = _("Cannot access: %(path)s. ") % {'path': escape(path)}
        # Refine the error message for the two common HDFS failure modes:
        # service down vs. insufficient HDFS privileges of a Hue admin.
        if "Connection refused" in e.message:
            msg += _(" The HDFS REST service is not available. ")
        elif request.fs._get_scheme(path).lower() == 'hdfs':
            if is_admin(request.user) and not _is_hdfs_superuser(request):
                msg += _(' Note: you are a Hue admin but not a HDFS superuser, "%(superuser)s" or part of HDFS supergroup, "%(supergroup)s".') \
                    % {'superuser': request.fs.superuser, 'supergroup': request.fs.supergroup}
        if request.is_ajax():
            exception = {
                'error': msg
            }
            return JsonResponse(exception)
        else:
            raise PopupException(msg, detail=e)
def _home_trash_path(fs, user, path):
return fs.join(fs.trash_path(path), 'Current', user.get_home_directory()[1:])
def home_relative_view(request, path):
    """Resolve *path* relative to the user's home directory, then delegate
    to view(). If the home directory does not exist, the path is used as-is.
    """
    # Decode any percent-encoding coming from the URL route.
    path = urllib.unquote(path)
    home_dir_path = request.user.get_home_directory()
    if request.fs.exists(home_dir_path):
        path = '%s%s' % (home_dir_path, path)
    return view(request, path)
def edit(request, path, form=None):
    """Shows an edit form for the given path. Path does not necessarily have to exist."""
    # Paths may arrive percent-encoded from the URL route; decode once.
    decoded_path = urllib.unquote(path)
    if path != decoded_path:
        path = decoded_path

    try:
        stats = request.fs.stats(path)
    except IOError, ioe:
        # A file not found is OK, otherwise re-raise
        if ioe.errno == errno.ENOENT:
            stats = None
        else:
            raise

    # Can't edit a directory
    if stats and stats['mode'] & stat_module.S_IFDIR:
        raise PopupException(_("Cannot edit a directory: %(path)s") % {'path': path})

    # Maximum size of edit
    if stats and stats['size'] > MAX_FILEEDITOR_SIZE:
        raise PopupException(_("File too big to edit: %(path)s") % {'path': path})

    if not form:
        # No form supplied: build one pre-filled with the file's current
        # contents (or empty for a not-yet-existing file).
        encoding = request.GET.get('encoding') or i18n.get_site_encoding()
        if stats:
            f = request.fs.open(path)
            try:
                try:
                    current_contents = unicode(f.read(), encoding)
                except UnicodeDecodeError:
                    # Refuse to open files that aren't valid in the chosen
                    # encoding rather than show mojibake.
                    raise PopupException(_("File is not encoded in %(encoding)s; cannot be edited: %(path)s.") % {'encoding': encoding, 'path': path})
            finally:
                f.close()
        else:
            current_contents = u""

        form = EditorForm(dict(path=path, contents=current_contents, encoding=encoding))

    data = dict(
        exists=(stats is not None),
        path=path,
        filename=os.path.basename(path),
        dirname=os.path.dirname(path),
        breadcrumbs=parse_breadcrumbs(path),
        is_embeddable=request.GET.get('is_embeddable', False),
        show_download_button=SHOW_DOWNLOAD_BUTTON.get())
    if not request.is_ajax():
        # Full-page render also needs the raw stats and the bound form.
        data['stats'] = stats;
        data['form'] = form;
    return render("edit.mako", request, data)
def save_file(request):
"""
The POST endpoint to save a file in the file editor.
Does the save and then redirects back to the edit page.
"""
form = EditorForm(request.POST)
is_valid = form.is_valid()
path = form.cleaned_data.get('path')
decoded_path = urllib.unquote(path)
if path != decoded_path:
path = decoded_path
if request.POST.get('save') == "Save As":
if not is_valid:
return edit(request, path, form=form)
else:
return render("saveas.mako", request, {'form': form})
if not path:
raise PopupException(_("No path specified"))
if not is_valid:
return edit(request, path, form=form)
encoding = form.cleaned_data['encoding']
data = form.cleaned_data['contents'].encode(encoding)
try:
if request.fs.exists(path):
do_overwrite_save(request.fs, path, data)
else:
request.fs.create(path, overwrite=False, data=data)
except WebHdfsException, e:
raise PopupException(_("The file could not be saved"), detail=e.message.splitlines()[0])
except Exception, e:
raise PopupException(_("The file could not be saved"), detail=e)
request.path = reverse("filebrowser_views_edit", kwargs=dict(path=path))
return edit(request, path, form)
def parse_breadcrumbs(path):
    """Build the breadcrumb trail for *path*.

    Returns a list of {'url', 'label'} dicts, one per path component, where
    each 'url' is the percent-encoded cumulative path up to that component.
    """
    breadcrumbs = []
    url = ''
    for part in splitpath(path):
        if url and not url.endswith('/'):
            url += '/'
        url += part
        quoted = urllib.quote(url.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS)
        breadcrumbs.append({'url': quoted, 'label': part})
    return breadcrumbs
def listdir(request, path):
"""
Implements directory listing (or index).
Intended to be called via view().
"""
decoded_path = urllib.unquote(path)
if path != decoded_path:
path = decoded_path
if not request.fs.isdir(path):
raise PopupException(_("Not a directory: %(path)s") % {'path': path})
file_filter = request.GET.get('file_filter', 'any')
assert file_filter in ['any', 'file', 'dir']
home_dir_path = request.user.get_home_directory()
breadcrumbs = parse_breadcrumbs(path)
data = {
'path': path,
'file_filter': file_filter,
'breadcrumbs': breadcrumbs,
'current_dir_path': urllib.quote(path.encode('utf-8'), safe=SAFE_CHARACTERS_URI),
'current_request_path': '/filebrowser/view=' + urllib.quote(path.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS),
'home_directory': home_dir_path if home_dir_path and request.fs.isdir(home_dir_path) else None,
'cwd_set': True,
'is_superuser': request.user.username == request.fs.superuser,
'groups': request.user.username == request.fs.superuser and [str(x) for x in Group.objects.values_list('name', flat=True)] or [],
'users': request.user.username == request.fs.superuser and [str(x) for x in User.objects.values_list('username', flat=True)] or [],
'superuser': request.fs.superuser,
'show_upload': (request.GET.get('show_upload') == 'false' and (False,) or (True,))[0],
'show_download_button': SHOW_DOWNLOAD_BUTTON.get(),
'show_upload_button': SHOW_UPLOAD_BUTTON.get(),
'is_embeddable': request.GET.get('is_embeddable', False),
}
stats = request.fs.listdir_stats(path)
# Include parent dir, unless at filesystem root.
if not request.fs.isroot(path):
parent_path = request.fs.parent_path(path)
parent_stat = request.fs.stats(parent_path)
# The 'path' field would be absolute, but | |
#
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
r"""
This module contains helper functions for constructing :class:`~gpflow.kernels.MultioutputKernel`,
:class:`~gpflow.inducing_variables.MultioutputInducingVariables`,
:class:`~gpflow.mean_functions.MeanFunction`, and :class:`~gpflux.layers.GPLayer` objects.
"""
import inspect
import warnings
from dataclasses import fields
from typing import List, Optional, Type, TypeVar, Union
import numpy as np
import gpflow
from gpflow import default_float
from gpflow.inducing_variables import (
InducingPoints,
SeparateIndependentInducingVariables,
SharedIndependentInducingVariables,
)
from gpflow.kernels import SeparateIndependent, SharedIndependent
from gpflow.utilities import deepcopy
from gpflux.layers.gp_layer import GPLayer
def construct_basic_kernel(
    kernels: Union[gpflow.kernels.Kernel, List[gpflow.kernels.Kernel]],
    output_dim: Optional[int] = None,
    share_hyperparams: bool = False,
) -> gpflow.kernels.MultioutputKernel:
    r"""
    Construct a :class:`~gpflow.kernels.MultioutputKernel` to use
    in :class:`GPLayer`\ s.

    :param kernels: A single kernel or list of :class:`~gpflow.kernels.Kernel`\ s.
        - When a single kernel is passed, the same kernel is used for all
          outputs. Depending on ``share_hyperparams``, the hyperparameters will be
          shared across outputs. You must also specify ``output_dim``.
        - When a list of kernels is passed, each kernel in the list is used on a separate
          output dimension and a :class:`gpflow.kernels.SeparateIndependent` is returned.
    :param output_dim: The number of outputs. This is equal to the number of latent GPs
        in the :class:`GPLayer`. When only a single kernel is specified for ``kernels``,
        you must also specify ``output_dim``. When a list of kernels is specified for ``kernels``,
        we assume that ``len(kernels) == output_dim``, and ``output_dim`` is not required.
    :param share_hyperparams: If `True`, use the type of kernel and the same hyperparameters
        (variance and lengthscales) for the different outputs. Otherwise, the
        same type of kernel (Squared-Exponential, Matern12, and so on) is used for
        the different outputs, but the kernel can have different hyperparameter values for each.
    :raises ValueError: If a single kernel is passed but ``output_dim`` is not specified.
    """
    if isinstance(kernels, list):
        # One kernel per output; hyperparameters are independent by construction.
        return SeparateIndependent(kernels)

    # A single kernel was given: we need output_dim to know how many outputs
    # the multioutput kernel should have. Fail fast with a clear message
    # instead of letting range(None)/SharedIndependent raise a TypeError.
    if output_dim is None:
        raise ValueError(
            "When a single kernel is passed to `construct_basic_kernel`, "
            "`output_dim` must also be specified."
        )

    if share_hyperparams:
        # One kernel object shared by all outputs: hyperparameters are tied.
        return SharedIndependent(kernels, output_dim)

    # Same kernel type for every output, but independently trainable copies.
    copies = [deepcopy(kernels) for _ in range(output_dim)]
    return SeparateIndependent(copies)
def construct_basic_inducing_variables(
    num_inducing: Union[int, List[int]],
    input_dim: int,
    output_dim: Optional[int] = None,
    share_variables: bool = False,
    z_init: Optional[np.ndarray] = None,
) -> gpflow.inducing_variables.MultioutputInducingVariables:
    r"""
    Construct a compatible :class:`~gpflow.inducing_variables.MultioutputInducingVariables`
    to use in :class:`GPLayer`\ s.

    :param num_inducing: The total number of inducing variables, ``M``.
        This parameter can be freely chosen by the user. General advice
        is to set it as high as possible, but smaller than the number of datapoints.
        The computational complexity of the layer is cubic in ``M``.
        If a list is passed, each element in the list specifies the number of inducing
        variables to use for each ``output_dim``.
    :param input_dim: The dimensionality of the input data (or features) ``X``.
        Typically, this corresponds to ``X.shape[-1]``.
        For :class:`~gpflow.inducing_variables.InducingPoints`, this specifies the dimensionality
        of ``Z``.
    :param output_dim: The dimensionality of the outputs (or targets) ``Y``.
        Typically, this corresponds to ``Y.shape[-1]`` or the number of latent GPs.
        The parameter is used to determine the number of inducing variable sets
        to create when a different set is used for each output. The parameter
        is redundant when ``num_inducing`` is a list, because the code assumes
        that ``len(num_inducing) == output_dim``.
    :param share_variables: If `True`, use the same inducing variables for different
        outputs. Otherwise, create a different set for each output. Set this parameter to
        `False` when ``num_inducing`` is a list, because otherwise the two arguments
        contradict each other. If you set this parameter to `True`, you must also specify
        ``output_dim``, because that is used to determine the number of inducing variable
        sets to create.
    :param z_init: Raw values to use to initialise
        :class:`gpflow.inducing_variables.InducingPoints`. If `None` (the default), values
        will be initialised from ``N(0, 1)``. The shape of ``z_init`` depends on the other
        input arguments. If a single set of inducing points is used for all outputs (that
        is, if ``share_variables`` is `True`), ``z_init`` should be rank two, with the
        dimensions ``[M, input_dim]``. If a different set of inducing points is used for
        the outputs (that is, if ``num_inducing`` is a list, or if ``share_variables`` is
        `False`), ``z_init`` should be a rank three tensor with the dimensions
        ``[output_dim, M, input_dim]``.
    :raises ValueError: If ``output_dim`` is required but not given, or if the shape
        of ``z_init`` is inconsistent with the other arguments.
    """
    if z_init is None:
        warnings.warn(
            "No `z_init` has been specified in `construct_basic_inducing_variables`. "
            "Default initialization using random normal N(0, 1) will be used."
        )
    z_init_is_given = z_init is not None

    if isinstance(num_inducing, list):
        if output_dim is not None:
            # TODO: the following assert may clash with MixedMultiOutputFeatures
            # where the number of independent GPs can differ from the output
            # dimension
            assert output_dim == len(num_inducing)  # pragma: no cover
        assert share_variables is False

        inducing_variables = []
        for i, num_ind_var in enumerate(num_inducing):
            if z_init_is_given:
                # Each output's initial Z must match its requested size.
                assert len(z_init[i]) == num_ind_var
                z_init_i = z_init[i]
            else:
                z_init_i = np.random.randn(num_ind_var, input_dim).astype(dtype=default_float())
            assert z_init_i.shape == (num_ind_var, input_dim)
            inducing_variables.append(InducingPoints(z_init_i))
        return SeparateIndependentInducingVariables(inducing_variables)

    elif not share_variables:
        # A single int for num_inducing with separate variables per output:
        # output_dim tells us how many sets to create. Fail fast with a clear
        # message instead of `range(None)` raising a confusing TypeError.
        if output_dim is None:
            raise ValueError(
                "When `num_inducing` is an int and `share_variables` is False, "
                "`output_dim` must be specified."
            )
        inducing_variables = []
        for o in range(output_dim):
            if z_init_is_given:
                if z_init.shape != (output_dim, num_inducing, input_dim):
                    raise ValueError(
                        "When not sharing variables, z_init must have shape"
                        "[output_dim, num_inducing, input_dim]"
                    )
                z_init_o = z_init[o]
            else:
                z_init_o = np.random.randn(num_inducing, input_dim).astype(dtype=default_float())
            inducing_variables.append(InducingPoints(z_init_o))
        return SeparateIndependentInducingVariables(inducing_variables)

    else:
        # TODO: should we assert output_dim is None ?
        if z_init_is_given and z_init.shape != (num_inducing, input_dim):
            raise ValueError(
                "When sharing variables, z_init must have shape [num_inducing, input_dim]"
            )
        z_init = (
            z_init
            if z_init_is_given
            else np.random.randn(num_inducing, input_dim).astype(dtype=default_float())
        )
        shared_ip = InducingPoints(z_init)
        return SharedIndependentInducingVariables(shared_ip)
def construct_mean_function(
    X: np.ndarray, D_in: int, D_out: int
) -> gpflow.mean_functions.MeanFunction:
    """
    Build a mean function for a GP layer mapping ``D_in`` inputs to ``D_out`` outputs.

    When the two dimensionalities coincide, this is simply
    :class:`gpflow.mean_functions.Identity`. Otherwise a fixed (untrainable)
    :class:`~gpflow.mean_functions.Linear` map is constructed: for a reduction in
    dimensionality the weights come from the principal components of ``X``; for an
    increase they are an identity map padded with zero columns.

    .. note::
        The returned linear mean function is set to be untrainable.
        To change this, use :meth:`gpflow.set_trainable`.

    :param X: A data array with the shape ``[N, D_in]`` whose principal components
        seed the linear map when ``D_in != D_out``.
    :param D_in: The dimensionality of the input data (or features) ``X``,
        typically ``X.shape[-1]``.
    :param D_out: The dimensionality of the outputs (or targets) ``Y``,
        typically ``Y.shape[-1]`` or the number of latent GPs in the layer.
    """
    assert X.shape[-1] == D_in
    if D_in == D_out:
        return gpflow.mean_functions.Identity()

    if D_in > D_out:
        # Reduce dimensionality: project onto the leading right-singular
        # vectors (principal directions) of X.
        _, _, v_transpose = np.linalg.svd(X, full_matrices=False)
        weights = v_transpose[:D_out, :].T
    else:
        # Increase dimensionality: identity map padded with zero columns.
        weights = np.concatenate([np.eye(D_in), np.zeros((D_in, D_out - D_in))], axis=1)

    assert weights.shape == (D_in, D_out)
    linear_mean = gpflow.mean_functions.Linear(weights)
    gpflow.set_trainable(linear_mean, False)
    return linear_mean
def construct_gp_layer(
    num_data: int,
    num_inducing: int,
    input_dim: int,
    output_dim: int,
    kernel_class: Type[gpflow.kernels.Stationary] = gpflow.kernels.SquaredExponential,
    z_init: Optional[np.ndarray] = None,
    name: Optional[str] = None,
) -> GPLayer:
    """
    Build a plain GP layer: one kernel shared across all outputs, a single
    shared set of inducing points, and a zero mean function.

    :param num_data: total number of datapoints in the dataset, *N*.
        Typically corresponds to ``X.shape[0] == len(X)``.
    :param num_inducing: total number of inducing variables, *M*.
        This parameter can be freely chosen by the user. General advice
        is to pick it as high as possible, but smaller than *N*.
        The computational complexity of the layer is cubic in *M*.
    :param input_dim: dimensionality of the input data (or features) X.
        Typically, this corresponds to ``X.shape[-1]``.
    :param output_dim: The dimensionality of the outputs (or targets) ``Y``.
        Typically, this corresponds to ``Y.shape[-1]``.
    :param kernel_class: The kernel class used by the layer.
        This can be as simple as :class:`gpflow.kernels.SquaredExponential`, or more complex,
        for example, ``lambda **_: gpflow.kernels.Linear() + gpflow.kernels.Periodic()``.
        It will be passed a ``lengthscales`` keyword argument.
    :param z_init: The initial value for the inducing variable inputs.
    :param name: The name for the GP layer.
    """
    # Initialise every lengthscale to sqrt(input_dim), a common heuristic
    # that keeps the kernel's effective scale stable as dimensionality grows.
    initial_lengthscales = np.full(input_dim, float(input_dim) ** 0.5)
    shared_kernel = construct_basic_kernel(
        kernel_class(lengthscales=initial_lengthscales),
        output_dim=output_dim,
        share_hyperparams=True,
    )
    shared_inducing = construct_basic_inducing_variables(
        num_inducing,
        input_dim,
        output_dim=output_dim,
        share_variables=True,
        z_init=z_init,
    )
    return GPLayer(
        kernel=shared_kernel,
        inducing_variable=shared_inducing,
        num_data=num_data,
        mean_function=gpflow.mean_functions.Zero(),
        name=name,
    )
# Generic type variable used by make_dataclass_from_class below to tie the
# requested dataclass type to the returned instance type.
T = TypeVar("T")
def make_dataclass_from_class(dataclass: Type[T], instance: object, **updates: object) -> T:
"""
Take a regular object ``instance`` with a superset of fields for a
:class:`dataclasses.dataclass` (``@dataclass``-decorated class), and return an
instance of the dataclass. The ``instance`` has all of the | |
<reponame>EiffL/powerbox-jax
"""
A set of tools for dealing with structured boxes, such as those output by :mod:`powerbox`. Tools include those
for averaging a field angularly, and generating the isotropic power spectrum.
"""
from . import dft
import jax
import jax.numpy as np
import warnings
def _getbins(bins, coords, log):
mx = coords.max()
if not np.iterable(bins):
if not log:
bins = np.linspace(coords.min(), mx, bins + 1)
else:
mn = coords[coords>0].min()
bins = np.logspace(np.log10(mn), np.log10(mx), bins + 1)
return bins
def angular_average(field, coords, bins, weights=1, average=True, bin_ave=True, get_variance=False, log_bins=False):
    r"""
    Average a field over radial bins.

    Works for fields of any dimension (memory permitting); the field need not be
    centred at the origin. Each grid cell is assigned wholly to the bin containing
    its co-ordinate (cells are never split across bin edges). Weights and the
    variance of the mean can be computed in the same pass as the average.

    Parameters
    ----------
    field : nd-array
        The field to be angularly averaged.
    coords : nd-array or list of n arrays
        Either the *magnitude* of the co-ordinates at each point of `field`, or a
        list of 1D arrays giving the co-ordinates along each dimension.
    bins : float or array
        Passed through to the histogram machinery: an int (number of bins) or an
        array of radial bin edges.
    weights : array, optional
        Same shape as `field`; a weight for each entry.
    average : bool, optional
        If True, take the (weighted) average; otherwise return the (unweighted) sum.
    bin_ave : bool, optional
        If True, report bin co-ordinates as the (weighted) average of the cells in
        each bin; otherwise report the regularly spaced bin edges.
    get_variance : bool, optional
        If True, also return an estimate of the variance of the power in each bin.
    log_bins : bool, optional
        If True, construct the bins in log-space.

    Returns
    -------
    field_1d : 1D-array
        The angularly-averaged field.
    bins : 1D-array
        Radial co-ordinates of the bins (mean cell co-ordinate or edges,
        depending on `bin_ave`).
    var : 1D-array, optional
        Variance of the averaged field, estimated from the mean standard error.
        Only returned when `get_variance` is True.

    Notes
    -----
    The variance, when requested, is the weight-unbiased ("reliability weights")
    variance — see
    https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights —
    normalised by :math:`V_2/V_1^2` to estimate the variance of the average.

    See Also
    --------
    angular_average_nd : Angular average over a subset of the dimensions.
    """
    if len(coords) == len(field.shape):
        # A list of per-dimension 1D co-ordinate arrays was supplied:
        # collapse it to a grid of radial magnitudes.
        coords = _magnitude_grid(coords)

    indx, bins, sumweight = _get_binweights(
        coords, weights, bins, average, bin_ave=bin_ave, log_bins=log_bins
    )

    if np.any(sumweight == 0):
        warnings.warn("One or more radial bins had no cells within it.")

    averaged = _field_average(indx, field, weights, sumweight)

    if not get_variance:
        return averaged, bins
    variance = _field_variance(indx, field, averaged, weights, sumweight)
    return averaged, bins, variance
def _magnitude_grid(x, dim=None):
if dim is not None:
return np.sqrt(np.sum(np.array(np.meshgrid(*([x ** 2] * dim))), axis=0))
else:
return np.sqrt(np.sum(np.array(np.meshgrid(*([X ** 2 for X in x]))), axis=0))
def _get_binweights(coords, weights, bins, average=True, bin_ave=True, log_bins=False):
    """Assign cells to radial bins and compute per-bin weight sums.

    Returns ``(indx, bins, sumweights)`` where ``indx`` maps each flattened
    cell to a bin (digitize convention: 0 means below the first edge,
    ``len(bins)`` means at/above the last edge), ``bins`` is either the edges
    or the weighted mean cell co-ordinate per bin (see ``bin_ave``), and
    ``sumweights`` is the divisor later used by :func:`_field_average`
    (all ones when ``average`` is False, so the sum is returned unnormalised).
    """
    # Get a vector of bin edges
    bins = _getbins(bins, coords, log_bins)
    indx = np.digitize(coords.flatten(), bins)
    if average or bin_ave:
        if not np.isscalar(weights):
            if coords.shape != weights.shape:
                raise ValueError("coords and weights must have the same shape!")
            # bincount over the digitized indices; [1:-1] drops the two
            # out-of-range bins (below first edge / above last edge).
            sumweights = np.bincount(indx, weights=weights.flatten(), minlength=len(bins)+1)[1:-1]
        else:
            sumweights = np.bincount(indx, minlength=len(bins)+1)[1:-1]
        if average:
            binweight = sumweights
        else:
            # Keep a copy of the true weight sums for the bin-average below,
            # but neutralise the normalisation used by _field_average.
            binweight = 1*sumweights
            sumweights = np.ones_like(binweight)
        if bin_ave:
            # Replace the edges with the weighted mean co-ordinate of the
            # cells that landed in each bin.
            bins = np.bincount(indx, weights=(weights * coords).flatten(), minlength=len(bins)+1)[1:-1] / binweight
    else:
        sumweights = np.ones(len(bins)-1)
    return indx, bins, sumweights
def _field_average(indx, field, weights, sumweights):
if not np.isscalar(weights) and field.shape != weights.shape:
raise ValueError("the field and weights must have the same shape!")
field = field * weights # Leave like this because field is mutable
rl = np.bincount(indx, weights=np.real(field.flatten()), minlength=len(sumweights)+2)[1:-1] / sumweights
if field.dtype.kind == "c":
im = 1j * np.bincount(indx, weights=np.imag(field.flatten()), minlength=len(sumweights)+2)[1:-1] / sumweights
else:
im = 0
return rl + im
def _field_variance(indx, field, average, weights, V1):
    """Estimate the variance of the per-bin weighted averages.

    ``indx`` maps flattened cells to bins, ``average`` is the per-bin mean
    from :func:`_field_average`, and ``V1`` is the per-bin sum of weights.
    Uses the reliability-weights unbiased variance (see the note in
    :func:`angular_average`), then scales by ``V2/V1**2`` to convert the
    per-cell variance into the variance of the bin *mean*.
    """
    if field.dtype.kind == "c":
        raise NotImplementedError("Cannot use a complex field when computing variance, yet.")
    # Create a full flattened array of the same shape as field, with the average in that bin.
    # We have to pad the average vector with 0s on either side to account for cells outside the bin range.
    average_field = np.concatenate(([0],average, [0]))[indx]
    # Create the V2 array (per-bin sum of squared weights).
    if not np.isscalar(weights):
        weights = weights.flatten()
        V2 = np.bincount(indx, weights=weights**2, minlength=len(V1)+2)[1:-1]
    else:
        # NOTE(review): V2 = V1 is exact only for unit scalar weights (the
        # default weights=1); for another scalar w, sum(w^2) = w*V1 — confirm
        # callers never pass a non-unit scalar weight.
        V2 = V1
    field = (field.flatten()-average_field)**2 * weights
    # This res is the estimated variance of each cell in the bin
    res = np.bincount(indx, weights=field, minlength=len(V1)+2)[1:-1] / (V1 - V2/V1)
    # Modify to the estimated variance of the sum of the cells in the bin.
    res *= V2 / V1**2
    return res
def angular_average_nd(field, coords, bins, n=None, weights=1, average=True, bin_ave=True, get_variance=False,
log_bins=False):
"""
Average the first n dimensions of a given field within radial bins.
This function be used to take "hyper-cylindrical" averages of fields. For a 3D field, with `n=2`, this is exactly
a cylindrical average. This function can be used in fields of arbitrary dimension (memory permitting), and the field
need not be centred at the origin. The averaging assumes that the grid cells fall completely into the bin which
encompasses the co-ordinate point for the cell (i.e. there is no weighted splitting of cells if they intersect a bin
edge).
It is optimized for applying a set of weights, and obtaining the variance of the mean, at the same time as
averaging.
Parameters
----------
field : md-array
An array of arbitrary dimension specifying the field to be angularly averaged.
coords : list of n arrays
A list of 1D arrays specifying the co-ordinates in each dimension *to be average*.
bins : int or array.
Specifies the radial bins for the averaged dimensions. Can be an int or array specifying radial bin edges.
n : int, optional
The number of dimensions to be averaged. By default, all dimensions are averaged. Always uses
the first `n` dimensions.
weights : array, optional
An array of the same shape as the first `n` dimensions of `field`, giving a weight for each entry.
average : bool, optional
Whether to take the (weighted) average. If False, returns the (unweighted) sum.
bin_ave : bool, optional
Whether to return the bin co-ordinates as the (weighted) average of cells within the bin (if True), or
the linearly spaced edges of the bins
get_variance : bool, optional
Whether to also return an estimate of the variance of the power in each bin.
log_bins : bool, optional
Whether to create bins in log-space.
Returns
-------
field : (m-n+1)-array
The angularly-averaged field. The first dimension corresponds to `bins`, while the rest correspond to the
unaveraged dimensions.
bins : 1D-array
The radial co-ordinates of the bins. Either the mean co-ordinate from the input data, or the regularly spaced
bins, dependent on `bin_ave`.
var : (m-n+1)-array, optional
The variance of the averaged field (same shape as `field`), estimated from the mean standard error.
Only returned if `get_variance` is True.
Examples
--------
Create a 3D radial function, and average over radial bins. Equivalent to calling :func:`angular_average`:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
| |
from py_db import db
from decimal import Decimal
import NSBL_helpers as helper
from datetime import datetime
from time import time
import numpy as np
import argparse
import math
from scipy.stats import norm as NormDist, binom as BinomDist
# script that produces in-playoffs probability charts
db = db('NSBL')
def process(year):
start_time = time()
timestamp = datetime.now()
populate_bracket(year, timestamp)
current_series(year, timestamp)
get_playoff_teams(year, timestamp)
process_wc(year, timestamp)
process_ds(year, timestamp)
process_cs(year, timestamp)
process_ws(year, timestamp)
process_champion(year, timestamp)
end_time = time()
elapsed_time = float(end_time - start_time)
print "in_playoff_probabilities.py"
print "time elapsed (in seconds): " + str(elapsed_time)
print "time elapsed (in minutes): " + str(elapsed_time/60.0)
def populate_bracket(year, timestamp):
    """Snapshot the current playoff bracket into __in_playoff_bracket.

    For every league/series combination with recorded games, write one row per
    participating team per strength type, carrying the team's current series
    wins/losses, the snapshot `timestamp`, and the number of playoff games
    played so far (used to version snapshots).
    """
    print '\tpopulating __in_playoff_bracket'
    games_query = "SELECT IFNULL(SUM(IF(winning_team IS NOT NULL,1,0)),0) FROM __in_playoff_game_results WHERE year = %s;" % (year)
    total_playoff_games_played = db.query(games_query)[0][0]
    # '' covers series with no league prefix (the World Series).
    for lg in ('AL', 'NL', ''):
        for series in ('WC', 'DS1', 'DS2', 'CS', 'WS'):
            series_id = lg+series
            qry = """SELECT teamA, teamB,
            SUM(IF(winning_team=teamA,1,0)) as teamA_wins,
            SUM(IF(winning_team=teamB,1,0)) as teamB_wins
            FROM __in_playoff_game_results
            WHERE series_id = '%s'
            AND year = %s;"""
            query = qry % (series_id, year)
            res = db.query(query)
            # The aggregate query always returns one row; teamA is NULL when
            # the series has no recorded games yet, so skip those.
            if res[0][0] is not None:
                teamA, teamB, teamA_wins, teamB_wins = res[0]
                for strength_type in ('roster', 'projected'):
                    entries = []
                    # One bracket row per team, with win/loss counts mirrored.
                    for tm in (teamA, teamB):
                        entry = {'update_time':timestamp, 'series_id':series_id, 'year':year,'strength_type':strength_type, 'total_playoff_games_played':total_playoff_games_played}
                        if tm == teamA:
                            entry['team'] = teamA
                            entry['opponent'] = teamB
                            entry['series_wins'] = teamA_wins
                            entry['series_losses'] = teamB_wins
                        elif tm == teamB:
                            entry['team'] = teamB
                            entry['opponent'] = teamA
                            entry['series_wins'] = teamB_wins
                            entry['series_losses'] = teamA_wins
                        entries.append(entry)
                    # replace=True overwrites a same-key snapshot if re-run.
                    db.insertRowDict(entries, '__in_playoff_bracket', insertMany=True, replace=True, rid=0,debug=1)
                    db.conn.commit()
def current_series(year, timestamp):
    """For every team in the latest bracket snapshot, compute the probability
    of winning its current series (and of the series ending in N games), and
    write the results back to __in_playoff_bracket.
    """
    print '\tdetermining current series probabilities'
    games_query = "SELECT IFNULL(SUM(IF(winning_team IS NOT NULL,1,0)),0) FROM __in_playoff_game_results WHERE year = %s;" % (year)
    total_playoff_games_played = db.query(games_query)[0][0]
    qry = """SELECT
    series_id, year, strength_type,
    team, opponent,
    series_wins, series_losses
    FROM __in_playoff_bracket
    WHERE update_time = (SELECT MAX(update_time) FROM __in_playoff_bracket)
    AND year = %s;"""
    query = qry % (year)
    res = db.query(query)
    for row in res:
        series_id, year, strength_type, team, opponent, series_wins, series_losses = row
        # Strip the league prefix to get the series type ('WC','DS','CS','WS').
        series_type = series_id.replace('AL','').replace('NL','')[:2]
        games_dict = {'WC':1, 'DS':5, 'CS':7, 'WS':7}
        series_games = games_dict.get(series_type)
        team_abb = helper.get_team_abb(team, year)
        oppn_abb = helper.get_team_abb(opponent, year)
        team_winProb = get_single_game_win_prob(team_abb, oppn_abb, strength_type, year)
        entry = {'update_time':timestamp, 'series_id':series_id, 'year':year, 'team':team, 'opponent':opponent, 'series_wins':series_wins, 'series_losses':series_losses, 'strength_type':strength_type, 'team_winProb':team_winProb, 'total_playoff_games_played':total_playoff_games_played}
        team_probs = []
        # Python 2 integer division: series_games/2+1 is the clinch count
        # (1 for WC, 3 for DS, 4 for CS/WS).
        if series_wins == series_games/2+1:
            # Series already won.
            team_probs.append(1)
            total_games = series_wins+series_losses
            if total_games > 2:
                colName = 'team_in'+str(total_games)
                entry[colName] = 1
        if series_losses == series_games/2+1:
            # Series already lost.
            team_probs.append(0)
        if (series_wins != series_games/2+1 and series_losses != series_games/2+1):
            # Series still live: for each possible clinching game, probability
            # of winning exactly the remaining needed-minus-one games among the
            # preceding remaining games, times winning the clincher itself.
            for end_game in range(series_games/2+1, series_games+1-series_losses):
                team_in_N = BinomDist.pmf(n=end_game-1-series_wins, k=(series_games/2-series_wins), p=team_winProb) * team_winProb
                col_name = 'team_in'+str(end_game+series_losses)
                team_probs.append(team_in_N)
                if end_game > 2:
                    entry[col_name] = team_in_N
        entry['team_seriesProb'] = sum(team_probs)
        db.insertRowDict(entry, '__in_playoff_bracket', insertMany=False, replace=True, rid=0,debug=1)
        db.conn.commit()
def get_playoff_teams(year, timestamp):
print '\tgetting playoff teams'
games_query = "SELECT IFNULL(SUM(IF(winning_team IS NOT NULL,1,0)),0) FROM __in_playoff_game_results WHERE year = %s;" % (year)
total_playoff_games_played = db.query(games_query)[0][0]
entries = []
for strength_type in ('projected', 'roster'):
qry = """SELECT
team_abb
, team_name
, division
, CASE
WHEN year = 2020 AND team_abb = 'NYN'
THEN 0
WHEN year = 2020 AND team_abb = 'Ari'
THEN 1
ELSE top_seed
END AS top_seed
, win_division
, (wc_1+wc_2)
FROM __playoff_probabilities
JOIN (SELECT team_abb, MAX(year) AS year, MAX(games_played) AS games_played FROM __playoff_probabilities GROUP BY team_abb, year) t2 USING (team_abb, year, games_played)
WHERE strength_type = 'projected'
AND (win_division+wc_1+wc_2 = 1.000)
AND year = %s;"""
query = qry % (year)
res = db.query(query)
for row in res:
team_abb, team_name, division, top_seed, win_division, wild_card = row
entry = {'update_time':timestamp, 'year':year, 'team_name':team_name, 'team_abb':team_abb, 'strength_type':strength_type, 'total_playoff_games_played':total_playoff_games_played, 'division':division, 'top_seed':top_seed, 'win_division':win_division, 'wild_card':wild_card}
strength_qry = """SELECT
strength_pct
FROM __playoff_probabilities
JOIN (SELECT team_abb, MAX(year) AS year, MAX(games_played) AS games_played FROM __playoff_probabilities GROUP BY team_abb, year) t2 USING (team_abb, year, games_played)
WHERE strength_type = '%s'
AND team_name = '%s'
AND year = %s;"""
strength_query = strength_qry % (strength_type, team_name, year)
strength_pct = db.query(strength_query)
entry['strength_pct'] = strength_pct
entries.append(entry)
db.insertRowDict(entries, '__in_playoff_probabilities', insertMany=True, replace=True, rid=0,debug=1)
db.conn.commit()
def process_wc(year, timestamp):
    """Compute each wild-card team's probability of winning its wild-card
    game and store it as win_wc in __in_playoff_probabilities.
    """
    print '\tdetermining wild card winners'
    teams_query = """SELECT year, team_name, team_abb, division, wild_card, total_playoff_games_played, strength_type
    FROM __in_playoff_probabilities
    WHERE update_time = (SELECT MAX(update_time) FROM __in_playoff_probabilities)
    AND wild_card != 0;"""
    res = db.query(teams_query)
    for row in res:
        year, team_name, team_abb, division, wild_card, total_playoff_games_played, strength_type = row
        # League prefix ('AL'/'NL') taken from the division name.
        lg = division[:2]
        # Possible wild-card opponents: the same league's other WC teams.
        oppn_qry = """SELECT team_name, team_abb, division, wild_card, total_playoff_games_played, strength_type
        FROM __in_playoff_probabilities
        WHERE update_time = (SELECT MAX(update_time) FROM __in_playoff_probabilities)
        AND wild_card != 0
        AND year = %s
        AND total_playoff_games_played = %s
        AND left(division,2) = '%s'
        AND strength_type = '%s'
        AND team_name != '%s';"""
        oppn_query = oppn_qry % (year, total_playoff_games_played, lg, strength_type, team_name)
        oppns = db.query(oppn_query)
        win_wc = []
        for oppn in oppns:
            # Trailing columns are unused; 'foo' is a throwaway name.
            oppn_name, oppn_abb, oppn_division, oppn_wild_card, foo, foo = oppn
            # With a single opponent the matchup is certain.
            matchup_prob = 1
            series_id = '%sWC' % (lg)
            series_wins, series_losses = get_series_data(series_id, team_name, oppn_name, strength_type)
            team_winProb = get_single_game_win_prob(team_abb, oppn_abb, strength_type, year)
            # Wild card is a single-game series.
            series_games = 1
            series_prob = get_series_prob(series_games, series_wins, series_losses, team_winProb)
            win_wc.append(matchup_prob*series_prob)
        win_wc = sum(win_wc)
        db.updateRow({'win_wc':win_wc},"__in_playoff_probabilities",("team_name","year","total_playoff_games_played","strength_type"),(team_name,year,total_playoff_games_played,strength_type),operators=['=','=','=','='])
        db.conn.commit()
def process_ds(year, timestamp):
print "\tmake division series"
query = """SELECT year, team_name, total_playoff_games_played, strength_type,
win_division+IFNULL(win_wc,0)
FROM __in_playoff_probabilities
WHERE update_time = (SELECT MAX(update_time) FROM __in_playoff_probabilities);"""
res = db.query(query)
for row in res:
year, team_name, total_playoff_games_played, strength_type, make_ds = row
db.updateRow({'make_ds':make_ds},"__in_playoff_probabilities",("team_name","year","total_playoff_games_played","strength_type"),(team_name,year,total_playoff_games_played,strength_type),operators=['=','=','=','='])
db.conn.commit()
def process_cs(year, timestamp):
    """Compute make_cs: the probability of winning the division series and
    reaching the championship series, summed over possible DS opponents
    weighted by the probability of each matchup occurring.
    """
    print "\tmake championship series"
    team_query = """SELECT team_abb, team_name, year, strength_type, total_playoff_games_played,
    division, top_seed, win_division, IFNULL(win_wc,0)
    FROM __in_playoff_probabilities
    WHERE update_time = (SELECT MAX(update_time) FROM __in_playoff_probabilities);"""
    # raw_input(team_query)
    team_res = db.query(team_query)
    # NOTE(review): cs_dict is never used below — appears to be leftover.
    cs_dict = {}
    for team_row in team_res:
        team_abb, team_name, year, strength_type, total_playoff_games_played, team_division, top_seed, win_div, win_wc = team_row
        # Probability of entering as the 2/3 seed: won the division but not
        # the top seed.
        win_2_3_seed = float(win_div)-float(top_seed)
        lg = team_division[:2]
        # Same-league opponents; middle_seed only counts teams from a
        # different division (the top seed plays the WC winner, 2/3 seeds
        # play each other across divisions).
        oppn_qry = """SELECT team_abb, team_name, top_seed, win_division, IFNULL(win_wc,0),
        IF(division != '%s', win_division-top_seed, 0) as 'middle_seed'
        FROM __in_playoff_probabilities
        WHERE update_time = (SELECT MAX(update_time) FROM __in_playoff_probabilities)
        AND total_playoff_games_played = %s
        AND strength_type = '%s'
        AND LEFT(division,2) = '%s'
        AND team_name != '%s'
        AND year = %s;"""
        oppn_query = oppn_qry % (team_division, total_playoff_games_played, strength_type, lg, team_name, year)
        # raw_input(oppn_query)
        oppn_res = db.query(oppn_query)
        make_cs = []
        for oppn_row in oppn_res:
            oppn_abb, oppn_name, oppn_top, oppn_div, oppn_wc, oppn_2_3_seed = oppn_row
            # probability of top_seed * (oppn_wc | not wc); guard against
            # division by zero when this team is certain to win the WC.
            if (1.0-float(win_wc)) == 0:
                matchup1_prob = 0.0
            else:
                matchup1_prob = float(top_seed)*float(oppn_wc)/(1.0-float(win_wc))
            # probability of wc * (oppn_top_seed | not top_seed)
            if (1.0-float(top_seed)) == 0:
                matchup2_prob = 0.0
            else:
                matchup2_prob = float(win_wc)*float(oppn_top)/(1.0-float(top_seed))
            # Both teams enter as 2/3 division-winner seeds.
            matchup3_prob = float(win_2_3_seed)*float(oppn_2_3_seed)
            matchup_prob = matchup1_prob + matchup2_prob + matchup3_prob
            series_id = '%sDS' % (lg)
            series_wins, series_losses = get_series_data(series_id, team_name, oppn_name, strength_type)
            team_winProb = get_single_game_win_prob(team_abb, oppn_abb, strength_type, year)
            # Division series is best-of-5.
            series_games = 5
            series_prob = get_series_prob(series_games, series_wins, series_losses, team_winProb)
            # print oppn_name, matchup1_prob, matchup2_prob, matchup3_prob, series_prob
            make_cs.append(matchup_prob*series_prob)
        make_cs = sum(make_cs)
        db.updateRow({'make_cs':make_cs},"__in_playoff_probabilities",("team_name","year","total_playoff_games_played","strength_type"),(team_name,year,total_playoff_games_played,strength_type),operators=['=','=','=','='])
        db.conn.commit()
def process_ws(year, timestamp):
    """Compute make_ws: the probability of winning the championship series,
    summed over same-league CS opponents. A matchup requires the two teams to
    come from opposite halves of the bracket (top round vs middle round).
    """
    print "\tmake world series"
    team_query = """SELECT team_abb, team_name, year, total_playoff_games_played, strength_type, division,
    IF(top_seed + IFNULL(win_wc,0) > 0, 1, 0) as 'top_round',
    IF(win_division-top_seed > 0, 1, 0) as 'middle_round',
    make_cs
    FROM __in_playoff_probabilities
    WHERE update_time = (SELECT MAX(update_time) FROM __in_playoff_probabilities);"""
    # raw_input(team_query)
    team_res = db.query(team_query)
    # NOTE(review): ws_dict is never used below — appears to be leftover.
    ws_dict = {}
    for team_row in team_res:
        team_abb, team_name, year, total_playoff_games_played, strength_type, team_division, top_round, middle_round, make_cs = team_row
        lg = team_division[:2]
        oppn_qry = """SELECT team_abb, team_name,
        IF(top_seed + IFNULL(win_wc,0) > 0, 1, 0) as 'top_round',
        IF(win_division-top_seed > 0, 1, 0) as 'middle_round',
        make_cs
        FROM __in_playoff_probabilities
        WHERE update_time = (SELECT MAX(update_time) FROM __in_playoff_probabilities)
        AND total_playoff_games_played = %s
        AND strength_type = '%s'
        AND LEFT(division,2) = '%s'
        AND team_name != '%s'
        AND year = %s;"""
        oppn_query = oppn_qry % (total_playoff_games_played, strength_type, lg, team_name, year)
        # raw_input(oppn_query)
        oppn_res = db.query(oppn_query)
        make_ws = []
        for oppn_row in oppn_res:
            oppn_abb, oppn_name, oppn_top_round, oppn_middle_round, oppn_cs = oppn_row
            # Both teams must make the CS, and they only meet when one comes
            # from the top half of the bracket and the other from the middle.
            matchup_prob = float(make_cs)*float(oppn_cs)*(float(top_round)*float(oppn_middle_round) + float(middle_round)*float(oppn_top_round))
            series_id = '%sCS' % (lg)
            series_wins, series_losses = get_series_data(series_id, team_name, oppn_name, strength_type)
            team_winProb = get_single_game_win_prob(team_abb, oppn_abb, strength_type, year)
            # Championship series is best-of-7.
            series_games = 7
            series_prob = get_series_prob(series_games, series_wins, series_losses, team_winProb)
            make_ws.append(matchup_prob*series_prob)
        make_ws = sum(make_ws)
        db.updateRow({'make_ws':make_ws},"__in_playoff_probabilities",("team_name","year","total_playoff_games_played","strength_type"),(team_name,year,total_playoff_games_played,strength_type),operators=['=','=','=','='])
        db.conn.commit()
def process_champion(year, timestamp):
pass
print "\twin world series"
team_query = """SELECT team_abb, team_name, year, total_playoff_games_played, strength_type, division,
make_ws
FROM __in_playoff_probabilities
WHERE update_time = (SELECT MAX(update_time) FROM __in_playoff_probabilities);"""
# raw_input(team_query)
team_res = db.query(team_query)
for team_row in team_res:
team_abb, team_name, year, total_playoff_games_played, strength_type, team_division, make_ws = team_row
lg = team_division[:2]
oppn_qry = """SELECT team_abb, team_name,
make_ws
FROM __in_playoff_probabilities
WHERE update_time = (SELECT MAX(update_time) FROM __in_playoff_probabilities)
AND total_playoff_games_played = %s
| |
noted below.
Attributes:
success_message (str): Message to display on successful deletion.
success_url (str): URL to redirect to on successful deletion.
"""
model = models.PlanList
permission_required = 'subscriptions.subscriptions'
raise_exception = True
context_object_name = 'plan_list'
pk_url_kwarg = 'plan_list_id'
success_message = 'Plan list successfully deleted'
success_url = reverse_lazy('dfs_plan_list_list')
template_name = 'subscriptions/plan_list_delete.html'
def delete(self, request, *args, **kwargs):
"""Override delete to allow success message to be added."""
messages.success(self.request, self.success_message)
return super(PlanListDeleteView, self).delete(request, *args, **kwargs)
# PlanListDetail Views
# -----------------------------------------------------------------------------
class PlanListDetailListView(PermissionRequiredMixin, abstract.DetailView):
    """Detail view of a single PlanList showing its attached details.

    Despite the name, this is a ``DetailView`` keyed on the
    ``plan_list_id`` URL kwarg; the template is expected to render the
    list of detail entries belonging to that one plan list.
    """
    # Object resolved from the ``plan_list_id`` URL keyword argument.
    model = models.PlanList
    pk_url_kwarg = 'plan_list_id'
    # Permission gate: return 403 instead of redirecting to login.
    permission_required = 'subscriptions.subscriptions'
    raise_exception = True
    context_object_name = 'plan_list'
    template_name = 'subscriptions/plan_list_detail_list.html'
class PlanListDetailCreateView(
    PermissionRequiredMixin, SuccessMessageMixin, abstract.CreateView
):
    """Create a new detail entry on an existing plan list."""

    model = models.PlanListDetail
    permission_required = 'subscriptions.subscriptions'
    raise_exception = True
    fields = [
        'plan', 'plan_list', 'html_content', 'subscribe_button_text', 'order'
    ]
    success_message = 'Subscription plan successfully added to plan list'
    template_name = 'subscriptions/plan_list_detail_create.html'

    def get_context_data(self, **kwargs):
        """Attach the parent PlanList instance to the template context."""
        context = super().get_context_data(**kwargs)
        parent_id = self.kwargs.get('plan_list_id')
        context['plan_list'] = get_object_or_404(models.PlanList, id=parent_id)
        return context

    def get_success_url(self):
        """Redirect back to the detail listing of the parent plan list."""
        return reverse_lazy(
            'dfs_plan_list_detail_list',
            kwargs={'plan_list_id': self.kwargs['plan_list_id']},
        )
class PlanListDetailUpdateView(
    PermissionRequiredMixin, SuccessMessageMixin, abstract.UpdateView
):
    """Edit an existing detail entry of a plan list."""

    model = models.PlanListDetail
    pk_url_kwarg = 'plan_list_detail_id'
    permission_required = 'subscriptions.subscriptions'
    raise_exception = True
    fields = [
        'plan', 'plan_list', 'html_content', 'subscribe_button_text', 'order'
    ]
    success_message = 'Plan list details successfully updated'
    template_name = 'subscriptions/plan_list_detail_update.html'

    def get_context_data(self, **kwargs):
        """Attach the parent PlanList instance to the template context."""
        context = super().get_context_data(**kwargs)
        parent_id = self.kwargs.get('plan_list_id')
        context['plan_list'] = get_object_or_404(models.PlanList, id=parent_id)
        return context

    def get_success_url(self):
        """Redirect back to the detail listing of the parent plan list."""
        return reverse_lazy(
            'dfs_plan_list_detail_list',
            kwargs={'plan_list_id': self.kwargs['plan_list_id']},
        )
class PlanListDetailDeleteView(PermissionRequiredMixin, abstract.DeleteView):
    """Remove a subscription plan from a plan list.

    Extends the stock DeleteView with the attributes below.

    Attributes:
        success_message (str): Message to display on successful deletion.
        success_url (str): URL to redirect to on successful deletion.
    """

    model = models.PlanListDetail
    permission_required = 'subscriptions.subscriptions'
    raise_exception = True
    context_object_name = 'plan_list_detail'
    pk_url_kwarg = 'plan_list_detail_id'
    success_message = 'Subscription plan successfully removed from plan list'
    template_name = 'subscriptions/plan_list_detail_delete.html'

    def get_context_data(self, **kwargs):
        """Attach the parent PlanList instance to the template context."""
        context = super().get_context_data(**kwargs)
        parent_id = self.kwargs.get('plan_list_id')
        context['plan_list'] = get_object_or_404(models.PlanList, id=parent_id)
        return context

    def delete(self, request, *args, **kwargs):
        """Queue the success message, then delegate the actual delete."""
        messages.success(request, self.success_message)
        return super().delete(request, *args, **kwargs)

    def get_success_url(self):
        """Redirect back to the detail listing of the parent plan list."""
        return reverse_lazy(
            'dfs_plan_list_detail_list',
            kwargs={'plan_list_id': self.kwargs['plan_list_id']},
        )
# Subscribe Views
# -----------------------------------------------------------------------------
class SubscribeList(abstract.TemplateView):
    """User-facing subscription page built from the first active PlanList.

    Page content is customizable through the PlanList and
    PlanListDetail models.
    """

    context_object_name = 'plan_list'
    template_name = 'subscriptions/subscribe_list.html'

    def get(self, request, *args, **kwargs):
        """Render the subscribe page, or 404 when no list is active."""
        # First active plan list drives the whole page
        plan_list = models.PlanList.objects.filter(active=True).first()
        # Plan details for template display (queryset is lazy, so no
        # query runs if we bail out below)
        details = models.PlanListDetail.objects.filter(
            plan_list=plan_list, plan__costs__isnull=False
        ).order_by('order')

        if not plan_list:
            return HttpResponseNotFound('No subscription plans are available')

        context = self.get_context_data(plan_list=plan_list, details=details)
        return TemplateResponse(request, self.template_name, context)

    def get_context_data(self, **kwargs):
        """Expose the plan list and its detail entries to the template."""
        context = super().get_context_data(**kwargs)
        context['plan_list'] = kwargs['plan_list']
        context['details'] = kwargs['details']
        return context
class SubscribeView(LoginRequiredMixin, abstract.TemplateView):
    """View to handle all aspects of the subscribing process.

    This view will need to be subclassed and some methods
    overridden to implement the payment solution.

    Additionally, this view is extended from a TemplateView with
    the additional attributes noted below.

    Attributes:
        payment_form (obj): Django Form to handle subscription
            payment.
        subscription_plan (obj): A SubscriptionPlan instance. Will
            be set by methods during processing.
        success_url (str): URL to redirect to on successful
            creation.
        template_preview (str): Path to HTML template for the
            preview view.
        template_confirmation (str): Path to HTML template for the
            confirmation view.

    Notes:
        View only accessible via POST requests. The request must
        include an ID to a SubscriptionPlan +/- associated PlanCost
        instance (if past the preview view).
    """
    # Tracks whether we are on the preview (False) or confirmation
    # (True) stage; toggled by the render_* methods below.
    confirmation = False
    payment_form = forms.PaymentForm
    subscription_plan = None
    success_url = 'dfs_subscribe_thank_you'
    template_preview = 'subscriptions/subscribe_preview.html'
    template_confirmation = 'subscriptions/subscribe_confirmation.html'

    def get_object(self):
        """Gets the subscription plan object from the POSTed plan_id."""
        return get_object_or_404(
            models.SubscriptionPlan, id=self.request.POST.get('plan_id', None)
        )

    def get_context_data(self, **kwargs):
        """Overriding get_context_data to add additional context."""
        context = super(SubscribeView, self).get_context_data(**kwargs)
        # Whether this is a preview or confirmation step
        context['confirmation'] = self.confirmation
        # The plan instance to use for generating plan details
        context['plan'] = self.subscription_plan
        return context

    def get_template_names(self):
        """Returns the proper template name based on payment stage."""
        conf_templates = [self.template_confirmation]
        prev_templates = [self.template_preview]
        return conf_templates if self.confirmation else prev_templates

    def get_success_url(self, **kwargs):
        """Returns the success URL."""
        return reverse_lazy(self.success_url, kwargs=kwargs)

    def get(self, request, *args, **kwargs):
        """Rejects GET requests; view is POST-only (returns HTTP 405)."""
        return HttpResponseNotAllowed(['POST'])

    def post(self, request, *args, **kwargs):
        """Handles all POST requests to the SubscribeView.

        The 'action' POST argument is used to determine which
        context to render.

        Notes:
            The ``action`` POST parameter determines what stage to
            progress view to. ``None`` directs to preview
            processing, ``confirm`` directs to confirmation
            processing, and ``process`` directs to payment and
            subscription processing.
        """
        # Get the subscription plan for this POST
        self.subscription_plan = self.get_object()
        # Determine POST action and direct to proper function
        post_action = request.POST.get('action', None)
        if post_action == 'confirm':
            return self.render_confirmation(request)
        if post_action == 'process':
            return self.process_subscription(request)
        # No action - assumes payment details need to be collected
        return self.render_preview(request)

    def render_preview(self, request, **kwargs):
        """Renders preview of subscription and collect payment details."""
        self.confirmation = False
        context = self.get_context_data(**kwargs)
        # Forms to collect subscription details; on a validation error
        # re-bind the submitted data so errors are displayed
        if 'error' in kwargs:
            plan_cost_form = forms.SubscriptionPlanCostForm(
                request.POST, subscription_plan=self.subscription_plan
            )
            payment_form = self.payment_form(request.POST)
        else:
            plan_cost_form = forms.SubscriptionPlanCostForm(
                initial=request.POST, subscription_plan=self.subscription_plan
            )
            payment_form = self.payment_form(initial=request.POST)
        # NOTE(review): payment_form is built but never passed to the
        # template while the payment step below stays commented out.
        context['plan_cost_form'] = plan_cost_form
        # context['payment_form'] = payment_form
        return self.render_to_response(context)

    def render_confirmation(self, request, **kwargs):
        """Renders a confirmation page before processing payment.

        If forms are invalid will return to preview view for user
        to correct errors.
        """
        # Retrieve form details
        plan_cost_form = forms.SubscriptionPlanCostForm(
            request.POST, subscription_plan=self.subscription_plan
        )
        # payment_form = self.payment_form(request.POST)
        # Validate form submission
        if all([plan_cost_form.is_valid()]):
            self.confirmation = True
            context = self.get_context_data(**kwargs)
            # Forms to process payment (hidden to prevent editing)
            context['plan_cost_form'] = self.hide_form(plan_cost_form)
            # context['payment_form'] = self.hide_form(payment_form)
            # Add the PlanCost instance to context for use in template
            context['plan_cost'] = plan_cost_form.cleaned_data['plan_cost']
            return self.render_to_response(context)
        # Invalid form submission - render preview again
        kwargs['error'] = True
        return self.render_preview(request, **kwargs)

    def process_subscription(self, request, **kwargs):
        """Moves forward with payment & subscription processing.

        If forms are invalid will move back to confirmation page
        for user to correct errors.
        """
        # Validate payment details again in case anything changed
        plan_cost_form = forms.SubscriptionPlanCostForm(
            request.POST, subscription_plan=self.subscription_plan
        )
        # payment_form = self.payment_form(request.POST)
        if all([plan_cost_form.is_valid()]):
            # Attempt to process payment
            payment_transaction = self.process_payment(
                # payment_form=payment_form,
                plan_cost_form=plan_cost_form,
            )
            if payment_transaction:
                # Payment successful - can handle subscription processing
                subscription = UserSubscription.setup_subscription(
                    request.user, plan_cost_form.cleaned_data['plan_cost'], self.subscription_plan.group
                )
                # Record the transaction details
                transaction = self.record_transaction(
                    subscription,
                    # self.retrieve_transaction_date(payment_transaction)
                )
                if transaction:
                    # Hand the user off to the Zarinpal payment gateway
                    return redirect('https://www.zarinpal.com/pg/StartPay/' + str(transaction.reference))
                # return HttpResponseRedirect(
                #     self.get_success_url(transaction_id=transaction.id)
                # )
            # Payment unsuccessful, add message for confirmation page
            messages.error(request, 'Error processing payment')
        # Invalid form submission/payment - render preview again
        return self.render_confirmation(request, **kwargs)

    def hide_form(self, form):
        """Replaces form widgets with hidden inputs.

        Parameters:
            form (obj): A form instance.

        Returns:
            obj: The modified form instance.
        """
        for _, field in form.fields.items():
            field.widget = HiddenInput()
        return form

    def process_payment(self, *args, **kwargs): # pylint: disable=unused-argument
        """Processes payment and confirms if payment is accepted.

        This method needs to be overridden in a project to handle
        payment processing with the appropriate payment provider.

        Can return value that evaluates to ``True`` to indicate
        payment success and any value that evaluates to ``False`` to
        indicate payment error.
        """
        return True

    def retrieve_transaction_date(self, payment): # pylint: disable=unused-argument
        """Returns the transaction date from provided payment details.

        Method should be overridden to accommodate the implemented
        payment processing if a more accurate datetime is required.

        Returns
            obj: The current datetime.
        """
        return timezone.now()

    def record_transaction(self, subscription, transaction_date=None):
        """Records transaction details in SubscriptionTransaction.

        Parameters:
            subscription (obj): A UserSubscription object.
            transaction_date (obj): A DateTime object of when
                payment occurred (defaults to current datetime if
                none provided).

        Returns:
            obj: The created SubscriptionTransaction instance.
        """
        # NOTE(review): transaction_date is currently ignored —
        # new_transaction() is called without it; confirm before
        # relying on this parameter.
        # if transaction_date is None:
        #     transaction_date = timezone.now()
        return models.SubscriptionTransaction.new_transaction(subscription)
        # return models.SubscriptionTransaction.objects.create(
        #     user=subscription.user,
        #     subscription=subscription.subscription,
# | |
import argparse
import cv2
import numpy as np
import os
import sys
import time
import torch.backends.cudnn
import torch.nn as nn
import torch.nn.parallel
from plyfile import PlyData, PlyElement
from typing import Tuple
from torch.utils.data import DataLoader
from datasets.data_io import read_cam_file, read_image, read_map, read_pair_file, save_image, save_map
from datasets.mvs import MVSDataset
from models.net import PatchmatchNet
from utils import print_args, tensor2numpy, to_cuda
def save_depth(args):
    """Run MVS model to save depth maps.

    Loads PatchmatchNet either from a parameter checkpoint or a
    scripted module (selected by ``args.input_type``), runs inference
    over the dataset on CUDA, and writes one depth map and one
    confidence map per reference view under ``args.output_folder``.
    """
    if args.input_type == "params":
        print("Evaluating model with params from {}".format(args.checkpoint_path))
        model = PatchmatchNet(
            patchmatch_interval_scale=args.patchmatch_interval_scale,
            propagation_range=args.patchmatch_range,
            patchmatch_iteration=args.patchmatch_iteration,
            patchmatch_num_sample=args.patchmatch_num_sample,
            propagate_neighbors=args.propagate_neighbors,
            evaluate_neighbors=args.evaluate_neighbors
        )
        model = nn.DataParallel(model)
        state_dict = torch.load(args.checkpoint_path)["model"]
        # strict=False tolerates key mismatches between checkpoint and model
        model.load_state_dict(state_dict, strict=False)
    else:
        print("Using scripted module from {}".format(args.checkpoint_path))
        model = torch.jit.load(args.checkpoint_path)
        model = nn.DataParallel(model)
    model.cuda()
    model.eval()
    dataset = MVSDataset(
        data_path=args.input_folder,
        num_views=args.num_views,
        max_dim=args.image_max_dim,
        scan_list=args.scan_list,
        num_light_idx=args.num_light_idx
    )
    image_loader = DataLoader(
        dataset=dataset, batch_size=args.batch_size, shuffle=False, num_workers=4, drop_last=False)
    with torch.no_grad():
        for batch_idx, sample in enumerate(image_loader):
            start_time = time.time()
            sample_cuda = to_cuda(sample)
            depth, confidence, _ = model.forward(
                sample_cuda["images"],
                sample_cuda["intrinsics"],
                sample_cuda["extrinsics"],
                sample_cuda["depth_min"],
                sample_cuda["depth_max"]
            )
            depth = tensor2numpy(depth)
            confidence = tensor2numpy(confidence)
            # Release GPU tensors before the (slow) file writes below
            del sample_cuda
            print("Iter {}/{}, time = {:.3f}".format(batch_idx + 1, len(image_loader), time.time() - start_time))
            filenames = sample["filename"]
            # save depth maps and confidence maps
            for filename, depth_est, photometric_confidence in zip(filenames, depth, confidence):
                depth_filename = os.path.join(args.output_folder, filename.format("depth_est", args.file_format))
                confidence_filename = os.path.join(args.output_folder, filename.format("confidence", args.file_format))
                os.makedirs(os.path.dirname(depth_filename), exist_ok=True)
                os.makedirs(os.path.dirname(confidence_filename), exist_ok=True)
                # save depth maps
                save_map(depth_filename, depth_est.squeeze(0))
                # save confidence maps
                save_map(confidence_filename, photometric_confidence)
# project the reference point cloud into the source view, then project back
def reproject_with_depth(
    depth_ref: np.ndarray,
    intrinsics_ref: np.ndarray,
    extrinsics_ref: np.ndarray,
    depth_src: np.ndarray,
    intrinsics_src: np.ndarray,
    extrinsics_src: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Round-trip reference depths through the source view.

    Each reference pixel is back-projected using its estimated depth,
    projected into the source view, re-lifted using the source view's
    depth estimate at that location, and finally projected back into
    the reference view.

    Args:
        depth_ref: depths of points in the reference view, of shape (H, W)
        intrinsics_ref: camera intrinsic of the reference view, of shape (3, 3)
        extrinsics_ref: camera extrinsic of the reference view, of shape (4, 4)
        depth_src: depths of points in the source view, of shape (H, W)
        intrinsics_src: camera intrinsic of the source view, of shape (3, 3)
        extrinsics_src: camera extrinsic of the source view, of shape (4, 4)

    Returns:
        Tuple of (depth_reprojected, x_reprojected, y_reprojected),
        each of shape (H, W): the reprojected depths and pixel
        coordinates as seen from the reference view.
    """
    height, width = depth_ref.shape[0], depth_ref.shape[1]

    # Flattened pixel grid of the reference view.
    u, v = np.meshgrid(np.arange(width), np.arange(height))
    u = u.reshape([-1])
    v = v.reshape([-1])
    ones = np.ones_like(u)

    # Step 1: lift reference pixels to 3D and project into the source view.
    pts_ref = np.matmul(np.linalg.inv(intrinsics_ref),
                        np.vstack((u, v, ones)) * depth_ref.reshape([-1]))
    ref_to_src = np.matmul(extrinsics_src, np.linalg.inv(extrinsics_ref))
    pts_src = np.matmul(ref_to_src, np.vstack((pts_ref, ones)))[:3]
    proj_src = np.matmul(intrinsics_src, pts_src)
    xy_src = proj_src[:2] / proj_src[2:3]

    # Step 2: sample the source depth map at the projected coordinates.
    map_x = xy_src[0].reshape([height, width]).astype(np.float32)
    map_y = xy_src[1].reshape([height, width]).astype(np.float32)
    sampled_depth_src = cv2.remap(depth_src, map_x, map_y, interpolation=cv2.INTER_LINEAR)

    # Lift the source pixels with the *sampled* source depths and map
    # the resulting 3D points back into the reference camera frame.
    pts_src_lifted = np.matmul(np.linalg.inv(intrinsics_src),
                               np.vstack((xy_src, ones)) * sampled_depth_src.reshape([-1]))
    src_to_ref = np.matmul(extrinsics_ref, np.linalg.inv(extrinsics_src))
    pts_reprojected = np.matmul(src_to_ref, np.vstack((pts_src_lifted, ones)))[:3]

    # Final projection back onto the reference image plane.
    depth_reprojected = pts_reprojected[2].reshape([height, width]).astype(np.float32)
    proj_ref = np.matmul(intrinsics_ref, pts_reprojected)
    xy_reprojected = proj_ref[:2] / proj_ref[2:3]
    x_reprojected = xy_reprojected[0].reshape([height, width]).astype(np.float32)
    y_reprojected = xy_reprojected[1].reshape([height, width]).astype(np.float32)
    return depth_reprojected, x_reprojected, y_reprojected
def check_geometric_consistency(
    depth_ref: np.ndarray,
    intrinsics_ref: np.ndarray,
    extrinsics_ref: np.ndarray,
    depth_src: np.ndarray,
    intrinsics_src: np.ndarray,
    extrinsics_src: np.ndarray,
    geo_pixel_thres: float,
    geo_depth_thres: float,
) -> Tuple[np.ndarray, np.ndarray]:
    """Check geometric consistency of reference depths against one source view.

    A reference pixel is consistent when its reprojection error is below
    ``geo_pixel_thres`` pixels and its relative depth error is below
    ``geo_depth_thres``.

    Args:
        depth_ref: depths of points in the reference view, of shape (H, W)
        intrinsics_ref: camera intrinsic of the reference view, of shape (3, 3)
        extrinsics_ref: camera extrinsic of the reference view, of shape (4, 4)
        depth_src: depths of points in the source view, of shape (H, W)
        intrinsics_src: camera intrinsic of the source view, of shape (3, 3)
        extrinsics_src: camera extrinsic of the source view, of shape (4, 4)
        geo_pixel_thres: geometric pixel threshold
        geo_depth_thres: geometric depth threshold

    Returns:
        Tuple of (mask, depth_reprojected): boolean consistency mask of
        shape (H, W), and the reprojected depths with inconsistent
        entries zeroed out.
    """
    height, width = depth_ref.shape[0], depth_ref.shape[1]
    grid_x, grid_y = np.meshgrid(np.arange(width), np.arange(height))

    # Round-trip the reference depths through the source view.
    depth_reprojected, x_reproj, y_reproj = reproject_with_depth(
        depth_ref, intrinsics_ref, extrinsics_ref,
        depth_src, intrinsics_src, extrinsics_src
    )

    # Pixel-space reprojection error: |p_reprojected - p_ref|.
    pixel_dist = np.sqrt((x_reproj - grid_x) ** 2 + (y_reproj - grid_y) ** 2)
    # Relative depth error: |d_reprojected - d_ref| / d_ref.
    depth_gap = np.abs(depth_reprojected - depth_ref)
    relative_depth_diff = depth_gap / depth_ref

    mask = np.logical_and(pixel_dist < geo_pixel_thres,
                          relative_depth_diff < geo_depth_thres)
    depth_reprojected[~mask] = 0
    return mask, depth_reprojected
def filter_depth(args, scan: str = ""):
    """Filter one scan's depth maps and fuse them into a point cloud.

    A pixel survives when its confidence exceeds ``args.photo_thres``
    (photometric mask) and it is geometrically consistent with at least
    ``args.geo_mask_thres`` source views.  Surviving pixels are
    back-projected to world space and written to ``fused.ply`` in the
    scan's output folder.  Per-view mask images are saved as a side
    effect; windows are shown when ``args.display`` is set.
    """
    # the pair file
    pair_file = os.path.join(args.input_folder, scan, "pair.txt")
    # for the final point cloud
    vertices = []
    vertex_colors = []
    pair_data = read_pair_file(pair_file)
    # for each reference view and the corresponding source views
    for ref_view, src_views in pair_data:
        # load the reference image
        ref_img, original_h, original_w = read_image(
            os.path.join(args.input_folder, scan, "images/{:0>8}.jpg".format(ref_view)), args.image_max_dim)
        # load the camera parameters
        ref_intrinsics, ref_extrinsics = read_cam_file(
            os.path.join(args.input_folder, scan, "cams/{:0>8}_cam.txt".format(ref_view)))[0:2]
        # rescale intrinsics to the (possibly resized) image resolution
        ref_intrinsics[0] *= ref_img.shape[1] / original_w
        ref_intrinsics[1] *= ref_img.shape[0] / original_h
        # load the estimated depth of the reference view
        ref_depth_est = read_map(
            os.path.join(args.output_folder, scan, "depth_est/{:0>8}{}".format(ref_view, args.file_format))).squeeze(2)
        # load the photometric mask of the reference view
        confidence = read_map(
            os.path.join(args.output_folder, scan, "confidence/{:0>8}{}".format(ref_view, args.file_format)))
        photo_mask = (confidence > args.photo_thres).squeeze(2)
        all_src_view_depth_estimates = []
        # compute the geometric mask
        geo_mask_sum = 0
        for src_view in src_views:
            # camera parameters of the source view
            src_image, original_h, original_w = read_image(
                os.path.join(args.input_folder, scan, "images/{:0>8}.jpg".format(src_view)), args.image_max_dim)
            src_intrinsics, src_extrinsics = read_cam_file(
                os.path.join(args.input_folder, scan, "cams/{:0>8}_cam.txt".format(src_view)))[0:2]
            src_intrinsics[0] *= src_image.shape[1] / original_w
            src_intrinsics[1] *= src_image.shape[0] / original_h
            # the estimated depth of the source view
            src_depth_est = read_map(
                os.path.join(args.output_folder, scan, "depth_est/{:0>8}{}".format(src_view, args.file_format)))
            geo_mask, depth_reprojected = check_geometric_consistency(
                ref_depth_est,
                ref_intrinsics,
                ref_extrinsics,
                src_depth_est,
                src_intrinsics,
                src_extrinsics,
                args.geo_pixel_thres,
                args.geo_depth_thres
            )
            # count, per pixel, how many source views agree
            geo_mask_sum += geo_mask.astype(np.int32)
            all_src_view_depth_estimates.append(depth_reprojected)
        # average over consistent views (inconsistent views contribute
        # zeros) plus the reference estimate itself
        depth_est_averaged = (sum(all_src_view_depth_estimates) + ref_depth_est) / (geo_mask_sum + 1)
        geo_mask = geo_mask_sum >= args.geo_mask_thres
        final_mask = np.logical_and(photo_mask, geo_mask)
        os.makedirs(os.path.join(args.output_folder, scan, "mask"), exist_ok=True)
        save_image(os.path.join(args.output_folder, scan, "mask/{:0>8}_photo.png".format(ref_view)), photo_mask)
        save_image(os.path.join(args.output_folder, scan, "mask/{:0>8}_geo.png".format(ref_view)), geo_mask)
        save_image(os.path.join(args.output_folder, scan, "mask/{:0>8}_final.png".format(ref_view)), final_mask)
        print("processing {}, ref-view{:0>3}, geo_mask:{:3f}, photo_mask:{:3f}, final_mask: {:3f}".format(
            os.path.join(args.input_folder, scan), ref_view, geo_mask.mean(), photo_mask.mean(), final_mask.mean()))
        if args.display:
            # reverse channel order for OpenCV display (assumes ref_img
            # is RGB — TODO confirm against read_image)
            cv2.imshow("ref_img", ref_img[:, :, ::-1])
            cv2.imshow("ref_depth", ref_depth_est)
            cv2.imshow("ref_depth * photo_mask", ref_depth_est * photo_mask.astype(np.float32))
            cv2.imshow("ref_depth * geo_mask", ref_depth_est * geo_mask.astype(np.float32))
            cv2.imshow("ref_depth * mask", ref_depth_est * final_mask.astype(np.float32))
            cv2.waitKey(1)
        height, width = depth_est_averaged.shape[:2]
        x, y = np.meshgrid(np.arange(0, width), np.arange(0, height))
        # keep only pixels that passed both masks
        x, y, depth = x[final_mask], y[final_mask], depth_est_averaged[final_mask]
        color = ref_img[final_mask]
        # back-project surviving pixels to world coordinates
        xyz_ref = np.matmul(np.linalg.inv(ref_intrinsics), np.vstack((x, y, np.ones_like(x))) * depth)
        xyz_world = np.matmul(np.linalg.inv(ref_extrinsics), np.vstack((xyz_ref, np.ones_like(x))))[:3]
        vertices.append(xyz_world.transpose((1, 0)))
        vertex_colors.append((color * 255).astype(np.uint8))
    vertices = np.concatenate(vertices, axis=0)
    vertex_colors = np.concatenate(vertex_colors, axis=0)
    vertices = np.array([tuple(v) for v in vertices], dtype=[("x", "f4"), ("y", "f4"), ("z", "f4")])
    vertex_colors = np.array([tuple(v) for v in vertex_colors], dtype=[("red", "u1"), ("green", "u1"), ("blue", "u1")])
    # merge coordinates and colors into one structured array for plyfile
    vertex_all = np.empty(len(vertices), vertices.dtype.descr + vertex_colors.dtype.descr)
    for prop in vertices.dtype.names:
        vertex_all[prop] = vertices[prop]
    for prop in vertex_colors.dtype.names:
        vertex_all[prop] = vertex_colors[prop]
    el = PlyElement.describe(vertex_all, "vertex")
    ply_filename = os.path.join(args.output_folder, scan, "fused.ply")
    PlyData([el]).write(ply_filename)
    print("saving the final model to", ply_filename)
if __name__ == "__main__":
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description="Predict depth, filter, and fuse")
# High level input/output options
parser.add_argument("--input_folder", type=str, help="input data path")
parser.add_argument("--output_folder", type=str, default="", help="output path")
parser.add_argument("--checkpoint_path", type=str, help="load a specific checkpoint for parameters of model")
parser.add_argument("--file_format", type=str, default=".pfm", help="File format for depth maps",
choices=[".bin", ".pfm"])
parser.add_argument("--input_type", type=str, default="params", help="Input type of checkpoint",
choices=["params", "module"])
parser.add_argument("--output_type", type=str, default="both", help="Type of outputs to produce",
choices=["depth", "fusion", "both"])
# Dataset loading options
parser.add_argument("--num_views", type=int, default=2,
help="number of source views for each patch-match problem")
parser.add_argument("--image_max_dim", type=int, default=-1, help="max image dimension")
parser.add_argument("--scan_list", type=str, default="",
help="Optional scan list text file to identify input folders")
parser.add_argument("--num_light_idx", type=int, default=-1, help="Number of light indexes in source images")
parser.add_argument("--batch_size", type=int, default=1, help="evaluation batch size")
# PatchMatchNet module options (only used when not loading from file)
parser.add_argument("--patchmatch_interval_scale", nargs="+", type=float, default=[0.005, 0.0125, 0.025],
help="normalized interval in inverse depth range to generate samples in local perturbation")
parser.add_argument("--patchmatch_range", nargs="+", type=int, default=[6, 4, 2],
help="fixed offset of sampling points for propagation of patch match on stages 1,2,3")
parser.add_argument("--patchmatch_iteration", nargs="+", type=int, default=[1, | |
useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
See Also
--------
linkage : for a description of what a linkage matrix is.
Examples
--------
>>> from scipy.cluster.hierarchy import ward, correspond
>>> from scipy.spatial.distance import pdist
This method can be used to check if a given linkage matrix ``Z`` has been
obtained from the application of a cluster method over a dataset ``X``:
>>> X = [[0, 0], [0, 1], [1, 0],
... [0, 4], [0, 3], [1, 4],
... [4, 0], [3, 0], [4, 1],
... [4, 4], [3, 4], [4, 3]]
>>> X_condensed = pdist(X)
>>> Z = ward(X_condensed)
Here, we can compare ``Z`` and ``X`` (in condensed form):
>>> correspond(Z, X_condensed)
True
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
    """
    Form flat clusters from the hierarchical clustering defined by
    the given linkage matrix.

    Parameters
    ----------
    Z : ndarray
        The hierarchical clustering encoded with the matrix returned
        by the `linkage` function.
    t : scalar
        The threshold to apply for the 'inconsistent', 'distance' and
        'monocrit' criteria, or the maximum number of flat clusters
        for 'maxclust' and 'maxclust_monocrit'.
    criterion : str, optional
        How flat clusters are formed:

        ``inconsistent`` (default):
            cut where a node and all its descendants have an
            inconsistency value <= `t`; nodes meeting no such
            criterion each form their own cluster.
        ``distance``:
            cut so that observations in a flat cluster have
            cophenetic distance no greater than `t`.
        ``maxclust``:
            pick the smallest distance cut producing no more than
            `t` flat clusters.
        ``monocrit``:
            cut on the user-supplied statistic, i.e. where
            ``monocrit[j] <= t``.
        ``maxclust_monocrit``:
            like ``maxclust`` but thresholding the user-supplied
            monotonic statistic instead of distance.
    depth : int, optional
        The maximum depth of the inconsistency calculation; only
        meaningful for the 'inconsistent' criterion. Default is 2.
    R : ndarray, optional
        Precomputed inconsistency matrix for the 'inconsistent'
        criterion; computed on demand when not provided.
    monocrit : ndarray, optional
        Length n-1 array of statistics for the 'monocrit' and
        'maxclust_monocrit' criteria. Must be monotonic: for a node
        with index i, ``monocrit[i] >= monocrit[j]`` for every node
        j below it.

    Returns
    -------
    fcluster : ndarray
        An array of length ``n``. ``T[i]`` is the flat cluster number to
        which original observation ``i`` belongs.

    See Also
    --------
    linkage : for information about hierarchical clustering methods work.
    """
    Z = np.asarray(Z, order='c')
    is_valid_linkage(Z, throw=True, name='Z')

    num_obs = Z.shape[0] + 1
    labels = np.zeros((num_obs,), dtype='i')

    # The C implementation cannot handle arbitrary striding, so pass
    # contiguous copies down (dimensions are used instead of strides).
    [Z] = _copy_arrays_if_base_present([Z])

    if criterion == 'inconsistent':
        if R is None:
            R = inconsistent(Z, depth)
        else:
            R = np.asarray(R, order='c')
            is_valid_im(R, throw=True, name='R')
        [R] = _copy_arrays_if_base_present([R])
        _hierarchy.cluster_in(Z, R, labels, float(t), int(num_obs))
    elif criterion == 'distance':
        _hierarchy.cluster_dist(Z, labels, float(t), int(num_obs))
    elif criterion == 'maxclust':
        _hierarchy.cluster_maxclust_dist(Z, labels, int(num_obs), int(t))
    elif criterion == 'monocrit':
        [monocrit] = _copy_arrays_if_base_present([monocrit])
        _hierarchy.cluster_monocrit(Z, monocrit, labels, float(t), int(num_obs))
    elif criterion == 'maxclust_monocrit':
        [monocrit] = _copy_arrays_if_base_present([monocrit])
        _hierarchy.cluster_maxclust_monocrit(Z, monocrit, labels,
                                             int(num_obs), int(t))
    else:
        raise ValueError('Invalid cluster formation criterion: %s'
                         % str(criterion))
    return labels
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the euclidean
distance metric to calculate distances between original observations,
performs hierarchical clustering using the single linkage algorithm,
and forms flat clusters using the inconsistency method with `t` as the
cut-off threshold.
A 1-D array ``T`` of length ``n`` is returned. ``T[i]`` is
the index of the flat cluster to which the original observation ``i``
belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations | |
1
elif self.beforeRadioChecked == self.lineRadio2:
lineType = '-'
lineColor = 'deepskyblue'
self.list_points_type.append('White')
self.plotDraggableLine(lineType, lineColor)
select = 2
elif self.beforeRadioChecked == self.lineRadio3:
lineType = '--'
lineColor = 'deepskyblue'
self.list_points_type.append('WhiteDash')
self.plotDraggableLine(lineType, lineColor)
select = 3
elif self.beforeRadioChecked == self.polygonRadio1:
faceColor = 'mediumblue'
self.list_points_type.append('Crosswalk')
self.plotDraggablePolygon(faceColor)
select = 4
elif self.beforeRadioChecked == self.polygonRadio2:
faceColor = 'blueviolet'
self.list_points_type.append('StopLine')
self.plotDraggablePolygon(faceColor)
select = 5
elif self.beforeRadioChecked == self.polygonRadio3:
faceColor = 'saddlebrown'
self.list_points_type.append('SpeedDump')
self.plotDraggablePolygon(faceColor)
select = 6
elif self.beforeRadioChecked == self.objectRadio1:
lineColor = 'palevioletred'
self.list_points_type.append('Arrow')
self.plotDraggableObject(lineColor)
select = 7
elif self.beforeRadioChecked == self.objectRadio2:
lineColor = 'yellow'
self.list_points_type.append('Diamond')
self.plotDraggableObject(lineColor)
select = 8
elif self.beforeRadioChecked == self.objectRadio3:
lineColor = 'limegreen'
self.list_points_type.append('RoadSign')
self.plotDraggableObject(lineColor)
select = 9
elif self.beforeRadioChecked == self.pointRadio1:
select = 10
pass
else:
print("outofrange at addNewLine")
if select != 0:
# for faster scene update
self.canvas.setUpdatesEnabled(False)
self.butconnect()
# for faster scene update
self.canvas.setUpdatesEnabled(True)
    def delLastLine(self):
        '''Remove the most recently added annotation from the figure.

        Removes the newest draggable line artist, its polygon patch (if one
        exists) and the matching entry of the type list.  No-op when there
        are no annotations.
        '''
        if self.list_points:
            # for faster scene update: suspend repaints while handlers are
            # detached and the artist is removed, then re-enable
            self.canvas.setUpdatesEnabled(False)
            self.butdisconnect()
            self.list_points[-1].line.remove()
            self.list_points.pop()
            self.butconnect()
            # for faster scene update
            self.canvas.setUpdatesEnabled(True)
            # polygons also leave a patch on the axes; drop the newest one
            if self.axes.patches:
                self.axes.patches[-1].remove()
            if self.list_points_type:
                self.list_points_type.pop()
def delAllLine(self):
''' del all lines to figure '''
self.butdisconnect()
while self.list_points:
if self.list_points:
self.list_points[-1].line.remove()
self.list_points.pop()
if self.axes.patches:
self.axes.patches[-1].remove()
if self.list_points_type:
self.list_points_type.pop()
self.butconnect()
def addNewPoint(self):
''' add a new line points to figure '''
if self.list_points_type[-1] in ['Yellow', 'White', 'WhiteDash']:
lineType = self.list_points_type[-1]
pts = self.list_points[-1]
pos = pts.get_position()
verts = [(256 + random.randrange(0,512), 256 + random.randrange(0,512))]
for index, (x, y) in enumerate(pos):
verts.append((x, y))
codes = [Path.MOVETO, ]
for i in range(len(pos)):
codes.append(Path.LINETO)
lineType = ''
lineColor = ''
if self.list_points_type[-1] == 'Yellow':
lineType = '-'
lineColor = 'orangered'
self.list_points_type.append('Yellow')
elif self.list_points_type[-1] == 'White':
lineType = '-'
lineColor = 'deepskyblue'
self.list_points_type.append('White')
elif self.list_points_type[-1] == 'WhiteDash':
lineType = '--'
lineColor = 'deepskyblue'
self.list_points_type.append('WhiteDash')
self.delLastLine()
self.plotDraggableLine(lineType, lineColor, verts, codes)
# for faster scene update
self.canvas.setUpdatesEnabled(False)
self.butconnect()
# for faster scene update
self.canvas.setUpdatesEnabled(True)
elif self.list_points_type[-1] in ['Crosswalk', 'StopLine', 'SpeedDump']:
lineType = self.list_points_type[-1]
pts = self.list_points[-1]
pos = pts.get_position()
verts = []
for index, (x, y) in enumerate(pos):
verts.append((x, y))
verts.insert(1, (256 + random.randrange(0,512), 256 + random.randrange(0,512)))
codes = [Path.MOVETO, ]
for i in range(len(pos)-1):
codes.append(Path.LINETO)
codes.append(Path.CLOSEPOLY)
faceColor = ''
if self.list_points_type[-1] == 'Crosswalk':
faceColor = 'mediumblue'
self.list_points_type.append('Crosswalk')
elif self.list_points_type[-1] == 'StopLine':
faceColor = 'blueviolet'
self.list_points_type.append('StopLine')
elif self.list_points_type[-1] == 'SpeedDump':
faceColor = 'saddlebrown'
self.list_points_type.append('SpeedDump')
self.delLastLine()
self.plotDraggablePolygon(faceColor, verts, codes)
# for faster scene update
self.canvas.setUpdatesEnabled(False)
self.butconnect()
# for faster scene update
self.canvas.setUpdatesEnabled(True)
else:
print("Point Type Not Supported")
def delLastPoint(self):
''' del the last line points to figure '''
# TODO: CODING
if self.list_points_type[-1] in ['Yellow', 'White', 'WhiteDash']:
lineType = self.list_points_type[-1]
pts = self.list_points[-1]
pos = pts.get_position()
if len(pos) <= 2:
self.delLastLine()
return
verts = []
for index, (x, y) in enumerate(pos):
verts.append((x, y))
verts.pop(0)
codes = [Path.MOVETO, ]
for i in range(len(pos)-2):
codes.append(Path.LINETO)
lineType = ''
lineColor = ''
if self.list_points_type[-1] == 'Yellow':
lineType = '-'
lineColor = 'orangered'
self.list_points_type.append('Yellow')
elif self.list_points_type[-1] == 'White':
lineType = '-'
lineColor = 'deepskyblue'
self.list_points_type.append('White')
elif self.list_points_type[-1] == 'WhiteDash':
lineType = '--'
lineColor = 'deepskyblue'
self.list_points_type.append('WhiteDash')
self.delLastLine()
self.plotDraggableLine(lineType, lineColor, verts, codes)
# for faster scene update
self.canvas.setUpdatesEnabled(False)
self.butconnect()
# for faster scene update
self.canvas.setUpdatesEnabled(True)
elif self.list_points_type[-1] in ['Crosswalk', 'StopLine', 'SpeedDump']:
lineType = self.list_points_type[-1]
pts = self.list_points[-1]
pos = pts.get_position()
if len(pos) <= 3:
self.delLastLine()
return
verts = []
for index, (x, y) in enumerate(pos):
verts.append((x, y))
verts.pop(1)
codes = [Path.MOVETO, ]
for i in range(len(pos) - 3):
codes.append(Path.LINETO)
codes.append(Path.CLOSEPOLY)
faceColor = ''
if self.list_points_type[-1] == 'Crosswalk':
faceColor = 'mediumblue'
self.list_points_type.append('Crosswalk')
elif self.list_points_type[-1] == 'StopLine':
faceColor = 'blueviolet'
self.list_points_type.append('StopLine')
elif self.list_points_type[-1] == 'SpeedDump':
faceColor = 'saddlebrown'
self.list_points_type.append('SpeedDump')
self.delLastLine()
self.plotDraggablePolygon(faceColor, verts, codes)
# for faster scene update
self.canvas.setUpdatesEnabled(False)
self.butconnect()
# for faster scene update
self.canvas.setUpdatesEnabled(True)
else:
print("Point Type Not Supported")
def butconnect(self):
''' connect current DraggablePoints '''
for pts in self.list_points:
pts.connect()
self.canvas.draw()
#self.fastDraw(self.canvas)
def butdisconnect(self):
''' disconnect current DraggablePoints '''
for pts in self.list_points:
pts.disconnect()
self.canvas.draw()
#self.fastDraw(self.canvas)
    def savePng(self,inputName,outputName):
        '''Rasterize the labels of <inputName>.txt into a PNG basemap.

        NOTE(review): this export path is currently DISABLED -- the whole body
        below is kept as a dead (no-op) string literal, so calling this method
        does nothing.  It is retained as a reference implementation that drew
        each line type onto a black cv2 basemap with a distinct gray value.
        '''
        # self.figure.savefig('test.png',bbox_inches='tight', pad_inches = 0)
        # Produce an height*width black basemap
        # The triple-quoted block below is intentionally not executed.
        """
        self.basemap = np.zeros([self.canvasSize[1],self.canvasSize[0],1], dtype=np.uint8)
        with open(inputName+".txt", "r") as text_file:
            text_line = [text_line.rstrip('\n') for text_line in text_file]
        curvepoints = 20
        thickness = 3
        for item in text_line:
            pos = item.split(',')
            line_type, pos = pos[0],pos[1:]
            nodes = self.bezierCurve(pos,curvepoints)
            nodes = nodes.reshape((-1, 1, 2))
            if line_type == 'White':
                cv2.polylines(self.basemap, [nodes], False, (255, 0, 0), thickness)
            elif line_type == 'WhiteDash':
                cv2.polylines(self.basemap, [nodes], False, (200, 0, 0), thickness)
            elif line_type == 'Yellow':
                cv2.polylines(self.basemap, [nodes], False, (150, 0, 0), thickness)
        # cv2.imshow('image',self.basemap)
        cv2.imwrite(outputName, self.basemap)
        # x = cv2.imread(outputName)
        # print x[np.nonzero(x)]
        """
def saveText(self,outputName):
''' save line type and positions to txt '''
with open(outputName+".txt", "w") as text_file:
for lineType, pts in zip(self.list_points_type,self.list_points):
pos = pts.get_position()
text_file.write("%s " % lineType)
for index, (x,y) in enumerate(pos):
text_file.write("%s %s" % (x, y))
if index != len(pos)-1:
text_file.write(" ")
text_file.write("\n")
self.saveFlag = True
def saveAll(self,img_path):
''' save text and save png '''
ind = self.imgIndex
if self.imgIndex == len(self.list_img_path):
ind = ind - 1
if self.imgIndex == -1:
ind = ind + 1
# f1 = img_path+"label_txt"+os.sep+self.list_img_path[ind][:-4]
# f2 = img_path+"label_png"+os.sep+self.list_img_path[ind][:-4]+".png"
# TODO: fix export png
f1 = img_path + os.sep + self.list_img_path[ind][:-4]
self.saveText(f1)
#self.savePng(f1,f2)
def loadPrevLabel(self):
''' load previous labels '''
ind = self.imgIndex
if self.imgIndex == len(self.list_img_path):
ind = ind - 1
if self.imgIndex == -1:
ind = ind + 1
status = self.isLabelExist(self.img_path, ind-1)
def msgBoxEvent(self):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Warning)
msgBox.setWindowTitle('WARNING')
msgBox.setText( "Your changes have not been saved.\nAre you sure you want to discard the changes?" )
msgBox.setInformativeText( "Press OK to continue, or Cancel to stay on the current page." )
msgBox.addButton( QMessageBox.Ok )
msgBox.addButton( QMessageBox.Cancel )
msgBox.setDefaultButton( QMessageBox.Cancel )
ret = msgBox.exec_()
if ret == QMessageBox.Ok:
if self.imgIndex == len(self.list_img_path):
self.saveFlag = False
else:
self.saveFlag = True
return True
else:
return False
def msgBoxReachEdgeEvent(self):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Warning)
msgBox.setWindowTitle('WARNING')
msgBox.setText( "Reach the end of image" )
msgBox.setInformativeText( "Press OK to continue" )
msgBox.addButton( QMessageBox.Ok )
msgBox.setDefaultButton( QMessageBox.Ok )
ret = msgBox.exec_()
if ret == QMessageBox.Ok:
return True
else:
return False
    def isPosNotChange(self,img_path,index):
        # NOTE(review): this check is currently DISABLED -- the triple-quoted
        # block below is a dead (no-op) string literal, so the method always
        # returns True ("positions unchanged").  The retained reference code
        # compared the stored label file against the current draggable-point
        # positions, but it parses comma-separated 4-point records and would
        # need updating for the current space-separated variable-length format.
        # load label txt
        """
        fileName = img_path+"label_txt"+os.sep+self.list_img_path[index][:-4]+".txt"
        lab = []
        labnp = np.array(lab)
        curnp = np.array(lab)
        try:
            with open(fileName, 'r') as f:
                x = f.read().splitlines()
                for line in x:
                    select,xstr1,ystr1,xstr2,ystr2,xstr3,ystr3,xstr4,ystr4 = line.split(',')
                    x1,y1,x2,y2,x3,y3,x4,y4 = float(xstr1),float(ystr1),float(xstr2),float(ystr2),float(xstr3),float(ystr3),float(xstr4),float(ystr4)
                    lab.append([x1,y1,x2,y2,x3,y3,x4,y4])
                labnp = np.array(lab)
        except IOError:
            lab = []
        # current dp
        if self.list_points:
            curnp = self.list_points[0].get_position()
            for pts in range(1,len(self.list_points)):
                curnp = np.vstack((curnp,self.list_points[pts].get_position()))
            curnp = curnp.reshape(-1,8)
        # check xdim
        if labnp.shape[0] != curnp.shape[0]:
            return False
        # check content
        for l in curnp:
            if l not in labnp:
                return False
        """
        return True
def isLabelExist(self,img_path,index):
fileName = img_path+os.sep+self.list_img_path[index][:-4]+".txt"
select = ''
try:
with open(fileName, 'r') as f:
x = f.read().splitlines()
# global x1,y1,x2,y2,x3,y3,x4,y4
# tmpx1, tmpy1, tmpx2, tmpy2, tmpx3, tmpy3, tmpx4, tmpy4 = x1,y1,x2,y2,x3,y3,x4,y4
for line in x:
elemlist = line.split(' ')
category = elemlist[0]
points = elemlist[1:]
verts = [(points[i*2], points[i*2+1]) for i in range(int(len(points)/2))]
codes = []
if category in ['Yellow', 'White', 'WhiteDash']:
codes = [Path.MOVETO, ]
for i in range(len(verts)-1):
codes.append(Path.LINETO)
elif category in ['Crosswalk', 'StopLine', 'SpeedDump']:
codes = [Path.MOVETO, ]
for i in range(len(verts) - 2):
codes.append(Path.LINETO)
codes.append(Path.CLOSEPOLY)
elif category in ['Arrow', 'Diamond', 'RoadSign']:
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY, ]
if category == 'Yellow':
lineType = '-'
lineColor = 'orangered'
self.list_points_type.append('Yellow')
self.plotDraggableLine(lineType, lineColor, verts, codes)
elif category == 'White':
lineType = '-'
lineColor = 'deepskyblue'
self.list_points_type.append('White')
self.plotDraggableLine(lineType, lineColor, verts, codes)
elif category == 'WhiteDash':
lineType = '--'
lineColor = 'deepskyblue'
self.list_points_type.append('WhiteDash')
self.plotDraggableLine(lineType, lineColor, verts, codes)
elif category == 'Crosswalk':
faceColor = 'mediumblue'
self.list_points_type.append('Crosswalk')
self.plotDraggablePolygon(faceColor, verts, codes)
elif category == 'StopLine':
faceColor = 'blueviolet'
self.list_points_type.append('StopLine')
self.plotDraggablePolygon(faceColor, verts, codes)
elif category == 'SpeedDump':
faceColor = 'saddlebrown'
self.list_points_type.append('SpeedDump')
self.plotDraggablePolygon(faceColor, verts, codes)
elif category == 'Arrow':
lineColor = 'palevioletred'
self.list_points_type.append('Arrow')
self.plotDraggableObject(lineColor, verts, codes)
elif category == 'Diamond':
lineColor = 'yellow'
self.list_points_type.append('Diamond')
self.plotDraggableObject(lineColor, verts, codes)
elif category == 'RoadSign':
lineColor = 'limegreen'
self.list_points_type.append('RoadSign')
self.plotDraggableObject(lineColor, verts, codes)
else:
pass
#self.butconnect()
# previous code\
| |
hf_net_kwargs['feedthrough'] = feedthrough
self.hf_net_kwargs = hf_net_kwargs
self.feedthrough = feedthrough
def init_nets(self, nu, ny): # a bit weird
na_right = self.na_right if hasattr(self,'na_right') else 0
nb_right = self.nb_right if hasattr(self,'nb_right') else 0
self.encoder = self.e_net(nb=self.nb+nb_right, nu=nu, na=self.na+na_right, ny=ny, nx=self.nx, **self.e_net_kwargs)
self.hfn = self.hf_net(nx=self.nx, nu=self.nu, ny=self.ny, **self.hf_net_kwargs)
def loss(self, uhist, yhist, ufuture, yfuture, **Loss_kwargs):
x = self.encoder(uhist, yhist) #initialize Nbatch number of states
errors = []
for y, u in zip(torch.transpose(yfuture,0,1), torch.transpose(ufuture,0,1)): #iterate over time
yhat, x = self.hfn(x, u)
errors.append(nn.functional.mse_loss(y, yhat)) #calculate error after taking n-steps
return torch.mean(torch.stack(errors))
def measure_act_multi(self,actions):
actions = torch.tensor(np.array(actions), dtype=torch.float32) #(N,...)
with torch.no_grad():
y_predict, self.state = self.hfn(self.state, actions)
return y_predict.numpy()
class par_start_encoder(nn.Module):
    """Learnable initial states: one trainable nx-dimensional vector per training sample.

    The initial state of every sample is a parameter of the network
    (initialized from N(0, 0.1^2)); forward(ids) selects the rows that
    belong to the requested sample ids.
    """

    def __init__(self, nx, nsamples):
        super(par_start_encoder, self).__init__()
        init_states = np.random.normal(scale=0.1, size=(nsamples, nx))
        self.start_state = nn.parameter.Parameter(
            data=torch.as_tensor(init_states, dtype=torch.float32))

    def forward(self, ids):
        # row-select by sample id -> (len(ids), nx)
        return self.start_state[ids]
class SS_par_start(SS_encoder): #this is not implemented in a nice manner, there might be bugs.
    """SS_encoder variant without a history encoder: the initial state of every
    training sample is itself a trainable parameter (see par_start_encoder).

    na and nb are forced to 0, so no past inputs/outputs are consumed.
    """
    def __init__(self, nx=10, feedthrough=False, optimizer_kwargs={}):
        super(SS_par_start, self).__init__(nx=nx,na=0, nb=0, feedthrough=feedthrough)
        self.optimizer_kwargs = optimizer_kwargs

    ########## How to fit #############
    def make_training_data(self, sys_data, **Loss_kwargs):
        # Returns (sample ids, u futures, y futures); the ids are looked up in
        # par_starter during loss() instead of encoding a history window.
        assert sys_data.normed == True
        nf = Loss_kwargs.get('nf',25)  # prediction-horizon length
        stride = Loss_kwargs.get('stride',1)
        online_construct = Loss_kwargs.get('online_construct',False)
        assert online_construct==False, 'to be implemented'
        hist, ufuture, yfuture = sys_data.to_encoder_data(na=0,nb=0,nf=nf,stride=stride,force_multi_u=True,force_multi_y=True)
        nsamples = hist.shape[0]
        ids = np.arange(nsamples,dtype=int)
        # one trainable initial state per training sample
        self.par_starter = par_start_encoder(nx=self.nx, nsamples=nsamples)
        # NOTE(review): self.parameters is passed uncalled -- presumably
        # init_optimizer expects the bound method; confirm against its signature.
        self.optimizer = self.init_optimizer(self.parameters, **self.optimizer_kwargs) #no kwargs
        return ids, ufuture, yfuture #returns np.array(hist),np.array(ufuture),np.array(yfuture)

    def init_nets(self, nu, ny): # a bit weird
        # scalar signals are represented with dimension 1
        ny = ny if ny is not None else 1
        nu = nu if nu is not None else 1
        self.fn = self.f_net(self.nx+nu, self.nx, n_nodes_per_layer=self.f_n_nodes_per_layer, n_hidden_layers=self.f_n_hidden_layers, activation=self.f_activation)
        # with feedthrough the output net also sees the current input
        hn_in = self.nx + nu if self.feedthrough else self.nx
        self.hn = self.h_net(hn_in , ny, n_nodes_per_layer=self.h_n_nodes_per_layer, n_hidden_layers=self.h_n_hidden_layers, activation=self.h_activation)

    def loss(self, ids, ufuture, yfuture, **Loss_kwargs):
        # ids select the trainable initial states of the sampled windows
        ids = ids.numpy().astype(int)
        #hist is empty
        x = self.par_starter(ids)
        y_predict = []
        for u in torch.transpose(ufuture,0,1):  # iterate time-major
            xu = torch.cat((x,u),dim=1)
            y_predict.append(self.hn(x) if not self.feedthrough else self.hn(xu))
            x = self.fn(xu)  # advance the state
        return torch.mean((torch.stack(y_predict,dim=1)-yfuture)**2)

    ########## How to use ##############
    def init_state_multi(self,sys_data,nf=100,stride=1):
        # NOTE(review): unlike training, simulation has no learned state for
        # unseen data -- the state is simply drawn from N(0, 0.1^2); hist is
        # only used for its number of windows.  Returns 0 skipped steps.
        hist = torch.tensor(sys_data.to_encoder_data(na=0,nb=0,nf=nf,stride=stride)[0],dtype=torch.float32) #(1,)
        with torch.no_grad():
            self.state = torch.as_tensor(np.random.normal(scale=0.1,size=(hist.shape[0],self.nx)),dtype=torch.float32) #detach here?
        return 0
from deepSI.utils import simple_res_net, feed_forward_nn, general_koopman_forward_layer
class SS_encoder_general_koopman(SS_encoder_general):
    """
    The encoder setup with a linear transition function with an affine input (kinda like an LPV), in equations

    x_k = e(u_kpast,y_kpast)
    x_k+1 = A@x_k + g(x_k,u_k)@u_k #affine input here (@=matrix multiply)
    y_k = h(x_k)

    Where g is given by g_net which is by default a feedforward nn with residual (i.e. simple_res_net) called as
    'g_net(n_in=affine_dim,n_out=output_dim*input_dim,**g_net_kwargs)'
    with affine_dim=nx + nu, output_dim = nx, input_dim=nu
    Hence, g_net produces a vector which is reshaped into a matrix
    (See deepSI.utils.torch_nets.general_koopman_forward_layer for details).
    """
    def __init__(self, nx=10, na=20, nb=20, feedthrough=False, include_u_in_g=True,
                 e_net=default_encoder_net, g_net=simple_res_net, h_net=default_output_net, \
                 e_net_kwargs=None, g_net_kwargs=None, h_net_kwargs=None):
        # Use None sentinels instead of mutable {} defaults: the base class is
        # known to mutate kwargs dicts (e.g. hf_net_kwargs['feedthrough'] = ...),
        # so a shared default dict could leak state between instances.
        e_net_kwargs = {} if e_net_kwargs is None else e_net_kwargs
        g_net_kwargs = {} if g_net_kwargs is None else g_net_kwargs
        h_net_kwargs = {} if h_net_kwargs is None else h_net_kwargs
        f_net_kwargs = dict(include_u_in_g=include_u_in_g,g_net=g_net,g_net_kwargs=g_net_kwargs)
        super(SS_encoder_general_koopman, self).__init__(nx=nx,na=na,nb=nb, feedthrough=feedthrough, \
                   e_net=e_net, f_net=general_koopman_forward_layer, h_net=h_net, \
                   e_net_kwargs=e_net_kwargs, f_net_kwargs=f_net_kwargs, h_net_kwargs=h_net_kwargs)
from deepSI.utils import CNN_chained_upscales, CNN_encoder
class SS_encoder_CNN_video(SS_encoder_general):
    """The subspace encoder for video/image outputs: a CNN encoder compresses the
    history window to the state and chained CNN upscales reconstruct the frame.

    Notes
    -----
    Pure configuration subclass of SS_encoder_general; only the default
    e_net/h_net choices differ.
    """
    def __init__(self, nx=10, na=20, nb=20, feedthrough=False, e_net=CNN_encoder, f_net=default_state_net, h_net=CNN_chained_upscales, \
                 e_net_kwargs=None, f_net_kwargs=None, h_net_kwargs=None):
        # Use None sentinels instead of mutable {} defaults: the base class is
        # known to mutate kwargs dicts (e.g. hf_net_kwargs['feedthrough'] = ...),
        # so a shared default dict could leak state between instances.
        e_net_kwargs = {} if e_net_kwargs is None else e_net_kwargs
        f_net_kwargs = {} if f_net_kwargs is None else f_net_kwargs
        h_net_kwargs = {} if h_net_kwargs is None else h_net_kwargs
        super(SS_encoder_CNN_video, self).__init__(nx=nx,na=na,nb=nb, feedthrough=feedthrough, \
                   e_net=e_net, f_net=f_net, h_net=h_net, \
                   e_net_kwargs=e_net_kwargs, f_net_kwargs=f_net_kwargs, h_net_kwargs=h_net_kwargs)
from deepSI.utils import Shotgun_MLP
class SS_encoder_shotgun_MLP(SS_encoder_general):
    def __init__(self, nx=10, na=20, nb=20, e_net=CNN_encoder, f_net=default_state_net, h_net=Shotgun_MLP, \
                 e_net_kwargs={}, f_net_kwargs={}, h_net_kwargs={}):
        '''Deprecated constructor: always raises NotImplementedError.

        Todo: fix cuda with all the arrays.
        '''
        # Deliberate hard stop: this model has not been ported to 0.3.
        # Everything after the raise (the super().__init__ call and the
        # timing counters) is intentionally unreachable and kept only as a
        # record of the intended initialization.
        raise NotImplementedError('not yet updated to 0.3 go back to 0.2 to use this model')
        super(SS_encoder_shotgun_MLP, self).__init__(nx=nx,na=na,nb=nb,\
            e_net=e_net, f_net=f_net, h_net=h_net, \
            e_net_kwargs=e_net_kwargs, f_net_kwargs=f_net_kwargs, h_net_kwargs=h_net_kwargs)
        self.encoder_time = 0
        self.forward_time = 0
def loss(self, uhist, yhist, ufuture, yfuture, **Loss_kwargs):
# I can pre-sample it or sample it when passed. Which one is faster?
# I'm doing it here for now, maybe later I will do it in the dataset on a shuffle or something.
if len(self.ny)==3:
C,H,W = self.ny
else:
H,W = self.ny
C = None
Nb = uhist.shape[0]
Nsamp = Loss_kwargs.get('Nsamp',100) #int(800/Nb) is approx the best for speed for CPU
batchselector = torch.broadcast_to(torch.arange(Nb)[:,None],(Nb,Nsamp))
time.time()
t_start = time.time()
x = self.encoder(uhist, yhist)
self.encoder_time += time.time() - t_start
t_start = time.time()
mse_losses = []
for y, u in zip(torch.transpose(yfuture,0,1), torch.transpose(ufuture,0,1)): #iterate over time
h = torch.randint(low=0, high=H, size=(Nb,Nsamp))
w = torch.randint(low=0, high=W, size=(Nb,Nsamp))
ysamps = y[batchselector,:,h,w] if C!=None else y[batchselector,h,w]
yhat = self.hn.sampler(x, h, w)
mse_losses.append(nn.functional.mse_loss(yhat, ysamps))
x = self.fn(x,u)
self.forward_time += time.time() - t_start
return torch.mean(torch.stack(mse_losses))
    def apply_experiment(self, sys_data, save_state=False): #can put this in apply controller
        '''Does an experiment for a given system data (fixed u).

        Parameters
        ----------
        sys_data : System_data or System_data_list (or list or tuple)
            The experiment which should be applied.  When sys_data carries
            `h`/`w` pixel-coordinate arrays, the sampled-pixel simulation path
            below is used; otherwise this falls back to the parent class.
        save_state : bool
            When True, also record the internal state at every step in x.

        Notes
        -----
        This will initialize the state using self.init_state if sys_data.y (and u)
        is not None and skip the appropriate number of steps associated with it.
        If either is missing than self.reset_state() is used to initialize the state.
        Afterwards this state is advanced using sys_data.u and the output is saved at each step.
        Lastly, the number of skipped/copied steps in init_state is saved as sys_data.cheat_n such
        that it can be accounted for later.
        '''
        if isinstance(sys_data,(tuple,list,System_data_list)):
            return System_data_list([self.apply_experiment(sd, save_state=save_state) for sd in sys_data])
        # Field layout for the sampled-pixel path (per the author's notes):
        #u = (Ns)
        #x = (Ns, C, H, W) or (Ns, H, W)
        #y = (Ns, Np, C), (Ns, Np, C)
        #h = (Ns, Np)
        #w = (Ns, Np)
        if not (hasattr(sys_data,'h') and hasattr(sys_data,'w')):
            return super(SS_encoder_shotgun_MLP, self).apply_experiment(sys_data, save_state=save_state)
        h, w = sys_data.h, sys_data.w
        Y = []
        # Temporarily swap the image into the y slot so the norm (which acts
        # on y) transforms it, then restore the original layout.
        sys_data.x, sys_data.y = sys_data.y, sys_data.x #move image to y
        sys_data_norm = self.norm.transform(sys_data) #transform image if needed
        sys_data.x, sys_data.y = sys_data.y, sys_data.x #move image back to x
        sys_data_norm.x, sys_data_norm.y = sys_data_norm.y, sys_data_norm.x #move image back to x
        sys_data_norm.h, sys_data_norm.w = h, w #set h and w on the normed version
        U = sys_data_norm.u #get the input
        Images = sys_data_norm.x #get the images
        assert sys_data_norm.y is not None, 'not implemented' #if y is not None than init state
        obs, k0 = self.init_state(sys_data_norm) #normed obs in the shape of y in the last step.
        Y.extend(sys_data_norm.y[:k0]) #h(x_{k0-1})
        if save_state:
            X = [self.get_state()]*(k0+1)
        for k in range(k0,len(U)):
            Y.append(obs)
            if k < len(U)-1: #skip last step
                obs = self.step(U[k], h=h[k+1], w=w[k+1])
                if save_state:
                    X.append(self.get_state())
        # Denormalization: Y has shape (Ns, Np, C) while norm.y0/ystd are
        # image-shaped, so a reduced copy of the norm is built below.
        #norm.y0 has shape (C, W, H) or (C, 1, 1) or similar
        Y = np.array(Y) #(Ns, Np, C)
        # if self.norm.y0 is 1:
        #     return System_data(u=sys_data.u, y=Y, x=np.array(X) if save_state else None,normed=False,cheat_n=k0)
        #has the shape of a constant or (1, 1) or (C, 1, 1) the possiblity of (C, H, W) I will exclude for now.
        from copy import deepcopy
        norm_sampler = deepcopy(self.norm)
        if isinstance(self.norm.y0,(int,float)):
            pass  # scalar norm already broadcasts over (Ns, Np, C)
        elif self.norm.y0.shape==(1,1):
            norm_sampler.y0 = norm_sampler.y0[0,0]
            norm_sampler.ystd = norm_sampler.ystd[0,0] #ystd to a float
        elif self.norm.y0.shape==(sys_data.x.shape[0],1,1):
            norm_sampler.y0 = norm_sampler.y0[:,0,0]
            norm_sampler.ystd = norm_sampler.ystd[:,0,0] #ystd to (C,) such that it can divide #(Ns, Np, C)
        else:
            raise NotImplementedError(f'norm of {self.norm} is not yet implemented for sampled simulations')
        sys_data_sim = norm_sampler.inverse_transform(System_data(u=np.array(U),y=np.array(Y),x=np.array(X) if save_state else None,normed=True,cheat_n=k0))
        sys_data_sim.h, sys_data_sim.w = sys_data.h, sys_data.w
        return sys_data_sim
    def init_state(self, sys_data):
        '''Initialize self.state from the first na/nb history samples and return
        (sampled prediction for step k0, k0), where k0 = max(na, nb).

        Falls back to the parent implementation when sys_data carries no
        `h`/`w` pixel-coordinate arrays.
        '''
        #sys_data is already normed
        if not hasattr(sys_data,'h'):
            return super(SS_encoder_shotgun_MLP, self).init_state(sys_data)
        # temporarily place the image in the y slot so the history windows are
        # built over the frames, then restore the layout
        sys_data.x, sys_data.y = sys_data.y, sys_data.x #switch image to be y
        uhist, yhist = sys_data[:self.k0].to_hist_future_data(na=self.na,nb=self.nb,nf=0)[:2]
        sys_data.x, sys_data.y = sys_data.y, sys_data.x #switch image to be x
        uhist = torch.tensor(uhist, dtype=torch.float32)
        yhist = torch.tensor(yhist, dtype=torch.float32)
        h,w = torch.as_tensor(sys_data.h[self.k0]), torch.as_tensor(sys_data.w[self.k0]) #needs dtype?
        with torch.no_grad():
            self.state = self.encoder(uhist, yhist)
            # per the author's notes:
            # h = (Np)
            # w = (Np)
            # state = (1, nx)
            # sampler(self, x, h, w) goes to (Nb, Nsamp, C)
            y_predict = self.hn.sampler(x=self.state, h=h[None], w=w[None])[0].numpy() #output: (Nb, Nsamp, C) -> (Nsamp, C)
        return y_predict, max(self.na,self.nb)
def step(self,action, h=None, w=None):
if h is None:
return super(SS_encoder_shotgun_MLP, self).step(action)
action = torch.tensor(action,dtype=torch.float32)[None] #(1,...)
h,w = torch.as_tensor(h), torch.as_tensor(w) #needs dtype?
with torch.no_grad():
self.state = self.fn(self.state,action)
y_predict = self.hn.sampler(x=self.state, h=h[None], w=w[None])[0].numpy() #output: (Nb, Nsamp, C) -> (Nsamp, C)
return y_predict
def sys_data_sampler(self, sys_data, Ndots_per_image):
u, images = sys_data.u, sys_data.y
#images has shape (Ns, C, H, W) for (Ns, H, W)
if len(images.shape)==4:
Ns, C, W, H = images.shape
elif len(images.shape)==3:
Ns, W, H = images.shape
C = None
else:
assert False, 'check images.shape'
sampleselector | |
4590, 4591, 4597, 4596)
# Auto-generated mesh connectivity: each call registers one 8-node brick
# element as createElement(element_id, n1, ..., n8) -- an element id followed
# by eight node ids.
# NOTE(review): the node ordering (first four ids vs. last four ids) is
# presumed to follow the solver's hexahedron convention -- confirm against
# the createElement API documentation before editing.
model.createElement(2800, 4831, 4832, 4838, 4837, 4591, 4592, 4598, 4597)
model.createElement(2801, 4832, 4833, 4839, 4838, 4592, 4593, 4599, 4598)
model.createElement(2802, 4833, 4834, 4840, 4839, 4593, 4594, 4600, 4599)
model.createElement(2803, 4834, 2949, 2950, 4840, 4594, 2909, 2910, 4600)
model.createElement(2804, 2291, 4835, 4841, 2290, 2251, 4595, 4601, 2250)
model.createElement(2805, 4835, 4836, 4842, 4841, 4595, 4596, 4602, 4601)
model.createElement(2806, 4836, 4837, 4843, 4842, 4596, 4597, 4603, 4602)
model.createElement(2807, 4837, 4838, 4844, 4843, 4597, 4598, 4604, 4603)
model.createElement(2808, 4838, 4839, 4845, 4844, 4598, 4599, 4605, 4604)
model.createElement(2809, 4839, 4840, 4846, 4845, 4599, 4600, 4606, 4605)
model.createElement(2810, 4840, 2950, 2951, 4846, 4600, 2910, 2911, 4606)
model.createElement(2811, 2290, 4841, 4847, 2289, 2250, 4601, 4607, 2249)
model.createElement(2812, 4841, 4842, 4848, 4847, 4601, 4602, 4608, 4607)
model.createElement(2813, 4842, 4843, 4849, 4848, 4602, 4603, 4609, 4608)
model.createElement(2814, 4843, 4844, 4850, 4849, 4603, 4604, 4610, 4609)
model.createElement(2815, 4844, 4845, 4851, 4850, 4604, 4605, 4611, 4610)
model.createElement(2816, 4845, 4846, 4852, 4851, 4605, 4606, 4612, 4611)
model.createElement(2817, 4846, 2951, 2952, 4852, 4606, 2911, 2912, 4612)
model.createElement(2818, 2289, 4847, 4853, 2288, 2249, 4607, 4613, 2248)
model.createElement(2819, 4847, 4848, 4854, 4853, 4607, 4608, 4614, 4613)
model.createElement(2820, 4848, 4849, 4855, 4854, 4608, 4609, 4615, 4614)
model.createElement(2821, 4849, 4850, 4856, 4855, 4609, 4610, 4616, 4615)
model.createElement(2822, 4850, 4851, 4857, 4856, 4610, 4611, 4617, 4616)
model.createElement(2823, 4851, 4852, 4858, 4857, 4611, 4612, 4618, 4617)
model.createElement(2824, 4852, 2952, 2953, 4858, 4612, 2912, 2913, 4618)
model.createElement(2825, 2288, 4853, 4859, 2287, 2248, 4613, 4619, 2247)
model.createElement(2826, 4853, 4854, 4860, 4859, 4613, 4614, 4620, 4619)
model.createElement(2827, 4854, 4855, 4861, 4860, 4614, 4615, 4621, 4620)
model.createElement(2828, 4855, 4856, 4862, 4861, 4615, 4616, 4622, 4621)
model.createElement(2829, 4856, 4857, 4863, 4862, 4616, 4617, 4623, 4622)
model.createElement(2830, 4857, 4858, 4864, 4863, 4617, 4618, 4624, 4623)
model.createElement(2831, 4858, 2953, 2954, 4864, 4618, 2913, 2914, 4624)
model.createElement(2832, 2287, 4859, 4865, 2286, 2247, 4619, 4625, 2246)
model.createElement(2833, 4859, 4860, 4866, 4865, 4619, 4620, 4626, 4625)
model.createElement(2834, 4860, 4861, 4867, 4866, 4620, 4621, 4627, 4626)
model.createElement(2835, 4861, 4862, 4868, 4867, 4621, 4622, 4628, 4627)
model.createElement(2836, 4862, 4863, 4869, 4868, 4622, 4623, 4629, 4628)
model.createElement(2837, 4863, 4864, 4870, 4869, 4623, 4624, 4630, 4629)
model.createElement(2838, 4864, 2954, 2955, 4870, 4624, 2914, 2915, 4630)
model.createElement(2839, 2286, 4865, 4871, 2285, 2246, 4625, 4631, 2245)
model.createElement(2840, 4865, 4866, 4872, 4871, 4625, 4626, 4632, 4631)
model.createElement(2841, 4866, 4867, 4873, 4872, 4626, 4627, 4633, 4632)
model.createElement(2842, 4867, 4868, 4874, 4873, 4627, 4628, 4634, 4633)
model.createElement(2843, 4868, 4869, 4875, 4874, 4628, 4629, 4635, 4634)
model.createElement(2844, 4869, 4870, 4876, 4875, 4629, 4630, 4636, 4635)
model.createElement(2845, 4870, 2955, 2956, 4876, 4630, 2915, 2916, 4636)
model.createElement(2846, 2285, 4871, 4877, 2284, 2245, 4631, 4637, 2244)
model.createElement(2847, 4871, 4872, 4878, 4877, 4631, 4632, 4638, 4637)
model.createElement(2848, 4872, 4873, 4879, 4878, 4632, 4633, 4639, 4638)
model.createElement(2849, 4873, 4874, 4880, 4879, 4633, 4634, 4640, 4639)
model.createElement(2850, 4874, 4875, 4881, 4880, 4634, 4635, 4641, 4640)
model.createElement(2851, 4875, 4876, 4882, 4881, 4635, 4636, 4642, 4641)
model.createElement(2852, 4876, 2956, 2957, 4882, 4636, 2916, 2917, 4642)
model.createElement(2853, 2284, 4877, 4883, 2283, 2244, 4637, 4643, 2243)
model.createElement(2854, 4877, 4878, 4884, 4883, 4637, 4638, 4644, 4643)
model.createElement(2855, 4878, 4879, 4885, 4884, 4638, 4639, 4645, 4644)
model.createElement(2856, 4879, 4880, 4886, 4885, 4639, 4640, 4646, 4645)
model.createElement(2857, 4880, 4881, 4887, 4886, 4640, 4641, 4647, 4646)
model.createElement(2858, 4881, 4882, 4888, 4887, 4641, 4642, 4648, 4647)
model.createElement(2859, 4882, 2957, 2958, 4888, 4642, 2917, 2918, 4648)
model.createElement(2860, 2283, 4883, 4889, 2282, 2243, 4643, 4649, 2242)
model.createElement(2861, 4883, 4884, 4890, 4889, 4643, 4644, 4650, 4649)
model.createElement(2862, 4884, 4885, 4891, 4890, 4644, 4645, 4651, 4650)
model.createElement(2863, 4885, 4886, 4892, 4891, 4645, 4646, 4652, 4651)
model.createElement(2864, 4886, 4887, 4893, 4892, 4646, 4647, 4653, 4652)
model.createElement(2865, 4887, 4888, 4894, 4893, 4647, 4648, 4654, 4653)
model.createElement(2866, 4888, 2958, 2959, 4894, 4648, 2918, 2919, 4654)
model.createElement(2867, 2282, 4889, 4895, 2281, 2242, 4649, 4655, 2241)
model.createElement(2868, 4889, 4890, 4896, 4895, 4649, 4650, 4656, 4655)
model.createElement(2869, 4890, 4891, 4897, 4896, 4650, 4651, 4657, 4656)
model.createElement(2870, 4891, 4892, 4898, 4897, 4651, 4652, 4658, 4657)
model.createElement(2871, 4892, 4893, 4899, 4898, 4652, 4653, 4659, 4658)
model.createElement(2872, 4893, 4894, 4900, 4899, 4653, 4654, 4660, 4659)
model.createElement(2873, 4894, 2959, 2960, 4900, 4654, 2919, 2920, 4660)
model.createElement(2874, 2281, 4895, 4901, 2280, 2241, 4655, 4661, 2240)
model.createElement(2875, 4895, 4896, 4902, 4901, 4655, 4656, 4662, 4661)
model.createElement(2876, 4896, 4897, 4903, 4902, 4656, 4657, 4663, 4662)
model.createElement(2877, 4897, 4898, 4904, 4903, 4657, 4658, 4664, 4663)
model.createElement(2878, 4898, 4899, 4905, 4904, 4658, 4659, 4665, 4664)
model.createElement(2879, 4899, 4900, 4906, 4905, 4659, 4660, 4666, 4665)
model.createElement(2880, 4900, 2960, 2961, 4906, 4660, 2920, 2921, 4666)
model.createElement(2881, 2280, 4901, 4907, 2279, 2240, 4661, 4667, 2239)
model.createElement(2882, 4901, 4902, 4908, 4907, 4661, 4662, 4668, 4667)
model.createElement(2883, 4902, 4903, 4909, 4908, 4662, 4663, 4669, 4668)
model.createElement(2884, 4903, 4904, 4910, 4909, 4663, 4664, 4670, 4669)
model.createElement(2885, 4904, 4905, 4911, 4910, 4664, 4665, 4671, 4670)
model.createElement(2886, 4905, 4906, 4912, 4911, 4665, 4666, 4672, 4671)
model.createElement(2887, 4906, 2961, 2962, 4912, 4666, 2921, 2922, 4672)
model.createElement(2888, 2279, 4907, 4913, 2278, 2239, 4667, 4673, 2238)
model.createElement(2889, 4907, 4908, 4914, 4913, 4667, 4668, 4674, 4673)
model.createElement(2890, 4908, 4909, 4915, 4914, 4668, 4669, 4675, 4674)
model.createElement(2891, 4909, 4910, 4916, 4915, 4669, 4670, 4676, 4675)
model.createElement(2892, 4910, 4911, 4917, 4916, 4670, 4671, 4677, 4676)
model.createElement(2893, 4911, 4912, 4918, 4917, 4671, 4672, 4678, 4677)
model.createElement(2894, 4912, 2962, 2963, 4918, 4672, 2922, 2923, 4678)
model.createElement(2895, 2278, 4913, 4919, 2277, 2238, 4673, 4679, 2237)
model.createElement(2896, 4913, 4914, 4920, 4919, 4673, 4674, 4680, 4679)
model.createElement(2897, 4914, 4915, 4921, 4920, 4674, 4675, 4681, 4680)
model.createElement(2898, 4915, 4916, 4922, 4921, 4675, 4676, 4682, 4681)
model.createElement(2899, 4916, 4917, 4923, 4922, 4676, 4677, 4683, 4682)
model.createElement(2900, 4917, 4918, 4924, 4923, 4677, 4678, 4684, 4683)
model.createElement(2901, 4918, 2963, 2964, 4924, 4678, 2923, 2924, 4684)
model.createElement(2902, 2277, 4919, 4925, 2276, 2237, 4679, 4685, 2236)
model.createElement(2903, 4919, 4920, 4926, 4925, 4679, 4680, 4686, 4685)
model.createElement(2904, 4920, 4921, 4927, 4926, 4680, 4681, 4687, 4686)
model.createElement(2905, 4921, 4922, 4928, 4927, 4681, 4682, 4688, 4687)
model.createElement(2906, 4922, 4923, 4929, 4928, 4682, 4683, 4689, 4688)
model.createElement(2907, 4923, 4924, 4930, 4929, 4683, 4684, 4690, 4689)
model.createElement(2908, 4924, 2964, 2965, 4930, 4684, 2924, 2925, 4690)
model.createElement(2909, 2276, 4925, 4931, 2275, 2236, 4685, 4691, 2235)
model.createElement(2910, 4925, 4926, 4932, 4931, 4685, 4686, 4692, 4691)
model.createElement(2911, 4926, 4927, 4933, 4932, 4686, 4687, 4693, 4692)
model.createElement(2912, 4927, 4928, 4934, 4933, 4687, 4688, 4694, 4693)
model.createElement(2913, 4928, 4929, 4935, 4934, 4688, 4689, 4695, 4694)
model.createElement(2914, 4929, 4930, 4936, 4935, 4689, 4690, 4696, 4695)
model.createElement(2915, 4930, 2965, 2966, 4936, 4690, 2925, 2926, 4696)
model.createElement(2916, 2275, 4931, 4937, 2274, 2235, 4691, 4697, 2234)
model.createElement(2917, 4931, 4932, 4938, 4937, 4691, 4692, 4698, 4697)
model.createElement(2918, 4932, 4933, 4939, 4938, 4692, 4693, 4699, 4698)
model.createElement(2919, 4933, 4934, 4940, 4939, 4693, 4694, 4700, 4699)
model.createElement(2920, 4934, 4935, 4941, 4940, 4694, 4695, 4701, 4700)
model.createElement(2921, 4935, 4936, 4942, 4941, 4695, 4696, 4702, 4701)
model.createElement(2922, 4936, 2966, 2967, 4942, 4696, 2926, 2927, 4702)
model.createElement(2923, 2274, 4937, 4943, 2273, 2234, 4697, 4703, 2233)
model.createElement(2924, 4937, 4938, 4944, 4943, 4697, 4698, 4704, 4703)
model.createElement(2925, 4938, 4939, 4945, 4944, 4698, 4699, 4705, 4704)
model.createElement(2926, 4939, 4940, 4946, 4945, 4699, 4700, 4706, 4705)
model.createElement(2927, 4940, 4941, 4947, 4946, 4700, 4701, 4707, 4706)
model.createElement(2928, 4941, 4942, 4948, 4947, 4701, 4702, 4708, 4707)
model.createElement(2929, 4942, 2967, 2968, 4948, 4702, 2927, 2928, 4708)
model.createElement(2930, 2273, 4943, 4949, 2272, 2233, 4703, 4709, 2232)
model.createElement(2931, 4943, 4944, 4950, 4949, 4703, 4704, 4710, 4709)
model.createElement(2932, 4944, 4945, 4951, 4950, 4704, 4705, 4711, 4710)
model.createElement(2933, 4945, 4946, 4952, 4951, 4705, 4706, 4712, 4711)
model.createElement(2934, 4946, 4947, 4953, 4952, 4706, 4707, 4713, 4712)
model.createElement(2935, 4947, 4948, 4954, 4953, 4707, 4708, 4714, 4713)
model.createElement(2936, 4948, 2968, 2969, 4954, 4708, 2928, 2929, 4714)
model.createElement(2937, 2272, 4949, 4955, 2271, 2232, 4709, 4715, 2231)
model.createElement(2938, 4949, 4950, 4956, 4955, 4709, 4710, 4716, 4715)
model.createElement(2939, 4950, 4951, 4957, 4956, 4710, 4711, 4717, 4716)
model.createElement(2940, 4951, 4952, 4958, 4957, 4711, 4712, 4718, 4717)
model.createElement(2941, 4952, 4953, 4959, 4958, 4712, 4713, 4719, 4718)
model.createElement(2942, 4953, 4954, 4960, 4959, 4713, 4714, 4720, 4719)
model.createElement(2943, 4954, 2969, 2970, 4960, 4714, 2929, 2930, 4720)
model.createElement(2944, 2271, 4955, 4961, 2270, 2231, 4715, 4721, 2230)
model.createElement(2945, 4955, 4956, 4962, 4961, 4715, 4716, 4722, 4721)
model.createElement(2946, 4956, 4957, 4963, 4962, 4716, 4717, 4723, 4722)
model.createElement(2947, 4957, 4958, 4964, 4963, 4717, 4718, 4724, 4723)
model.createElement(2948, 4958, 4959, 4965, 4964, 4718, 4719, 4725, 4724)
model.createElement(2949, 4959, 4960, 4966, 4965, 4719, 4720, 4726, 4725)
model.createElement(2950, 4960, 2970, 2971, 4966, 4720, 2930, 2931, 4726)
model.createElement(2951, 2270, 4961, 4967, 2269, 2230, 4721, 4727, 2229)
model.createElement(2952, 4961, 4962, 4968, 4967, 4721, 4722, 4728, 4727)
model.createElement(2953, 4962, 4963, 4969, 4968, 4722, 4723, 4729, 4728)
model.createElement(2954, 4963, 4964, 4970, 4969, 4723, 4724, 4730, 4729)
model.createElement(2955, 4964, 4965, 4971, 4970, 4724, 4725, 4731, 4730)
model.createElement(2956, 4965, 4966, 4972, 4971, 4725, 4726, 4732, 4731)
model.createElement(2957, 4966, 2971, 2972, 4972, 4726, 2931, 2932, 4732)
model.createElement(2958, 2269, 4967, 4973, 2268, 2229, 4727, 4733, 2228)
model.createElement(2959, 4967, 4968, 4974, 4973, | |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocess images and bounding boxes for detection.
We perform two sets of operations in preprocessing stage:
(a) operations that are applied to both training and testing data,
(b) operations that are applied only to training data for the purpose of
data augmentation.
A preprocessing function receives a set of inputs,
e.g. an image and bounding boxes,
performs an operation on them, and returns them.
Some examples are: randomly cropping the image, randomly mirroring the image,
randomly changing the brightness, contrast, hue and
randomly jittering the bounding boxes.
The image is a rank 4 tensor: [1, height, width, channels] with
dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where
in each row there is a box with [ymin xmin ymax xmax].
Boxes are in normalized coordinates meaning
their coordinate values range in [0, 1]
Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing
functions receive a rank 3 tensor for processing the image. Thus, inside the
preprocess function we squeeze the image to become a rank 3 tensor and then
we pass it to the functions. At the end of the preprocess we expand the image
back to rank 4.
"""
import tensorflow as tf
import numpy as np
from official.vision.detection.utils.object_detection import box_list
def _flip_boxes_left_right(boxes):
  """Left-right flip the boxes.

  Args:
    boxes: rank 2 float32 tensor containing bounding boxes -> [N, 4]. Boxes
      are in normalized form, each row [ymin, xmin, ymax, xmax] with
      coordinates in [0, 1].

  Returns:
    rank 2 float32 tensor [N, 4] with the x-extent mirrored about x = 0.5.
  """
  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
  # Mirroring swaps the roles of the x bounds: new_xmin = 1 - xmax and
  # new_xmax = 1 - xmin, so the result is still a valid [min, max] pair.
  return tf.concat([ymin, 1.0 - xmax, ymax, 1.0 - xmin], 1)
def _flip_masks_left_right(masks):
"""Left-right flip masks.
Args:
masks: rank 3 float32 tensor with shape [num_instances, height, width]
representing instance masks.
Returns:
flipped masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
return masks[:, :, ::-1]
def keypoint_flip_horizontal(keypoints,
                             flip_point,
                             flip_permutation,
                             scope=None):
  """Flips the keypoints horizontally around the flip_point.

  The x coordinate of every keypoint is reflected about flip_point, and the
  keypoints are reordered according to flip_permutation (useful for keypoints
  that are not reflection invariant, e.g. swapping left/right eyes).

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    flip_point: (float) scalar tensor, the x coordinate to reflect around.
    flip_permutation: rank 1 int32 tensor mapping original keypoint indices
      to flipped keypoint indices.
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  with tf.name_scope(scope or 'FlipHorizontal'):
    # Put the keypoint axis first so gather() permutes keypoints, not
    # instances.
    by_keypoint = tf.transpose(a=keypoints, perm=[1, 0, 2])
    permuted = tf.gather(by_keypoint, flip_permutation)
    v, u = tf.split(value=permuted, num_or_size_splits=2, axis=2)
    # Reflect the x coordinate about flip_point: x -> 2 * flip_point - x.
    mirrored = tf.concat([v, flip_point * 2.0 - u], 2)
    return tf.transpose(a=mirrored, perm=[1, 0, 2])
def keypoint_change_coordinate_frame(keypoints, window, scope=None):
  """Changes coordinate frame of the keypoints to be relative to window's frame.

  Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint
  coordinates from keypoints of shape [num_instances, num_keypoints, 2]
  to be relative to this window.

  An example use case is data augmentation: where we are given groundtruth
  keypoints and would like to randomly crop the image to some window. In this
  case we need to change the coordinate frame of each groundtruth keypoint to
  be relative to this new window.

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
      window we should change the coordinate frame to.
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  if not scope:
    scope = 'ChangeCoordinateFrame'
  with tf.name_scope(scope):
    win_height = window[2] - window[0]
    win_width = window[3] - window[1]
    # Bug fix: this module imports only `box_list`, so the previous call to
    # `box_list_ops.scale(...)` raised NameError at runtime.  The equivalent
    # computation is done inline: shift the origin to the window's top-left
    # corner, then scale y by 1/height and x by 1/width (broadcast over the
    # trailing [y, x] axis).
    new_keypoints = (keypoints - [window[0], window[1]]) * tf.stack(
        [1.0 / win_height, 1.0 / win_width])
  return new_keypoints
def keypoint_prune_outside_window(keypoints, window, scope=None):
  """Prunes keypoints that fall outside a given window.

  Keypoints outside the window are replaced with nan (compare with
  clip_to_window, which clips them to the window edges instead).

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
      window outside of which keypoints are pruned.
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  if not scope:
    scope = 'PruneOutsideWindow'
  with tf.name_scope(scope):
    y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
    y_min, x_min, y_max, x_max = tf.unstack(window)
    # A keypoint survives only if both coordinates are inside the window.
    inside_y = tf.logical_and(y >= y_min, y <= y_max)
    inside_x = tf.logical_and(x >= x_min, x <= x_max)
    inside = tf.logical_and(inside_y, inside_x)
    pruned_y = tf.where(inside, y, np.nan * tf.ones_like(y))
    pruned_x = tf.where(inside, x, np.nan * tf.ones_like(x))
    return tf.concat([pruned_y, pruned_x], 2)
def random_horizontal_flip(image,
                           boxes=None,
                           masks=None,
                           keypoints=None,
                           keypoint_flip_permutation=None,
                           seed=None):
  """Randomly flips the image and detections horizontally.

  The probability of flipping the image is 50%; a single random draw is
  shared by the image, boxes, masks and keypoints so they stay mutually
  consistent.

  Args:
    image: rank 3 float32 tensor with shape [height, width, channels].
    boxes: (optional) rank 2 float32 tensor with shape [N, 4] containing the
      bounding boxes in normalized [ymin, xmin, ymax, xmax] form.
    masks: (optional) rank 3 float32 tensor with shape [num_instances, height,
      width] containing instance masks, same height/width as `image`.
    keypoints: (optional) rank 3 float32 tensor with shape [num_instances,
      num_keypoints, 2] in y-x normalized coordinates.
    keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint
      flip permutation.
    seed: random seed.

  Returns:
    A tuple starting with the (possibly flipped) image, followed by the
    flipped boxes, masks and keypoints for each of those inputs that was
    not None.

  Raises:
    ValueError: if keypoints are provided but keypoint_flip_permutation is not.
  """
  if keypoints is not None and keypoint_flip_permutation is None:
    raise ValueError(
        'keypoints are provided but keypoints_flip_permutation is not provided')
  with tf.name_scope('RandomHorizontalFlip'):
    # One coin flip for all tensors.
    do_flip = tf.greater(tf.random.uniform([], seed=seed), 0.5)

    def _maybe(value, flip_fn):
      # Apply flip_fn only when the coin flip said so.
      return tf.cond(pred=do_flip,
                     true_fn=lambda: flip_fn(value),
                     false_fn=lambda: value)

    outputs = [_maybe(image, tf.image.flip_left_right)]
    if boxes is not None:
      outputs.append(_maybe(boxes, _flip_boxes_left_right))
    if masks is not None:
      outputs.append(_maybe(masks, _flip_masks_left_right))
    if keypoints is not None and keypoint_flip_permutation is not None:
      perm = keypoint_flip_permutation
      outputs.append(
          _maybe(keypoints, lambda k: keypoint_flip_horizontal(k, 0.5, perm)))
    return tuple(outputs)
def _compute_new_static_size(image, min_dimension, max_dimension):
"""Compute new static shape for resize_to_range method."""
image_shape = image.get_shape().as_list()
orig_height = image_shape[0]
orig_width = image_shape[1]
num_channels = image_shape[2]
orig_min_dim = min(orig_height, orig_width)
# Calculates the larger of the possible sizes
large_scale_factor = min_dimension / float(orig_min_dim)
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = int(round(orig_height * large_scale_factor))
large_width = int(round(orig_width * large_scale_factor))
large_size = [large_height, large_width]
| |
prototype(IWbemClassObject_GetMethodQualifierSet_Idx,
'GetMethodQualifierSet',
paramflags)
_GetMethodQualifierSet.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetMethodQualifierSet(self.this,
method
)
try:
return_obj = IWbemQualifierSet(return_obj)
except WindowsError:
return_obj = None
return return_obj
    def GetMethodOrigin(self, method_name):
        """Invoke IWbemClassObject::GetMethodOrigin through the raw vtable.

        Args:
            method_name: name of the method to look up (passed as LPCWSTR).

        Returns:
            The name of the class that declares the method, converted from
            the out BSTR by winapi.convert_bstr_to_str.

        Error handling: errcheck is winapi.RAISE_NON_ZERO_ERR, so any
        non-zero HRESULT raises instead of returning.
        """
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       wintypes.LPCWSTR,
                                       ctypes.POINTER(BSTR))
        paramflags = ((_In_, 'wszMethodName'),
                      (_Out_, 'pstrClassName'),
                      )
        _GetMethodOrigin = prototype(IWbemClassObject_GetMethodOrigin_Idx,
                                     'GetMethodOrigin',
                                     paramflags)
        _GetMethodOrigin.errcheck = winapi.RAISE_NON_ZERO_ERR
        return_obj = _GetMethodOrigin(self.this,
                                      method_name)
        return_obj = winapi.convert_bstr_to_str(return_obj)
        return return_obj
# Vtable slot indices for IEnumWbemClassObject methods; each is passed as the
# first argument to the WINFUNCTYPE prototype constructed in the matching
# wrapper method below.
IEnumWbemClassObject_Reset_Idx = 3
IEnumWbemClassObject_Next_Idx = 4
IEnumWbemClassObject_NextAsync_Idx = 5
IEnumWbemClassObject_Clone_Idx = 6
IEnumWbemClassObject_Skip_Idx = 7
class IEnumWbemClassObject(com.IUnknown):
    """ctypes wrapper for the WMI IEnumWbemClassObject COM interface.

    Each wrapper method builds a WINFUNCTYPE prototype for the matching
    vtable slot index and calls through self.this (the raw interface
    pointer provided by com.IUnknown).  errcheck is always
    winapi.RAISE_NON_ZERO_ERR, so any non-zero HRESULT raises.
    """

    def Reset(self):
        """IEnumWbemClassObject::Reset -- restart the enumeration."""
        prototype = ctypes.WINFUNCTYPE(HRESULT)
        paramflags = ()
        _Reset = prototype(IEnumWbemClassObject_Reset_Idx,
                           'Reset',
                           paramflags)
        _Reset.errcheck = winapi.RAISE_NON_ZERO_ERR
        _Reset(self.this)

    def Next(self, timeout):
        """IEnumWbemClassObject::Next -- fetch the next object.

        Args:
            timeout: lTimeout value passed straight through to WMI.

        Returns:
            The next item wrapped as IWbemClassObject, or None if wrapping
            the out pointer raises WindowsError.
        """
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.c_long,
                                       wintypes.ULONG,
                                       ctypes.POINTER(wintypes.LPVOID),
                                       ctypes.POINTER(wintypes.ULONG))
        paramflags = ((_In_, 'lTimeout'),
                      (_In_, 'uCount'),
                      (_Out_, 'apObjects'),
                      (_Out_, 'puReturned'),
                      )
        _Next = prototype(IEnumWbemClassObject_Next_Idx,
                          'Next',
                          paramflags)
        _Next.errcheck = winapi.RAISE_NON_ZERO_ERR
        # uCount is fixed at 1 and puReturned (return_obj2) is discarded.
        # NOTE(review): an exhausted enumerator presumably yields a non-zero
        # HRESULT (WBEM_S_FALSE), which RAISE_NON_ZERO_ERR would turn into an
        # exception rather than a None result -- confirm callers expect that.
        return_obj, return_obj2 = _Next(self.this,
                                        timeout,
                                        1)
        try:
            return_obj = IWbemClassObject(return_obj)
        except WindowsError:
            return_obj = None
        return return_obj

    def NextAsync(self, count, sink):
        """IEnumWbemClassObject::NextAsync -- deliver up to `count` objects
        asynchronously to `sink` (an IWbemObjectSink wrapper or None)."""
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       wintypes.ULONG,
                                       ctypes.POINTER(IWbemObjectSink))
        paramflags = ((_In_, 'uCount'),
                      (_In_, 'pSink'),
                      )
        _NextAsync = prototype(IEnumWbemClassObject_NextAsync_Idx,
                               'NextAsync',
                               paramflags)
        _NextAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
        _NextAsync(self.this,
                   count,
                   sink.this if sink else None)

    def Clone(self):
        """IEnumWbemClassObject::Clone -- return a copy of the enumerator,
        or None if wrapping the out pointer raises WindowsError."""
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.POINTER(wintypes.LPVOID))
        paramflags = ((_Out_, 'ppEnum'),
                      )
        _Clone = prototype(IEnumWbemClassObject_Clone_Idx,
                           'Clone',
                           paramflags)
        _Clone.errcheck = winapi.RAISE_NON_ZERO_ERR
        return_obj = _Clone(self.this)
        try:
            return_obj = IEnumWbemClassObject(return_obj)
        except WindowsError:
            return_obj = None
        return return_obj

    def Skip(self, timeout, count):
        """IEnumWbemClassObject::Skip -- skip `count` items within `timeout`."""
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.c_long,
                                       wintypes.ULONG)
        paramflags = ((_In_, 'lTimeout'),
                      (_In_, 'nCount'),
                      )
        _Skip = prototype(IEnumWbemClassObject_Skip_Idx,
                          'Skip',
                          paramflags)
        _Skip.errcheck = winapi.RAISE_NON_ZERO_ERR
        _Skip(self.this,
              timeout,
              count)
# Vtable slot indices for IWbemCallResult methods; used as the first argument
# to the WINFUNCTYPE prototypes in the wrapper class below.
IWbemCallResult_GetResultObject_Idx = 3
IWbemCallResult_GetResultString_Idx = 4
IWbemCallResult_GetResultServices_Idx = 5
IWbemCallResult_GetCallStatus_Idx = 6
class IWbemCallResult(com.IUnknown):
    """ctypes wrapper for the WMI IWbemCallResult COM interface.

    Returned by semisynchronous WMI calls; each getter waits up to the given
    timeout for the result.  errcheck is winapi.RAISE_NON_ZERO_ERR, so any
    non-zero HRESULT raises.
    """

    def GetResultObject(self, timeout):
        """IWbemCallResult::GetResultObject -- the resulting object as an
        IWbemClassObject, or None if wrapping raises WindowsError."""
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.c_long,
                                       ctypes.POINTER(wintypes.LPVOID))
        paramflags = ((_In_, 'lTimeout'),
                      (_Out_, 'ppResultObject'),
                      )
        _GetResultObject = prototype(IWbemCallResult_GetResultObject_Idx,
                                     'GetResultObject',
                                     paramflags)
        _GetResultObject.errcheck = winapi.RAISE_NON_ZERO_ERR
        return_obj = _GetResultObject(self.this,
                                      timeout)
        try:
            return_obj = IWbemClassObject(return_obj)
        except WindowsError:
            return_obj = None
        return return_obj

    def GetResultString(self, timeout):
        """IWbemCallResult::GetResultString -- the result as a Python string
        (out BSTR converted by winapi.convert_bstr_to_str)."""
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.c_long,
                                       ctypes.POINTER(BSTR))
        paramflags = ((_In_, 'lTimeout'),
                      (_Out_, 'pstrResultString'),
                      )
        _GetResultString = prototype(IWbemCallResult_GetResultString_Idx,
                                     'GetResultString',
                                     paramflags)
        _GetResultString.errcheck = winapi.RAISE_NON_ZERO_ERR
        return_obj = _GetResultString(self.this,
                                      timeout)
        return_obj = winapi.convert_bstr_to_str(return_obj)
        return return_obj

    def GetResultServices(self, timeout):
        """IWbemCallResult::GetResultServices -- the resulting services
        pointer as an IWbemServices, or None if wrapping raises."""
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.c_long,
                                       ctypes.POINTER(wintypes.LPVOID))
        paramflags = ((_In_, 'lTimeout'),
                      (_Out_, 'ppServices'),
                      )
        _GetResultServices = prototype(IWbemCallResult_GetResultServices_Idx,
                                       'GetResultServices',
                                       paramflags)
        _GetResultServices.errcheck = winapi.RAISE_NON_ZERO_ERR
        return_obj = _GetResultServices(self.this,
                                        timeout)
        try:
            return_obj = IWbemServices(return_obj)
        except WindowsError:
            return_obj = None
        return return_obj

    def GetCallStatus(self, timeout):
        """IWbemCallResult::GetCallStatus -- the call's final status as the
        raw out plStatus value (a c_long)."""
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.c_long,
                                       ctypes.POINTER(ctypes.c_long))
        paramflags = ((_In_, 'lTimeout'),
                      (_Out_, 'plStatus'),
                      )
        _GetCallStatus = prototype(IWbemCallResult_GetCallStatus_Idx,
                                   'GetCallStatus',
                                   paramflags)
        _GetCallStatus.errcheck = winapi.RAISE_NON_ZERO_ERR
        return_obj = _GetCallStatus(self.this,
                                    timeout)
        return return_obj
# Vtable slot indices for IWbemContext methods; used as the first argument
# to the WINFUNCTYPE prototypes in the wrapper class below.
IWbemContext_Clone_Idx = 3
IWbemContext_GetNames_Idx = 4
IWbemContext_BeginEnumeration_Idx = 5
IWbemContext_Next_Idx = 6
IWbemContext_EndEnumeration_Idx = 7
IWbemContext_SetValue_Idx = 8
IWbemContext_GetValue_Idx = 9
IWbemContext_DeleteValue_Idx = 10
IWbemContext_DeleteAll_Idx = 11
class IWbemContext(com.IUnknown):
    """ctypes wrapper for the WMI IWbemContext COM interface.

    An IWbemContext holds named VARIANT values that qualify other WMI calls.
    Each wrapper builds a WINFUNCTYPE prototype for the matching vtable slot
    and calls through self.this; errcheck is winapi.RAISE_NON_ZERO_ERR, so
    any non-zero HRESULT raises.
    """

    def Clone(self):
        """IWbemContext::Clone -- copy of this context as IWbemContext, or
        None if wrapping the out pointer raises WindowsError."""
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.POINTER(wintypes.LPVOID))
        paramflags = ((_Out_, 'ppNewCopy'),
                      )
        _Clone = prototype(IWbemContext_Clone_Idx,
                           'Clone',
                           paramflags)
        _Clone.errcheck = winapi.RAISE_NON_ZERO_ERR
        return_obj = _Clone(self.this)
        try:
            return_obj = IWbemContext(return_obj)
        except WindowsError:
            return_obj = None
        return return_obj

    def GetNames(self, flags):
        """IWbemContext::GetNames -- names of the stored values, cast to a
        POINTER(winapi.SAFEARRAY).

        NOTE(review): the SAFEARRAY is handed back unreleased -- confirm
        which side is expected to destroy it.
        """
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.c_long,
                                       ctypes.POINTER(wintypes.LPVOID))
        paramflags = ((_In_, 'lFlags'),
                      (_Out_, 'pNames'),
                      )
        _GetNames = prototype(IWbemContext_GetNames_Idx,
                              'GetNames',
                              paramflags)
        _GetNames.errcheck = winapi.RAISE_NON_ZERO_ERR
        return_obj = _GetNames(self.this,
                               flags)
        return_obj = ctypes.cast(wintypes.LPVOID(return_obj), ctypes.POINTER(winapi.SAFEARRAY))
        return return_obj

    def BeginEnumeration(self, flags):
        """IWbemContext::BeginEnumeration -- start enumerating values."""
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.c_long)
        paramflags = ((_In_, 'lFlags'),
                      )
        _BeginEnumeration = prototype(IWbemContext_BeginEnumeration_Idx,
                                      'BeginEnumeration',
                                      paramflags)
        _BeginEnumeration.errcheck = winapi.RAISE_NON_ZERO_ERR
        _BeginEnumeration(self.this,
                          flags)

    def Next(self, flags):
        """IWbemContext::Next -- next (name, value) pair in the enumeration.

        Returns:
            (name, value): name converted from BSTR to str; value is the raw
            out winapi.VARIANT, returned unconverted.
        """
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.c_long,
                                       ctypes.POINTER(BSTR),
                                       ctypes.POINTER(winapi.VARIANT))
        paramflags = ((_In_, 'lFlags'),
                      (_Out_, 'pstrName'),
                      (_Out_, 'pValue'),
                      )
        _Next = prototype(IWbemContext_Next_Idx,
                          'Next',
                          paramflags)
        _Next.errcheck = winapi.RAISE_NON_ZERO_ERR
        return_obj, return_obj2 = _Next(self.this,
                                        flags)
        return_obj = winapi.convert_bstr_to_str(return_obj)
        return return_obj, return_obj2

    def EndEnumeration(self):
        """IWbemContext::EndEnumeration -- finish the value enumeration."""
        prototype = ctypes.WINFUNCTYPE(HRESULT)
        paramflags = ()
        _EndEnumeration = prototype(IWbemContext_EndEnumeration_Idx,
                                    'EndEnumeration',
                                    paramflags)
        _EndEnumeration.errcheck = winapi.RAISE_NON_ZERO_ERR
        _EndEnumeration(self.this)

    def SetValue(self, name, flags, value):
        """IWbemContext::SetValue -- store `value` (a winapi.VARIANT, passed
        by reference; may be None) under `name`."""
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       wintypes.LPCWSTR,
                                       ctypes.c_long,
                                       ctypes.POINTER(winapi.VARIANT))
        paramflags = ((_In_, 'wszName'),
                      (_In_, 'lFlags'),
                      (_In_, 'pValue'),
                      )
        _SetValue = prototype(IWbemContext_SetValue_Idx,
                              'SetValue',
                              paramflags)
        _SetValue.errcheck = winapi.RAISE_NON_ZERO_ERR
        _SetValue(self.this,
                  name,
                  flags,
                  ctypes.byref(value) if value else None)

    def GetValue(self, name, flags):
        """IWbemContext::GetValue -- the value stored under `name` as the
        raw out winapi.VARIANT.

        NOTE(review): the VARIANT is returned without VariantClear --
        confirm the caller is expected to clear it.
        """
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       wintypes.LPCWSTR,
                                       ctypes.c_long,
                                       ctypes.POINTER(winapi.VARIANT))
        paramflags = ((_In_, 'wszName'),
                      (_In_, 'lFlags'),
                      (_Out_, 'pValue'),
                      )
        _GetValue = prototype(IWbemContext_GetValue_Idx,
                              'GetValue',
                              paramflags)
        _GetValue.errcheck = winapi.RAISE_NON_ZERO_ERR
        return_obj = _GetValue(self.this,
                               name,
                               flags)
        return return_obj

    def DeleteValue(self, name, flags):
        """IWbemContext::DeleteValue -- remove the value stored under `name`."""
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       wintypes.LPCWSTR,
                                       ctypes.c_long)
        paramflags = ((_In_, 'wszName'),
                      (_In_, 'lFlags'),
                      )
        _DeleteValue = prototype(IWbemContext_DeleteValue_Idx,
                                 'DeleteValue',
                                 paramflags)
        _DeleteValue.errcheck = winapi.RAISE_NON_ZERO_ERR
        _DeleteValue(self.this,
                     name,
                     flags)

    def DeleteAll(self):
        """IWbemContext::DeleteAll -- remove every stored value."""
        prototype = ctypes.WINFUNCTYPE(HRESULT)
        paramflags = ()
        _DeleteAll = prototype(IWbemContext_DeleteAll_Idx,
                               'DeleteAll',
                               paramflags)
        _DeleteAll.errcheck = winapi.RAISE_NON_ZERO_ERR
        _DeleteAll(self.this)
# Vtable slot indices for IWbemServices methods; used as the first argument
# to the WINFUNCTYPE prototypes in the wrapper methods below.
IWbemServices_OpenNamespace_Idx = 3
IWbemServices_CancelAsyncCall_Idx = 4
IWbemServices_QueryObjectSink_Idx = 5
IWbemServices_GetObject_Idx = 6
IWbemServices_GetObjectAsync_Idx = 7
IWbemServices_PutClass_Idx = 8
IWbemServices_PutClassAsync_Idx = 9
IWbemServices_DeleteClass_Idx = 10
IWbemServices_DeleteClassAsync_Idx = 11
IWbemServices_CreateClassEnum_Idx = 12
IWbemServices_CreateClassEnumAsync_Idx = 13
IWbemServices_PutInstance_Idx = 14
IWbemServices_PutInstanceAsync_Idx = 15
IWbemServices_DeleteInstance_Idx = 16
IWbemServices_DeleteInstanceAsync_Idx = 17
IWbemServices_CreateInstanceEnum_Idx = 18
IWbemServices_CreateInstanceEnumAsync_Idx = 19
IWbemServices_ExecQuery_Idx = 20
IWbemServices_ExecQueryAsync_Idx = 21
IWbemServices_ExecNotificationQuery_Idx = 22
IWbemServices_ExecNotificationQueryAsync_Idx = 23
IWbemServices_ExecMethod_Idx = 24
IWbemServices_ExecMethodAsync_Idx = 25
class IWbemServices(com.IUnknown):
    def OpenNamespaceWithResult(self, namespace, flags, ctx):
        """Invoke IWbemServices::OpenNamespace and return both out pointers.

        Args:
            namespace: child namespace path (str) or None; converted to a
                BSTR for the call and freed afterwards even if the call
                raises (try/finally).
            flags: lFlags bitmask.
            ctx: IWbemContext wrapper or None.

        Returns:
            (IWbemServices or None, IWbemCallResult or None): each out
            pointer that cannot be wrapped (WindowsError) becomes None.
        """
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       BSTR,
                                       ctypes.c_long,
                                       ctypes.POINTER(IWbemContext),
                                       ctypes.POINTER(wintypes.LPVOID),
                                       ctypes.POINTER(wintypes.LPVOID))
        paramflags = ((_In_, 'strNamespace'),
                      (_In_, 'lFlags'),
                      (_In_, 'pCtx'),
                      (_Out_, 'ppWorkingNamespace'),
                      (_Out_, 'ppResult'),
                      )
        _OpenNamespace = prototype(IWbemServices_OpenNamespace_Idx,
                                   'OpenNamespace',
                                   paramflags)
        _OpenNamespace.errcheck = winapi.RAISE_NON_ZERO_ERR
        # Allocate the BSTR ourselves so it can always be freed below.
        namespace_bstr = winapi.SysAllocString(namespace) if namespace is not None else None
        try:
            return_obj, return_obj2 = _OpenNamespace(self.this,
                                                     namespace_bstr,
                                                     flags,
                                                     ctx.this if ctx else None)
        finally:
            if namespace_bstr is not None:
                winapi.SysFreeString(namespace_bstr)
        try:
            return_obj = IWbemServices(return_obj)
        except WindowsError:
            return_obj = None
        try:
            return_obj2 = IWbemCallResult(return_obj2)
        except WindowsError:
            return_obj2 = None
        return return_obj, return_obj2
def OpenNamespace(self, namespace, flags, ctx):
return_obj, return_obj2 = self.OpenNamespaceWithResult(namespace, flags, ctx)
if return_obj2:
return_obj2.Release()
return return_obj
    def CancelAsyncCall(self, sink):
        """Call IWbemServices::CancelAsyncCall for the given object sink.

        Args:
            sink: IWbemObjectSink wrapper identifying the asynchronous call
                to cancel, or None.

        Raises whatever winapi.RAISE_NON_ZERO_ERR raises on a failing
        (non-zero) HRESULT.
        """
        # Prototype dispatching through vtable slot
        # IWbemServices_CancelAsyncCall_Idx on self.this.
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.POINTER(IWbemObjectSink))
        paramflags = ((_In_, 'pSink'),
                      )
        _CancelAsyncCall = prototype(IWbemServices_CancelAsyncCall_Idx,
                                     'CancelAsyncCall',
                                     paramflags)
        _CancelAsyncCall.errcheck = winapi.RAISE_NON_ZERO_ERR
        _CancelAsyncCall(self.this,
                         sink.this if sink else None
                         )
    def QueryObjectSink(self, flags):
        """Call IWbemServices::QueryObjectSink and wrap the returned sink.

        Args:
            flags: lFlags value passed straight through to the COM call.

        Returns:
            IWbemObjectSink wrapper, or None if wrapping the returned raw
            pointer raises WindowsError.
        """
        # Prototype dispatching through vtable slot
        # IWbemServices_QueryObjectSink_Idx on self.this.
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.c_long,
                                       ctypes.POINTER(wintypes.LPVOID))
        paramflags = ((_In_, 'lFlags'),
                      (_Out_, 'ppResponseHandler'),
                      )
        _QueryObjectSink = prototype(IWbemServices_QueryObjectSink_Idx,
                                     'QueryObjectSink',
                                     paramflags)
        # Turn a failing (non-zero) HRESULT into a raised exception.
        _QueryObjectSink.errcheck = winapi.RAISE_NON_ZERO_ERR
        return_obj = _QueryObjectSink(self.this,
                                      flags
                                      )
        # Wrap the raw interface pointer; a failed wrap yields None.
        try:
            return_obj = IWbemObjectSink(return_obj)
        except WindowsError:
            return_obj = None
        return return_obj
    def GetObjectWithResult(self, object_path, flags, ctx):
        """Call IWbemServices::GetObject and return both output objects.

        Args:
            object_path: WMI object path (str) or None; marshalled as a BSTR.
            flags: lFlags value passed straight through to the COM call.
            ctx: IWbemContext wrapper or None.

        Returns:
            (IWbemClassObject, IWbemCallResult) tuple. Either element is
            None when wrapping the returned raw pointer raises WindowsError.
        """
        # Prototype dispatching through vtable slot
        # IWbemServices_GetObject_Idx on self.this.
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       BSTR,
                                       ctypes.c_long,
                                       ctypes.POINTER(IWbemContext),
                                       ctypes.POINTER(wintypes.LPVOID),
                                       ctypes.POINTER(wintypes.LPVOID))
        paramflags = ((_In_, 'strObjectPath'),
                      (_In_, 'lFlags'),
                      (_In_, 'pCtx'),
                      (_Out_, 'ppObject'),
                      (_Out_, 'ppCallResult'),
                      )
        _GetObject = prototype(IWbemServices_GetObject_Idx,
                               'GetObject',
                               paramflags)
        # Turn a failing (non-zero) HRESULT into a raised exception.
        _GetObject.errcheck = winapi.RAISE_NON_ZERO_ERR
        # COM expects a BSTR; allocate one and guarantee it is freed even
        # if the call raises.
        object_path_bstr = winapi.SysAllocString(object_path) if object_path is not None else None
        try:
            return_obj, return_obj2 = _GetObject(self.this,
                                                 object_path_bstr,
                                                 flags,
                                                 ctx.this if ctx else None
                                                 )
        finally:
            if object_path_bstr is not None:
                winapi.SysFreeString(object_path_bstr)
        # Wrap the raw interface pointers; a failed wrap yields None.
        try:
            return_obj = IWbemClassObject(return_obj)
        except WindowsError:
            return_obj = None
        try:
            return_obj2 = IWbemCallResult(return_obj2)
        except WindowsError:
            return_obj2 = None
        return return_obj, return_obj2
def GetObject(self, object_path, flags, ctx):
return_obj, return_obj2 = self.GetObjectWithResult(object_path, flags, ctx)
if return_obj2:
return_obj2.Release()
return return_obj
    def GetObjectAsync(self, object_path, flags, ctx, response_handler):
        """Call IWbemServices::GetObjectAsync; results go to the sink.

        Args:
            object_path: WMI object path (str) or None; marshalled as a BSTR.
            flags: lFlags value passed straight through to the COM call.
            ctx: IWbemContext wrapper or None.
            response_handler: IWbemObjectSink wrapper that receives the
                asynchronous results, or None.

        Raises whatever winapi.RAISE_NON_ZERO_ERR raises on a failing
        (non-zero) HRESULT.
        """
        # Prototype dispatching through vtable slot
        # IWbemServices_GetObjectAsync_Idx on self.this.
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       BSTR,
                                       ctypes.c_long,
                                       ctypes.POINTER(IWbemContext),
                                       ctypes.POINTER(IWbemObjectSink))
        paramflags = ((_In_, 'strObjectPath'),
                      (_In_, 'lFlags'),
                      (_In_, 'pCtx'),
                      (_In_, 'pResponseHandler'),
                      )
        _GetObjectAsync = prototype(IWbemServices_GetObjectAsync_Idx,
                                    'GetObjectAsync',
                                    paramflags)
        _GetObjectAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
        # COM expects a BSTR; allocate one and guarantee it is freed even
        # if the call raises.
        object_path_bstr = winapi.SysAllocString(object_path) if object_path is not None else None
        try:
            _GetObjectAsync(self.this,
                            object_path_bstr,
                            flags,
                            ctx.this if ctx else None,
                            response_handler.this if response_handler else None
                            )
        finally:
            if object_path_bstr is not None:
                winapi.SysFreeString(object_path_bstr)
    def PutClassWithResult(self, object_param, flags, ctx):
        """Call IWbemServices::PutClass and return the call-result object.

        Args:
            object_param: IWbemClassObject wrapper to store, or None.
            flags: lFlags value passed straight through to the COM call.
            ctx: IWbemContext wrapper or None.

        Returns:
            IWbemCallResult wrapper, or None if wrapping the returned raw
            pointer raises WindowsError.
        """
        # Prototype dispatching through vtable slot
        # IWbemServices_PutClass_Idx on self.this.
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.POINTER(IWbemClassObject),
                                       ctypes.c_long,
                                       ctypes.POINTER(IWbemContext),
                                       ctypes.POINTER(wintypes.LPVOID))
        paramflags = ((_In_, 'pObject'),
                      (_In_, 'lFlags'),
                      (_In_, 'pCtx'),
                      (_Out_, 'ppCallResult'),
                      )
        _PutClass = prototype(IWbemServices_PutClass_Idx,
                              'PutClass',
                              paramflags)
        # Turn a failing (non-zero) HRESULT into a raised exception.
        _PutClass.errcheck = winapi.RAISE_NON_ZERO_ERR
        return_obj = _PutClass(self.this,
                               object_param.this if object_param else None,
                               flags,
                               ctx.this if ctx else None
                               )
        # Wrap the raw interface pointer; a failed wrap yields None.
        try:
            return_obj = IWbemCallResult(return_obj)
        except WindowsError:
            return_obj = None
        return return_obj
def PutClass(self, object_param, flags, ctx):
return_obj = self.PutClassWithResult(object_param, flags, ctx)
if return_obj:
return_obj.Release()
    def PutClassAsync(self, object_param, flags, ctx, response_handler):
        """Call IWbemServices::PutClassAsync; results go to the sink.

        Args:
            object_param: IWbemClassObject wrapper to store, or None.
            flags: lFlags value passed straight through to the COM call.
            ctx: IWbemContext wrapper or None.
            response_handler: IWbemObjectSink wrapper that receives the
                asynchronous results, or None.

        Raises whatever winapi.RAISE_NON_ZERO_ERR raises on a failing
        (non-zero) HRESULT.
        """
        # Prototype dispatching through vtable slot
        # IWbemServices_PutClassAsync_Idx on self.this.
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       ctypes.POINTER(IWbemClassObject),
                                       ctypes.c_long,
                                       ctypes.POINTER(IWbemContext),
                                       ctypes.POINTER(IWbemObjectSink))
        paramflags = ((_In_, 'pObject'),
                      (_In_, 'lFlags'),
                      (_In_, 'pCtx'),
                      (_In_, 'pResponseHandler'),
                      )
        _PutClassAsync = prototype(IWbemServices_PutClassAsync_Idx,
                                   'PutClassAsync',
                                   paramflags)
        _PutClassAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
        _PutClassAsync(self.this,
                       object_param.this if object_param else None,
                       flags,
                       ctx.this if ctx else None,
                       response_handler.this if response_handler else None
                       )
    def DeleteClassWithResult(self, class_param, flags, ctx):
        """Call IWbemServices::DeleteClass and return the call-result object.

        Args:
            class_param: class name (str) or None; marshalled as a BSTR.
            flags: lFlags value passed straight through to the COM call.
            ctx: IWbemContext wrapper or None.

        Returns:
            IWbemCallResult wrapper, or None if wrapping the returned raw
            pointer raises WindowsError.
        """
        # Prototype dispatching through vtable slot
        # IWbemServices_DeleteClass_Idx on self.this.
        prototype = ctypes.WINFUNCTYPE(HRESULT,
                                       BSTR,
                                       ctypes.c_long,
                                       ctypes.POINTER(IWbemContext),
                                       ctypes.POINTER(wintypes.LPVOID))
        paramflags = ((_In_, 'strClass'),
                      (_In_, 'lFlags'),
                      (_In_, 'pCtx'),
                      (_Out_, 'ppCallResult'),
                      )
        _DeleteClass = prototype(IWbemServices_DeleteClass_Idx,
                                 'DeleteClass',
                                 paramflags)
        # Turn a failing (non-zero) HRESULT into a raised exception.
        _DeleteClass.errcheck = winapi.RAISE_NON_ZERO_ERR
        # COM expects a BSTR; allocate one and guarantee it is freed even
        # if the call raises.
        class_param_bstr = winapi.SysAllocString(class_param) if class_param is not None else None
        try:
            return_obj = _DeleteClass(self.this,
                                      class_param_bstr,
                                      flags,
                                      ctx.this if ctx else None
                                      )
        finally:
            if class_param_bstr is not None:
                winapi.SysFreeString(class_param_bstr)
        # Wrap the raw interface pointer; a failed wrap yields None.
        try:
            return_obj = IWbemCallResult(return_obj)
        except WindowsError:
            return_obj = None
        return return_obj
def DeleteClass(self, class_param, flags, ctx):
return_obj = self.DeleteClassWithResult(class_param, flags, ctx)
if return_obj:
return_obj.Release()
def DeleteClassAsync(self, class_param, flags, ctx, response_handler):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(IWbemObjectSink))
paramflags = ((_In_, 'strClass'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_In_, 'pResponseHandler'),
)
_DeleteClassAsync = prototype(IWbemServices_DeleteClassAsync_Idx,
'DeleteClassAsync',
paramflags)
_DeleteClassAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
class_param_bstr = winapi.SysAllocString(class_param) if class_param is | |
option to Write product to L2 resolution has been DISABLE !!!")
LOGGER.info(
"**************************************************************************************************")
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
# Define the Output L2 product directory where it will be generated
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
if p_write_temporary_l2_product:
l_L2OutputDirectory = self._apphandler.get_directory_manager().get_temporary_directory("L2Products_",do_always_remove=False)
else:
l_L2OutputDirectory = self._apphandler.get_output_directory()
"""* Snowrate """
l_SnowRate = 0
"""* Validity of the current LTC contribution """
"""* Usefull when cloud rate > threshold and finalizing a backward mode(FA1407) """
l_IgnoreCurrentLTC = False
"""* STO list of dates """
"""* Indicate if default AOT is used """
l_IsDefaultAOT = False
"""* Indicate if the private part is copied to L2 output """
l_CopyPrivateFromL2InputToL2Output = False
"""* global parameters directionary """
l_global_params_dict = {}
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# In NOMINAL and BACKWARD only
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
if p_initmode == False and p_InputL2ImageFileReader is None:
raise MajaException("ScientificSingleProductProcessing: Internal error. For Nominal or Backward mode, the "
"L2 INPUT image file reader must be initialized !")
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# Get a pointer of the current plugin activated
l_CurrentPluginBase = p_InputL1ImageFileReader.get_plugin()
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# Get band definitions
l_BandDefinitons = l_CurrentPluginBase.BandsDefinitions
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# Getreference of the current config admin camera activated
l_CurrentConfigAdminCamera = l_CurrentPluginBase.ConfigAdminCamera
# Get the satellite of the current L1 product(date D)
l_SatelliteD = p_InputL1ImageInformationsProvider.Satellite
l_UniqueSatelliteD = l_CurrentPluginBase.UniqueSatellite
l_PluginName = l_CurrentPluginBase.PluginName
LOGGER.debug("SatelliteD from the L1 product = " + l_SatelliteD +
" (from p_InputL1ImageInformationsProvider.GetSatellite())")
LOGGER.debug("l_UniqueSatelliteD deduces from the L1 product = " + l_UniqueSatelliteD)
LOGGER.debug("Plugin Name = " + l_PluginName)
# Get the L2COMM
l_GIPPL2COMMHandler = p_InputL1ImageFileReader.get_l2comm()
""" ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
Register the GIPP file "GIP_L2SITE" */
** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *"""
l_GIP_L2SITE_Filename = gipp_utils.get_gipp_filename_with_mission(self._apphandler.get_input_directory(),
"GIP_L2SITE",
l_UniqueSatelliteD)
LOGGER.info("The GIP_L2SITE file detected for the satellite '" +
l_UniqueSatelliteD + "' is <" + l_GIP_L2SITE_Filename + ">.")
l_GIPPL2SITEHandler = GippL2SITEEarthExplorerXMLFileHandler(l_GIP_L2SITE_Filename)
# ---------------------------------------------------------------------------------------------
l_AOTMethodString = l_GIPPL2COMMHandler.get_value("AOTMethod")
l_AOTMethod = AOTEstimation.get_aot_method(l_AOTMethodString)
LOGGER.info("AOT Estimation - l2 processor : " + l_AOTMethodString + " method")
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# START COMMON VARIABLES
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# Get the maximum cloud percentage for which a L2 product is generated
l_MaxCloudPercentage = float(l_GIPPL2COMMHandler.get_value("MaxCloudPercentage"))
l_MaxNoDataPercentage = float(l_GIPPL2COMMHandler.get_value("MaxNoDataPercentage"))
# Get the environment correction option
l_EnvCorOption = xml_tools.as_bool(l_GIPPL2COMMHandler.get_value("EnvCorrOption"))
# Get the cams data use option
l_UseCamsData = l_GIPPL2COMMHandler.get_value_b("UseCamsData") and self._CAMS_Files_HandlersMAP[l_SatelliteD].valid
# init cloud rate boolean(false if there are to many clouds)
p_checking_conditional_clouds[0] = True
# = == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == ==
# Get the date in the string YYYYMMDD
# = == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == == ==
# Get the reference date double
l_JDay = date_utils.get_julianday_as_double(p_InputL1ImageInformationsProvider.ProductDate)
LOGGER.debug("Current JDay : "+str(l_JDay))
LOGGER.debug("Current Product Date : " )
LOGGER.debug(p_InputL1ImageInformationsProvider.ProductDate)
l_JDayRef = date_utils.get_julianday_as_double(
date_utils.get_datetime_from_yyyymmdd(l_CurrentPluginBase.ReferenceDate))
LOGGER.debug("Current JDayRef : " + str(l_JDayRef))
l_JDay = int(math.floor(l_JDay - l_JDayRef))
l_global_params_dict["JDay"] = l_JDay
l_global_params_dict["JDayRef"] = l_JDayRef
# Get STO date list since V4 - 1, disable IsBackWard because STO variable is local(no m_STOListOfStringDates.
# So to read STO from the previsous product, read the STO header
l_STOListOfStringDates = []
if not p_initmode:
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** **
# Read the STO Headerfor the L2 Input image file
# ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** *
l_STOListOfStringDates = p_InputL2ImageFileReader.STOListOfStringDates
nbL2inSTODates = len(l_STOListOfStringDates)
maxSTODates = int(l_GIPPL2COMMHandler.get_value("NumberOfStackImages"))
nbSTODates = nbL2inSTODates
LOGGER.debug("Number of L2 in STO dates: " + str(nbL2inSTODates))
l_STOListOfJDates = []
# it is unlikely that the number of STO dates is greater than the maximum number of STO dates but...
if nbL2inSTODates > maxSTODates:
nbSTODates = maxSTODates
# LAIG - FA - MAC - 1180 - CNES: limit the number of STO dates to the maximum number set in the GIP L2COMM
for i in range(0, nbSTODates):
STOJDate = date_utils.get_julianday_as_double(date_utils.get_datetime_from_yyyymmdd(l_STOListOfStringDates[i]))
# LAIG - FA - MAC - 1180 - CNES
# The list of dates used for the correlation with the STO image is reversed compared to the
# order of the band in the STO image
# l_STOListOfJDates.insert(l_STOListOfJDates.begin(), STOJDate - l_JDayRef)
l_STOListOfJDates.append(str(STOJDate - l_JDayRef))
LOGGER.debug("L2inSTODate i : " + l_STOListOfStringDates[i] + " = " + str(l_STOListOfJDates[-1]))
l_DateString = p_InputL1ImageInformationsProvider.ProductDate # m_L1DataFilter.GetAcquisitionDate()
# LAIG - FA - MAC - 1180 - CNES
# The list of STO dates should not be updated if the STO imagefile is not updated too in the"composite image"algorithm
# It happens when an invalid L2 product(L2NOTV) is generated(too cloudy)
# This update was movedafter the"composite image" algorithm
l_L2NoData = float(l_GIPPL2COMMHandler.get_value("NoData"))
l_ReflectanceQuantificationValue = float(p_InputL1ImageInformationsProvider.ReflectanceQuantification)
l_RealL2NoData = l_L2NoData * l_ReflectanceQuantificationValue
l_global_params_dict["RealL2NoData"] = l_RealL2NoData
# Store the value of the satellite, use to write headers and Product report files
LOGGER.debug("Start getting information from the DataDEMHandler instance...")
l_DEMDataProvider = self.DataDEMMap.get(l_UniqueSatelliteD)
# Get the L2 resolutions in the final L2 Product
l_ListOfL2Resolution = l_BandDefinitons.ListOfL2Resolution
l_NumberOfL2Resolution = | |
<reponame>wingify/vwo-python-sdk
# Copyright 2019-2021 Wingify Software Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Various settings_file for testings
Notes:
Abbreviations: T = percentTraffic
W = weight split
AB = VISUAL_AB
FT = FEATURE_TEST
FR = FEATURE_ROLLOUT
IFEF = isFeatureEnabled is False
WS = With Segments
WW = With Whitelisting
The key of each campaign is the same as the settings_file name.
"""
SETTINGS_FILES = {
"EMPTY_SETTINGS_FILE": {},
"AB_T_50_W_50_50": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [{"identifier": "CUSTOM", "id": 213, "type": "CUSTOM_GOAL"}],
"variations": [
{"id": 1, "name": "Control", "changes": {}, "weight": 50},
{"id": 2, "name": "Variation-1", "changes": {}, "weight": 50},
],
"id": 230,
"name": "Campaign-230",
"percentTraffic": 50,
"key": "<KEY>",
"status": "RUNNING",
"type": "VISUAL_AB",
}
],
"accountId": 88888888,
"version": 1,
},
"AB_T_100_W_50_50": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [
{"identifier": "abcd", "id": 1, "type": "REVENUE_TRACKING"},
{"identifier": "CUSTOM", "id": 214, "type": "CUSTOM_GOAL"},
],
"variations": [
{"id": 1, "name": "Control", "changes": {}, "weight": 50},
{"id": 2, "name": "Variation-1", "changes": {}, "weight": 50},
],
"id": 231,
"name": "Campaign-231",
"percentTraffic": 100,
"key": "<KEY>",
"status": "RUNNING",
"type": "VISUAL_AB",
}
],
"accountId": 88888888,
"version": 1,
},
"AB_T_100_W_20_80": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [{"identifier": "CUSTOM", "id": 215, "type": "CUSTOM_GOAL"}],
"variations": [
{"id": 1, "name": "Control", "changes": {}, "weight": 20},
{"id": 2, "name": "Variation-1", "changes": {}, "weight": 80},
],
"id": 232,
"name": "Campaign-232",
"percentTraffic": 100,
"key": "<KEY>",
"status": "RUNNING",
"type": "VISUAL_AB",
}
],
"accountId": 88888888,
"version": 1,
},
"AB_T_20_W_10_90": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [{"identifier": "CUSTOM", "id": 216, "type": "CUSTOM_GOAL"}],
"variations": [
{"id": 1, "name": "Control", "changes": {}, "weight": 10},
{"id": 2, "name": "Variation-1", "changes": {}, "weight": 90},
],
"id": 233,
"name": "Campaign-233",
"percentTraffic": 20,
"key": "<KEY>",
"status": "RUNNING",
"type": "VISUAL_AB",
}
],
"accountId": 88888888,
"version": 1,
},
"AB_T_100_W_0_100": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [{"identifier": "CUSTOM", "id": 217, "type": "CUSTOM_GOAL"}],
"variations": [
{"id": 1, "name": "Control", "changes": {}, "weight": 0},
{"id": 2, "name": "Variation-1", "changes": {}, "weight": 100},
],
"id": 234,
"name": "Campaign-234",
"percentTraffic": 100,
"key": "<KEY>",
"status": "RUNNING",
"type": "VISUAL_AB",
}
],
"accountId": 88888888,
"version": 1,
},
"AB_T_100_W_33_33_33": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [{"identifier": "CUSTOM", "id": 218, "type": "CUSTOM_GOAL"}],
"variations": [
{"id": 1, "name": "Control", "changes": {}, "weight": 33.3333},
{"id": 2, "name": "Variation-1", "changes": {}, "weight": 33.3333},
{"id": 3, "name": "Variation-2", "changes": {}, "weight": 33.3333},
],
"id": 235,
"name": "Campaign-235",
"percentTraffic": 100,
"key": "<KEY>",
"status": "RUNNING",
"type": "VISUAL_AB",
}
],
"accountId": 88888888,
"version": 1,
},
"DUMMY_SETTINGS_FILE": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [{"identifier": "GOAL_NEW", "id": 203, "type": "CUSTOM_GOAL"}],
"variations": [
{"id": "1", "name": "Control", "weight": 40},
{"id": "2", "name": "Variation-1", "weight": 60},
],
"id": 22,
"name": "Campaign-22",
"percentTraffic": 50,
"key": "DUMMY_SETTINGS_FILE",
"status": "RUNNING",
"type": "VISUAL_AB",
}
],
"accountId": 88888888,
"version": 1,
},
"FR_T_0_W_100": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [{"identifier": "CUSTOM", "id": 213, "type": "CUSTOM_GOAL"}],
"variations": [{"id": "1", "name": "Control", "weight": 100}],
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "this_is_a_string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 123},
{"id": 3, "key": "FLOAT_VARIABLE", "type": "double", "value": 123.456},
{"id": 4, "key": "BOOLEAN_VARIABLE", "type": "boolean", "value": True},
{
"id": 5,
"key": "JSON_VARIABLE",
"type": "json",
"value": {
"data_string": "this_is_a_string",
"data_integer": "123",
"data_boolean": True,
"data_double": 123.456,
"data_json": {"json": "json"},
},
},
],
"id": 29,
"name": "Campaign-29",
"percentTraffic": 0,
"key": "FR_T_0_W_100",
"status": "RUNNING",
"type": "FEATURE_ROLLOUT",
}
],
"accountId": 123456,
"version": 2,
},
"FR_T_25_W_100": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [],
"variations": [{"id": "1", "name": "Control", "weight": 100}],
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "this_is_a_string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 123},
{"id": 3, "key": "FLOAT_VARIABLE", "type": "double", "value": 123.456},
{"id": 4, "key": "BOOLEAN_VARIABLE", "type": "boolean", "value": True},
{
"id": 5,
"key": "JSON_VARIABLE",
"type": "json",
"value": {
"data_string": "this_is_a_string",
"data_integer": "123",
"data_boolean": True,
"data_double": 123.456,
"data_json": {"json": "json"},
},
},
],
"id": 29,
"name": "Campaign-29",
"percentTraffic": 25,
"key": "FR_T_25_W_100",
"status": "RUNNING",
"type": "FEATURE_ROLLOUT",
}
],
"accountId": 123456,
"version": 2,
},
"FR_T_50_W_100": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [],
"variations": [{"id": "1", "name": "Control", "weight": 100}],
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "this_is_a_string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 123},
{"id": 3, "key": "FLOAT_VARIABLE", "type": "double", "value": 123.456},
{"id": 4, "key": "BOOLEAN_VARIABLE", "type": "boolean", "value": True},
{
"id": 5,
"key": "JSON_VARIABLE",
"type": "json",
"value": {
"data_string": "this_is_a_string",
"data_integer": "123",
"data_boolean": True,
"data_double": 123.456,
"data_json": {"json": "json"},
},
},
],
"id": 29,
"name": "Campaign-29",
"percentTraffic": 50,
"key": "FR_T_50_W_100",
"status": "RUNNING",
"type": "FEATURE_ROLLOUT",
}
],
"accountId": 123456,
"version": 2,
},
"FR_T_75_W_100": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [],
"variations": [{"id": "1", "name": "Control", "weight": 100}],
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "this_is_a_string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 123},
{"id": 3, "key": "FLOAT_VARIABLE", "type": "double", "value": 123.456},
{"id": 4, "key": "BOOLEAN_VARIABLE", "type": "boolean", "value": True},
{
"id": 5,
"key": "JSON_VARIABLE",
"type": "json",
"value": {
"data_string": "this_is_a_string",
"data_integer": "123",
"data_boolean": True,
"data_double": 123.456,
"data_json": {"json": "json"},
},
},
],
"id": 29,
"name": "Campaign-29",
"percentTraffic": 75,
"key": "FR_T_75_W_100",
"status": "RUNNING",
"type": "FEATURE_ROLLOUT",
}
],
"accountId": 123456,
"version": 2,
},
"FR_T_100_W_100": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [],
"variations": [{"id": "1", "name": "Control", "weight": 100}],
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "this_is_a_string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 123},
{"id": 3, "key": "FLOAT_VARIABLE", "type": "double", "value": 123.456},
{"id": 4, "key": "BOOLEAN_VARIABLE", "type": "boolean", "value": True},
{
"id": 5,
"key": "JSON_VARIABLE",
"type": "json",
"value": {
"data_string": "this_is_a_string",
"data_integer": "123",
"data_boolean": True,
"data_double": 123.456,
"data_json": {"json": "json"},
},
},
],
"id": 29,
"name": "Campaign-29",
"percentTraffic": 100,
"key": "FR_T_100_W_100",
"status": "RUNNING",
"type": "FEATURE_ROLLOUT",
}
],
"accountId": 123456,
"version": 2,
},
"FR_T_100_WW": {
"sdkKey": "someuniquestuff1234567",
"groups": {},
"campaignGroups": {},
"campaigns": [
{
"goals": [],
"variations": [
{
"id": "1",
"name": "Control",
"weight": 100,
"segments": {"or": [{"custom_variable": {"safari": "true"}}]},
}
],
"variables": [{"id": 2, "key": "BOOLEAN_VARIABLE", "type": "boolean", "value": True}],
"id": 29,
"percentTraffic": 100,
"isForcedVariationEnabled": True,
"key": "FR_T_100_WW",
"name": "Campaign-24",
"status": "RUNNING",
"type": "FEATURE_ROLLOUT",
"segments": {},
}
],
"accountId": 123456,
"version": 2,
},
"FR_WRONG_VARIABLE_TYPE": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [],
"variations": [{"id": "1", "name": "Control", "weight": 100}],
"variables": [
# STRING:
{"id": 1, "key": "STRING_TO_INTEGER", "type": "integer", "value": "123"},
{"id": 2, "key": "STRING_TO_FLOAT", "type": "double", "value": "123.456"},
# STRING_TO_BOOLEAN NOT POSSIBLE
# BOLLEAN:
{"id": 3, "key": "BOOLEAN_TO_STRING", "type": "string", "value": True},
# BOOLEAN TO INT, DOUBLE NOT POSSIBLE
# INTEGER:
{"id": 4, "key": "INTEGER_TO_STRING", "type": "string", "value": 24},
{"id": 5, "key": "INTEGER_TO_FLOAT", "type": "double", "value": 24},
# INTEGER TO BOOLEAN NOT POSSIBLE
# FLOAT:
{"id": 6, "key": "FLOAT_TO_STRING", "type": "string", "value": 24.24},
{"id": 7, "key": "FLOAT_TO_INTEGER", "type": "integer", "value": 24.0},
# FLOAT TO BOOLEAN NOT POSSIBLE
# JSON:
{"id": 8, "key": "JSON_STRING_TO_JSON", "type": "json", "value": '{"json": "json"}'},
# JSON TO BOOLEAN, INT, DOUBLE NOT POSSIBLE
# WRONG CASES
{"id": 9, "key": "WRONG_BOOLEAN", "type": "boolean", "value": "True"},
{"id": 10, "key": "WRONG_JSON_1", "type": "json", "value": True},
{"id": 11, "key": "WRONG_JSON_2", "type": "json", "value": "this_is_a_string"},
{"id": 12, "key": "WRONG_JSON_3", "type": "json", "value": 123},
{"id": 13, "key": "WRONG_JSON_4", "type": "json", "value": 123.234},
],
"id": 29,
"name": "Campaign-29",
"percentTraffic": 100,
"key": "FR_WRONG_VARIABLE_TYPE",
"status": "RUNNING",
"type": "FEATURE_ROLLOUT",
}
],
"accountId": 123456,
"version": 2,
},
"FT_T_0_W_10_20_30_40": {
"sdkKey": "someuniquestuff1234567",
"campaigns": [
{
"goals": [{"identifier": "FEATURE_TEST_GOAL", "id": 203, "type": "CUSTOM_GOAL"}],
"variations": [
{
"id": "1",
"name": "Control",
"weight": 10,
"variables": [
{"id": 1, "key": "STRING_VARIABLE", "type": "string", "value": "Control string"},
{"id": 2, "key": "INTEGER_VARIABLE", "type": "integer", "value": 123},
],
"isFeatureEnabled": False,
},
{
"id": "2",
"name": "Variation-1",
"weight": 20,
"variables": [
{"id": 1, | |
<gh_stars>10-100
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_structs
import dcgm_agent
from dcgm_structs import dcgmExceptionClass
import test_utils
import logger
import os
import option_parser
import time
import dcgm_fields
import dcgm_structs_internal
import dcgm_agent_internal
import DcgmReader
import random
import dcgm_field_helpers
import apps
# User-facing skip messages shown when DCP (Data Center Profiling) is
# unavailable on the system under test.
g_profNotSupportedErrorStr = "Continuous mode profiling is not supported for this GPU group. Either libnvperf_dcgm_host.so isn't in your LD_LIBRARY_PATH or it is not the NDA version that supports DC profiling"
g_moduleNotLoadedErrorStr = "Continuous mode profiling is not supported for this system because the profiling module could not be loaded. This is likely due to libnvperf_dcgm_host.so not being in LD_LIBRARY_PATH"
DLG_MAX_METRIC_GROUPS = 5 #This is taken from modules/profiling/DcgmLopConfig.h. These values need to be in sync for multipass tests to pass
def helper_check_profiling_environment(dcgmGroup):
    """Skip the current test if profiling (DCP) is unsupported for dcgmGroup.

    Probes support by requesting the supported metric groups and maps the
    known 'not supported' DCGM errors onto test_utils.skip_test. Any other
    exception propagates to fail the test.
    """
    try:
        dcgmGroup.profiling.GetSupportedMetricGroups()
    # PROFILING_NOT_SUPPORTED and NOT_SUPPORTED both mean the GPU group
    # cannot be profiled; they share the same skip message.
    except (dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_PROFILING_NOT_SUPPORTED),
            dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_SUPPORTED)):
        test_utils.skip_test(g_profNotSupportedErrorStr)
    except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_MODULE_NOT_LOADED):
        test_utils.skip_test(g_moduleNotLoadedErrorStr)
def helper_get_supported_field_ids(dcgmGroup):
    '''
    Return a flat list of the profiling field IDs supported by dcgmGroup.
    This must be queried dynamically, as field IDs can vary from chip to
    chip and driver version to driver version.
    '''
    metricGroups = dcgmGroup.profiling.GetSupportedMetricGroups()
    supportedIds = []
    for groupIdx in range(metricGroups.numMetricGroups):
        group = metricGroups.metricGroups[groupIdx]
        # Only the first numFieldIds entries of the fixed-size array are valid.
        supportedIds.extend(group.fieldIds[0:group.numFieldIds])
    return supportedIds
def helper_get_multipass_field_ids(dcgmGroup):
    '''
    Return the supported field IDs for dcgmGroup that require multiple
    passes in the hardware.

    Metric groups that share a majorId but differ in minor version are the
    sign of being multi-pass. Returns None if no such combination exists;
    otherwise a list of lists where the first dimension is groups of fields
    that are exclusive with each other and the second dimension is the
    fieldIds within an exclusive group.
    '''
    metricGroups = dcgmGroup.profiling.GetSupportedMetricGroups()
    # Bucket each metric group's field IDs by major version.
    fieldsByMajorId = {}
    for groupIdx in range(metricGroups.numMetricGroups):
        group = metricGroups.metricGroups[groupIdx]
        validIds = group.fieldIds[0:group.numFieldIds]
        fieldsByMajorId.setdefault(group.majorId, []).append(validIds)
    # Volta and Turing only have one multi-pass group, so returning the
    # first bucket with more than one member is sufficient.
    for bucket in fieldsByMajorId.values():
        if len(bucket) > 1:
            return bucket
    return None
def helper_get_single_pass_field_ids(dcgmGroup):
    '''
    Return the supported field IDs for dcgmGroup that can all be watched at
    the same time (a single hardware pass).

    Picks the largest single-pass metric group. Returns None if no metric
    group with any field IDs is reported.
    '''
    metricGroups = dcgmGroup.profiling.GetSupportedMetricGroups()
    bestGroup = None
    bestCount = 0
    for groupIdx in range(metricGroups.numMetricGroups):
        candidate = metricGroups.metricGroups[groupIdx]
        # Strict '>' keeps the first of any equally-sized groups, and a
        # group must have at least one field to be selected at all.
        if candidate.numFieldIds > bestCount:
            bestGroup = candidate
            bestCount = candidate.numFieldIds
    if bestGroup is None:
        return None
    return list(bestGroup.fieldIds[0:bestGroup.numFieldIds])
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_dcgm_prof_get_supported_metric_groups_sanity(handle, gpuIds):
    """Smoke test: querying supported metric groups must not raise."""
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetGroupWithGpuIds('mygroup', gpuIds)
    helper_check_profiling_environment(groupObj)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_prof_watch_fields_sanity(handle, gpuIds):
    """Verify that a single-pass set of profiling fields can be watched."""
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetGroupWithGpuIds('mygroup', gpuIds)
    helper_check_profiling_environment(groupObj)
    singlePassFieldIds = helper_get_single_pass_field_ids(groupObj)
    assert singlePassFieldIds is not None
    logger.info("Single pass field IDs: " + str(singlePassFieldIds))
    # Watch at 1s intervals with a 1-hour keep-age and check the returned
    # struct is the expected version.
    watchInfo = groupObj.profiling.WatchFields(singlePassFieldIds, 1000000, 3600.0, 0)
    assert watchInfo.version == dcgm_structs.dcgmProfWatchFields_version1
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_prof_all_supported_fields_watchable(handle, gpuIds):
    '''
    Verify that all fields that are reported as supported are watchable and
    that values can be returned for them
    '''
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
    helper_check_profiling_environment(dcgmGroup)
    fieldIds = helper_get_supported_field_ids(dcgmGroup)
    assert fieldIds is not None
    watchFreq = 1000 #1 ms
    maxKeepAge = 60.0
    maxKeepSamples = 0
    # NOTE: the original also computed maxAgeUsec = int(maxKeepAge) * watchFreq
    # here, but it was never used; removed as dead code.
    entityPairList = []
    for gpuId in gpuIds:
        entityPairList.append(dcgm_structs.c_dcgmGroupEntityPair_t(dcgm_fields.DCGM_FE_GPU, gpuId))
    # Watch each supported field individually and confirm the watch takes
    # effect, values come back, and unwatching cleans up.
    for fieldId in fieldIds:
        dcgmGroup.profiling.WatchFields([fieldId, ], watchFreq, maxKeepAge, maxKeepSamples)
        # Sending a request to the profiling manager guarantees that an update cycle has happened since
        # the last request
        dcgmGroup.profiling.GetSupportedMetricGroups()
        # validate watch freq, quota, and watched flags
        for gpuId in gpuIds:
            cmfi = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handle, gpuId, fieldId)
            assert (cmfi.flags & dcgm_structs_internal.DCGM_CMI_F_WATCHED) != 0, "gpuId %u, fieldId %u not watched" % (gpuId, fieldId)
            assert cmfi.numSamples > 0
            assert cmfi.numWatchers == 1, "numWatchers %d" % cmfi.numWatchers
            assert cmfi.monitorFrequencyUsec == watchFreq, "monitorFrequencyUsec %u != watchFreq %u" % (cmfi.monitorFrequencyUsec, watchFreq)
            assert cmfi.lastStatus == dcgm_structs.DCGM_ST_OK, "lastStatus %u != DCGM_ST_OK" % (cmfi.lastStatus)
        fieldValues = dcgm_agent.dcgmEntitiesGetLatestValues(handle, entityPairList, [fieldId, ], 0)
        for i, fieldValue in enumerate(fieldValues):
            logger.debug(str(fieldValue))
            assert(fieldValue.status == dcgm_structs.DCGM_ST_OK), "idx %d status was %d" % (i, fieldValue.status)
            assert(fieldValue.ts != 0), "idx %d timestamp was 0" % (i)
        dcgmGroup.profiling.UnwatchFields()
        #Validate watch flags after unwatch
        for gpuId in gpuIds:
            cmfi = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handle, gpuId, fieldId)
            assert (cmfi.flags & dcgm_structs_internal.DCGM_CMI_F_WATCHED) == 0, "gpuId %u, fieldId %u still watched. flags x%X" % (gpuId, fieldId, cmfi.flags)
            assert cmfi.numWatchers == 0, "numWatchers %d" % cmfi.numWatchers
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_prof_watch_multipass(handle, gpuIds):
    '''
    Verify multipass metric-group watching: watching up to
    DLG_MAX_METRIC_GROUPS groups at once succeeds, and watching more than
    that fails with DCGM_ST_PROFILING_MULTI_PASS.
    '''
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
    helper_check_profiling_environment(dcgmGroup)
    mpFieldIds = helper_get_multipass_field_ids(dcgmGroup)
    if mpFieldIds is None:
        test_utils.skip_test("No multipass profiling fields exist for the gpu group")
    logger.info("Multipass fieldIds: " + str(mpFieldIds))
    #Make sure that multipass watching up to DLG_MAX_METRIC_GROUPS groups works
    for i in range(min(len(mpFieldIds), DLG_MAX_METRIC_GROUPS)):
        # Accumulate the field IDs of the first i+1 metric groups
        fieldIds = []
        for j in range(i+1):
            fieldIds.extend(mpFieldIds[j])
        logger.info("Positive testing multipass fieldIds %s" % str(fieldIds))
        dcgmGroup.profiling.WatchFields(fieldIds, 1000000, 3600.0, 0)
        dcgmGroup.profiling.UnwatchFields()
    if len(mpFieldIds) <= DLG_MAX_METRIC_GROUPS:
        test_utils.skip_test("Skipping multipass failure test since there are %d <= %d multipass groups." %
                             (len(mpFieldIds), DLG_MAX_METRIC_GROUPS))
    # Negative case: any request spanning more than DLG_MAX_METRIC_GROUPS
    # metric groups must be rejected
    for i in range(DLG_MAX_METRIC_GROUPS+1, len(mpFieldIds)+1):
        fieldIds = []
        for j in range(i):
            fieldIds.extend(mpFieldIds[j])
        logger.info("Negative testing multipass fieldIds %s" % str(fieldIds))
        with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_PROFILING_MULTI_PASS)):
            dcgmGroup.profiling.WatchFields(fieldIds, 1000000, 3600.0, 0)
        dcgmGroup.profiling.UnwatchFields()
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_dcgm_prof_unwatch_fields_sanity(handle, gpuIds):
    '''
    Verify that UnwatchFields succeeds even when no watches are active and
    that the returned request struct carries the expected version.
    '''
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
    helper_check_profiling_environment(dcgmGroup)
    # Unwatching with nothing watched should still succeed
    unwatchFields = dcgmGroup.profiling.UnwatchFields()
    assert unwatchFields.version == dcgm_structs.dcgmProfUnwatchFields_version1
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_prof_watch_fields_multi_user(handle, gpuIds):
    '''
    Verify that profiling watches are exclusive to one connection: while one
    client owns them, a second client gets DCGM_ST_IN_USE, and ownership can
    be handed over by unwatching.
    '''
    dcgmHandle = pydcgm.DcgmHandle(ipAddress="127.0.0.1")
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
    helper_check_profiling_environment(dcgmGroup)
    # Second, independent connection to the same host engine
    dcgmHandle2 = pydcgm.DcgmHandle(ipAddress="127.0.0.1")
    dcgmSystem2 = dcgmHandle2.GetSystem()
    dcgmGroup2 = dcgmSystem2.GetGroupWithGpuIds('mygroup2', gpuIds)
    helper_check_profiling_environment(dcgmGroup)  # NOTE(review): re-checks dcgmGroup; presumably dcgmGroup2 was intended — confirm
    fieldIds = helper_get_single_pass_field_ids(dcgmGroup)
    assert fieldIds is not None
    #Take ownership of the profiling watches
    dcgmGroup.profiling.WatchFields(fieldIds, 1000000, 3600.0, 0)
    # Connection 2 may neither watch nor unwatch while connection 1 owns the watches
    with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_IN_USE)):
        dcgmGroup2.profiling.WatchFields(fieldIds, 1000000, 3600.0, 0)
    with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_IN_USE)):
        dcgmGroup2.profiling.UnwatchFields()
    #Release the watches
    dcgmGroup.profiling.UnwatchFields()
    #Now dcgmHandle2 owns the watches
    dcgmGroup2.profiling.WatchFields(fieldIds, 1000000, 3600.0, 0)
    #connection 1 should fail to acquire the watches
    with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_IN_USE)):
        dcgmGroup.profiling.WatchFields(fieldIds, 1000000, 3600.0, 0)
    with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_IN_USE)):
        dcgmGroup.profiling.UnwatchFields()
    dcgmHandle.Shutdown()
    dcgmHandle2.Shutdown()
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_prof_with_dcgmreader(handle, gpuIds):
    """
    Verifies that we can access profiling data with DcgmReader, which is the
    base class for dcgm exporters
    """
    dcgmHandle = pydcgm.DcgmHandle(handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
    helper_check_profiling_environment(dcgmGroup)
    fieldIds = helper_get_single_pass_field_ids(dcgmGroup)
    updateFrequencyUsec = 10000
    sleepTime = 2 * (updateFrequencyUsec / 1000000.0)  # Sleep 2x the update freq so we get new values each time
    dr = DcgmReader.DcgmReader(fieldIds=fieldIds, updateFrequency=updateFrequencyUsec, maxKeepAge=30.0, gpuIds=gpuIds)
    dr.SetHandle(handle)
    for i in range(10):
        time.sleep(sleepTime)
        latest = dr.GetLatestGpuValuesAsFieldIdDict()
        logger.info(str(latest))
        for gpuId in gpuIds:
            # Bug fix: the failure message previously indexed latest[gpuIds[i]]
            # with the iteration counter i, which is not a GPU index and could
            # itself raise KeyError/IndexError while formatting the message.
            assert len(latest[gpuId]) == len(fieldIds), "i=%d, gpuId %d, len %d != %d" % (i, gpuId, len(latest[gpuId]), len(fieldIds))
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_prof_initial_valid_record(handle, gpuIds):
    '''
    Test that we can retrieve a valid FV for a profiling field immediately after watching
    '''
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
    helper_check_profiling_environment(dcgmGroup)
    fieldIds = helper_get_single_pass_field_ids(dcgmGroup)
    assert fieldIds is not None
    #Set watches using a large interval so we don't get a record for 10 seconds in the bug case
    dcgmGroup.profiling.WatchFields(fieldIds, 10000000, 3600.0, 0)
    gpuId = gpuIds[0]
    # The watch call itself should have produced an initial valid value
    fieldValues = dcgm_agent.dcgmEntityGetLatestValues(handle, dcgm_fields.DCGM_FE_GPU, gpuId, fieldIds)
    assert len(fieldValues) == len(fieldIds), "%d != %d" % (len(fieldValues), len(fieldIds))
    for i, fieldValue in enumerate(fieldValues):
        logger.info(str(fieldValue))
        assert(fieldValue.version != 0), "idx %d Version was 0" % i
        assert(fieldValue.fieldId == fieldIds[i]), "idx %d fieldValue.fieldId %d != fieldIds[i] %d" % (i, fieldValue.fieldId, fieldIds[i])
        assert(fieldValue.status == dcgm_structs.DCGM_ST_OK), "idx %d status was %d" % (i, fieldValue.status)
        #The following line catches the bug in Jira DCGM-1357. Previously, a record would be returned with a
        #0 timestamp
        assert(fieldValue.ts != 0), "idx %d timestamp was 0" % i
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_dcgm_prof_multi_pause_resume(handle, gpuIds):
'''
Test that we can pause and resume profiling over and over without error
'''
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
helper_check_profiling_environment(dcgmGroup)
#We should never get an error | |
numpy.min(self.image)
zmax = numpy.max(self.image)
general_utilities.put_value(zmin, self.minField)
general_utilities.put_value(zmax, self.maxField)
zmin1, zmax1 = self.get_limits(self.image)
general_utilities.put_value(zmin1, self.zsminField)
general_utilities.put_value(zmax1, self.zsmaxField)
self.displayImage()
def imageExit(self, window):
"""
Close a Tkinter window.
This routine closes the window for the image display (or
whichever top level window variable is passed into the routine).
Parameters
----------
window : A tkinter Toplevel variable (or equivalent), the window
to be closed.
No values are returned by this routine.
"""
window.destroy()
def keyPress(self, event):
"""
Routine for applying imaging key press events.
Holder routine for key press events in the image window. Sets the
image position.
"""
if (event.xdata is None) or (event.ydata is None):
return
xpixel = int(self.zoom[1]+event.xdata+0.5)
ypixel = int(self.zoom[2]+event.ydata+0.5)
if (xpixel is None) or (ypixel is None):
return
self.xposition = self.zoom[1]+event.xdata
self.yposition = self.zoom[2]+event.ydata
sh1 = self.image.shape
if event.key == 'l':
yvalues = numpy.squeeze(self.image[ypixel, :])
xvalues = numpy.arange(sh1[0])+1
self.plotxy(xvalues, yvalues, colour='blue', symb=None,
xlabel='Column (Pixels)', ylabel='Pixel Value',
title='Line %d' % (ypixel))
return
if event.key == 'c':
yvalues = numpy.squeeze(self.image[:, xpixel])
xvalues = numpy.arange(sh1[1])+1
self.plotxy(xvalues, yvalues, colour='blue', symb=None,
xlabel='Line (Pixels)', ylabel='Pixel Value',
title='Column %d' % (xpixel))
return
if event.key == 'j':
xstart = xpixel - self.segment
if xstart < 0:
xstart = 0
xend = xstart + self.segment+self.segment + 2
if xend > sh1[1]:
xend = sh1[1]
xstart = xend - self.segment-self.segment - 2
ystart = ypixel - self.cross
if ystart < 0:
ystart = 0
yend = ystart + self.cross + self.cross + 2
if yend > sh1[0]:
yend = sh1[0]
ystart = yend - self.cross - self.cross - 2
subim = numpy.copy(self.image[ystart:yend, xstart:xend])
yvalues = numpy.mean(subim, axis=0)
xvalues = numpy.arange(len(yvalues))+xstart
tstring = 'Mean of columns (y): %d:%d' % (ystart, yend)
self.plotxy(xvalues, yvalues, symbol=None, colour='blue',
xlabel='x pixel position', ylabel='Mean Signal',
title=tstring)
return
if event.key == 'k':
ystart = ypixel-self.segment
if ystart < 0:
ystart = 0
yend = ystart + self.segment + self.segment + 2
if yend > sh1[0]:
yend = sh1[0]
ystart = yend - self.segment - self.segment - 2
xstart = xpixel-2
if xstart < 0:
xstart = 0
xend = xstart + self.cross + self.cross + 2
if xend >= sh1[1]:
xend = sh1[1]
xstart = xend - self.cross - self.cross - 2
subim = numpy.copy(self.image[ystart:yend, xstart:xend])
yvalues = numpy.mean(subim, axis=1)
xvalues = numpy.arange(len(yvalues))+ystart
tstring = 'Mean of rows (x) %d:%d' % (ystart, yend)
self.plotxy(xvalues, yvalues, symbol='-', colour='blue',
xlabel='y pixel position', ylabel='Signal (ADU/s)',
title=tstring)
return
if event.key == 'r':
tstring = 'Radial profile at (%.3f %.3f)' % (
event.xdata, event.ydata)
self.plot_radial_profile(event.xdata, event.ydata,
xlabel='Radius (pixels)',
ylabel='Signal', title=tstring)
return
if event.key == 's':
self.plot_surface_fit()
return
if event.key == 'h':
self.show_key_help()
return
# all other keys move the zoom window to be centred on the position
xmin, ymin = self.zoom_corner(sh1, self.zoom[0], self.xposition,
self.yposition)
self.zoom[1] = xmin
self.zoom[2] = ymin
self.displayImage()
return
def buttonPress(self, event):
"""
Routine for applying imaging button press events.
Holder routine for button press events in the image window.
Not currently active.
"""
return
def buttonRelease(self, event):
"""
Routine for applying imaging button release events.
Holder routine for button release events in the image window.
"""
if (event.xdata is None) or (event.ydata is None):
return
sh1 = self.image.shape
xpixel = int(self.zoom[1]+event.xdata+0.5)
ypixel = int(self.zoom[2]+event.ydata+0.5)
if (xpixel is None) or (ypixel is None):
return
self.xposition = self.zoom[1]+event.xdata
self.yposition = self.zoom[2]+event.ydata
xmin, ymin = self.zoom_corner(sh1, self.zoom[0], self.xposition,
self.yposition)
self.zoom[1] = xmin
self.zoom[2] = ymin
self.displayImage()
return
def setPlotPosition(self, event):
"""
Post the image position to the information line on the image display.
Routine to post the image position and the image value (if possible)
to the text area above the image display.
Parameters
----------
event : a motion-notify event from the image display window
Returns
-------
No values are returned by this routine.
"""
try:
event.canvas.get_tk_widget().focus_set()
x1 = int(self.zoom[1]+event.xdata+0.5)
y1 = int(self.zoom[2]+event.ydata+0.5)
try:
value = '%.6g' % (self.image[y1, x1])
except ValueError:
value = ' '
s1 = "Position: x = %.2f y = %.2f Value: %s" % (x1, y1, value)
self.imagePosLabelText.set(s1)
self.imagexpos = event.xdata
self.imageypos = event.ydata
except Exception:
pass
def plotxy(self, xvalues, yvalues, **parameters):
BGCOL = '#F8F8FF'
if self.image is None:
return
try:
plotwindow = Tk.Toplevel()
plotwindow.config(bg=BGCOL)
self.plotLabelText = Tk.StringVar()
self.plotLabel = Tk.Label(
plotwindow, textvariable=self.plotLabelText,
anchor=Tk.N, width=70)
self.plotLabel.pack()
self.plotLabelText.set("Value:")
self.p4 = Figure(figsize=(6, 6), dpi=100)
sp1 = self.p4.add_subplot(1, 1, 1)
c1 = FigureCanvasTkAgg(self.p4, master=plotwindow)
c1.mpl_connect("motion_notify_event", self.plotPosition)
symbol = parameters.get('symb')
if symbol is None:
symbol='-'
colour = parameters.get('colour')
if colour is None:
colour = 'blue'
sp1.plot(xvalues, yvalues, symbol, color=colour)
sp1.set_xlabel(parameters.get('xlabel'))
sp1.set_ylabel(parameters.get('ylabel'))
sp1.set_title(parameters.get('title'))
c1.draw()
c1.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=Tk.YES)
h1 = Tk.Frame(plotwindow)
h1.pack(side=Tk.TOP)
h1.config(bg=BGCOL)
button = Tk.Button(
h1, text="Save values",
command=lambda: general_utilities.save_data_set_values(
valuesx, yvalues, outstring))
button.pack(side=Tk.LEFT)
button.config(bg=BGCOL)
button = Tk.Button(
h1, text="Save as PS",
command=lambda: general_utilities.save_ps_figure(self.p4))
button.pack(side=Tk.LEFT)
button.config(bg=BGCOL)
button = Tk.Button(
h1, text="Save as PNG",
command=lambda: general_utilities.save_png_figure(self.p4))
button.pack(side=Tk.LEFT)
button.config(bg=BGCOL)
button = Tk.Button(h1, text="Close",
command=plotwindow.destroy)
button.pack()
button.config(bg=BGCOL)
except Exception:
pass
def plot_radial_profile(self, xposition, yposition, **parameters):
BGCOL = '#F8F8FF'
if self.image is None:
return
try:
xvalues, yvalues, yerror = self.radial_profile(
self.image, 1.0, 10., centre=[xposition, yposition])
profilewindow = Tk.Toplevel()
profilewindow.config(bg=BGCOL)
self.profileLabelText = Tk.StringVar()
self.profileLabel = Tk.Label(
profilewindow, textvariable=self.profileLabelText,
anchor=Tk.N, width=70)
self.profileLabel.pack()
self.profileLabelText.set("Value:")
self.p5 = Figure(figsize=(6, 6), dpi=100)
sp1 = self.p5.add_subplot(1, 1, 1)
c1 = FigureCanvasTkAgg(self.p5, master=profilewindow)
c1.mpl_connect("motion_notify_event", self.profilePosition)
symbol = parameters.get('symb')
if symbol is None:
symbol='-'
colour = parameters.get('colour')
if colour is None:
colour = 'blue'
sp1.plot(xvalues, yvalues, symbol, color=colour)
sp1.set_xlabel(parameters.get('xlabel'))
sp1.set_ylabel(parameters.get('ylabel'))
sp1.set_title(parameters.get('title'))
c1.draw()
c1.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=Tk.YES)
h1 = Tk.Frame(profilewindow)
h1.pack(side=Tk.TOP)
h1.config(bg=BGCOL)
button = Tk.Button(
h1, text="Save values",
command=lambda: general_utilities.save_data_set_values(
valuesx, yvalues, outstring))
button.pack(side=Tk.LEFT)
button.config(bg=BGCOL)
button = Tk.Button(
h1, text="Save as PS",
command=lambda: general_utilities.save_ps_figure(self.p5))
button.pack(side=Tk.LEFT)
button.config(bg=BGCOL)
button = Tk.Button(
h1, text="Save as PNG",
command=lambda: general_utilities.save_png_figure(self.p5))
button.pack(side=Tk.LEFT)
button.config(bg=BGCOL)
button = Tk.Button(h1, text="Close",
command=profilewindow.destroy)
button.pack()
button.config(bg=BGCOL)
except Exception:
pass
def radial_profile(self, array, rstep=0.5, rmax=0.0, centre=None, error=None):
"""
This routine calculates the radial profile of values for an image. The
image is passed as the first argument. The centre pixel is used for the
centre point of the calculations unless a value is passed to the routine.
Parameters
----------
array : A two-dimensional numpy image (assumed to be float values).
rstep : Optional floating point step value in pixels for the encircled
energy function.
rmax : Optional floating point value, the maximum radius in pixels for
the encircled energy function. If no value is given, the
distance from the centre position to the nearest image edge
is used.
centre : Optional two-element list of the [x, y] values of the centre from
which the radius is calculated. It is assumed to be two float
values, x and then y. If not provided, the image centre is used.
error : An optional two-dimensional numpy image (assumed to be float
values) of the uncertainties per pixel; if nothing is passed,
the uncertainties are all set to zero. The errors must all be
positive.
Returns
-------
radius : A one-dimensional numpy float array of radius values in pixels.
signal : A one-dimensional numpy float array of the signal values
signal_error : A one-dimensional numpy float array of the uncertainties
in the signal values
"""
shape = array.shape
if len(shape) != 2:
print('Error: input image needs to be two dimensional.')
return None, None, None
if error is None:
uncertainties = array*0.
else:
uncertainties = numpy.copy(error)
if uncertainites.shape != shape:
print('Error: the uncertainty array is not the same shape as the ')
print(' signal array, setting to zero.')
uncertainties = array*0.
# The following assumes that an integer value corresponds to the pixel
# centre, as opposed to the IRAF convention that a value of 0.5 denotes
# the pixel centre
if centre is None:
centre = ((shape[1]-1)/2., (shape[0]-1)/2.)
if rmax <= 0.:
rmax = max(centre[0], abs(centre[0] - shape[1]),
centre[1], abs(centre[1] - shape[0]))
if rmax <= rstep:
print('Error: maxumum radius value less than the step value.')
return None, None, None
nrad = int(rmax/rstep) + 2
rout = numpy.zeros((nrad), dtype=numpy.float32)
signal = numpy.zeros((nrad), dtype=numpy.float32)
signal_error = numpy.zeros((nrad), dtype=numpy.float32)
nout = | |
Orientation.Quaternion2OrientationMatrix(q)
o = Orientation(g)
return o
@staticmethod
def Zrot2OrientationMatrix(x1=None, x2=None, x3=None):
"""Compute the orientation matrix from the rotated coordinates given
in the .inp file for Zebulon's computations.
The function needs two of the three base vectors, the third one is
computed using a cross product.
.. note::
Still need some tests to validate this function.
:param x1: the first basis vector.
:param x2: the second basis vector.
:param x3: the third basis vector.
:return: the corresponding 3x3 orientation matrix.
"""
if x1 is None and x2 is None:
raise NameError('Need at least two vectors to compute the matrix')
elif x1 is None and x3 is None:
raise NameError('Need at least two vectors to compute the matrix')
elif x3 is None and x2 is None:
raise NameError('Need at least two vectors to compute the matrix')
if x1 is None:
x1 = np.cross(x2, x3)
elif x2 is None:
x2 = np.cross(x3, x1)
elif x3 is None:
x3 = np.cross(x1, x2)
x1 = x1 / np.linalg.norm(x1)
x2 = x2 / np.linalg.norm(x2)
x3 = x3 / np.linalg.norm(x3)
g = np.array([x1, x2, x3]).transpose()
return g
@staticmethod
def OrientationMatrix2EulerSF(g):
"""
Compute the Euler angles (in degrees) from the orientation matrix
in a similar way as done in Mandel_crystal.c
"""
tol = 0.1
r = np.zeros(9, dtype=np.float64) # double precision here
# Z-set order for tensor is 11 22 33 12 23 13 21 32 31
r[0] = g[0, 0]
r[1] = g[1, 1]
r[2] = g[2, 2]
r[3] = g[0, 1]
r[4] = g[1, 2]
r[5] = g[0, 2]
r[6] = g[1, 0]
r[7] = g[2, 1]
r[8] = g[2, 0]
phi = np.arccos(r[2])
if phi == 0.:
phi2 = 0.
phi1 = np.arcsin(r[6])
if abs(np.cos(phi1) - r[0]) > tol:
phi1 = np.pi - phi1
else:
x2 = r[5] / np.sin(phi)
x1 = r[8] / np.sin(phi);
if x1 > 1.:
x1 = 1.
if x2 > 1.:
x2 = 1.
if x1 < -1.:
x1 = -1.
if x2 < -1.:
x2 = -1.
phi2 = np.arcsin(x2)
phi1 = np.arcsin(x1)
if abs(np.cos(phi2) * np.sin(phi) - r[7]) > tol:
phi2 = np.pi - phi2
if abs(np.cos(phi1) * np.sin(phi) + r[4]) > tol:
phi1 = np.pi - phi1
return np.degrees(np.array([phi1, phi, phi2]))
@staticmethod
def OrientationMatrix2Euler(g):
"""
Compute the Euler angles from the orientation matrix.
This conversion follows the paper of Rowenhorst et al. :cite:`Rowenhorst2015`.
In particular when :math:`g_{33} = 1` within the machine precision,
there is no way to determine the values of :math:`\phi_1` and :math:`\phi_2`
(only their sum is defined). The convention is to attribute
the entire angle to :math:`\phi_1` and set :math:`\phi_2` to zero.
:param g: The 3x3 orientation matrix
:return: The 3 euler angles in degrees.
"""
eps = np.finfo('float').eps
(phi1, Phi, phi2) = (0.0, 0.0, 0.0)
# treat special case where g[2, 2] = 1
if np.abs(g[2, 2]) >= 1 - eps:
if g[2, 2] > 0.0:
phi1 = np.arctan2(g[0][1], g[0][0])
else:
phi1 = -np.arctan2(-g[0][1], g[0][0])
Phi = np.pi
else:
Phi = np.arccos(g[2][2])
zeta = 1.0 / np.sqrt(1.0 - g[2][2] ** 2)
phi1 = np.arctan2(g[2][0] * zeta, -g[2][1] * zeta)
phi2 = np.arctan2(g[0][2] * zeta, g[1][2] * zeta)
# ensure angles are in the range [0, 2*pi]
if phi1 < 0.0:
phi1 += 2 * np.pi
if Phi < 0.0:
Phi += 2 * np.pi
if phi2 < 0.0:
phi2 += 2 * np.pi
return np.degrees([phi1, Phi, phi2])
@staticmethod
def OrientationMatrix2Rodrigues(g):
"""
Compute the rodrigues vector from the orientation matrix.
:param g: The 3x3 orientation matrix representing the rotation.
:returns: The Rodrigues vector as a 3 components array.
"""
t = g.trace() + 1
if np.abs(t) < np.finfo(g.dtype).eps:
print('warning, returning [0., 0., 0.], consider using axis, angle '
'representation instead')
return np.zeros(3)
else:
r1 = (g[1, 2] - g[2, 1]) / t
r2 = (g[2, 0] - g[0, 2]) / t
r3 = (g[0, 1] - g[1, 0]) / t
return np.array([r1, r2, r3])
@staticmethod
def OrientationMatrix2Quaternion(g, P=1):
q0 = 0.5 * np.sqrt(1 + g[0, 0] + g[1, 1] + g[2, 2])
q1 = P * 0.5 * np.sqrt(1 + g[0, 0] - g[1, 1] - g[2, 2])
q2 = P * 0.5 * np.sqrt(1 - g[0, 0] + g[1, 1] - g[2, 2])
q3 = P * 0.5 * np.sqrt(1 - g[0, 0] - g[1, 1] + g[2, 2])
if g[2, 1] < g[1, 2]:
q1 = q1 * -1
elif g[0, 2] < g[2, 0]:
q2 = q2 * -1
elif g[1, 0] < g[0, 1]:
q3 = q3 * -1
q = Quaternion(np.array([q0, q1, q2, q3]), convention=P)
return q.quat
@staticmethod
def Rodrigues2OrientationMatrix(rod):
"""
Compute the orientation matrix from the Rodrigues vector.
:param rod: The Rodrigues vector as a 3 components array.
:returns: The 3x3 orientation matrix representing the rotation.
"""
r = np.linalg.norm(rod)
I = np.diagflat(np.ones(3))
if r < np.finfo(r.dtype).eps:
# the rodrigues vector is zero, return the identity matrix
return I
theta = 2 * np.arctan(r)
n = rod / r
omega = np.array([[0.0, n[2], -n[1]],
[-n[2], 0.0, n[0]],
[n[1], -n[0], 0.0]])
g = I + np.sin(theta) * omega + (1 - np.cos(theta)) * omega.dot(omega)
return g
@staticmethod
def Rodrigues2Axis(rod):
"""
Compute the axis/angle representation from the Rodrigues vector.
:param rod: The Rodrigues vector as a 3 components array.
:returns: A tuple in the (axis, angle) form.
"""
r = np.linalg.norm(rod)
axis = rod / r
angle = 2 * np.arctan(r)
return axis, angle
@staticmethod
def Axis2OrientationMatrix(axis, angle):
"""
Compute the (passive) orientation matrix associated the rotation defined by the given (axis, angle) pair.
:param axis: the rotation axis.
:param angle: the rotation angle (degrees).
:returns: the 3x3 orientation matrix.
"""
omega = np.radians(angle)
c = np.cos(omega)
s = np.sin(omega)
g = np.array([[c + (1 - c) * axis[0] ** 2,
(1 - c) * axis[0] * axis[1] + s * axis[2],
(1 - c) * axis[0] * axis[2] - s * axis[1]],
[(1 - c) * axis[0] * axis[1] - s * axis[2],
c + (1 - c) * axis[1] ** 2,
(1 - c) * axis[1] * axis[2] + s * axis[0]],
[(1 - c) * axis[0] * axis[2] + s * axis[1],
(1 - c) * axis[1] * axis[2] - s * axis[0],
c + (1 - c) * axis[2] ** 2]])
return g
@staticmethod
def Euler2Axis(euler):
"""Compute the (axis, angle) representation associated to this (passive)
rotation expressed by the Euler angles.
:param euler: 3 euler angles (in degrees).
:returns: a tuple containing the axis (a vector) and the angle (in radians).
"""
(phi1, Phi, phi2) = np.radians(euler)
t = np.tan(0.5 * Phi)
s = 0.5 * (phi1 + phi2)
d = 0.5 * (phi1 - phi2)
tau = np.sqrt(t ** 2 + np.sin(s) ** 2)
alpha = 2 * np.arctan2(tau, np.cos(s))
if alpha > np.pi:
axis = np.array([-t / tau * np.cos(d), -t / tau * np.sin(d), -1 / tau * np.sin(s)])
angle = 2 * np.pi - alpha
else:
axis = np.array([t / tau * np.cos(d), t / tau * np.sin(d), 1 / tau * np.sin(s)])
angle = alpha
return axis, angle
@staticmethod
def Euler2Quaternion(euler, P=1):
"""Compute the quaternion from the 3 euler angles (in degrees).
:param tuple euler: the 3 euler angles in degrees.
:param int P: +1 to compute an active quaternion (default), -1 for a passive quaternion.
:return: a `Quaternion` instance representing the rotation.
"""
(phi1, Phi, phi2) = np.radians(euler)
q0 = np.cos(0.5 * (phi1 + phi2)) * np.cos(0.5 * Phi)
q1 = np.cos(0.5 * (phi1 - phi2)) * np.sin(0.5 * Phi)
q2 = np.sin(0.5 * (phi1 - phi2)) * np.sin(0.5 * Phi)
q3 = np.sin(0.5 * (phi1 + phi2)) * np.cos(0.5 * Phi)
q = Quaternion(np.array([q0, -P * q1, -P * q2, -P * q3]), convention=P)
if q0 < 0:
# the scalar part must be positive
q.quat = q.quat * | |
<reponame>kkiningh/cs448h-project
from __future__ import print_function
from __future__ import division
import sys
import numpy as np
import matplotlib.pyplot as plt
import cvxpy as cvx
import hypergraph
import hypergraph.core
from hypergraph.core import Graph, Edge
from hypergraph.convert.nx import networkx_export
import networkx
from scipy.stats import rankdata
def _bbox_default(bbox=None):
# Default to placing on 11x11 area
return np.array([10, 10]) if bbox is None else bbox
def random_circuit(n_vertices=100, seed=None, directed=True):
    """Generate a random "circuit like" hypergraph.

    Each vertex is wired to 1-3 randomly chosen later vertices; the last
    vertex, having no successors, is wired back to two earlier ones.

    :param n_vertices: number of vertices in the generated graph.
    :param seed: optional RNG seed for reproducible graphs.
    :param directed: build a directed graph (each edge's head is its
        source vertex) when True.
    :return: a hypergraph Graph instance.
    """
    rand = np.random.RandomState(seed)
    # Create the graph
    vertices = np.arange(n_vertices)
    graph = Graph(vertices=vertices, directed=directed)
    # Starting from the root vertex, create a random "circuit like" graph
    for i, v in enumerate(vertices):
        # Randomly sample vertices
        if i < n_vertices - 1:
            # 1 to 3 successors drawn from the vertices after this one
            sample = rand.choice(vertices[i+1:], rand.randint(1, 4))
        else:
            # the final vertex connects back to two earlier vertices
            sample = rand.choice(vertices[:i], 2)
        # Add the edge to the graph
        edge = Edge(np.hstack([v, sample]), head=v if directed else None)
        graph.add_edge(edge)
    return graph
def random_placement(graph, bbox=None, fixed_placement={}, seed=None, best_of=1):
    """Generate a random placement, keeping the best of several draws.

    :param graph: the hypergraph Graph to place.
    :param bbox: [width, height] of the placement area (defaults to 10x10).
    :param fixed_placement: dict node -> [x, y] positions to pin.
    :param seed: optional RNG seed for reproducibility.
    :param best_of: number of random placements to draw; the one with the
        lowest HPWL cost (per cost_HPWL) is returned.
    :return: an (n_vertices, 2) array of x, y node positions.
    """
    bbox = _bbox_default(bbox)
    rand = np.random.RandomState(seed)
    cost = float('inf')
    placement = None
    for _ in range(best_of):
        # The placement is a matrix with rows representing x, y of node
        new_placement = rand.random_sample((len(graph.vertices), 2)) * bbox
        # Fix the positions for the fixed nodes.
        # Bug fix: dict.viewitems() only exists on Python 2; items()
        # behaves identically here and works on both Python 2 and 3.
        for v, pos in fixed_placement.items():
            new_placement[v] = pos
        # Keep the cheapest placement seen so far
        new_cost = cost_HPWL(graph, new_placement)
        if new_cost < cost:
            cost = new_cost
            placement = new_placement
    return placement
def random_fixed(graph, bbox=None, n_fixed=4, seed=None):
"""Randomly create a fixed placement for some nodes in the graph"""
bbox = _bbox_default(bbox)
rand = np.random.RandomState(seed)
assert n_fixed >= 2, "Number of fixed nodes must be >= 2"
# Number of nodes to choose from
N = len(graph.vertices) - 1
# fixed_placement is a dict from node -> [x, y]
fixed_placement = {}
# Pick a couple nodes from the center and fix them
for _ in range(n_fixed - 2):
fixed_placement[rand.randint(1, N)] = (
[rand.randint(0, bbox[0] + 1), rand.randint(0, bbox[1] + 1)])
# Always pick the first and last node and place them on the boundry
fixed_placement[0] = [0, rand.randint(bbox[1] + 1)]
fixed_placement[N] = [bbox[0], rand.randint(bbox[1] + 1)]
return fixed_placement
def convex_overlap_constraints(graph, x, y, bbox=None, fixed_placement={}):
    """Build pairwise non-overlap constraints for the placement problem.

    For every vertex pair a big-M disjunction forces the pair at least one
    unit apart in x (the y alternatives are currently commented out); the
    binary selector matrix ``z`` picks the active disjunct per pair.
    The print() calls appear to be debugging output.

    :param graph: the hypergraph Graph being placed.
    :param x: cvxpy x-coordinate variable, one entry per vertex.
    :param y: cvxpy y-coordinate variable, one entry per vertex.
    :param bbox: [width, height] bounding box (defaults to 10x10);
        bbox + 1 is used as the big-M constant.
    :param fixed_placement: dict of pinned nodes; pairs where both nodes
        are pinned need no constraint.
    :return: (z, overlap_constraints) tuple.
    """
    bbox = _bbox_default(bbox)
    # Non-overlap constraints
    z = cvx.Int(len(graph.vertices), 4 * len(graph.vertices))
    overlap_constraints = [1 >= z, z >= 0]
    for a in graph.vertices:
        for b in range(a+1, len(graph.vertices)):
            # if a == b, we don't need an overlap constraint
            if a == b:
                continue
            # if a and b are fixed, we don't need any more constraints
            if a in fixed_placement.keys() and b in fixed_placement.keys():
                print("skipping {} and {}".format(a, b))
                continue
            # Here, there are 4 possibilities between each node A and B
            #
            # 1) A left of B => x[B] - x[A] + M * z'[A, 4 * B + 0] >= 1
            # 2) A right of B => x[A] - x[B] + M * z'[A, 4 * B + 1] >= 1
            # 4) A below B => y[B] - y[A] + M * z'[A, 4 * B + 2] >= 1
            # 3) A above B => y[A] - y[B] + M * z'[A, 4 * B + 3] >= 1
            #
            # Where M and N are values big enough to make the constraint
            # non-active when z is 1.
            overlap_constraints.extend(
                [x[b] - x[a] + (bbox[0] + 1) * (1 - z[a, 4 * b + 0]) >= 1,
                 x[a] - x[b] + (bbox[0] + 1) * (1 - z[a, 4 * b + 1]) >= 1])
            # y[b] - y[a] + (bbox[1] + 1) * (1 - z[a, 4 * b + 2]) >= 1,
            # y[a] - y[b] + (bbox[1] + 1) * (1 - z[a, 4 * b + 3]) >= 1])
            print("x[{0}] - x[{1}] + {2} * (1 - z[{1}, {3}]) >= 1".format(
                b, a, bbox[0] + 1, 4 * b + 0))
            print("x[{1}] - x[{0}] + {2} * (1 - z[{1}, {3}]) >= 1".format(
                b, a, bbox[0] + 1, 4 * b + 1))
            # Note that z is zero for the active constraint and one otherwise.
            # i.e. only one of z[A, 4 * B + 0,1,2,3] may be active (zero).
            # To force this, we require that the sum is equal to one and that Z is a bool
            overlap_constraints.extend(
                [z[a, 4 * b + 0] +
                 z[a, 4 * b + 1] == 1])
            print("z[{0}, {1}] + z[{0}, {2}] == 1".format(a, 4 * b + 0, 4 * b + 1))
            # overlap_constraints.extend(
            #     [z[a, 4 * b + 0] +
            #      z[a, 4 * b + 1] +
            #      z[a, 4 * b + 2] +
            #      z[a, 4 * b + 3] == 1])
    return z, overlap_constraints
def convex_placement_problem(graph, bbox=None, fixed_placement={}, ranks=None):
    """Construct a convex placement problem for the given graph.

    The objective is the weighted half-perimeter wirelength (HPWL) over all
    edges; constraints keep every node inside *bbox*, pin the nodes in
    *fixed_placement*, and optionally impose the orderings in *ranks*.

    :param graph: the hypergraph Graph to place.
    :param bbox: [width, height] bounding box (defaults to 10x10).
    :param fixed_placement: dict node -> [x, y] of pinned nodes.
    :param ranks: optional orderings turned into monotonicity constraints.
    :return: (w, x, y, z, problem) -- the edge-weight parameter, the
        coordinate variables, the overlap selector (currently None) and
        the cvxpy Problem.
    """
    bbox = _bbox_default(bbox)
    # Weight of each edge (i.e. how important it is)
    w = cvx.Parameter(len(graph.edges), sign="Positive")
    # Set the weights to be the weights specified in the graph
    # We return w so that this can be overriden later by the caller
    w.value = np.array([graph.weights[edge] for edge in graph.edges])
    # Positions for each node
    x = cvx.Int(len(graph.vertices))
    y = cvx.Int(len(graph.vertices))
    # Iterate over each edge and calculate that edge's cost
    # Split the x and y costs since they can be optimized seperately
    xcost = 0
    ycost = 0
    for i, edge in enumerate(graph.edges):
        # Convert the edge to a list of indices that correspond to the
        # vertices connected by the edge
        vs = list(edge)
        # Compute the HPWL cost for this edge.
        # Bug fix: the weight must multiply the whole span; operator
        # precedence previously gave w[i]*max - min instead of
        # w[i]*(max - min), leaving the min term unweighted.
        xcost += w[i] * (cvx.max_entries(x[vs]) - cvx.min_entries(x[vs]))
        ycost += w[i] * (cvx.max_entries(y[vs]) - cvx.min_entries(y[vs]))
    cost = xcost + ycost
    # Constrain the coords to the specified bounding box
    bbox_constraints_x = [bbox[0] >= x, x >= 0]
    bbox_constraints_y = [bbox[1] >= y, y >= 0]
    # Constrain the coords to the specified fixed placement
    # (items() replaces the Python-2-only viewitems())
    fixed_constraints_x = [x[v] == pos[0]
                           for (v, pos) in fixed_placement.items()]
    fixed_constraints_y = [y[v] == pos[1]
                           for (v, pos) in fixed_placement.items()]
    # Pairwise overlap constraints are currently disabled; ordering (rank)
    # constraints are used instead when provided.
    z = None
    overlap_constraints = []
    if ranks is not None:
        # NOTE(review): zip(ranks[0:], ranks[:-1]) pairs each rank with
        # itself, producing vacuous x[r] <= x[r] constraints; presumably
        # zip(ranks[:-1], ranks[1:]) was intended -- confirm against the
        # callers before changing.
        for x_rank, x_rank_next in zip(ranks[0:], ranks[:-1]):
            overlap_constraints += [x[x_rank] <= x[x_rank_next]]
        for y_rank, y_rank_next in zip(ranks[0:], ranks[:-1]):
            overlap_constraints += [y[y_rank] <= y[y_rank_next]]
    problem = cvx.Problem(cvx.Minimize(cost),
                          bbox_constraints_x + fixed_constraints_x
                          + bbox_constraints_y + fixed_constraints_y
                          + overlap_constraints)
    return w, x, y, z, problem
def heruistic_initializer(graph, fixed_placement=None):
    """Heuristically place every vertex by multilateration from the fixed nodes.

    Weighted shortest-path distances between each vertex and the fixed
    vertices are fed into a per-vertex linear least-squares trilateration
    solve.  Edge weights are inverted first, so a heavier (more important)
    edge reads as a *shorter* distance and pulls its endpoints together.

    :param graph: hypergraph with ``.vertices``/``.edges``; must be
        exportable via ``networkx_export``
    :param fixed_placement: dict mapping vertex -> (x, y) fixed coordinate.
        NOTE(review): the solve below anchors on ``fixed_placement[0]``, so
        a vertex keyed ``0`` is assumed to be fixed -- confirm with callers.
    :return: ``(x, y)`` integer-rounded coordinate arrays, one entry per vertex
    """
    # Mutable default arguments are shared across calls; use a None sentinel.
    if fixed_placement is None:
        fixed_placement = {}
    # Work on a plain undirected networkx graph, which is far more
    # convenient for shortest-path queries than the hypergraph type.
    g = networkx_export(graph).to_undirected()
    # Invert the weights: importance -> distance.
    for _, _, data in g.edges(data=True):
        data['weight'] = 1.0 / data['weight']
    # Connect every pair of fixed nodes with an edge whose length is their
    # Manhattan distance in the fixed placement.
    for h in fixed_placement:
        for t in fixed_placement:
            if h != t:
                g.add_edge(h, t, weight=np.sum(np.abs(
                    np.array(fixed_placement[h]) - np.array(fixed_placement[t]))))
    # All-pairs shortest path lengths.  dict() accepts both the dict
    # returned by networkx 1.x and the generator returned by networkx 2.x.
    paths = dict(networkx.all_pairs_dijkstra_path_length(g, weight='weight'))
    # Flatten the nested dict into an N x N distance matrix.
    # NOTE(review): assumes dict iteration order matches vertex indexing --
    # TODO confirm for the networkx version in use.
    N = len(graph.vertices)
    dists = np.array([v for row in paths.values() for v in row.values()]).reshape(N, N)
    # Trilaterate each vertex independently via linear least squares,
    # using fixed vertex 0 as the reference anchor.
    x = np.zeros(N)
    y = np.zeros(N)
    for n in range(N):
        A = 2 * np.array([[fixed_placement[0][0] - pos[0], fixed_placement[0][1] - pos[1]]
                          for pos in fixed_placement.values()])[1:]
        b = np.array([pos[0] ** 2 + pos[1] ** 2
                      - fixed_placement[0][0] ** 2 - fixed_placement[0][1] ** 2
                      + dists[n][0] ** 2 - dists[n][v] ** 2
                      for v, pos in fixed_placement.items()])[1:]
        (x[n], y[n]), _, _, _ = np.linalg.lstsq(A, b)
    # Snap to the integer grid.  (The previous rank_x/rank_y lines were
    # removed: they called an undefined name `argsort` and were never used.)
    x = np.round(x)
    y = np.round(y)
    return x, y
def simulated_anneal_convex(graph, bbox=None, fixed_placement={}, initial_ranks=None):
if initial_ranks is None:
initial_ranks = np.vstack(heruistic_initializer(graph, fixed_placement))
# Constant temp
t = 1
non_fixed = [v for v in graph.vertices if v not in fixed_placement.keys()]
ranks = np.copy(initial_ranks)
sample_cost = float('inf')
| |
<filename>src/business/models.py
from django.conf import settings
from fractions import Fraction
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from django.contrib.auth import models as auth_models
from django.core.urlresolvers import reverse
from django.db.models import *
from django.db import models as models
from django.utils.translation import ugettext_lazy as _
from django_extensions.db import fields as extension_fields
from django_extensions.db.fields import AutoSlugField
from timezone_field import TimeZoneField
import uuid
from decimal import *
from pyPrintful import pyPrintful
from django.core.exceptions import ObjectDoesNotExist
from business.helper_backend import *
from storemanager.logger import *
logger = StyleAdapter(logging.getLogger("project"))
class commonBusinessModel(models.Model):
    """Abstract base for all business models.

    Provides a random UUID primary key plus created/updated audit
    timestamps; every concrete model below inherits these three columns.
    """
    # Client-side uuid4 primary key; hidden from admin/edit forms
    id = UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Stamped once, on first save
    date_added = DateTimeField(auto_now_add=True, verbose_name=_("Added"))
    # Refreshed on every save
    date_updated = DateTimeField(auto_now=True, verbose_name=_("Updated"))

    class Meta:
        abstract = True
# Primarily Store App Related
class bzBrand(commonBusinessModel):
    """A brand: a short SKU prefix plus display name, linking the Printful
    (vendor) store it is produced by and the WooCommerce (outlet) store it
    sells through."""
    # Fields
    code = CharField(_("Code"), max_length=2)
    name = CharField(_("Name"), max_length=255,
                     default="", blank=True, null=True)
    # Relationship Fields
    vendor = ForeignKey('business.pfStore', blank=True, null=True, )
    outlet = ForeignKey('business.wooStore', blank=True, null=True,)

    class Meta:
        ordering = ('code',)
        verbose_name = _("Brand")
        verbose_name_plural = _("Brands")

    def __str__(self):
        # Prefer "CODE - Name", then the bare name, then a stub label.
        if self.code and self.name:
            return "{} - {}".format(self.code, self.name)
        if self.name:
            return "{}".format(self.name)
        return _("Unknown Brand")

    def get_absolute_url(self):
        """Detail page URL for this brand."""
        return reverse('business:app_store_brand_detail', args=(self.pk,))

    def get_update_url(self):
        """Edit page URL for this brand."""
        return reverse('business:app_store_brand_update', args=(self.pk,))
class pfStore(commonBusinessModel):
    """A Printful store.

    Mirrors the store record exposed by the Printful API; apart from the
    API ``key`` (entered locally), the fields are populated by
    :meth:`api_pull`.  Only packing-slip data can be pushed back
    (:meth:`api_push`).
    """
    # Fields
    code = CharField(_("Code"), max_length=50,
                     default="", blank=True, null=True)
    name = CharField(_("Name"), max_length=255,
                     default="", blank=True, null=True)
    # Printful's numeric store id; 0 means "never synced with the API yet"
    pid = IntegerField(_("Printful ID"), default=0)
    website = CharField(_("Website"), max_length=255,
                        default="", blank=True, null=True)
    created = CharField(_("Created"), max_length=255,
                        default="", blank=True, null=True)
    key = CharField(_("API Key"), max_length=64, default="", blank=True)
    return_address = ForeignKey("business.pfAddress", verbose_name=_(
        "Return Address"), related_name="returnaddress", blank=True, null=True)
    billing_address = ForeignKey("business.pfAddress", verbose_name=_(
        "Billing Address"), related_name="billingaddress", blank=True, null=True)
    payment_type = CharField(_("Payment Card Type"),
                             max_length=64, default="", blank=True, null=True)
    # (fixed) the two labels below previously copy-pasted "Payment Card Type"
    payment_number_mask = CharField(
        _("Payment Card Number Mask"), max_length=64, default="", blank=True, null=True)
    payment_expires = CharField(
        _("Payment Card Expires"), max_length=64, default="", blank=True, null=True)
    packingslip_email = EmailField(
        _("Packing Slip Email"), default="", blank=True, null=True)
    packingslip_phone = CharField(
        _("Packing Slip Phone"), max_length=64, default="", blank=True, null=True)
    packingslip_message = CharField(
        _("Packing Slip Message"), max_length=255, default="", blank=True, null=True)

    class Meta:
        ordering = ('-created',)
        verbose_name = _("Printful Store")
        verbose_name_plural = _("Printful Stores")

    def __str__(self):
        if self.code and self.name:
            return "{} - {}".format(self.code, self.name)
        elif self.code:
            return "{}".format(self.code)
        return _("Unknown Store")

    def get_absolute_url(self):
        return reverse('business:app_store_pf_detail', args=(self.pk,))

    def get_update_url(self):
        return reverse('business:app_store_pf_update', args=(self.pk,))

    def has_auth(self):
        """Whether this store has an API key and can talk to Printful."""
        return bool(self.key)
    has_auth.short_description = _("Auth?")
    has_auth.boolean = True

    def save(self, *args, **kwargs):
        """Sync with the Printful API, then persist.

        On the first save (pid == 0) the country/state lookup tables are
        seeded if empty and the store record is pulled.  Every save pushes
        the packing-slip data and re-pulls the canonical record.
        """
        logger.debug('Method: pfStore.save() Called')
        if self.pid == 0:
            # Seed country/state lookups needed by address resolution.
            if pfCountry.objects.all().count() == 0:
                pfCountry.api_pull(key=self.key)
            self.api_pull()
        self.api_push()
        self.api_pull()
        super(pfStore, self).save(*args, **kwargs)

    @staticmethod
    def get_store(store=None):
        """
        Gets a 'default' Printful store, generally for use with the Printful API
        methods on other related objects. If a store is provided, then it is
        validated and returned. Otherwise, this method will attempt to grab the
        first Printful store object in the database and return that.
        If no stores are in the database, then this method will raise an exception.
        The wrapping method will need to catch this and respond appropriately.
        :param store: Optional. pfStore object. Will validate that it is a valid
            pfStore object and return it back.
        :raises ObjectDoesNotExist: when no usable store can be found.
        """
        # isinstance (rather than an exact type check) also accepts proxy
        # subclasses, which behave identically for API purposes.
        if isinstance(store, pfStore) and store.has_auth():
            return store
        else:
            store = pfStore.objects.exclude(
                key__isnull=True).exclude(key__exact='').first()
            if store:
                return store
        raise ObjectDoesNotExist(
            "Either provide a store object or add at least one pfStore with an API key to the database.")

    def api_pull(self):
        """
        Update current store with data from Printful API.

        :raises Exception: when no API key is configured.
        """
        if not self.has_auth():
            raise Exception("This store is missing the API Key.")
        # TODO Handle states/countries lookup Exceptions
        api = pyPrintful(key=self.key)
        sData = api.get_store_info()
        # (fixed) debug output now goes through the module logger instead
        # of bare print() calls.
        logger.debug("{}", sData)
        logger.debug("{}", api._store['last_response_raw'])
        self.website = sData['website']
        self.name = sData['name']
        self.pid = sData['id']
        self.created = sData['created']
        self.packingslip_phone = sData['packing_slip']['phone']
        self.packingslip_email = sData['packing_slip']['email']
        self.packingslip_message = sData['packing_slip']['message']
        self.payment_type = sData['payment_card']['type']
        self.payment_number_mask = sData['payment_card']['number_mask']
        self.payment_expires = sData['payment_card']['expires']
        if sData['billing_address']:
            _state = pfState.objects.get(
                code=sData['billing_address']['state_code'])
            _country = pfCountry.objects.get(
                code=sData['billing_address']['country_code'])
            self.billing_address, created = pfAddress.objects.update_or_create(
                name=sData['billing_address']['name'],
                company=sData['billing_address']['company'],
                address1=sData['billing_address']['address1'],
                address2=sData['billing_address']['address2'],
                city=sData['billing_address']['city'],
                zip=sData['billing_address']['zip'],
                phone=sData['billing_address']['phone'],
                email=sData['billing_address']['email'],
                state=_state,
                country=_country,
                defaults={}
            )
        if sData['return_address']:
            _state = pfState.objects.get(
                code=sData['return_address']['state_code'])
            _country = pfCountry.objects.get(
                code=sData['return_address']['country_code'])
            self.return_address, created = pfAddress.objects.update_or_create(
                name=sData['return_address']['name'],
                company=sData['return_address']['company'],
                address1=sData['return_address']['address1'],
                address2=sData['return_address']['address2'],
                city=sData['return_address']['city'],
                zip=sData['return_address']['zip'],
                phone=sData['return_address']['phone'],
                email=sData['return_address']['email'],
                state=_state,
                country=_country,
                defaults={}
            )

    def api_push(self):
        """
        Pushes the only data available to update via the API: packing slip info.

        :raises Exception: when no API key is configured.
        """
        if not self.has_auth():
            raise Exception("This store is missing the API Key.")
        data = {
            'email': self.packingslip_email,
            'phone': self.packingslip_phone,
            'message': self.packingslip_message,
        }
        api = pyPrintful(key=self.key)
        api.put_store_packingslip(data)
class wooStore(commonBusinessModel):
    """A WooCommerce/WordPress storefront plus the REST credentials needed
    to reach it."""
    # Fields
    code = CharField(_("Code"), max_length=2, default="", blank=True, null=True,
                     help_text=_("Generally, a two-character uppercase code. Used in SKUs."))
    base_url = URLField(_("Base URL"), default="", blank=True, null=True, help_text=_(
        "Include the schema and FQDN only (e.g., 'https://example.com'). No trailing slash."))
    consumer_key = CharField(
        _("Consumer Key"), max_length=64, blank=True, null=True)
    consumer_secret = CharField(
        _("Consumer Secret"), max_length=64, blank=True, null=True)
    timezone = TimeZoneField(default='America/New_York')
    verify_ssl = BooleanField(_("Verify SSL?"), default=True, help_text=_(
        "Uncheck this if you are using a self-signed SSL certificate to disable ssl verification."))

    class Meta:
        ordering = ('code',)
        verbose_name = _("WP Store")
        verbose_name_plural = _("WP Stores")

    def __str__(self):
        # (fixed) removed a dead `rv = []` local that was never used
        if self.code and self.base_url:
            return "{} - {}".format(self.code, self.base_url)
        elif self.code:
            return "{}".format(self.code)
        return _("Unknown Store")

    def get_absolute_url(self):
        return reverse('business:app_store_wp_detail', args=(self.pk,))

    def get_update_url(self):
        return reverse('business:app_store_wp_update', args=(self.pk,))
# Primarily Creative App Related
class bzCreativeCollection(commonBusinessModel):
    """A named collection of creative work under a brand; groups designs
    and layouts."""
    # Fields
    code = CharField(_("Code"), max_length=3)
    name = CharField(_("Name"), max_length=255,
                     default="", blank=True, null=True)
    # Relationship Fields
    bzbrand = ForeignKey('business.bzBrand', verbose_name=_("Brand"))

    class Meta:
        ordering = ('code',)
        verbose_name = _("Creative Collection")
        verbose_name_plural = _("Creative Collections")

    def __str__(self):
        if self.code and self.name:
            return "{} - {}".format(self.code, self.name)
        elif self.name:
            return "{}".format(self.name)
        return _("Unknown Collection")

    def get_absolute_url(self):
        return reverse(
            'business:business_bzcreativecollection_detail', args=(self.pk,))

    def get_update_url(self):
        return reverse(
            'business:business_bzcreativecollection_update', args=(self.pk,))

    def get_designs(self):
        """All designs belonging to this collection."""
        return bzCreativeDesign.objects.filter(bzcreativecollection=self)
    get_designs.short_description = _("Designs")

    def num_designs(self):
        return self.get_designs().count()
    num_designs.short_description = _("Designs")

    def get_layouts(self):
        """All layouts belonging to this collection."""
        return bzCreativeLayout.objects.filter(bzcreativecollection=self)
    # (fixed) this previously re-assigned get_designs.short_description,
    # clobbering the "Designs" admin label with "Layouts".
    get_layouts.short_description = _("Layouts")

    def num_layouts(self):
        return self.get_layouts().count()
    num_layouts.short_description = _("Layouts")
class bzCreativeDesign(commonBusinessModel):
    """A single design within a creative collection."""
    # Fields
    code = CharField(_("Code"), max_length=2)
    name = CharField(_("Name"), max_length=255,
                     default="", blank=True, null=True)
    # Relationship Fields
    bzcreativecollection = ForeignKey(
        'business.bzCreativeCollection', verbose_name=_("Collection"))

    class Meta:
        ordering = ('bzcreativecollection__code', 'code',)
        verbose_name = _("Creative Design")
        verbose_name_plural = _("Creative Designs")

    def __str__(self):
        # Builds "CCC-DD / Collection Name / Design Name", skipping any
        # missing parts.
        rv = []
        if self.bzcreativecollection:
            if self.bzcreativecollection.code:
                rv.append(self.bzcreativecollection.code + "-")
        if self.code:
            rv.append(self.code)
        if self.bzcreativecollection:
            # (fixed) previously guarded on .code while concatenating .name,
            # which raised TypeError when the nullable name was None.
            if self.bzcreativecollection.name:
                rv.append(" / " + self.bzcreativecollection.name)
        if self.name:
            rv.append(" / " + self.name)
        if rv:
            return "".join(rv)
        return _("Unknown Design")

    def get_absolute_url(self):
        return reverse('business:business_bzcreativedesign_detail',
                       args=(self.pk,))

    def get_update_url(self):
        return reverse('business:business_bzcreativedesign_update',
                       args=(self.pk,))

    def get_products(self):
        """All products rendered from this design."""
        return bzProduct.objects.filter(bzDesign=self)
    get_products.short_description = _("Products")

    def num_products(self):
        return self.get_products().count()
    num_products.short_description = _("Products")
class bzCreativeLayout(commonBusinessModel):
    """A layout (placement template) within a creative collection."""
    # Fields
    code = CharField(_("Code"), max_length=2)
    name = CharField(_("Name"), max_length=255,
                     default="", blank=True, null=True)
    # Relationship Fields
    bzcreativecollection = ForeignKey(
        'business.bzCreativeCollection', verbose_name=_("Collection"))

    class Meta:
        ordering = ('bzcreativecollection__code', 'code',)
        verbose_name = _("Creative Layout")
        verbose_name_plural = _("Creative Layouts")

    def __str__(self):
        if self.code and self.name:
            return "{} - {}".format(self.code, self.name)
        elif self.name:
            return "{}".format(self.name)
        # (fixed) fallback previously said "Unknown Design" (copy-paste
        # from bzCreativeDesign); this is the Layout model.
        return _("Unknown Layout")

    def get_absolute_url(self):
        return reverse('business:business_bzcreativelayout_detail',
                       args=(self.pk,))

    def get_update_url(self):
        return reverse('business:business_bzcreativelayout_update',
                       args=(self.pk,))

    def get_products(self):
        """All products using this layout."""
        return bzProduct.objects.filter(bzLayout=self)
    get_products.short_description = _("Products")

    def num_products(self):
        return self.get_products().count()
    num_products.short_description = _("Products")
class bzCreativeRendering(commonBusinessModel):
    """A rendering: the pairing of one design with one layout."""
    # Fields
    # Relationship Fields
    bzcreativedesign = ForeignKey(
        'business.bzCreativeDesign', verbose_name=_("Design"))
    bzcreativelayout = ForeignKey(
        'business.bzCreativeLayout', verbose_name=_("Layout"))

    class Meta:
        ordering = ('bzcreativedesign__code', 'bzcreativelayout__code',)
        verbose_name = _("Creative Rendering")
        verbose_name_plural = _("Creative Renderings")

    def __str__(self):
        # "DESIGN - LAYOUT" when both sides are present.
        design = self.bzcreativedesign
        layout = self.bzcreativelayout
        if design and layout:
            return "{} - {}".format(design.code, layout.code)
        return _("Unknown Rendering")

    def get_absolute_url(self):
        """Detail page URL for this rendering."""
        return reverse('business:business_bzcreativerendering_detail',
                       args=(self.pk,))

    def get_update_url(self):
        """Edit page URL for this rendering."""
        return reverse('business:business_bzcreativerendering_update',
                       args=(self.pk,))
class bzProduct(commonBusinessModel):
    """A sellable product: a design (optionally with a layout) mapped onto
    a Printful catalog product and published to a WooCommerce outlet."""
    STATUS_DRAFT = "draft"
    STATUS_PUBLIC = "public"
    STATUS_CHOICES = (
        (STATUS_DRAFT, "Draft"),
        (STATUS_PUBLIC, "Public"),
    )
    # Fields
    code = CharField(_("Code"), max_length=64,
                     default="", blank=True, null=True)
    name = CharField(_("Name"), max_length=255,
                     default="", blank=True, null=True)
    status = CharField(_("Status"), max_length=32,
                       default=STATUS_DRAFT, choices=STATUS_CHOICES)
    # Relationship Fields
    bzDesign = ForeignKey('business.bzCreativeDesign',
                          verbose_name=_("Design"))
    bzLayout = ForeignKey('business.bzCreativeLayout',
                          verbose_name=_("Layout"), null=True, blank=True)
    pfProduct = ForeignKey('business.pfCatalogProduct',
                           verbose_name=_("Vendor Product"),
                           blank=True, null=True, )
    wooProduct = ForeignKey('business.wooProduct',
                            verbose_name=_("Outlet Product"),
                            blank=True, null=True, )
    pfSyncProduct = ForeignKey('business.pfSyncProduct',
                               verbose_name=_("Sync Product"),
                               blank=True, null=True, )
    colors = ManyToManyField('business.pfCatalogColor',
                             blank=True, verbose_name=_("Colors"))
    sizes = ManyToManyField('business.pfCatalogSize',
                            blank=True, verbose_name=_("Sizes"))

    class Meta:
        ordering = ('code',)
        verbose_name = _("Product")
        verbose_name_plural = _("Products")

    def __str__(self):
        return self.get_friendly_name()

    def get_friendly_name(self):
        """Return "code - name", falling back to whichever part exists."""
        if self.code and self.name:
            return "{} - {}".format(self.code, self.name)
        elif self.name:
            return "{}".format(self.name)
        return "Unknown Product"

    def __unicode__(self):
        # (fixed) previously returned the bound method object itself
        # (`self.__str__`) instead of calling it.
        return self.__str__()

    def get_absolute_url(self):
        return reverse('business:business_bzproduct_detail', args=(self.pk,))

    def get_update_url(self):
        return reverse('business:business_bzproduct_update', args=(self.pk,))

    def get_variants(self):
        """All variants (color/size combinations) of this product."""
        return bzProductVariant.objects.filter(bzproduct=self)
    get_variants.short_description = _("Variants")

    def num_variants(self):
        return self.get_variants().count()
    num_variants.short_description = _("Variants")

    def get_colors_as_string(self):
        """Comma-separated display string of the assigned colors."""
        return ", ".join(str(i) for i in self.colors.all())

    def get_sizes_as_string(self):
        """Comma-separated display string of the assigned sizes."""
        return ", ".join(str(i) for i in self.sizes.all())
class bzProductVariant(commonBusinessModel):
# Fields
code = CharField(verbose_name=_("Code"), max_length=64,
default="", blank=True, null=True)
is_active = BooleanField(verbose_name=_("Is Active"), default=True)
# | |
+ 0.5*m.b371*m.b515 + 0.5*m.b371*m.b554 + m.b371*m.b555 + 0.5*m.b371*
m.b558 + 0.5*m.b371*m.b582 + 0.5*m.b371*m.b587 + 0.5*m.b371*m.b603 + 0.5*m.b371*m.b608 + 0.5*
m.b371*m.b614 + 0.5*m.b371*m.b619 + 0.5*m.b371*m.b628 + 0.5*m.b371*m.b632 + 0.5*m.b371*m.b633 +
0.5*m.b371*m.b638 + 0.5*m.b371*m.b639 + 0.5*m.b371*m.b645 + 0.5*m.b371*m.b646 + 0.5*m.b371*m.b657
+ m.b371*m.b660 + 0.5*m.b371*m.b671 + 0.5*m.b371*m.b677 + 0.5*m.b371*m.b678 + m.b371*m.x861 +
0.5*m.b372*m.b376 + 0.5*m.b372*m.b383 + 0.5*m.b372*m.b390 + m.b372*m.b394 + 0.5*m.b372*m.b423 +
0.5*m.b372*m.b424 + 0.5*m.b372*m.b428 + 0.5*m.b372*m.b458 + m.b372*m.b467 + 0.5*m.b372*m.b470 +
0.5*m.b372*m.b477 + 0.5*m.b372*m.b482 + 0.5*m.b372*m.b488 + 0.5*m.b372*m.b490 + 0.5*m.b372*m.b497
+ 0.5*m.b372*m.b499 + 0.5*m.b372*m.b500 + 0.5*m.b372*m.b526 + 0.5*m.b372*m.b530 + 0.5*m.b372*
m.b531 + 0.5*m.b372*m.b562 + 0.5*m.b372*m.b566 + 0.5*m.b372*m.b570 + 0.5*m.b372*m.b572 + 0.5*
m.b372*m.b574 + 0.5*m.b372*m.b587 + 0.5*m.b372*m.b603 + 0.5*m.b372*m.b605 + 0.5*m.b372*m.b608 +
0.5*m.b372*m.b623 + 0.5*m.b372*m.b628 + 0.5*m.b372*m.b664 + 0.5*m.b372*m.b670 + 0.5*m.b372*m.b673
+ 0.5*m.b372*m.b674 + 0.5*m.b372*m.b676 + 0.5*m.b372*m.b677 + 0.5*m.b372*m.b681 + m.b372*m.x854
+ 0.5*m.b373*m.b374 + 0.5*m.b373*m.b391 + 0.5*m.b373*m.b393 + m.b373*m.b408 + 0.5*m.b373*m.b414
+ 0.5*m.b373*m.b420 + 0.5*m.b373*m.b421 + 0.5*m.b373*m.b438 + 0.5*m.b373*m.b444 + 0.5*m.b373*
m.b450 + 0.5*m.b373*m.b451 + m.b373*m.b454 + 0.5*m.b373*m.b462 + 0.5*m.b373*m.b473 + m.b373*
m.b481 + 0.5*m.b373*m.b486 + 0.5*m.b373*m.b510 + 0.5*m.b373*m.b524 + 0.5*m.b373*m.b540 + 0.5*
m.b373*m.b541 + 0.5*m.b373*m.b544 + 0.5*m.b373*m.b550 + m.b373*m.b563 + 0.5*m.b373*m.b567 + 0.5*
m.b373*m.b568 + 0.5*m.b373*m.b583 + 0.5*m.b373*m.b585 + 0.5*m.b373*m.b588 + 0.5*m.b373*m.b595 +
0.5*m.b373*m.b601 + 0.5*m.b373*m.b606 + 0.5*m.b373*m.b613 + 0.5*m.b373*m.b620 + 0.5*m.b373*m.b624
+ 0.5*m.b373*m.b635 + 0.5*m.b373*m.b636 + 0.5*m.b373*m.b647 + 0.5*m.b374*m.b383 + 0.5*m.b374*
m.b393 + 0.5*m.b374*m.b397 + 0.5*m.b374*m.b402 + 0.5*m.b374*m.b408 + 0.5*m.b374*m.b410 + 0.5*
m.b374*m.b411 + 0.5*m.b374*m.b450 + 0.5*m.b374*m.b451 + 0.5*m.b374*m.b454 + 0.5*m.b374*m.b481 +
0.5*m.b374*m.b505 + 0.5*m.b374*m.b509 + m.b374*m.b510 + 0.5*m.b374*m.b530 + 0.5*m.b374*m.b536 +
m.b374*m.b544 + 0.5*m.b374*m.b547 + 0.5*m.b374*m.b553 + 0.5*m.b374*m.b562 + 0.5*m.b374*m.b563 +
0.5*m.b374*m.b567 + 0.5*m.b374*m.b569 + 0.5*m.b374*m.b574 + 0.5*m.b374*m.b576 + m.b374*m.b583 +
0.5*m.b374*m.b585 + 0.5*m.b374*m.b586 + 0.5*m.b374*m.b591 + 0.5*m.b374*m.b595 + 0.5*m.b374*m.b602
+ 0.5*m.b374*m.b605 + 0.5*m.b374*m.b624 + 0.5*m.b374*m.b635 + 0.5*m.b374*m.b636 + 0.5*m.b374*
m.b641 + 0.5*m.b374*m.b645 + 0.5*m.b374*m.b648 + 0.5*m.b374*m.b650 + 0.5*m.b374*m.b656 + 0.5*
m.b374*m.b658 + 0.5*m.b374*m.b662 + 0.5*m.b374*m.b666 + m.b375*m.b377 + 0.5*m.b375*m.b386 + 0.5*
m.b375*m.b392 + 0.5*m.b375*m.b395 + 0.5*m.b375*m.b396 + 0.5*m.b375*m.b419 + 0.5*m.b375*m.b440 +
0.5*m.b375*m.b442 + 0.5*m.b375*m.b451 + 0.5*m.b375*m.b479 + 0.5*m.b375*m.b485 + 0.5*m.b375*m.b489
+ 0.5*m.b375*m.b493 + 0.5*m.b375*m.b505 + 0.5*m.b375*m.b520 + 0.5*m.b375*m.b537 + m.b375*m.b552
+ m.b375*m.b556 + 0.5*m.b375*m.b569 + m.b375*m.b578 + 0.5*m.b375*m.b581 + 0.5*m.b375*m.b585 +
0.5*m.b375*m.b609 + 0.5*m.b375*m.b611 + 0.5*m.b375*m.b618 + 0.5*m.b375*m.b624 + 0.5*m.b375*m.b625
+ 0.5*m.b375*m.b627 + 0.5*m.b375*m.b630 + 0.5*m.b375*m.b631 + 0.5*m.b375*m.b636 + 0.5*m.b375*
m.b638 + 0.5*m.b375*m.b639 + 0.5*m.b375*m.b646 + 0.5*m.b375*m.b648 + 0.5*m.b375*m.b651 + 0.5*
m.b375*m.b656 + 0.5*m.b375*m.b666 + 0.5*m.b376*m.b383 + 0.5*m.b376*m.b390 + 0.5*m.b376*m.b394 +
0.5*m.b376*m.b423 + m.b376*m.b424 + 0.5*m.b376*m.b428 + 0.5*m.b376*m.b458 + 0.5*m.b376*m.b461 +
0.5*m.b376*m.b467 + 0.5*m.b376*m.b472 + 0.5*m.b376*m.b474 + 0.5*m.b376*m.b477 + 0.5*m.b376*m.b482
+ 0.5*m.b376*m.b488 + 0.5*m.b376*m.b490 + 0.5*m.b376*m.b492 + 0.5*m.b376*m.b497 + 0.5*m.b376*
m.b499 + 0.5*m.b376*m.b500 + 0.5*m.b376*m.b526 + 0.5*m.b376*m.b530 + 0.5*m.b376*m.b531 + 0.5*
m.b376*m.b562 + 0.5*m.b376*m.b566 + m.b376*m.b570 + 0.5*m.b376*m.b572 + 0.5*m.b376*m.b574 + 0.5*
m.b376*m.b587 + 0.5*m.b376*m.b603 + 0.5*m.b376*m.b605 + 0.5*m.b376*m.b608 + 0.5*m.b376*m.b623 +
0.5*m.b376*m.b628 + 0.5*m.b376*m.b664 + 0.5*m.b376*m.b670 + m.b376*m.b673 + 0.5*m.b376*m.b674 +
0.5*m.b376*m.b676 + 0.5*m.b376*m.b681 + m.b376*m.x853 + 0.5*m.b377*m.b386 + 0.5*m.b377*m.b392 +
0.5*m.b377*m.b395 + 0.5*m.b377*m.b396 + 0.5*m.b377*m.b419 + 0.5*m.b377*m.b440 + 0.5*m.b377*m.b442
+ 0.5*m.b377*m.b451 + 0.5*m.b377*m.b479 + 0.5*m.b377*m.b485 + 0.5*m.b377*m.b489 + 0.5*m.b377*
m.b493 + 0.5*m.b377*m.b505 + 0.5*m.b377*m.b520 + 0.5*m.b377*m.b537 + m.b377*m.b552 + m.b377*
m.b556 + 0.5*m.b377*m.b569 + m.b377*m.b578 + 0.5*m.b377*m.b581 + 0.5*m.b377*m.b585 + 0.5*m.b377*
m.b609 + 0.5*m.b377*m.b611 + 0.5*m.b377*m.b618 + 0.5*m.b377*m.b624 + 0.5*m.b377*m.b625 + 0.5*
m.b377*m.b627 + 0.5*m.b377*m.b630 + 0.5*m.b377*m.b631 + 0.5*m.b377*m.b636 + 0.5*m.b377*m.b638 +
0.5*m.b377*m.b639 + 0.5*m.b377*m.b646 + 0.5*m.b377*m.b648 + 0.5*m.b377*m.b651 + 0.5*m.b377*m.b656
+ 0.5*m.b377*m.b666 + 0.5*m.b378*m.b379 + 0.5*m.b378*m.b382 + m.b378*m.b384 + 0.5*m.b378*m.b389
+ 0.5*m.b378*m.b391 + 0.5*m.b378*m.b392 + 0.5*m.b378*m.b401 + 0.5*m.b378*m.b405 + 0.5*m.b378*
m.b415 + 0.5*m.b378*m.b433 + 0.5*m.b378*m.b444 + 0.5*m.b378*m.b448 + m.b378*m.b459 + 0.5*m.b378*
m.b462 + 0.5*m.b378*m.b463 + 0.5*m.b378*m.b469 + 0.5*m.b378*m.b495 + 0.5*m.b378*m.b503 + 0.5*
m.b378*m.b504 + 0.5*m.b378*m.b508 + 0.5*m.b378*m.b514 + 0.5*m.b378*m.b517 + 0.5*m.b378*m.b518 +
0.5*m.b378*m.b521 + 0.5*m.b378*m.b523 + 0.5*m.b378*m.b524 + 0.5*m.b378*m.b527 + 0.5*m.b378*m.b528
+ 0.5*m.b378*m.b542 + 0.5*m.b378*m.b543 + 0.5*m.b378*m.b548 + 0.5*m.b378*m.b577 + 0.5*m.b378*
m.b579 + 0.5*m.b378*m.b588 + 0.5*m.b378*m.b596 + 0.5*m.b378*m.b609 + 0.5*m.b378*m.b610 + 0.5*
m.b378*m.b612 + 0.5*m.b378*m.b618 + 0.5*m.b378*m.b630 + 0.5*m.b378*m.b634 + 0.5*m.b378*m.b640 +
0.5*m.b378*m.b644 + 0.5*m.b378*m.b649 + m.b378*m.b654 + 0.5*m.b378*m.b655 + 0.5*m.b378*m.b665 +
0.5*m.b378*m.b668 + 0.5*m.b378*m.b675 + 0.5*m.b378*m.b679 + 0.5*m.b379*m.b382 + 0.5*m.b379*m.b384
+ 0.5*m.b379*m.b389 + 0.5*m.b379*m.b391 + 0.5*m.b379*m.b392 + 0.5*m.b379*m.b417 + 0.5*m.b379*
m.b441 + 0.5*m.b379*m.b444 + 0.5*m.b379*m.b448 + 0.5*m.b379*m.b455 + 0.5*m.b379*m.b459 + 0.5*
m.b379*m.b460 + 0.5*m.b379*m.b462 + 0.5*m.b379*m.b463 + 0.5*m.b379*m.b466 + 0.5*m.b379*m.b469 +
0.5*m.b379*m.b494 + 0.5*m.b379*m.b496 + 0.5*m.b379*m.b503 + 0.5*m.b379*m.b506 + m.b379*m.b514 +
m.b379*m.b521 + m.b379*m.b523 + 0.5*m.b379*m.b524 + 0.5*m.b379*m.b532 + 0.5*m.b379*m.b534 + 0.5*
m.b379*m.b542 + 0.5*m.b379*m.b588 + 0.5*m.b379*m.b593 + 0.5*m.b379*m.b598 + 0.5*m.b379*m.b599 +
0.5*m.b379*m.b609 + 0.5*m.b379*m.b610 + m.b379*m.b612 + 0.5*m.b379*m.b617 + 0.5*m.b379*m.b618 +
0.5*m.b379*m.b630 + 0.5*m.b379*m.b644 + 0.5*m.b379*m.b654 + 0.5*m.b379*m.b668 + 0.5*m.b379*m.b679
+ 0.5*m.b380*m.b387 + 0.5*m.b380*m.b401 + 0.5*m.b380*m.b403 + 0.5*m.b380*m.b405 + 0.5*m.b380*
m.b426 + 0.5*m.b380*m.b447 + 0.5*m.b380*m.b452 + 0.5*m.b380*m.b453 + 0.5*m.b380*m.b511 + 0.5*
m.b380*m.b525 + 0.5*m.b380*m.b528 + 0.5*m.b380*m.b565 + m.b380*m.b575 + 0.5*m.b380*m.b577 + 0.5*
m.b380*m.b579 + 0.5*m.b380*m.b580 + 0.5*m.b380*m.b594 + 0.5*m.b380*m.b607 + 0.5*m.b380*m.b653 +
0.5*m.b380*m.b667 + 0.5*m.b380*m.b680 + m.b380*m.x863 + 0.5*m.b381*m.b386 + 0.5*m.b381*m.b393 +
0.5*m.b381*m.b397 + 0.5*m.b381*m.b428 + 0.5*m.b381*m.b440 + 0.5*m.b381*m.b450 + 0.5*m.b381*m.b458
+ 0.5*m.b381*m.b465 + m.b381*m.b471 + 0.5*m.b381*m.b490 + 0.5*m.b381*m.b491 + 0.5*m.b381*m.b526
+ 0.5*m.b381*m.b547 + 0.5*m.b381*m.b549 + m.b381*m.b559 + 0.5*m.b381*m.b561 + 0.5*m.b381*m.b567
+ 0.5*m.b381*m.b581 + 0.5*m.b381*m.b582 + 0.5*m.b381*m.b591 + 0.5*m.b381*m.b595 + 0.5*m.b381*
m.b602 + 0.5*m.b381*m.b611 + 0.5*m.b381*m.b614 + 0.5*m.b381*m.b619 + 0.5*m.b381*m.b626 + 0.5*
m.b381*m.b627 + 0.5*m.b381*m.b632 + 0.5*m.b381*m.b635 + 0.5*m.b381*m.b657 + m.b381*m.b661 + 0.5*
m.b381*m.b670 + 0.5*m.b381*m.b671 + m.b381*m.b672 + 0.5*m.b381*m.b678 + 0.5*m.b381*m.b714 + 0.5*
m.b381*m.b761 + 0.5*m.b381*m.b765 + 0.5*m.b381*m.b790 + 0.5*m.b381*m.b798 + 0.5*m.b381*m.b804 +
0.5*m.b381*m.b809 + 0.5*m.b381*m.b811 + 0.5*m.b381*m.b816 + 0.5*m.b381*m.b823 + 0.5*m.b381*m.b826
+ 0.5*m.b382*m.b384 + 0.5*m.b382*m.b387 + m.b382*m.b389 + 0.5*m.b382*m.b391 + 0.5*m.b382*m.b392
+ 0.5*m.b382*m.b403 + 0.5*m.b382*m.b416 + 0.5*m.b382*m.b422 + 0.5*m.b382*m.b425 + 0.5*m.b382*
m.b426 + 0.5*m.b382*m.b437 + 0.5*m.b382*m.b444 + 0.5*m.b382*m.b448 + 0.5*m.b382*m.b459 + 0.5*
m.b382*m.b462 + 0.5*m.b382*m.b463 + 0.5*m.b382*m.b468 + m.b382*m.b469 + 0.5*m.b382*m.b476 + 0.5*
m.b382*m.b478 + 0.5*m.b382*m.b498 + 0.5*m.b382*m.b501 + 0.5*m.b382*m.b502 + 0.5*m.b382*m.b503 +
0.5*m.b382*m.b512 + 0.5*m.b382*m.b514 + 0.5*m.b382*m.b521 + 0.5*m.b382*m.b522 + 0.5*m.b382*m.b523
+ 0.5*m.b382*m.b524 + 0.5*m.b382*m.b525 + 0.5*m.b382*m.b529 + 0.5*m.b382*m.b538 + 0.5*m.b382*
m.b542 + 0.5*m.b382*m.b571 + 0.5*m.b382*m.b588 + 0.5*m.b382*m.b604 + 0.5*m.b382*m.b609 + m.b382*
m.b610 + 0.5*m.b382*m.b612 + 0.5*m.b382*m.b618 + 0.5*m.b382*m.b629 + 0.5*m.b382*m.b630 + 0.5*
m.b382*m.b637 + 0.5*m.b382*m.b644 + 0.5*m.b382*m.b653 + 0.5*m.b382*m.b654 + 0.5*m.b382*m.b668 +
0.5*m.b382*m.b679 + 0.5*m.b383*m.b390 + 0.5*m.b383*m.b394 + 0.5*m.b383*m.b397 + 0.5*m.b383*m.b402
+ 0.5*m.b383*m.b410 + 0.5*m.b383*m.b411 + 0.5*m.b383*m.b423 + 0.5*m.b383*m.b424 + 0.5*m.b383*
m.b428 + 0.5*m.b383*m.b458 + 0.5*m.b383*m.b467 + 0.5*m.b383*m.b477 + 0.5*m.b383*m.b482 + 0.5*
m.b383*m.b488 + 0.5*m.b383*m.b490 + 0.5*m.b383*m.b497 + 0.5*m.b383*m.b499 + 0.5*m.b383*m.b500 +
0.5*m.b383*m.b505 + 0.5*m.b383*m.b509 + 0.5*m.b383*m.b510 + 0.5*m.b383*m.b526 + m.b383*m.b530 +
0.5*m.b383*m.b531 + 0.5*m.b383*m.b536 + 0.5*m.b383*m.b544 + 0.5*m.b383*m.b547 + 0.5*m.b383*m.b553
+ m.b383*m.b562 + 0.5*m.b383*m.b566 + 0.5*m.b383*m.b569 + 0.5*m.b383*m.b570 + 0.5*m.b383*m.b572
+ m.b383*m.b574 + 0.5*m.b383*m.b576 + 0.5*m.b383*m.b583 + 0.5*m.b383*m.b586 + 0.5*m.b383*m.b587
+ 0.5*m.b383*m.b591 + 0.5*m.b383*m.b602 + 0.5*m.b383*m.b603 + m.b383*m.b605 + 0.5*m.b383*m.b608
+ 0.5*m.b383*m.b623 + 0.5*m.b383*m.b628 + 0.5*m.b383*m.b641 + 0.5*m.b383*m.b645 + 0.5*m.b383*
m.b648 + 0.5*m.b383*m.b650 + 0.5*m.b383*m.b656 + 0.5*m.b383*m.b658 + 0.5*m.b383*m.b662 + 0.5*
m.b383*m.b664 + 0.5*m.b383*m.b666 + 0.5*m.b383*m.b670 + 0.5*m.b383*m.b673 + 0.5*m.b383*m.b674 +
0.5*m.b383*m.b676 + 0.5*m.b383*m.b681 + 0.5*m.b384*m.b389 + 0.5*m.b384*m.b391 + 0.5*m.b384*m.b392
+ 0.5*m.b384*m.b401 + 0.5*m.b384*m.b405 + 0.5*m.b384*m.b415 + 0.5*m.b384*m.b433 + 0.5*m.b384*
m.b444 + 0.5*m.b384*m.b448 + m.b384*m.b459 + 0.5*m.b384*m.b462 + 0.5*m.b384*m.b463 + 0.5*m.b384*
m.b469 + 0.5*m.b384*m.b495 + 0.5*m.b384*m.b503 + 0.5*m.b384*m.b504 + 0.5*m.b384*m.b508 + 0.5*
m.b384*m.b514 + 0.5*m.b384*m.b517 + 0.5*m.b384*m.b518 + 0.5*m.b384*m.b521 + 0.5*m.b384*m.b523 +
0.5*m.b384*m.b524 + 0.5*m.b384*m.b527 + 0.5*m.b384*m.b528 + 0.5*m.b384*m.b542 + 0.5*m.b384*m.b543
+ 0.5*m.b384*m.b548 + 0.5*m.b384*m.b577 + 0.5*m.b384*m.b579 + 0.5*m.b384*m.b588 + 0.5*m.b384*
m.b596 + 0.5*m.b384*m.b609 + 0.5*m.b384*m.b610 + 0.5*m.b384*m.b612 + 0.5*m.b384*m.b618 + 0.5*
m.b384*m.b630 + 0.5*m.b384*m.b634 + 0.5*m.b384*m.b640 + 0.5*m.b384*m.b644 + 0.5*m.b384*m.b649 +
m.b384*m.b654 + 0.5*m.b384*m.b655 + 0.5*m.b384*m.b665 + 0.5*m.b384*m.b668 + 0.5*m.b384*m.b675 +
0.5*m.b384*m.b679 + 0.5*m.b385*m.b388 + 0.5*m.b385*m.b398 + 0.5*m.b385*m.b400 + m.b385*m.b409 +
0.5*m.b385*m.b414 + 0.5*m.b385*m.b420 + 0.5*m.b385*m.b422 + 0.5*m.b385*m.b435 + 0.5*m.b385*m.b443
+ 0.5*m.b385*m.b445 + 0.5*m.b385*m.b448 + 0.5*m.b385*m.b473 + 0.5*m.b385*m.b476 + 0.5*m.b385*
m.b486 + 0.5*m.b385*m.b487 + 0.5*m.b385*m.b496 + 0.5*m.b385*m.b498 + 0.5*m.b385*m.b503 + 0.5*
m.b385*m.b512 + 0.5*m.b385*m.b527 + 0.5*m.b385*m.b529 + 0.5*m.b385*m.b532 + 0.5*m.b385*m.b534 + | |
import warnings
import numpy as np
from einsteinpy.integrators import GeodesicIntegrator
from .utils import _P, _kerr, _kerrnewman, _sch
class Geodesic:
"""
Base Class for defining Geodesics
Working in Geometrized Units (M-Units),
with :math:`c = G = M = k_e = 1`
"""
def __init__(
self,
metric,
metric_params,
position,
momentum,
time_like=True,
return_cartesian=True,
**kwargs,
):
"""
Constructor
Parameters
----------
metric : str
Name of the metric. Currently, these metrics are supported:
1. Schwarzschild
2. Kerr
3. KerrNewman
metric_params : array_like
Tuple of parameters to pass to the metric
E.g., ``(a,)`` for Kerr
position : array_like
3-Position
4-Position is initialized by taking ``t = 0.0``
momentum : array_like
3-Momentum
4-Momentum is calculated automatically,
considering the value of ``time_like``
time_like : bool, optional
Determines type of Geodesic
``True`` for Time-like geodesics
``False`` for Null-like geodesics
Defaults to ``True``
return_cartesian : bool, optional
Whether to return calculated positions in Cartesian Coordinates
This only affects the coordinates. Momenta are dimensionless
quantities, and are returned in Spherical Polar Coordinates.
Defaults to ``True``
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
For trajectories, that either lead to a capture or a grazing
geodesic, a decreased value of ``0.01`` or less is recommended.
Defaults to ``1.0``
suppress_warnings : bool
Whether to suppress warnings during simulation
Warnings are shown for every step, where numerical errors
exceed specified tolerance (controlled by ``rtol`` and ``atol``)
Defaults to ``False``
"""
# Contravariant Metrics, defined so far
_METRICS = {
"Schwarzschild": _sch,
"Kerr": _kerr,
"KerrNewman": _kerrnewman,
}
if metric not in _METRICS:
raise NotImplementedError(
f"'{metric}' is unsupported. Currently, these metrics are supported:\
\n1. Schwarzschild\n2. Kerr\n3. KerrNewman"
)
self.metric_name = metric
self.metric = _METRICS[metric]
self.metric_params = metric_params
if metric == "Schwarzschild":
self.metric_params = (0.0,)
self.position = np.array([0.0, *position])
self.momentum = _P(
self.metric, metric_params, self.position, momentum, time_like
)
self.time_like = time_like
self.kind = "Time-like" if time_like else "Null-like"
self.coords = "Cartesian" if return_cartesian else "Spherical Polar"
self._trajectory = self.calculate_trajectory(**kwargs)
    def __repr__(self):
        """
        Return a detailed, multi-line summary of the geodesic: its kind
        (Time-like / Null-like), metric name and parameters, initial
        4-position/4-momentum, the integrated trajectory and the output
        coordinate system.

        Note: rendering embeds ``self.trajectory``, so the full trajectory
        array is printed inline.
        """
        # The ``\n\`` endings emit a newline and continue the f-string on
        # the next source line; the leading spaces of each continuation
        # line are part of the output.
        return f"""Geodesic Object:(\n\
            Type : ({self.kind}),\n\
            Metric : ({self.metric_name}),\n\
            Metric Parameters : ({self.metric_params}),\n\
            Initial 4-Position : ({self.position}),\n\
            Initial 4-Momentum : ({self.momentum}),\n\
            Trajectory = (\n\
                {self.trajectory}\n\
            ),\n\
            Output Position Coordinate System = ({self.coords})\n\
        ))"""
def __str__(self):
return self.__repr__()
@property
def trajectory(self):
"""
Returns the trajectory of the test particle
"""
return self._trajectory
def calculate_trajectory(self, **kwargs):
"""
Calculate trajectory in spacetime
Parameters
----------
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Returns
-------
~numpy.ndarray
N-element numpy array, containing step count
~numpy.ndarray
Shape-(N, 8) numpy array, containing
(4-Position, 4-Momentum) for each step
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
For trajectories, that either lead to a capture or a grazing
geodesic, a decreased value of ``0.01`` or less is recommended.
Defaults to ``1.0``
suppress_warnings : bool
Whether to suppress warnings during simulation
Warnings are shown for every step, where numerical errors
exceed specified tolerance (controlled by ``rtol`` and ``atol``)
Defaults to ``False``
"""
g, g_prms = self.metric, self.metric_params
q0, p0 = self.position, self.momentum
tl = self.time_like
N = kwargs.get("steps", 50)
dl = kwargs.get("delta", 0.5)
rtol = kwargs.get("rtol", 1e-2)
atol = kwargs.get("atol", 1e-2)
order = kwargs.get("order", 2)
omega = kwargs.get("omega", 1.0)
sw = kwargs.get("suppress_warnings", False)
steps = np.arange(N)
geodint = GeodesicIntegrator(
metric=g,
metric_params=g_prms,
q0=q0,
p0=p0,
time_like=tl,
steps=N,
delta=dl,
rtol=rtol,
atol=atol,
order=order,
omega=omega,
suppress_warnings=sw,
)
for i in steps:
geodint.step()
vecs = np.array(geodint.results, dtype=float)
q1 = vecs[:, 0]
p1 = vecs[:, 1]
results = np.hstack((q1, p1))
# Ignoring
# q2 = vecs[:, 2]
# p2 = vecs[:, 3]
if self.coords == "Cartesian":
# Converting to Cartesian from Spherical Polar Coordinates
# Note that momenta cannot be converted this way,
# due to ambiguities in the signs of v_r and v_th (velocities)
t, r, th, ph = q1.T
pt, pr, pth, pph = p1.T
x = r * np.sin(th) * np.cos(ph)
y = r * np.sin(th) * np.sin(ph)
z = r * np.cos(th)
cart_results = np.vstack((t, x, y, z, pt, pr, pth, pph)).T
return steps, cart_results
return steps, results
class Nulllike(Geodesic):
    """
    Convenience subclass of ``Geodesic`` for Null-like (massless)
    trajectories, i.e. ``time_like=False``.
    """
    def __init__(
        self, metric, metric_params, position, momentum, return_cartesian=True, **kwargs
    ):
        """
        Constructor

        Parameters
        ----------
        metric : str
            Name of the metric. Currently supported:
            "Schwarzschild", "Kerr" and "KerrNewman"
        metric_params : array_like
            Tuple of parameters to pass to the metric,
            e.g. ``(a,)`` for Kerr
        position : array_like
            3-Position; the 4-Position is initialized with ``t = 0.0``
        momentum : array_like
            3-Momentum; the 4-Momentum is computed automatically for a
            null geodesic
        return_cartesian : bool, optional
            Whether to return calculated positions in Cartesian
            Coordinates. This only affects the coordinates; momenta are
            always returned in Spherical Polar Coordinates.
            Defaults to ``True``
        kwargs : dict
            Keyword parameters for the Geodesic Integrator:
            ``steps`` (default ``50``), ``delta`` (``0.5``),
            ``rtol`` / ``atol`` (``1e-2``), ``order`` (``2``),
            ``omega`` (``1.0``; decrease to ``0.01`` or less for capture
            or grazing geodesics) and ``suppress_warnings`` (``False``)
        """
        super().__init__(
            metric=metric,
            metric_params=metric_params,
            position=position,
            momentum=momentum,
            time_like=False,
            return_cartesian=return_cartesian,
            **kwargs,
        )
class Timelike(Geodesic):
"""
Class for defining Time-like Geodesics
"""
def __init__(
self, metric, metric_params, position, momentum, return_cartesian=True, **kwargs
):
"""
Constructor
Parameters
----------
metric : str
Name of the metric. Currently, these metrics are supported:
1. Schwarzschild
2. Kerr
3. KerrNewman
metric_params : array_like
Tuple of parameters to pass to the metric
E.g., ``(a,)`` for Kerr
position : array_like
3-Position
4-Position is initialized by taking ``t = 0.0``
momentum : array_like
3-Momentum
4-Momentum is calculated automatically,
considering the value of ``time_like``
return_cartesian : bool, optional
Whether to return calculated positions in Cartesian Coordinates
This only affects the coordinates. The momenta dimensionless
quantities, and are returned in Spherical Polar Coordinates.
Defaults to ``True``
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
For trajectories, that either lead to a capture or a grazing
geodesic, a decreased value of ``0.01`` or less is recommended.
| |
"""
Copyright (C) 2020 by the Georgia Tech Research Institute (GTRI)
This software may be modified and distributed under the terms of
the BSD 3-Clause license. See the LICENSE file for details.
"""
import json
import tempfile
import unittest
import uuid
from pathlib import Path
import pandas as pd
from model_processing.graph_creation import Evaluator, Manager, MDTranslator
from model_processing.graph_objects import DiEdge, PropertyDiGraph, Vertex
from . import DATA_DIRECTORY, OUTPUT_DIRECTORY, PATTERNS
class TestManager(unittest.TestCase):
    """
    Tests for Manager: evaluator construction, pattern-JSON loading,
    graph diffing between model versions, and reporting of diffs to
    Excel and JSON.
    """
    def setUp(self):
        pass
    def test_ids_assigned_in_change(self):
        """UML ids minted for the baseline model must be reused, with the
        same values, for the matching elements of the changed model."""
        manager = Manager(
            excel_path=[
                (
                    DATA_DIRECTORY
                    / "Composition Example 2 Model Baseline.xlsx"
                ),
                (DATA_DIRECTORY / "Composition Example 2 Model Changed.xlsx"),
            ],
            json_path=[(PATTERNS / "Composition.json")],
        )
        eval_base = manager.evaluators[0]
        eval_change = manager.evaluators[1]
        eval_base.rename_df_columns()
        eval_base.add_missing_columns()
        eval_base.to_property_di_graph()
        eval_change.rename_df_columns()
        eval_change.add_missing_columns()
        eval_change.to_property_di_graph()
        # Every baseline id key must also exist in the changed model.
        self.assertTrue(
            set(eval_base.translator.uml_id.keys()).issubset(
                set(eval_change.translator.uml_id.keys())
            )
        )
        # 'count' is translator bookkeeping, not a model element id.
        for key in eval_base.translator.uml_id.keys():
            if key != "count":
                assert (
                    eval_base.translator.uml_id[key]
                    == eval_change.translator.uml_id[key]
                )
    def test_get_json_data(self):
        """The loaded pattern JSON should expose exactly the expected keys."""
        manager = Manager(
            excel_path=[
                DATA_DIRECTORY / "Composition Example.xlsx" for i in range(2)
            ],
            json_path=[PATTERNS / "Composition.json"],
        )
        expected_keys = [
            "Columns to Navigation Map",
            "Pattern Graph Edges",
            "Root Node",
            "Vertex MetaTypes",
            "Vertex Settings",
            "Vertex Stereotypes",
        ]
        assert expected_keys == list(manager.json_data[0].keys())
    def test_create_evaluators(self):
        """One Evaluator per input workbook, built during __init__."""
        manager = Manager(
            excel_path=[
                DATA_DIRECTORY / "Composition Example.xlsx" for i in range(2)
            ],
            json_path=[PATTERNS / "Composition.json"],
        )
        # weak test: create_evaluators() run during init
        self.assertEqual(2, len(manager.evaluators))
        for eval in manager.evaluators:
            self.assertIsInstance(eval, Evaluator)
    def test_get_pattern_graph_diff(self):
        """Diff two hand-built property digraphs and check the classified
        output: a rename-based change, added/deleted edges, and an
        unstable (ambiguous) match set."""
        manager = Manager(
            excel_path=[
                DATA_DIRECTORY / "Composition Example.xlsx" for i in range(2)
            ],
            json_path=[PATTERNS / "Composition.json"],
        )
        # Create the actual graph object because get_pattern_graph_diff
        # employs the graph object properties
        # with 2 different original edges of the same type I can induce a
        # match based on rename and an unstable pair.
        og_eval = manager.evaluators[0]
        og_graph = PropertyDiGraph()
        og_eval.prop_di_graph = og_graph
        ch_eval = manager.evaluators[1]
        ch_graph = PropertyDiGraph()
        ch_eval.prop_di_graph = ch_graph
        with tempfile.TemporaryDirectory() as tmpdir:
            tmpdir = Path(tmpdir)
            # Baseline edge whose source is renamed in the changed graph.
            orig_edge = DiEdge(
                source=Vertex(name="Car", id="_001"),
                target=Vertex(name="car", id="_002"),
                edge_attribute="type",
            )
            renm_source = DiEdge(
                source=Vertex(
                    name="Subaru",
                    id="_001",
                    original_name="Car",
                    original_id="_001",
                    node_types=["Atomic Thing"],
                ),
                target=Vertex(name="car", id="_002"),
                edge_attribute="type",
            )
            orig_edge2 = DiEdge(
                source=Vertex(name="Car", id="_001"),
                target=Vertex(name="Vehicle", id="_003"),
                edge_attribute="type",
            )
            # Two changed edges that each share one endpoint with
            # orig_edge2 -> ambiguous ("unstable") matches.
            unstab_edge1 = DiEdge(
                source=Vertex(name="Car", id="_001"),
                target=Vertex(name="Not Car", id="_100"),
                edge_attribute="type",
            )
            unstab_edge2 = DiEdge(
                source=Vertex(name="Cup", id="_101"),
                target=Vertex(name="Vehicle", id="_003"),
                edge_attribute="type",
            )
            added_edge = DiEdge(
                source=Vertex(
                    name="New Source",
                    id=uuid.uuid4(),
                    node_types=["Atomic Thing"],
                ),
                target=Vertex(
                    name="New Target",
                    id=uuid.uuid4(),
                    node_types=["Atomic Thing"],
                ),
                edge_attribute="newEdge",
            )
            del_edge = DiEdge(
                source=Vertex(name="Old Source", id="_010"),
                target=Vertex(name="Old Target", id="_011"),
                edge_attribute="oldEdge",
            )
            original_edges = [orig_edge, orig_edge2, del_edge]
            change_edge = [
                renm_source,
                unstab_edge1,
                unstab_edge2,
                added_edge,
            ]
            orig_attrs = [
                {"diedge": edge, "edge_attribute": edge.edge_attribute}
                for edge in original_edges
            ]
            change_attrs = [
                {"diedge": edge, "edge_attribute": edge.edge_attribute}
                for edge in change_edge
            ]
            # Populate each graph: one node per endpoint (keyed and
            # attributed by vertex name), then the edge with its attrs.
            for edge in zip(original_edges, orig_attrs):
                og_graph.add_node(
                    edge[0].source.name,
                    **{edge[0].source.name: edge[0].source}
                )
                og_graph.add_node(
                    edge[0].target.name,
                    **{edge[0].target.name: edge[0].target}
                )
                og_graph.add_edge(
                    edge[0].source.name, edge[0].target.name, **edge[1]
                )
            for edge in zip(change_edge, change_attrs):
                ch_graph.add_node(
                    edge[0].source.name,
                    **{edge[0].source.name: edge[0].source}
                )
                ch_graph.add_node(
                    edge[0].target.name,
                    **{edge[0].target.name: edge[0].target}
                )
                ch_graph.add_edge(
                    edge[0].source.name, edge[0].target.name, **edge[1]
                )
            ch_dict = manager.get_pattern_graph_diff(out_directory=tmpdir)
            # "0-1" keys the diff between evaluators 0 and 1.
            ch_dict = ch_dict["0-1"]
            changes = ch_dict["Changes"]
            add = changes["Added"] # a list
            deld = changes["Deleted"] # a list
            unstab = ch_dict["Unstable Pairs"] # DiEdge: [DiEdge ...]
            unstab[orig_edge2] = set(unstab[orig_edge2])
            change = changes[orig_edge]
            assert change[0] == renm_source
            # TODO: Find new edges if type is not found in original and if
            # the edge is composed of at least one new model element.
            # NOTE(review): added_edge is expected twice here — presumably
            # an artifact of how get_pattern_graph_diff accumulates adds;
            # confirm this duplication is intentional.
            assert add == [added_edge, added_edge]
            assert deld == [del_edge]
            assert unstab == {
                orig_edge2: {unstab_edge1, renm_source, unstab_edge2}
            }
    def test_changes_to_excel(self):
        """changes_to_excel should serialize a change dict to a workbook
        whose columns round-trip through pandas as expected."""
        manager = Manager(
            excel_path=[
                DATA_DIRECTORY / "Composition Example.xlsx" for i in range(1)
            ],
            json_path=[PATTERNS / "Composition.json"],
        )
        og_edge = DiEdge(
            source=Vertex(name="green"),
            target=Vertex(name="apple"),
            edge_attribute="fruit",
        )
        change_edge = DiEdge(
            source=Vertex(name="gala"),
            target=Vertex(name="apple"),
            edge_attribute="fruit",
        )
        added_edge = DiEdge(
            source=Vertex(name="blueberry"),
            target=Vertex(name="berry"),
            edge_attribute="bush",
        )
        deleted_edge = DiEdge(
            source=Vertex(name="yellow"),
            target=Vertex(name="delicious"),
            edge_attribute="apple",
        )
        unstable_key = DiEdge(
            source=Vertex(name="tomato"),
            target=Vertex(name="fruit"),
            edge_attribute="fruit",
        )
        unstable_one = DiEdge(
            source=Vertex(name="tomato"),
            target=Vertex(name="vegetable"),
            edge_attribute="fruit",
        )
        unstable_two = DiEdge(
            source=Vertex(name="tomahto"),
            target=Vertex(name="fruit"),
            edge_attribute="fruit",
        )
        # Hand-built change dictionary in the shape produced by
        # get_pattern_graph_diff for an evaluator pair "0-1".
        fake_datas = {
            "0-1": {
                "Changes": {
                    "Added": [added_edge],
                    "Deleted": [deleted_edge],
                    og_edge: [change_edge],
                },
                "Unstable Pairs": {
                    unstable_key: [unstable_one, unstable_two]
                },
            }
        }
        manager.evaluator_change_dict = fake_datas
        with tempfile.TemporaryDirectory() as tmpdir:
            outdir = Path(tmpdir)
            manager.changes_to_excel(out_directory=outdir)
            created_file_name = list(outdir.glob("*.xlsx"))[0]
            # NOTE(review): created_file_name is already an absolute path
            # from glob, so pathlib discards OUTPUT_DIRECTORY and this
            # resolves to created_file_name itself — works, but the join
            # is misleading; confirm OUTPUT_DIRECTORY is really intended.
            created_file = OUTPUT_DIRECTORY / created_file_name
            created_df = pd.read_excel(created_file)
            created_dict = created_df.to_dict()
            expected_data = {
                "Edit 1": ["('green', 'apple', 'fruit')"],
                "Edit 2": ["('gala', 'apple', 'fruit')"],
                "Unstable Matches Original": [
                    "('tomato', 'fruit', 'fruit')",
                    "('tomato', 'fruit', 'fruit')",
                ],
                "Unstable Matches Change": [
                    "('tomato', 'vegetable', 'fruit')",
                    "('tomahto', 'fruit', 'fruit')",
                ],
                "Added": ["('blueberry', 'berry', 'bush')"],
                "Deleted": ["('yellow', 'delicious', 'apple')"],
            }
            # pd.Series per column pads ragged lengths with NaN so the
            # frame matches what read_excel produced.
            expected_df = pd.DataFrame(
                data=dict(
                    [(k, pd.Series(v)) for k, v in expected_data.items()]
                )
            )
            expected_dict = expected_df.to_dict()
            self.assertDictEqual(expected_dict, created_dict)
            self.assertTrue(expected_df.equals(created_df))
    def test_graph_difference_to_json(self):
        """Translate a change dict into JSON ops and count each op kind:
        renames, replaces, creates and deletes."""
        manager = Manager(
            excel_path=[
                DATA_DIRECTORY / "Composition Example.xlsx" for i in range(2)
            ],
            json_path=[PATTERNS / "Composition.json"],
        )
        tr = manager.translator[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            tmpdir = Path(tmpdir)
            # Case 1: source vertex renamed (same id, new name).
            orig_edge = DiEdge(
                source=Vertex(name="Car", id="_001"),
                target=Vertex(name="car", id="_002"),
                edge_attribute="type",
            )
            renm_source = DiEdge(
                source=Vertex(
                    name="Subaru",
                    id="_001",
                    original_name="Car",
                    original_id="_001",
                    node_types=["Atomic Thing"],
                ),
                target=Vertex(name="car", id="_002"),
                edge_attribute="type",
            )
            # Case 2: target vertex renamed.
            orig_edge2 = DiEdge(
                source=Vertex(name="Car", id="_001"),
                target=Vertex(name="Vehicle", id="_003"),
                edge_attribute="type",
            )
            renm_target = DiEdge(
                source=Vertex(name="Car", id="_001"),
                target=Vertex(
                    name="vehicle",
                    id="_003",
                    original_name="Vehicle",
                    original_id="_003",
                    node_types=["Composite Thing"],
                ),
                edge_attribute="type",
            )
            # Case 3: both endpoints renamed.
            orig_edge3 = DiEdge(
                source=Vertex(name="subaru", id="_004"),
                target=Vertex(name="Vehicle", id="_005"),
                edge_attribute="type",
            )
            renm_both = DiEdge(
                source=Vertex(
                    name="Subaru",
                    id="_004",
                    original_name="subaru",
                    original_id="_004",
                    node_types=["composite owner"],
                ),
                target=Vertex(
                    name="vehicle",
                    id="_005",
                    original_name="Vehicle",
                    original_id="_005",
                    node_types=["Atomic Thing"],
                ),
                edge_attribute="type",
            )
            # Cases 4-6: brand-new vertices (uuid ids) replacing the
            # source, the target, then both.
            orig_edge4 = DiEdge(
                source=Vertex(name="subaru", id="_004"),
                target=Vertex(name="car", id="_002"),
                edge_attribute="type",
            )
            new_source = DiEdge(
                source=Vertex(
                    name="Subaru",
                    id=uuid.uuid4(),
                    node_types=["Composite Thing"],
                ),
                target=Vertex(name="car", id="_002"),
                edge_attribute="type",
            )
            orig_edge5 = DiEdge(
                source=Vertex(name="Car", id="_001"),
                target=Vertex(name="Vehicle", id="_005"),
                edge_attribute="type",
            )
            new_target = DiEdge(
                source=Vertex(name="Car", id="_001"),
                target=Vertex(
                    name="vehicle",
                    id=uuid.uuid4(),
                    node_types=["Atomic Thing"],
                ),
                edge_attribute="type",
            )
            orig_edge6 = DiEdge(
                source=Vertex(name="Car", id="_007"),
                target=Vertex(name="Vehicle", id="_005"),
                edge_attribute="type",
            )
            new_sub = Vertex(
                name="Subaru", id=uuid.uuid4(), node_types=["Composite Thing"]
            )
            sub_cons = {
                "successors": [
                    {
                        "source": "Subaru",
                        "target": "Car",
                        "edge_attribute": "type",
                    }
                ]
            }
            new_sub.successors = sub_cons
            new_both = DiEdge(
                source=Vertex(
                    name="Subaru",
                    id=uuid.uuid4(),
                    node_types=["Composite Thing"],
                ),
                target=Vertex(
                    name="vehicle",
                    id=uuid.uuid4(),
                    node_types=["Atomic Thing"],
                ),
                edge_attribute="type",
            )
            added_edge = DiEdge(
                source=Vertex(
                    name="New Source",
                    id=uuid.uuid4(),
                    node_types=["Atomic Thing"],
                ),
                target=Vertex(
                    name="New Target",
                    id=uuid.uuid4(),
                    node_types=["Atomic Thing"],
                ),
                edge_attribute="newEdge",
            )
            del_edge = DiEdge(
                source=Vertex(name="Old Source", id="_010"),
                target=Vertex(name="Old Target", id="_011"),
                edge_attribute="oldEdge",
            )
            change_dict = {
                orig_edge: [renm_source],
                orig_edge2: [renm_target],
                orig_edge3: [renm_both],
                orig_edge4: [new_source],
                orig_edge5: [new_target],
                orig_edge6: [new_both],
                "Added": [added_edge],
                "Deleted": [del_edge],
            }
            changes = manager.graph_difference_to_json(
                change_dict=change_dict,
                evaluators="0-1",
                translator=tr,
                out_directory=tmpdir,
            )
            # Tally the first op of each emitted change entry.
            rename = 0
            replace = 0
            create = 0
            delete = 0
            fall_through_ops = []
            for item in changes:
                op = item["ops"][0]["op"]
                if op == "create":
                    create += 1
                elif op == "replace":
                    replace += 1
                elif op == "rename":
                    rename += 1
                elif op == "delete":
                    delete += 1
                else:
                    fall_through_ops.append(op)
            # expect 4 node Renames
            # expect 7 edge replaces (1 is from add edge)
            # expect 6 node creates
            # expect 1 delete
            assert (
                rename == 4 and replace == 7 and create == 6 and delete == 1
            )
            assert not fall_through_ops
    def tearDown(self):
        pass
class TestEvaluator(unittest.TestCase):
# TODO: Make sure all additional graph objects that are desired are
# created by the graph creation logic.
# TODO: Test the PROCESS of some of these functions.
def setUp(self):
data = (PATTERNS / "Composition.json").read_text()
data = json.loads(data)
self.translator = MDTranslator(
json_path=(PATTERNS / "Composition.json"), json_data=data
)
self.evaluator = Evaluator(
excel_file=DATA_DIRECTORY / "Composition Example.xlsx",
translator=self.translator,
)
data_dict = {
"Component": [
"Car",
"Car",
"Car",
"Car",
"Car",
"Car",
"Car",
"Wheel",
"Wheel",
"Wheel",
"Engine",
"Engine",
"Engine",
"Engine",
"Engine",
"Engine",
],
"Position": [
"engine",
"chassis",
"driveshaft",
"front passenger",
"front driver",
"rear passenger",
"rear driver",
"hub",
"tire",
"lug nut",
"one",
"two",
"three",
"four",
"drive output",
"mount",
],
"Part": [
"Engine",
"Chassis",
"Driveshaft",
"Wheel",
"Wheel",
"Wheel",
"Wheel",
"Hub",
"Tire",
"Lug Nut",
"Cylinder",
"Cylinder",
"Cylinder",
"Cylinder",
"Drive Output",
"Mount",
],
}
self.evaluator.df = pd.DataFrame(data=data_dict)
def test_sheets_to_dataframe(self):
data = (PATTERNS / "Composition.json").read_text()
data = json.loads(data)
ex_f = DATA_DIRECTORY / "Composition Example Model Baseline.xlsx"
translator = MDTranslator(
json_path=(PATTERNS / "Composition.json"), json_data=data
)
evaluator = Evaluator(excel_file=ex_f, translator=translator)
file_name = "Composition Example Model Baseline.xlsx"
evaluator.sheets_to_dataframe(excel_file=DATA_DIRECTORY / file_name)
columns_list = [col for col in evaluator.df.columns]
self.assertListEqual(["Component", "Position", "Part"], columns_list)
# 63 ids provided .
self.assertEqual(63, len(evaluator.df_ids))
data2 = (PATTERNS / "Composition.json").read_text()
data2 = json.loads(data2)
ex_f2 = DATA_DIRECTORY / "Composition Example 2 Model Changed.xlsx"
tr2 = MDTranslator(
json_path=(PATTERNS / "Composition.json"), json_data=data2
)
eval = Evaluator(excel_file=ex_f2, translator=tr2)
self.assertFalse(eval.df_renames.empty)
self.assertFalse(eval.df_ids.empty)
def test_has_rename(self):
data = (PATTERNS / "Composition.json").read_text()
data | |
# <gh_stars>0
import rdflib # FIXME decouple
import ontquery as oq
from hyputils.hypothesis import idFromShareLink, shareLinkFromId
from pyontutils.sheets import update_sheet_values, get_note, Sheet
from pyontutils.scigraph import Vocabulary
from pyontutils.namespaces import ilxtr, TEMP, definition
from pyontutils.closed_namespaces import rdfs, rdf
from neurondm import NeuronCUT, Config, Phenotype, LogicalPhenotype
from neurondm.models.cuts import make_cut_id, fixname
from neurondm.core import log, OntId, OntTerm
def normalizeDoi(doi):
    """
    Normalize a DOI reference to the bare ``10.<suffix>`` form.

    Handles three input shapes:
    * a resolver URL such as ``https://doi.org/10.1000/xyz``
    * a ``doi:`` / ``DOI:`` prefixed string
    * an already-bare DOI (returned unchanged)

    Bug fix: the previous implementation used ``doi.strip('doi:')``,
    which strips any of the characters {'d', 'o', 'i', ':'} from BOTH
    ends — e.g. 'doi:10.1093/nardi' lost its trailing 'i'. The prefix
    is now removed by slicing off exactly four characters.
    """
    if 'http' in doi:
        doi = '10.' + doi.split('.org/10.', 1)[-1]
    elif doi.startswith('doi:') or doi.startswith('DOI:'):
        # Drop exactly the 4-character prefix, nothing else.
        doi = doi[4:]
    return doi
def select_by_curie_rank(results):
    """
    Pick the single best entry from a list of term-search results,
    preferring curies whose prefix appears earliest in ``ranking``.

    Sort keys, best (lowest) to worst:
    1. prefix found in ``ranking``  -> its index in ``ranking``
    2. prefix not in ``ranking``    -> ``len(results) * 2``
    3. no ``'curie'`` key at all    -> ``len(results) * 3``

    Bug fix: the previous implementation called
    ``ranking.index(result['curie'])`` with the FULL curie (e.g.
    'CHEBI:123'), which can never appear in the prefix tuple, so it
    always raised ValueError and collapsed every ranked prefix to the
    same fallback score — the ranking was dead code. It now looks up
    the prefix itself.
    """
    ranking = 'CHEBI', 'UBERON', 'PR', 'NCBIGene', 'NCBITaxon', 'GO', 'SAO', 'NLXMOL'
    def key(result):
        if 'curie' not in result:
            # Results without a curie sort last.
            return len(results) * 3
        # maxsplit=1 tolerates (rare) curies with extra colons.
        prefix, _ = result['curie'].split(':', 1)
        if prefix in ranking:
            return ranking.index(prefix)
        return len(results) * 2
    return sorted(results, key=key)[0]
def process_note(raw_note):
    """
    Yield ``(predicate, object)`` pairs extracted from a free-text note.

    Each non-empty line of the note becomes one literatureCitation pair:
    hypothes.is share links and DOIs are normalized to URIRefs, other
    http(s) links pass through as URIRefs, and anything else becomes a
    plain Literal. Yields nothing when the note is ``None``.
    """
    if raw_note is None:
        return None
    predicate = ilxtr.literatureCitation
    stripped_lines = (line.strip() for line in raw_note.split('\n'))
    for line in stripped_lines:
        if not line:
            continue
        hypothesis_id = idFromShareLink(line)
        if hypothesis_id:
            # TODO getDocInfoFromHypothesisId(hypothesis_id)
            yield predicate, rdflib.URIRef(shareLinkFromId(hypothesis_id))
        elif 'doi:' in line or 'DOI:' in line or 'doi.org' in line:
            yield predicate, rdflib.URIRef('https://doi.org/' + normalizeDoi(line))
        elif line.startswith('http'):  # TODO parse the other things
            yield predicate, rdflib.URIRef(line)
        else:
            yield predicate, rdflib.Literal(line)  # FIXME cull editorial notes
def sheet_to_neurons(values, notes_index, expect_pes):
    """
    Convert spreadsheet rows into NeuronCUT objects.

    Parameters
    ----------
    values : list
        Header row followed by one row per neuron.
    notes_index : mapping
        Cell-note lookup consumed by ``get_note``.
    expect_pes : dict
        Neuron id -> expected phenotype count, used to validate the
        roundtrip against the existing 'common-usage-types' config.

    Returns
    -------
    (config, errors, new, release) : tuple
        The Config the neurons were created under, accumulated error
        tuples, labels not present in the existing config, and neurons
        whose Status column marked them for release.
    """
    # TODO import existing ids to register by label
    sgv = Vocabulary()
    e_config = Config('common-usage-types')
    e_config.load_existing()
    query = oq.OntQuery(oq.plugin.get('rdflib')(e_config.core_graph), instrumented=OntTerm)
    # FIXME clear use case for the remaining bound to whatever query produced it rather
    # than the other way around ... how to support this use case ...
    existing = {str(n.origLabel):n for n in e_config.neurons()}
    def convert_header(header):
        # Phenotype columns ('has...') map into the ilxtr namespace.
        if header.startswith('has'):  # FIXME use a closed namespace
            return ilxtr[header]
        else:
            return None
    def convert_other(header):
        # Map non-phenotype columns to their metadata predicates.
        if header == 'label':
            return rdfs.label
        elif header == 'curie':
            return rdf.type
        elif header == 'definition':
            return definition
        else:
            header = header.replace(' ', '_')
            return TEMP[header]  # FIXME
    def mapCell(cell, syns=False):
        # Resolve a cell's text to an (iri, label) pair, trying curie
        # parse, SciGraph term search, then a direct label query; falls
        # back to synonym search before giving up with (None, None).
        search_prefixes = ('UBERON', 'CHEBI', 'PR', 'NCBITaxon', 'NCBIGene', 'ilxtr', 'NIFEXT', 'SAO', 'NLXMOL',
                           'BIRNLEX',)
        if ':' in cell and ' ' not in cell:
            log.debug(cell)
            if 'http' in cell:
                if cell.startswith('http'):
                    t = OntTerm(iri=cell)
                else:
                    return None, None  # garbage with http inline
            else:
                t = OntTerm(cell, exclude_prefix=('FMA',))  # FIXME need better error message in ontquery
            return t.u, t.label
        result = [r for r in sgv.findByTerm(cell, searchSynonyms=syns, prefix=search_prefixes)
                  if not r['deprecated']]
        #printD(cell, result)
        if not result:
            log.debug(f'{cell}')
            maybe = list(query(label=cell, exclude_prefix=('FMA',)))
            if maybe:
                qr = maybe[0]
                return qr.OntTerm.u, qr.label
            elif not syns:
                # Retry once with synonym search enabled.
                return mapCell(cell, syns=True)
            else:
                return None, None
        elif len(result) > 1:
            #printD('WARNING', result)
            result = select_by_curie_rank(result)
        else:
            result = result[0]
        return rdflib.URIRef(result['iri']), result['labels'][0]
    def lower_check(label, cell):
        # True when the resolved label does not appear in the cell text
        # (case-insensitively) — i.e. a suspicious match.
        return label not in cell and label.lower() not in cell.lower()  # have to handle comma sep case
    # Reverse lookup: operator label -> LogicalPhenotype operator.
    lnlu = {v:k for k, v in LogicalPhenotype.local_names.items()}
    def convert_cell(cell_or_comma_sep):
        # Yield (iri-or-tuple, label-or-tuple) for each comma-separated
        # value in a cell; '(OR ...)'/'(AND ...)' groups become
        # (operator, *iris) tuples for LogicalPhenotype construction.
        #printD('CONVERTING', cell_or_comma_sep)
        for cell_w_junk in cell_or_comma_sep.split(','):  # XXX WARNING need a way to alter people to this
            cell = cell_w_junk.strip()
            if cell.startswith('(OR') or cell.startswith('(AND'):
                start, *middle, end = cell.split('" "')
                OPoperator, first = start.split(' "')
                operator = OPoperator[1:]
                operator = lnlu[operator]
                last, CP = end.rsplit('"')
                iris, labels = [], []
                for term in (first, *middle, last):
                    iri, label = mapCell(term)
                    if label is None:
                        label = cell_or_comma_sep
                    iris.append(iri)
                    labels.append(label)
                yield (operator, *iris), tuple(labels)
            else:
                iri, label = mapCell(cell)
                if label is None:
                    yield iri, cell_or_comma_sep  # FIXME need a way to handle this that doesn't break things?
                else:
                    yield iri, label
    config = Config('cut-roundtrip')
    skip = 'alignment label',
    headers, *rows = values
    errors = []
    new = []
    release = []
    for i, neuron_row in enumerate(rows):
        # Per-row state, rebuilt for every neuron.
        id = None
        label_neuron = None
        definition_neuron = None
        synonyms_neuron = None
        current_neuron = None
        phenotypes = []
        do_release = False
        predicate_notes = {}
        object_notes = {}
        other_notes = {}
        wat = {}
        for j, (header, cell) in enumerate(zip(headers, neuron_row)):
            notes = list(process_note(get_note(i + 1, j, notes_index)))  # + 1 since headers is removed
            if notes and not header.startswith('has'):
                _predicate = convert_other(header)
                if cell:
                    _object = rdflib.Literal(cell)  # FIXME curies etc.
                else:
                    _object = rdf.nil
                other_notes[_predicate, _object] = notes
            if header == 'curie':
                id = OntId(cell).u if cell else None
                continue
            elif header == 'label':
                label_neuron = cell
                if cell in existing:
                    current_neuron = existing[cell]
                elif cell:
                    # TODO
                    new.append(cell)
                else:
                    raise ValueError(cell)  # wat
                continue
            elif header == 'Status':
                # TODO
                if cell == 'Yes':
                    do_release = True
                elif cell == 'Maybe':
                    pass
                elif cell == 'Not yet':
                    pass
                elif cell == 'Delete':
                    pass
                else:
                    pass
                continue
            elif header == 'PMID':
                # TODO
                continue
            elif header == 'Other reference':
                # TODO
                continue
            elif header == 'Other label':
                # TODO
                continue
            elif header == 'definition':
                continue  # FIXME single space differences between the spreadsheet and the source
                # NOTE(review): the following lines are unreachable — the
                # 'continue' above disables setting definition_neuron,
                # presumably deliberately per the FIXME; confirm.
                if cell:
                    definition_neuron = rdflib.Literal(cell)
                continue
            elif header == 'synonyms':
                if cell:
                    synonyms_neuron = [rdflib.Literal(s.strip())
                                       # FIXME bare comma is extremely dangerous
                                       for s in cell.split(',')]
                continue
            elif header in skip:
                continue
            # Anything left is a phenotype ('has...') column.
            objects = []
            if cell:
                predicate = convert_header(header)
                if predicate is None:
                    log.debug(f'{(header, cell, notes)}')
                for object, label in convert_cell(cell):
                    if isinstance(label, tuple):  # LogicalPhenotype case
                        _err = []
                        for l in label:
                            if lower_check(l, cell):
                                _err.append((cell, label))
                        if _err:
                            errors.extend(_err)
                        else:
                            objects.append(object)
                    elif lower_check(label, cell):
                        errors.append((cell, label))
                    elif str(id) == object:
                        # A phenotype resolving to the neuron's own id is
                        # a bad match.
                        errors.append((header, cell, object, label))
                        object = None
                    else:
                        objects.append(object)
                    if notes:
                        # FIXME this is a hack to only attach to the last value
                        # since we can't distinguish at the moment
                        wat[predicate, object] = notes
                        if object is not None:
                            # object aka iri can be none if we don't find anything
                            object_notes[object] = notes
                        else:
                            predicate_notes[predicate] = notes
                            # FIXME it might also be simpler in some cases
                            # to have this be object_notes[object] = notes
                            # because we are much less likely to have the same
                            # phenotype appear attached to the different dimensions
                            # FIXME comma sep is weak here because the
                            # reference is technically ambiguous
                            # might be an argument for the denormalized form ...
                            # or perhaps having another sheet for cases like that
            else:
                continue
            if predicate and objects:
                for object in objects:  # FIXME has layer location phenotype
                    if isinstance(object, tuple):
                        op, *rest = object
                        pes = (Phenotype(r, predicate) for r in rest)  # FIXME nonhomogenous phenotypes
                        phenotypes.append(LogicalPhenotype(op, *pes))
                    elif object:
                        phenotypes.append(Phenotype(object, predicate))
                    else:
                        errors.append((object, predicate, cell))
            elif objects:
                errors.append((header, objects))
            else:
                errors.append((header, cell))
        # translate header -> predicate
        # translate cell value to ontology id
        if current_neuron and phenotypes:
            # TODO merge current with changes
            # or maybe we just replace since all the phenotypes should be there?
            log.debug(phenotypes)
            if id is not None:
                log.debug(f'{(id, bool(id))}')
            elif label_neuron:
                id = make_cut_id(label_neuron)
            if id not in expect_pes:
                log.error(f'{id!r} not in cuts!?')
                continue
            if expect_pes[id] != len(phenotypes):
                # Roundtrip check: phenotype count must match the
                # existing config.
                log.error(f'{id!r} failed roundtrip {len(phenotypes)} != {expect_pes[id]}')
                continue
            neuron = NeuronCUT(*phenotypes, id_=id, label=label_neuron,
                               override=bool(id) or bool(label_neuron))
            neuron.adopt_meta(current_neuron)
            # FIXME occasionally this will error?!
        else:
            continue  # FIXME this polutes everything ???
        fn = fixname(label_neuron)
        if not phenotypes and i:  # i skips header
            errors.append((i, neuron_row))  # TODO special review for phenos but not current
            phenotypes = Phenotype('TEMP:phenotype/' + fn),
            neuron = NeuronCUT(*phenotypes,
                               id_=make_cut_id(label_neuron),
                               label=label_neuron, override=True)
        # update the meta if there were any changes
        if definition_neuron is not None:
            neuron.definition = definition_neuron
        if synonyms_neuron is not None:
            neuron.synonyms = synonyms_neuron
        try:
            neuron.batchAnnotateByObject(object_notes)
            neuron.batchAnnotate(other_notes)
        except AttributeError as e:
            #embed()
            log.exception(e)  #'something very strage has happened\n', e)
            pass  # FIXME FIXME FIXME
        #neuron.batchAnnotateByPredicate(predicate_notes) # TODO
        # FIXME doesn't quite work in this context, but there are other
        # cases where annotations to the general modality are still desireable
        # FIXME there may be no predicate? if the object fails to match?
        if do_release:
            release.append(neuron)
    return config, errors, new, release
class Cuts(Sheet):
    """Sheet binding for the 'neurons-cut' spreadsheet."""
    name = 'neurons-cut'
class CutsV1(Cuts):
    """The 'CUT V1.0' tab of the neurons-cut sheet, fetched with its
    cell grid (needed for per-cell notes)."""
    sheet_name = 'CUT V1.0'
    fetch_grid = True
def main():
#from neurondm.models.cuts import main as cuts_main
#cuts_config, *_ = cuts_main()
from IPython import embed
from neurondm.compiled.common_usage_types import config as cuts_config
cuts_neurons = cuts_config.neurons()
expect_pes = {n.id_:len(n.pes) for n in cuts_neurons}
sheet = CutsV1()
config, | |
sf = self.df
m = tc.recommender.factorization_recommender.create(sf,
self.user_id,
self.item_id,
target='rating',
verbose=False)
'''
TODO:
Test CoreML export, when we have a dirarchiver that doesn't
depend on the filesystem
if m.target:
self._test_coreml_export(m, ['135085','135038'], [0,1])
else:
self._test_coreml_export(m, ['135085','135038'])
'''
res = m.evaluate_rmse(sf, m.target)
# Compute real answers
df['prediction'] = m.predict(sf)
df['residual'] = np.square(df['prediction'] - df['rating'])
rmse_by_user = df.groupby(self.user_id)['residual'].mean().apply(lambda x: np.sqrt(x))
rmse_by_item = df.groupby(self.item_id)['residual'].mean().apply(lambda x: np.sqrt(x))
rmse_overall = np.sqrt(df['residual'].mean())
# Compare overall RMSE
assert (rmse_overall - res['rmse_overall']) < DELTA
# Compare by RMSE by user
cpp_rmse_by_user = res['rmse_by_user'].to_dataframe()
rmse_by_user = rmse_by_user.reset_index()
assert set(cpp_rmse_by_user.columns.values) == set([self.user_id, "rmse", "count"])
# No NaNs
assert not pd.isnull(cpp_rmse_by_user["rmse"]).any()
assert not pd.isnull(cpp_rmse_by_user["count"]).any()
comparison = pd.merge(rmse_by_user, cpp_rmse_by_user,
left_on=self.user_id, right_on=self.user_id)
assert all(comparison['residual'] - comparison['rmse'] < DELTA)
cpp_rmse_by_item = res['rmse_by_item'].to_dataframe()
assert set(cpp_rmse_by_item.columns.values) == set([self.item_id, "rmse", "count"])
# No NaNs
assert not pd.isnull(cpp_rmse_by_item["rmse"]).any()
assert not pd.isnull(cpp_rmse_by_item["count"]).any()
rmse_by_item = rmse_by_item.reset_index()
comparison = pd.merge(rmse_by_item, cpp_rmse_by_item,
left_on=self.item_id, right_on=self.item_id)
assert all(comparison['residual'] - comparison['rmse'] < DELTA)
def precision(self, actual, predicted, k):
assert k > 0
if len(actual) == 0:
return 0.0
if len(predicted) == 0:
return 1.0
if len(predicted) > k:
predicted = predicted[:k]
num_hits = 0.0
for i, p in enumerate(predicted):
if p in actual and p not in predicted[:i]:
num_hits += 1.0
return num_hits / k
def recall(self, actual, predicted, k):
assert k > 0
if len(actual) == 0:
return 1.0
if len(predicted) == 0:
return 0.0
if len(predicted) > k:
predicted = predicted[:k]
num_hits = 0.0
for i, p in enumerate(predicted):
if p in actual and p not in predicted[:i]:
num_hits += 1.0
return num_hits / len(actual)
def test_small_example(self):
sf = tc.SFrame()
sf['user_id'] = ['0','0','0','1','1','2','2','3','3']
sf['item_id'] = ['A','B','C','B','C','C','D','A','D']
train = sf
sf = tc.SFrame()
sf['user_id'] = ['0','0','0','1','1','2']
sf['item_id'] = ['D','E','F','A','F','F']
test = sf
user_id = 'user_id'
item_id = 'item_id'
m = tc.recommender.item_similarity_recommender.create(train,
user_id,
item_id,
verbose=False)
'''
TODO:
Test CoreML export, when we have a dirarchiver that doesn't
depend on the filesystem
self._test_coreml_export(m, ['A','B'])
'''
train_preds = m.predict(train)
assert len(train_preds) == train.num_rows()
recs = m.recommend(users=SArray(['0','1','2','3']))
sorted_scores = recs.sort(['user_id', 'item_id'])['score']
diffs = sorted_scores - tc.SArray([(1./3+0+1./4)/3,
(1./3+1./4)/2,
(1./4)/2,
(1./4+1./3)/2,
(2./3+0)/2,
(1./3+0)/2,
(1./4+1./4)/2])
assert all(abs(diffs) < DELTA)
test_preds = m.predict(test)
assert len(test_preds) == test.num_rows()
def test_precision_recall(self):
train = self.train
test = self.test
m = tc.recommender.create(train,
self.user_id,
self.item_id,
verbose=False)
'''
TODO:
Test CoreML export, when we have a dirarchiver that doesn't
depend on the filesystem
self._test_coreml_export(m, ['135085','135038'])
'''
users = set(list(test[self.user_id]))
cutoff = 5
# Check that method can run without the skip_set option
r = m.evaluate_precision_recall(test, cutoffs=[5, 10])
assert r is not None
assert type(r) == dict
# Convert to DataFrame for the tests below
r = m.evaluate_precision_recall(test, cutoffs=[cutoff],
skip_set=train)
assert r is not None
assert type(r) == dict
# Test out of order columns
r = m.evaluate_precision_recall(test[[self.item_id, self.user_id]], cutoffs=[cutoff],
skip_set=train)
assert r is not None
assert type(r) == dict
recs = m.recommend(k=cutoff).to_dataframe()
results = r['precision_recall_by_user']
assert results.column_names() == [self.user_id, 'cutoff', 'precision', 'recall', 'count']
for user in users:
# Get observed values for this user
actual = list(test[self.test[self.user_id] == user][self.item_id])
# Get predictions
predicted = list(recs[recs[self.user_id] == user][self.item_id])
if len(predicted) > 0:
# Get answers from C++
p = results['precision'][results[self.user_id] == user][0]
r = results['recall'][results[self.user_id] == user][0]
p2 = self.precision(actual, predicted, cutoff)
r2 = self.recall(actual, predicted, cutoff)
# Compare with answers using Python
assert abs(p - p2) < DELTA
assert abs(r - r2) < DELTA
class SideDataTests(RecommenderTestBase):
    """Tests for recommender models trained with user/item side data.

    The side-data frames deliberately include ids with no observations
    ("20", "f") and the observations include ids with no side data
    ("8", "10", "d"), so joins in both directions are exercised.
    """

    def setUp(self):
        self.sf = tc.SFrame({'userID': ["0", "0", "0", "1", "1", "2", "8", "10"],
                             'placeID': ["a", "b", "c", "a", "b", "b", "c", "d"],
                             'rating': [.2, .3, .4, .1, .3, .3, .5, 0.9]})
        self.test_sf = tc.SFrame({'userID': ["0", "0", "0", "1", "1", "2", "2", "2"],
                                  'placeID': ["a", "b", "c", "a", "b", "b", "c", "d"],
                                  'rating': [.2, .3, .4, .1, .3, .3, .5, 0.9]})
        # Side data covering every supported column type: str, numeric,
        # vector, and dict.
        self.user_side = tc.SFrame({'userID': ["0", "1", "20"],
                                    'blahID': ["a", "b", "b"],
                                    'blahREAL': [0.1, 12, 22],
                                    'blahVECTOR': [array.array('d', [0, 1]), array.array('d', [0, 2]), array.array('d', [2, 3])],
                                    'blahDICT': [{'a': 23}, {'a': 13}, {'a': 23, 'b': 32}],
                                    })
        self.item_side = tc.SFrame({'placeID': ["a", "b", "f"],
                                    'blahID2': ["e", "e", "3"],
                                    'blahREAL2': [0.4, 12, 22],
                                    'blahVECTOR2': [array.array('d', [0, 1, 2]), array.array('d', [0, 2, 3]), array.array('d', [2, 3, 3])],
                                    'blahDICT2': [{'a': 23}, {'b': 13}, {'a': 23, 'c': 32}]})
        self.user_id = 'userID'
        self.item_id = 'placeID'
        self.target = 'rating'

    def test_bad_input(self):
        """Non-SFrame user_data / item_data must raise TypeError.

        BUG FIX: the previous try/except pattern silently passed when no
        exception was raised at all; assertRaises now enforces it.
        """
        with self.assertRaises(TypeError) as ctx:
            tc.recommender.create(self.sf, self.user_id, self.item_id,
                                  user_data='bad input')
        self.assertEqual(str(ctx.exception), 'Provided user_data must be an SFrame.')
        with self.assertRaises(TypeError) as ctx:
            tc.recommender.create(self.sf, self.user_id, self.item_id,
                                  item_data='bad input')
        self.assertEqual(str(ctx.exception), 'Provided item_data must be an SFrame.')

    def test_model_creation(self):
        """Every side-data combination trains, saves, and reloads cleanly."""

        def check_model(m):
            # All side-data bookkeeping fields must be listed by the model.
            expected = ['num_users', 'num_items',
                        'num_user_side_features',
                        'num_item_side_features',
                        'user_side_data_column_names',
                        'user_side_data_column_types',
                        'item_side_data_column_names',
                        'item_side_data_column_types']
            observed = m._list_fields()
            for e in expected:
                assert e in observed

        # BUG FIX: create the temp dir before the try block so the finally
        # clause cannot hit an unbound `write_dir` if mkdtemp itself fails.
        write_dir = tempfile.mkdtemp()
        try:
            fn = join(write_dir, "tmp.gl")
            for u_side in [None, self.user_side]:
                for i_side in [None, self.item_side]:
                    for model_name in model_names:
                        # item_content_recommender is skipped here.
                        if model_name == "item_content_recommender":
                            continue
                        # TODO: test CoreML export, when we have a
                        # dirarchiver that doesn't depend on the filesystem.
                        m = self._get_trained_model(model_name,
                                                    self.sf,
                                                    user_id=self.user_id,
                                                    item_id=self.item_id,
                                                    target=self.target,
                                                    test_export_to_coreml=False,
                                                    user_data=u_side,
                                                    item_data=i_side)
                        m.save(fn)
                        m1 = tc.load_model(fn)
                        check_model(m)
                        check_model(m1)
        finally:
            shutil.rmtree(write_dir)

    def test_recommender_create(self):
        """tc.recommender.create picks the right model class per input shape."""
        sf_w_target = self.sf
        sf_no_target = self.sf[[self.user_id, self.item_id]]
        # BUG FIX: copy before overwriting the target column -- previously
        # `sf_binary_target = self.sf` aliased the same SFrame, so setting
        # the column to 1 clobbered sf_w_target's ratings as well.
        sf_binary_target = self.sf.copy()
        sf_binary_target[self.target] = 1
        m = tc.recommender.create(sf_w_target,
                                  self.user_id, self.item_id)
        assert isinstance(m, ItemSimilarityRecommender)
        self._test_coreml_export(m, ['a', 'b'])
        m = tc.recommender.create(sf_no_target,
                                  self.user_id, self.item_id)
        assert isinstance(m, ItemSimilarityRecommender)
        self._test_coreml_export(m, ['a', 'b'])
        m = tc.recommender.create(sf_w_target,
                                  self.user_id, self.item_id,
                                  self.target)
        assert isinstance(m, RankingFactorizationRecommender)
        # TODO: test CoreML export, when we can support serializing user
        # data into CoreML model format:
        #   self._test_coreml_export(m, ['a','b'], [.2,.3])
        m = tc.recommender.create(sf_w_target,
                                  self.user_id, self.item_id,
                                  self.target,
                                  ranking=False)
        assert isinstance(m, FactorizationRecommender)
        m = tc.recommender.create(sf_binary_target,
                                  self.user_id, self.item_id,
                                  self.target,
                                  ranking=False)
        assert isinstance(m, FactorizationRecommender)
        m = tc.recommender.create(sf_w_target,
                                  self.user_id, self.item_id,
                                  self.target,
                                  ranking=False,
                                  user_data=self.user_side)
        assert isinstance(m, FactorizationRecommender)
        m = tc.recommender.create(sf_w_target,
                                  self.user_id, self.item_id,
                                  self.target,
                                  user_data=self.user_side,
                                  item_data=self.item_side)
        assert isinstance(m, RankingFactorizationRecommender)
        # With side data but no target, a ranking factorization model is
        # used even though ranking=False was requested.
        m = tc.recommender.create(sf_no_target,
                                  self.user_id, self.item_id,
                                  ranking=False,
                                  user_data=self.user_side,
                                  item_data=self.item_side)
        assert isinstance(m, RankingFactorizationRecommender)
        m = tc.recommender.create(sf_no_target,
                                  self.user_id, self.item_id,
                                  ranking=False)
        assert isinstance(m, ItemSimilarityRecommender)
        self._test_coreml_export(m, ['a', 'b'])
class FactorizationTests(RecommenderTestBase):
def setUp(self):
self.model_names = ['default',
'factorization_recommender',
'ranking_factorization_recommender']
self.df = tc.SFrame({'userID': ["0", "0", "0", "1", "1", "2", "2", "2"],
'placeID': ["a", "b", "c", "a", "b", "b", "c", "d"],
'rating': [.2, .3, .4, .1, .3, .3, .5, 0.9]})
self.test_df = tc.SFrame({'userID': ["0", "0", "0", "1", "1", "2", "2", "2"],
'placeID': ["a", "b", "c", "a", "b", "b", "c", "d"],
'rating': [.2, .3, .4, .1, .3, .3, .5, 0.9]})
self.user_side = tc.SFrame({'userID': ["0", "1", "2"],
'blahID': ["a", "b", "b"],
'blahREAL': [0.1, 12, 22],
'blahVECTOR': [array.array('d',[0,1]), array.array('d',[0,2]), array.array('d',[2,3])],
'blahDICT': [{'a' : 23}, {'a' : 13}, {'a' : 23, 'b' : 32}],
})
self.item_side = tc.SFrame({'placeID': ["a", "b", "d"],
'blahID2': ["e", "e", "3"],
'blahREAL2': [0.4, 12, 22],
'blahVECTOR2': [array.array('d',[0,1,2]), array.array('d',[0,2,3]), array.array('d',[2,3,3])],
'blahDICT2': [{'a' : 23}, {'b' : 13}, {'a' : 23, 'c' : 32, None : 12}]})
self.user_id = 'userID'
self.item_id = 'placeID'
self.target = 'rating'
self.models = []
for u_side in [None, self.user_side]:
for i_side in [None, self.item_side]:
for model_name in self.model_names:
m = self._get_trained_model(model_name,
self.df,
user_id = self.user_id,
item_id = self.item_id,
target = self.target,
test_export_to_coreml=False,
user_data = u_side,
item_data = i_side)
self.models.append((model_name, m))
def test_evaluate_with_side_data(self):
for u_side in [None, self.user_side]:
for i_side in [None, self.item_side]:
for (mname, m) in self.models:
e = m.evaluate(self.test_df,
new_user_data=u_side,
new_item_data=i_side,
verbose=False)
assert 'precision_recall_by_user' in e
recs = m.recommend(k = 1,
new_user_data=u_side,
new_item_data=i_side)
assert recs is not None
assert recs.num_rows() == len(self.df[self.user_id].unique())
def test_data_summary_fields(self):
for (model_name, m) in self.models:
expected = ['num_users', 'num_items',
'num_user_side_features',
'num_item_side_features',
'observation_data_column_names',
'user_side_data_column_names',
'user_side_data_column_types',
'item_side_data_column_names',
'item_side_data_column_types']
observed = m._list_fields()
for e in expected:
assert e in observed
def test_matrix_factorization_values(self):
test_vars = | |
<filename>server.py
import os
from flask import Flask, flash, session, request, send_file, render_template, redirect, url_for, jsonify
from flask_socketio import SocketIO, send, join_room, leave_room, emit
from flask_cors import cross_origin
from datetime import datetime
from db import DBConn, SQL
from pathlib import Path
from werkzeug.utils import secure_filename
from pywebpush import webpush, WebPushException
from json import dumps, loads
from VAPID.push_key import pushKeys
# Upload locations under the served static tree.
UPLOAD_FILE_FOLDER = os.getcwd() + '/static/files'
# Presumably the whitelist for document uploads -- used elsewhere; TODO confirm.
ALLOWED_FILE_TYPE = ['pdf', 'doc', 'docx', 'ppt', 'pptx']
UPLOAD_ICON_FOLDER = os.getcwd() + '/static/images/user_icons'
app = Flask(__name__)
# NOTE(review): session secret key is hard-coded in source; move it to
# environment/config before deployment.
app.config['SECRET_KEY'] = 'SecretKeyHERE!'
app.config['UPLOAD_ICON_FOLDER'] = UPLOAD_ICON_FOLDER
app.config['UPLOAD_FILE_FOLDER'] = UPLOAD_FILE_FOLDER
# WebSocket layer (event handlers are not in this part of the file).
socketio = SocketIO(app)
# Shared database connection used by every route below.
db = DBConn()
def send_web_push(subscription_information, message_body):
    """Send *message_body* to one subscriber via Web Push.

    The body is JSON-encoded and the message is signed with the server's
    VAPID keys; returns whatever ``pywebpush.webpush`` returns.
    """
    payload = dumps(message_body)
    return webpush(subscription_info=subscription_information,
                   data=payload,
                   vapid_private_key=pushKeys['privateKey'],
                   vapid_claims=pushKeys['claim'])
def replace_string(string):
    """Backslash-escape ``\\``, ``'`` and ``"`` in *string*.

    Used to escape user-supplied text before it is interpolated into SQL
    statements with ``str.format``.
    NOTE(review): hand-rolled escaping is not a safe substitute for
    parameterized queries -- prefer DB-API placeholders.
    """
    escape_table = str.maketrans({'\\': '\\\\', "'": "\\'", '"': '\\"'})
    return string.translate(escape_table)
@app.route('/sw')
def sw():
    """Serve the service-worker script from the static directory."""
    base_dir = os.path.dirname(os.path.realpath(__file__))
    return send_file(base_dir + "/static/sw.js")
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def home(path):
    """Serve the SPA shell to logged-in users; redirect everyone else to /login.

    The catch-all path lets client-side routing handle any sub-URL.
    """
    if not session.get('user'):
        return redirect(url_for('login'))
    base_dir = os.path.dirname(os.path.realpath(__file__))
    return send_file(base_dir + "/static/index.html")
@app.route('/login')
def login():
    """Render the login page."""
    return render_template("login.html")
@app.route('/login_process', methods=['POST'])
def login_process():
    """Validate posted credentials and start a session.

    On success the user id is stored in ``session['user']`` and the user
    is sent to the home page; on any failure a 'Fail' message is flashed
    and the user returns to /login.  Admin accounts may not log in here.
    """
    user_id = request.form['user_id']
    password = request.form['password']
    # NOTE(review): user_id is interpolated into SQL via str.format and the
    # password is compared in plain text -- consider parameterized queries
    # and password hashing.
    data = db.exe_fetch(SQL['login_process'].format(user_id))
    if data is None:
        flash('Fail')
        return redirect(url_for('login'))
    if data['password'] != password or data['user_type'] == 'admin':
        flash('Fail')
        return redirect(url_for('login'))
    print('Success')
    session['user'] = user_id
    return redirect(url_for('home'))
@app.route('/logout', methods=['POST'])
def logout():
    """End the current session, reporting whether a user was logged in."""
    if session.get('user') is None:
        return jsonify({'logout': 'Fail'})
    session.pop('user')
    return jsonify({'logout': 'Success'})
#
# API Start
#
#
# User API
#
@app.route('/api/user_data', methods=['GET', 'PUT'])
def api_user_data():
    """Read (GET) or update (PUT) the logged-in user's account record.

    PUT takes JSON with either a ``changeNickname`` or ``changePassword``
    object; a password change requires the current password to match.
    Unauthenticated or unrecognised requests get {'result': 'Error'}.
    """
    if session.get('user') != None:
        user = int(session.get('user'))
        # NOTE(review): values are interpolated into SQL via str.format;
        # parameterized queries would be safer.
        user_data = db.exe_fetch(SQL['user_data'].format(user))
        if request.method == 'GET':
            return jsonify({'user_data': user_data})
        if request.method == 'PUT':
            changeNickname = request.json.get('changeNickname')
            changePassword = request.json.get('changePassword')
            if changeNickname:
                nickname = changeNickname['nickname']
                print(nickname)
                db.exe_commit(SQL['change_nickname'].format(nickname, user))
                return jsonify({'changeNickname': 'Success'})
            elif changePassword:
                password_now = changePassword['password_now']
                password_new = changePassword['password_new']
                # Stored password fetched for comparison; NOTE(review):
                # passwords appear to be stored/compared in plain text.
                password = db.exe_fetch(SQL['login_process'].format(user)).get('password')
                print(password, password_now, password_new)
                if password_now == password:
                    db.exe_commit(SQL['change_password'].format(password_new, user))
                    return jsonify({'changePassword': 'Success'})
                return jsonify({'changePassword': 'Error'})
    return jsonify({'result': 'Error'})
@app.route('/api/user_info', methods=['GET', 'POST', 'DELETE'])
def api_setting():
    """Fetch profile info (GET), upload a user icon (POST), or delete it (DELETE).

    GET without a ``target`` query arg returns the logged-in user's own
    info; with ``target`` it returns that user's info.  Icons are always
    stored as ``<user_id>.png`` under the icon upload folder, regardless
    of the uploaded filename.
    """
    if session.get('user') is None:
        return jsonify({'result': 'Error'})
    user = session.get('user')
    target = request.args.get('target')
    if request.method == 'GET':
        if target is None:
            user_info = db.exe_fetch(SQL['user_info'].format(user))
            # course_avg is stringified for JSON output (presumably a
            # Decimal from the DB driver -- TODO confirm).
            if user_info['course_avg']:
                user_info['course_avg'] = str(user_info['course_avg'])
            return jsonify({'user_info': user_info})
        target_info = db.exe_fetch(SQL['target_info'].format(target, user))
        if target_info['course_avg']:
            target_info['course_avg'] = str(target_info['course_avg'])
        return jsonify({'target_info': target_info})
    if request.method == 'POST':
        file = request.files['icon']
        if file:
            # The original filename is discarded (a secure_filename() call
            # here was dead code); the icon is saved under the user's id.
            file.save(os.path.join(app.config['UPLOAD_ICON_FOLDER'], str(user) + '.png'))
            return jsonify({'changeIcon': 'Success'})
    elif request.method == 'DELETE':
        icon_path = UPLOAD_ICON_FOLDER + '/' + str(user) + '.png'
        if Path(icon_path).exists():
            os.remove(icon_path)
            return jsonify({'deleteIcon': 'Success'})
    return jsonify({'result': 'Error'})
@app.route('/api/user_follow', methods=['POST'])
def api_user_follow():
    """Toggle whether the logged-in user follows ``target``.

    An existing follow record is removed (unfollow); otherwise one is
    created with the current timestamp.  The response key names the
    action that was actually performed.
    """
    if session.get('user') != None:
        user = session.get('user')
        target = request.args.get('target')
        if request.method == 'POST' and target != None:
            following = db.exe_fetch(SQL['following'].format(user, target))
            now = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
            if following:
                db.exe_commit(SQL['unfollow'].format(user, target))
                # BUG FIX: this branch removes the follow, so report it as
                # an unfollow (the two response keys were swapped before).
                return jsonify({ 'user_unfollow': "Success" })
            else:
                db.exe_commit(SQL['follow'].format(user, target, now))
                return jsonify({ 'user_follow': "Success" })
    return jsonify({'result': 'Error'})
#
# Questions API
#
@app.route('/api/questions', methods=['GET', 'POST'])
def api_questions():
    """GET: newest and hottest question lists.  POST: keyword search.

    Requires a logged-in session; otherwise responds {'result': 'Error'}.
    """
    if session.get('user') != None:
        if request.method == 'GET':
            new = db.exe_fetch(SQL['questions_new'], 'all')
            hot = db.exe_fetch(SQL['questions_hot'], 'all')
            return jsonify({ 'questions': { 'new': new, 'hot': hot } })
        elif request.method == 'POST':
            search_query = request.json.get('search_query')
            # NOTE(review): the raw query string goes into SQL via
            # str.format without escaping -- injection risk.
            search_result = db.exe_fetch(SQL['search_questions'].format(search_query), 'all')
            return jsonify({ 'search_result': search_result })
    return jsonify({'result': 'Error'})
@app.route('/api/question', methods = ['GET','POST','PATCH'])
def api_question():
    """Fetch (GET), create (POST), or soft-delete (PATCH) a question.

    GET/PATCH identify the question via the ?q= query arg; POST takes a
    JSON body {'create_question': {'title': ..., 'content': ...}}.
    PATCH only succeeds for the question's creator and marks the row
    invalid rather than deleting it.
    """
    if session.get('user') != None:
        user = session.get('user')
        question = request.args.get('q')
        if request.method == 'GET':
            if question != None:
                data = db.exe_fetch(SQL['question'].format(question))
                return jsonify({'question': data})
        elif request.method == 'POST':
            data = request.json.get('create_question')
            # User text is escaped before being interpolated into SQL.
            title = replace_string(data.get('title'))
            content = replace_string(data.get('content'))
            now = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
            db.exe_commit(SQL['create_question'].format(user, title, content, now))
            return jsonify({'result': 'Success'})
        elif request.method == 'PATCH':
            if question != None:
                question_create_by = db.exe_fetch(SQL['question'].format(question)).get('create_by')
                if int(user) == int(question_create_by):
                    db.exe_commit(SQL['question_valid'].format('false' ,question))
                    return jsonify({'question_valid': {
                        'result': 'Success',
                        'valid': 'false'
                    }})
    return jsonify({'result': 'Error'})
@app.route('/api/solve_question', methods=['PATCH'])
def api_solve_question():
    """Mark a question as solved by a given answerer, or clear that state.

    Expects JSON ``{'solve_question': {'question': id, 'solved_by': id}}``.
    A ``solved_by`` of 0 is the client's sentinel for "no solver" and is
    stored as SQL NULL.
    """
    if session.get('user') != None:
        # (The unused local `user` binding was removed; the session check
        # alone gates access.)
        data = request.json.get('solve_question')
        question = data.get('question')
        solved_by = data.get('solved_by')
        if solved_by == 0:
            solved_by = 'null'
        db.exe_commit(SQL['solve_question'].format(solved_by, question))
        return jsonify({'solve_question': 'Success'})
    return jsonify({'result': 'Error'})
@app.route('/api/my_questions')
def api_my_questions():
    """Without ?q: list the user's own questions.

    With ?q=<id>: report whether that question was asked by the
    logged-in user.
    """
    if session.get('user') != None:
        user = session.get('user')
        question = request.args.get('q')
        if question != None:
            myQuestion = False
            question_create_by = db.exe_fetch(SQL['question'].format(question)).get('create_by')
            if int(user) == int(question_create_by):
                myQuestion = True
            return jsonify({'myQuestion': myQuestion})
        else:
            my_questions = db.exe_fetch(SQL['my_questions'].format(user), 'all')
            return jsonify({'my_questions': my_questions})
    return jsonify({'result': 'Error'})
@app.route('/api/question_collection', methods = ['GET', 'POST', 'DELETE'])
def api_question_collection():
    """Manage the user's bookmarked ("collected") questions.

    GET ?q=<id> -> is this question collected?  GET without ?q -> the
    full collection list.  POST / DELETE ?q=<id> -> add / remove.
    """
    if session.get('user') != None:
        user = session.get('user')
        question = request.args.get('q')
        if request.method == 'GET':
            if question != None:
                isCollection = False
                record = db.exe_fetch(SQL['is_collection'].format(user, question, 'question_collection', 'question'))
                if record:
                    isCollection = True
                return jsonify({'isCollection': isCollection})
            else:
                collection = db.exe_fetch(SQL['question_collection'].format(user), 'all')
                return jsonify({'question_collection': collection})
        elif request.method == 'POST':
            if question != None:
                db.exe_commit(SQL['add_to_collection'].format(user, question, 'question_collection', 'question'))
                return jsonify({'add_to_collection': 'Success'})
        elif request.method == 'DELETE':
            if question != None:
                db.exe_commit(SQL['delete_from_collection'].format(user, question, 'question_collection', 'question'))
                return jsonify({'delete_from_collection': 'Success'})
    return jsonify({'result': 'Error'})
#
# Answer API
#
@app.route('/api/check_can_answer/<path:id>')
def api_check_can_answer(id):
    """Tell the client whether the current user may answer question *id*.

    A user cannot answer a question they already answered or one they
    asked themselves.
    NOTE(review): with no session this still returns canAnswer=True (the
    auth guard only wraps the checks) -- confirm this is intended.
    """
    canAnswer = True
    if session.get('user') != None:
        user = session.get('user')
        data = db.exe_fetch(SQL['check_can_answer'].format(id, user))
        question_create_by = db.exe_fetch(SQL['question'].format(id)).get('create_by')
        if data or int(user) == int(question_create_by):
            canAnswer = False
    return jsonify({'canAnswer': canAnswer})
@app.route('/api/answers/<path:id>')
def api_anwsers_byQuestion_ID(id):
    """Return every answer for question *id*, personalised for the viewer."""
    user = session.get('user')
    if user is not None:
        answers = db.exe_fetch(SQL['answer_byQuestionId'].format(id, user), 'all')
        return jsonify({'answers': answers})
    return jsonify({'result': 'Error'})
@app.route('/api/answer_likes', methods=['POST', 'DELETE'])
def api_answer_likes():
    """Like (POST) or unlike (DELETE) an answer.

    The answer is identified by its question id (?q=) plus the answer
    author's id (?c=); the liker is the session user.
    """
    user = session.get('user')
    if user != None:
        question = request.args.get('q')
        create_by = request.args.get('c')
        if question != None and create_by != None:
            if request.method == 'POST':
                db.exe_commit(SQL['like_answer'].format(question, create_by, user))
                return jsonify({'like_answer': 'Success'})
            elif request.method == 'DELETE':
                db.exe_commit(SQL['unlike_answer'].format(question, create_by, user))
                return jsonify({'unlike_answer': 'Success'})
    return jsonify({'result': 'Error'})
@app.route('/api/submit_answer', methods=['POST', 'PATCH', 'DELETE'])
def api_submit_answer():
    """Create (POST), edit (PATCH), or delete (DELETE) the user's answer.

    Each verb reads its own JSON envelope ('submit_answer',
    'submit_edited_answer', 'delete_answer').  The SQL for edit/delete
    takes only (question, user), so answers appear to be keyed by that
    pair -- one answer per user per question (TODO confirm in schema).
    """
    if session.get('user') != None:
        user = session.get('user')
        if request.method == 'POST':
            data = request.json.get('submit_answer')
            question = data.get('question')
            # User text is escaped before SQL interpolation.
            answer = replace_string(data.get('answer'))
            now = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
            db.exe_commit(SQL['submit_answer'].format(question, user, answer, now))
            return jsonify({'submit_answer': 'Success'})
        elif request.method == 'PATCH':
            data = request.json.get('submit_edited_answer')
            question = data.get('question')
            edited_answer = replace_string(data.get('edited_answer'))
            db.exe_commit(SQL['submit_edited_answer'].format(question, user, edited_answer))
            return jsonify({'submit_edited_answer': 'Success'})
        elif request.method == 'DELETE':
            data = request.json.get('delete_answer')
            question = data.get('question')
            db.exe_commit(SQL['delete_answer'].format(question, user))
            return jsonify({'delete_answer': 'Success'})
    return jsonify({'result': 'Error'})
#
# Courses API
#
@app.route('/api/courses', methods=['GET', 'POST'])
def api_courses():
    """GET: new/hot/recommended course lists.  POST: course search.

    Recommendations take the top course per tag from the user's recent
    search history, then pad with hot courses up to five entries.
    Search supports title, author, or space-separated tag queries; tag
    relevance is scored in SQL by counting case-insensitive matches.
    """
    if session.get('user') != None:
        user = session.get('user')
        if request.method == 'GET':
            new = db.exe_fetch(SQL['courses_new'], 'all')
            hot = db.exe_fetch(SQL['courses_hot'], 'all')
            tags = db.exe_fetch(SQL['recommand_tags'].format(user), 'all')
            recommand = []
            for tag in tags:
                if tag.get('tag') != None:
                    course = db.exe_fetch(SQL['top_in_tag'].format(replace_string(tag.get('tag'))))
                    if course != None and course not in recommand:
                        recommand.append(course)
            # Pad the recommendation list with hot courses, up to 5 entries.
            for i in hot:
                if len(recommand) < 5:
                    if i not in recommand:
                        recommand.append(i)
                else:
                    break
            return jsonify({'courses': { 'new': new, 'hot': hot, 'recommand': recommand } })
        elif request.method == 'POST':
            search_query = request.json.get('search_query')
            search_method = request.json.get('search_method')
            # NOTE(review): an unrecognised search_method leaves
            # search_result unbound and the return below raises NameError.
            if search_method == 'title':
                search_result = db.exe_fetch(SQL['search_courses_by_title'].format(replace_string(search_query)), 'all')
            elif search_method == 'author':
                search_result = db.exe_fetch(SQL['search_courses_by_author'].format(replace_string(search_query)), 'all')
            elif search_method == 'tags':
                tags = search_query.split(' ')
                case = ''
                # Build a SQL expression that sums one point per matching tag.
                for tag in tags:
                    t = replace_string(tag)
                    if case == '':
                        case += '''(CASE WHEN LOWER(tag) = LOWER('{0}') THEN 1 ELSE 0 END)'''.format(t)
                    else:
                        case += ''' + (CASE WHEN LOWER(tag) = LOWER('{0}') THEN 1 ELSE 0 END)'''.format(t)
                    # Record each searched tag for future recommendations.
                    db.exe_commit(SQL['search_history'].format(user, t))
                search_result = db.exe_fetch(SQL['search_courses_by_tags'].format(case), 'all')
                for i in search_result:
                    try:
                        # relevant may come back as a non-float numeric
                        # type (Decimal? -- TODO confirm); coerce for JSON.
                        i['relevant'] = float(i['relevant'])
                    except:
                        pass
            return jsonify({ 'search_result': search_result })
    return jsonify({'result': 'Error'})
@app.route('/api/course', methods=['GET', 'POST', 'PUT', 'PATCH'])
def api_course():
    """Single-course endpoint.

    GET   ?c=<id> -> course details plus its tag list.
    POST          -> create a course from JSON, then web-push a notice to
                     subscribed followers of the author.
    PUT           -> edit title/description and rebuild the tag list.
    PATCH ?c=<id> -> soft-delete (mark invalid); author only.
    """
    if session.get('user') != None:
        user = session.get('user')
        course = request.args.get('c')
        if request.method == 'GET':
            if course != None:
                data = db.exe_fetch(SQL['course'].format(course))
                tags = db.exe_fetch(SQL['course_tags'].format(course), 'all')
                data['tags'] = [d['tag'] for d in tags]
                try:
                    # avg_rate may be a non-float numeric type (Decimal?
                    # -- TODO confirm); coerce for JSON output.
                    data['avg_rate'] = float(data.get('avg_rate'))
                except:
                    pass
                return jsonify({'course': data})
        elif request.method == 'POST':
            data = request.json.get('create_course')
            # User text is escaped before SQL interpolation.
            title = replace_string(data.get('title'))
            description = replace_string(data.get('description'))
            tags = data.get('tags')
            now = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
            last_id = db.exe_commit_last_id(SQL['create_course'].format(user, title, description, now)).get('last_id')
            for tag in tags:
                if tag != '' and tag != None:
                    try:
                        db.exe_commit(SQL['create_course_tags'].format(last_id, replace_string(tag)))
                    except:
                        # Tag insert failures (e.g. duplicates) are ignored.
                        pass
            # Push a "new course" notification to every stored subscription
            # whose owner follows the author; push failures are best-effort.
            user_keys = db.exe_fetch("SELECT * FROM user_keys", 'all')
            for i in user_keys:
                following = db.exe_fetch("SELECT b.nickname FROM user_following a, users b WHERE a.following = b.user_id AND a.user_id = {0} AND a.following = {1}".format(i['user_id'], user))
                if following:
                    try:
                        send_web_push(loads(i['user_key']), {'notice': 'Have a new course!', 'title': title, 'name': following['nickname'], 'action': 'create a course: '})
                    except:
                        print('webpush error')
            return jsonify({'create_course': 'Success'})
        elif request.method == 'PUT':
            data = request.json.get('submit_edited_course')
            course = data.get('course')
            edited_title = replace_string(data.get('edited_title'))
            edited_description = replace_string(data.get('edited_description'))
            edited_tags = data.get('edited_tags')
            db.exe_commit(SQL['submit_edited_course'].format(course, user, edited_title, edited_description))
            # Replace the whole tag set: clear, then re-insert the new tags.
            db.exe_commit(SQL['reset_course_tags'].format(course))
            for tag in edited_tags:
                if tag != '' and tag != None:
                    try:
                        db.exe_commit(SQL['create_course_tags'].format(course, replace_string(tag)))
                    except:
                        pass
            return jsonify({'edit_course': 'Success'})
        elif request.method == 'PATCH':
            if course != None:
                author = db.exe_fetch(SQL['course'].format(course)).get('author')
                if int(user) == int(author):
                    db.exe_commit(SQL['course_valid'].format('false', course))
                    return jsonify({'course_valid': {
                        'result': 'Success',
                        'valid': 'false'
                    }})
    return jsonify({'result': 'Error'})
@app.route('/api/my_courses')
def api_my_courses():
    """Without ?c: list the user's own courses.

    With ?c=<id>: report whether that course belongs to the logged-in
    user.
    """
    if session.get('user') != None:
        user = session.get('user')
        course = request.args.get('c')
        if course != None:
            myCourse = False
            author = db.exe_fetch(SQL['course'].format(course)).get('author')
            if int(user) == int(author):
                myCourse = True
            return jsonify({'myCourse': myCourse})
        else:
            my_courses = db.exe_fetch(SQL['my_courses'].format(user), 'all')
            return jsonify({'my_courses': my_courses})
    return jsonify({'result': 'Error'})
@app.route('/api/course_collection', methods = ['GET', 'POST', 'DELETE'])
def api_course_collection():
if session.get('user') != None:
user = session.get('user')
course = request.args.get('c')
if request.method == 'GET':
if course != None:
isCollection = False
| |
<gh_stars>10-100
# MIT License
# Copyright (c) 2022 Muhammed
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Telegram Link : https://telegram.dog/Mo_Tech_Group
# Repo Link : https://github.com/PR0FESS0R-99/LuciferMoringstar-Robot
# License Link : https://github.com/PR0FESS0R-99/LuciferMoringstar-Robot/blob/LuciferMoringstar-Robot/LICENSE
import asyncio
from pyrogram import Client as lucifermoringstar_robot, enums
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery
from pyrogram.errors import UserIsBlocked, PeerIdInvalid, UserNotParticipant, MessageNotModified
from LuciferMoringstar_Robot import temp, CUSTOM_FILE_CAPTION, AUTH_CHANNEL, SUPPORT, CREATOR_NAME, CREATOR_USERNAME, SAVE_FILES, GET_FILECHANNEL, ADMINS, START_MESSAGE
from LuciferMoringstar_Robot.functions import get_size, get_settings, save_group_settings, is_subscribed
from LuciferMoringstar_Robot.modules import autofilter_text, connection_text, spellcheck_text, welcome_text, misc_text, filecaption_text
from LuciferMoringstar_Robot.translation import HELP_MESSAGE, ABOUT_MESSAGE, STATUS_MESSAGE, GETFILE_TEXT, USAGE_MESSAGE, NOT_SUB
from database.connections_mdb import active_connection, all_connections, delete_connection, if_active, make_active, make_inactive
from database.autofilter_mdb import Media, get_file_details
from database.chats_users_mdb import db
@lucifermoringstar_robot.on_callback_query()
async def cb_handler(bot, update):
try:
userID = update.message.reply_to_message.from_user.id
except:
userID = update.from_user.id
if userID == update.from_user.id:
if update.data == "close":
await update.message.delete()
elif update.data.startswith("nextgroup"):
mrk, index, keyword = update.data.split("_")
try:
data = temp.BUTTONS[keyword]
except KeyError:
await update.answer("𝚃𝙷𝙸𝚂 𝙼𝚈 𝙾𝙻𝙳 𝙼𝙴𝚂𝚂𝙰𝙶𝙴 𝚂𝙾 𝙿𝙻𝙴𝙰𝚂𝙴 𝚁𝙴𝚀𝚄𝙴𝚂𝚃 𝙰𝙶𝙰𝙸𝙽 🙏",show_alert=True)
return
if int(index) == int(data["total"]) - 2:
buttons = data['buttons'][int(index)+1].copy()
buttons.append(
[InlineKeyboardButton("🔙", callback_data=f"backgroup_{int(index)+1}_{keyword}"),
InlineKeyboardButton(f"📃 {int(index)+2}/{data['total']}", callback_data="pages"),
InlineKeyboardButton("🗑️", callback_data="close")]
)
buttons.append(
[InlineKeyboardButton(text="🤖 𝙲𝙷𝙴𝙲𝙺 𝙼𝚈 𝙿𝙼 🤖", url=f"https://telegram.dog/{temp.Bot_Username}")]
)
await update.edit_message_reply_markup(
reply_markup=InlineKeyboardMarkup(buttons)
)
return
else:
buttons = data['buttons'][int(index)+1].copy()
buttons.append(
[InlineKeyboardButton("🔙", callback_data=f"backgroup_{int(index)+1}_{keyword}"),
InlineKeyboardButton(f"📃 {int(index)+2}/{data['total']}", callback_data="pages"),
InlineKeyboardButton("🗑️", callback_data="close"),
InlineKeyboardButton("➡", callback_data=f"nextgroup_{int(index)+1}_{keyword}")]
)
buttons.append(
[InlineKeyboardButton(text="🤖 𝙲𝙷𝙴𝙲𝙺 𝙼𝚈 𝙿𝙼 🤖", url=f"https://telegram.dog/{temp.Bot_Username}")]
)
await update.edit_message_reply_markup(reply_markup=InlineKeyboardMarkup(buttons))
return
elif update.data.startswith("backgroup"):
mrk, index, keyword = update.data.split("_")
try:
data = temp.BUTTONS[keyword]
except KeyError:
await update.answer("𝚃𝙷𝙸𝚂 𝙼𝚈 𝙾𝙻𝙳 𝙼𝙴𝚂𝚂𝙰𝙶𝙴 𝚂𝙾 𝙿𝙻𝙴𝙰𝚂𝙴 𝚁𝙴𝚀𝚄𝙴𝚂𝚃 𝙰𝙶𝙰𝙸𝙽 🙏",show_alert=True)
return
if int(index) == 1:
buttons = data['buttons'][int(index)-1].copy()
buttons.append(
[InlineKeyboardButton(f"📃 {int(index)}/{data['total']}", callback_data="pages"),
InlineKeyboardButton("🗑️", callback_data="close"),
InlineKeyboardButton("➡", callback_data=f"nextgroup_{int(index)-1}_{keyword}")]
)
buttons.append(
[InlineKeyboardButton(text="🤖 𝙲𝙷𝙴𝙲𝙺 𝙼𝚈 𝙿𝙼 🤖", url=f"https://telegram.dog/{temp.Bot_Username}")]
)
await update.edit_message_reply_markup(reply_markup=InlineKeyboardMarkup(buttons))
return
else:
buttons = data['buttons'][int(index)-1].copy()
buttons.append(
[InlineKeyboardButton("🔙", callback_data=f"backgroup_{int(index)-1}_{keyword}"),
InlineKeyboardButton(f"📃 {int(index)}/{data['total']}", callback_data="pages"),
InlineKeyboardButton("🗑️", callback_data="close"),
InlineKeyboardButton("➡", callback_data=f"nextgroup_{int(index)-1}_{keyword}")]
)
buttons.append(
[InlineKeyboardButton(text="🤖 𝙲𝙷𝙴𝙲𝙺 𝙼𝚈 𝙿𝙼 🤖", url=f"https://telegram.dog/{temp.Bot_Username}")]
)
await update.edit_message_reply_markup(reply_markup=InlineKeyboardMarkup(buttons))
return
elif update.data.startswith("nextbot"):
mrk, index, keyword = update.data.split("_")
try:
data = temp.BUTTONS[keyword]
except KeyError:
await update.answer("𝚃𝙷𝙸𝚂 𝙼𝚈 𝙾𝙻𝙳 𝙼𝙴𝚂𝚂𝙰𝙶𝙴 𝚂𝙾 𝙿𝙻𝙴𝙰𝚂𝙴 𝚁𝙴𝚀𝚄𝙴𝚂𝚃 𝙰𝙶𝙰𝙸𝙽 🙏",show_alert=True)
return
if int(index) == int(data["total"]) - 2:
buttons = data['buttons'][int(index)+1].copy()
buttons.append(
[InlineKeyboardButton("🔙", callback_data=f"backbot_{int(index)+1}_{keyword}"),
InlineKeyboardButton(f"📃 {int(index)+2}/{data['total']}", callback_data="pages"),
InlineKeyboardButton("🗑️", callback_data="close")]
)
await update.edit_message_reply_markup(reply_markup=InlineKeyboardMarkup(buttons))
return
else:
buttons = data['buttons'][int(index)+1].copy()
buttons.append(
[InlineKeyboardButton("🔙", callback_data=f"backbot_{int(index)+1}_{keyword}"),
InlineKeyboardButton(f"📃 {int(index)+2}/{data['total']}", callback_data="pages"),
InlineKeyboardButton("🗑️", callback_data="close"),
InlineKeyboardButton("➡", callback_data=f"nextbot_{int(index)+1}_{keyword}")]
)
await update.edit_message_reply_markup(reply_markup=InlineKeyboardMarkup(buttons))
return
elif update.data.startswith("backbot"):
mrk, index, keyword = update.data.split("_")
try:
data = temp.BUTTONS[keyword]
except KeyError:
await update.answer("𝚃𝙷𝙸𝚂 𝙼𝚈 𝙾𝙻𝙳 𝙼𝙴𝚂𝚂𝙰𝙶𝙴 𝚂𝙾 𝙿𝙻𝙴𝙰𝚂𝙴 𝚁𝙴𝚀𝚄𝙴𝚂𝚃 𝙰𝙶𝙰𝙸𝙽 🙏",show_alert=True)
return
if int(index) == 1:
buttons = data['buttons'][int(index)-1].copy()
buttons.append(
[InlineKeyboardButton(f"📃 {int(index)}/{data['total']}", callback_data="pages"),
InlineKeyboardButton("🗑️", callback_data="close"),
InlineKeyboardButton("➡", callback_data=f"nextbot_{int(index)-1}_{keyword}")]
)
await update.edit_message_reply_markup(reply_markup=InlineKeyboardMarkup(buttons))
return
else:
buttons = data['buttons'][int(index)-1].copy()
buttons.append(
[InlineKeyboardButton("🔙", callback_data=f"backbot_{int(index)-1}_{keyword}"),
InlineKeyboardButton(f"📃 {int(index)}/{data['total']}", callback_data="pages"),
InlineKeyboardButton("🗑️", callback_data="close"),
InlineKeyboardButton("➡", callback_data=f"nextbot_{int(index)-1}_{keyword}")]
)
await update.edit_message_reply_markup(reply_markup=InlineKeyboardMarkup(buttons))
return
elif update.data.startswith("settings"):
mrk, set_type, status, grp_id = update.data.split("#")
grpid = await active_connection(str(update.from_user.id))
if str(grp_id) != str(grpid):
await update.message.edit("𝙸𝙰𝙼 𝙽𝙾𝚃 𝙲𝙾𝙽𝙽𝙴𝙲𝚃𝙴𝙳 𝙰𝙽𝚈 𝙶𝚁𝙾𝚄𝙿..!\n 𝚄𝚂𝙴 𝚃𝙷𝙸𝚂 𝙲𝙾𝙼𝙼𝙰𝙽𝙳 /connect 𝙰𝙽𝙳 𝙲𝙾𝙽𝙽𝙴𝙲𝚃 𝚈𝙾𝚄𝚁 𝙲𝙷𝙰𝚃")
if status == "True":
await save_group_settings(grpid, set_type, False)
else:
await save_group_settings(grpid, set_type, True)
settings = await get_settings(grpid)
if settings is not None:
pr0fess0r_99 = [[
InlineKeyboardButton('𝙵𝙸𝙻𝚃𝙴𝚁 𝙱𝚄𝚃𝚃𝙾𝙽', callback_data=f'settings#button#{settings["button"]}#{str(grp_id)}'),
InlineKeyboardButton('𝚂𝙸𝙽𝙶𝙻𝙴' if settings["button"] else '𝙳𝙾𝚄𝙱𝙻𝙴', callback_data=f'settings#button#{settings["button"]}#{str(grp_id)}')
],[
InlineKeyboardButton('𝚆𝙴𝙻𝙲𝙾𝙼𝙴 𝙼𝚂𝙶', callback_data=f'settings#welcome#{settings["welcome"]}#{str(grp_id)}'),
InlineKeyboardButton('𝙾𝙽' if settings["welcome"] else '𝙾𝙵𝙵', callback_data=f'settings#welcome#{settings["welcome"]}#{str(grp_id)}')
],[
InlineKeyboardButton('𝚂𝙿𝙴𝙻𝙻 𝙲𝙷𝙴𝙲𝙺', callback_data=f'settings#spellmode#{settings["spellmode"]}#{str(grp_id)}'),
InlineKeyboardButton('𝙾𝙽' if settings["spellmode"] else '𝙾𝙵𝙵', callback_data=f'settings#spellmode#{settings["spellmode"]}#{str(grp_id)}')
],[
InlineKeyboardButton('𝙱𝙾𝚃 𝙿𝙾𝚂𝚃𝙴𝚁', callback_data=f'settings#photo#{settings["photo"]}#{str(grp_id)}'),
InlineKeyboardButton('𝙾𝙽' if settings["photo"] else '𝙾𝙵𝙵', callback_data=f'settings#photo#{settings["photo"]}#{str(grp_id)}')
],[
InlineKeyboardButton('𝚂𝙰𝚅𝙴 𝙵𝙸𝙻𝙴𝚂', callback_data=f'settings#savefiles#{settings["savefiles"]}#{str(grp_id)}'),
InlineKeyboardButton('𝙾𝙽' if settings["savefiles"] else '𝙾𝙵𝙵', callback_data=f'settings#savefiles#{settings["savefiles"]}#{str(grp_id)}')
],[
InlineKeyboardButton('𝙵𝙸𝙻𝙴 𝙼𝙾𝙳𝙴', callback_data=f'settings#filemode#{settings["filemode"]}#{str(grp_id)}'),
InlineKeyboardButton('𝙿𝙼' if settings["filemode"] else '𝙲𝙷𝙰𝙽𝙽𝙴𝙻', callback_data=f'settings#filemode#{settings["filemode"]}#{str(grp_id)}')
]]
pr0fess0r_99 = InlineKeyboardMarkup(pr0fess0r_99)
await update.message.edit_reply_markup(reply_markup=pr0fess0r_99)
elif update.data.startswith("luciferGP"):
mrk, file_id = update.data.split("#")
file_details_pr0fess0r99 = await get_file_details(file_id)
settings = await get_settings(update.message.chat.id)
if not file_details_pr0fess0r99:
return await update.answer('𝙵𝙸𝙻𝙴 𝙽𝙾𝚃 𝙵𝙾𝚄𝙽𝙳...!')
files = file_details_pr0fess0r99[0]
title = files.file_name
size = get_size(files.file_size)
if not await db.is_user_exist(update.from_user.id):
dellogs=await update.message.reply_text(f"""<b>𝙷𝙴𝚈 {update.from_user.mention} 𝚈𝙾𝚄𝚁 𝚁𝙴𝚀𝚄𝙴𝚂𝚃 𝙵𝙸𝙻𝙴 𝙸𝚂 𝚁𝙴𝙰𝙳𝚈<b>\n\n• **𝚃𝙸𝚃𝙻𝙴** : <code>{title}</code>\n\n• **𝚂𝙸𝚉𝙴** : {size} """, reply_markup=InlineKeyboardMarkup( [[ InlineKeyboardButton("𝙲𝙻𝙸𝙲𝙺 𝙷𝙴𝚁𝙴", url=f"https://telegram.dog/{temp.Bot_Username}?start=muhammedrk-mo-tech-group-{file_id}") ]] ))
await asyncio.sleep(30)
await dellogs.delete()
return
if AUTH_CHANNEL and not await is_subscribed(bot, update):
dellogs=await update.message.reply_text(f"""<b>𝙷𝙴𝚈 {update.from_user.mention} 𝚈𝙾𝚄𝚁 𝚁𝙴𝚀𝚄𝙴𝚂𝚃 𝙵𝙸𝙻𝙴 𝙸𝚂 𝚁𝙴𝙰𝙳𝚈<b>\n\n• **𝚃𝙸𝚃𝙻𝙴** : <code>{title}</code>\n\n• **𝚂𝙸𝚉𝙴** : {size} """, reply_markup=InlineKeyboardMarkup( [[ InlineKeyboardButton("𝙲𝙻𝙸𝙲𝙺 𝙷𝙴𝚁𝙴", url=f"https://telegram.dog/{temp.Bot_Username}?start=muhammedrk-mo-tech-group-{file_id}") ]] ))
await asyncio.sleep(30)
await dellogs.delete()
return
FILE_CAPTION = settings["caption"]
caption = FILE_CAPTION.format(mention=update.from_user.mention, file_name=title, size=size, caption=files.caption)
buttons = [[ InlineKeyboardButton("⚜️ 𝚂𝙷𝙰𝚁𝙴 𝙼𝙴 𝚆𝙸𝚃𝙷 𝚈𝙾𝚄𝚁 𝙵𝚁𝙸𝙴𝙽𝙳𝚂 ⚜️", url=f"https://t.me/share/url?url=Best%20AutoFilter%20Bot%20%0A%40LuciferMoringstar_Robot%0A@{temp.Bot_Username}") ]]
if settings["savefiles"]:
protect_content = True
else:
protect_content = False
try:
if settings["filemode"]:
try:
await bot.send_cached_media(chat_id=update.from_user.id, file_id=file_id, caption=caption, reply_markup=InlineKeyboardMarkup(buttons), protect_content=protect_content)
await update.answer("""𝙲𝙷𝙴𝙲𝙺 𝙿𝙼, 𝙸 𝙷𝙰𝚅𝙴 𝚂𝙴𝙽𝚃 𝙵𝙸𝙻𝙴𝚂 𝙸𝙽 𝙿𝙼\n𝙲𝙻𝙸𝙲𝙺 𝙲𝙷𝙴𝙲𝙺 𝙿𝙼 𝙱𝚄𝚃𝚃𝙾𝙽""", show_alert=True)
except Exception as e:
await update.message.reply(f"{e}")
dellogs=await update.message.reply_text(f"""<b>𝙷𝙴𝚈 {update.from_user.mention} 𝚈𝙾𝚄𝚁 𝚁𝙴𝚀𝚄𝙴𝚂𝚃 𝙵𝙸𝙻𝙴 𝙸𝚂 𝚁𝙴𝙰𝙳𝚈<b>\n\n• **𝚃𝙸𝚃𝙻𝙴** : <code>{title}</code>\n\n• **𝚂𝙸𝚉𝙴** : {size} """, reply_markup=InlineKeyboardMarkup( [[ InlineKeyboardButton("𝙲𝙻𝙸𝙲𝙺 𝙷𝙴𝚁𝙴", url=f"https://telegram.dog/{temp.Bot_Username}?start=muhammedrk-mo-tech-group-{file_id}") ]] ))
await asyncio.sleep(30)
await dellogs.delete()
else:
try:
invite_link = await bot.create_chat_invite_link(GET_FILECHANNEL)
dlFile = await bot.send_cached_media(chat_id=GET_FILECHANNEL, file_id=file_id, caption=caption, reply_markup=InlineKeyboardMarkup(buttons))
dlReply = await update.message.reply_text(GETFILE_TEXT.format(mention=update.from_user.mention, file_name=title, file_size=size), reply_markup=InlineKeyboardMarkup( [[ InlineKeyboardButton("📥 🅳︎🅾︎🆆︎🅽︎🅻︎🅾︎🅰︎🅳︎ 📥", url=dlFile.link) ],[ InlineKeyboardButton("⚠️𝙲𝙾𝙽'𝚃 𝙰𝙲𝙲𝙴𝚂𝚂 𝙲𝙻𝙸𝙲𝙺 𝙷𝙴𝚁𝙴⚠️", url=invite_link.invite_link) ]] ))
await asyncio.sleep(1000)
await dlFile.delete()
await dlReply.delete()
except Exception as e:
await update.message.reply(f"**(1)**» {e}")
dellogs=await update.message.reply_text(f"""<b>𝙷𝙴𝚈 {update.from_user.mention} 𝚈𝙾𝚄𝚁 𝚁𝙴𝚀𝚄𝙴𝚂𝚃 𝙵𝙸𝙻𝙴 𝙸𝚂 𝚁𝙴𝙰𝙳𝚈<b>\n\n• **𝚃𝙸𝚃𝙻𝙴** : <code>{title}</code>\n\n• **𝚂𝙸𝚉𝙴** : {size} """, reply_markup=InlineKeyboardMarkup( [[ InlineKeyboardButton("𝙲𝙻𝙸𝙲𝙺 𝙷𝙴𝚁𝙴", url=f"https://telegram.dog/{temp.Bot_Username}?start=muhammedrk-mo-tech-group-{file_id}") ]] ))
await asyncio.sleep(30)
await dellogs.delete()
except UserIsBlocked:
await update.answer('Unblock the bot mahn !', show_alert=True)
except PeerIdInvalid:
dellogs=await update.message.reply_text(f"""<b>𝙷𝙴𝚈 {update.from_user.mention} 𝚈𝙾𝚄𝚁 𝚁𝙴𝚀𝚄𝙴𝚂𝚃 𝙵𝙸𝙻𝙴 𝙸𝚂 𝚁𝙴𝙰𝙳𝚈<b>\n\n• **𝚃𝙸𝚃𝙻𝙴** : <code>{title}</code>\n\n• **𝚂𝙸𝚉𝙴** : {size} """, reply_markup=InlineKeyboardMarkup( [[ InlineKeyboardButton("𝙲𝙻𝙸𝙲𝙺 𝙷𝙴𝚁𝙴", url=f"https://telegram.dog/{temp.Bot_Username}?start=muhammedrk-mo-tech-group-{file_id}") ]] ))
await asyncio.sleep(30)
await dellogs.delete()
except Exception as e:
await update.message.reply(f"**(2)**» {e}")
dellogs=await update.message.reply_text(f"""<b>𝙷𝙴𝚈 {update.from_user.mention} 𝚈𝙾𝚄𝚁 𝚁𝙴𝚀𝚄𝙴𝚂𝚃 𝙵𝙸𝙻𝙴 𝙸𝚂 𝚁𝙴𝙰𝙳𝚈<b>\n\n• **𝚃𝙸𝚃𝙻𝙴** : <code>{title}</code>\n\n• **𝚂𝙸𝚉𝙴** : {size} """, reply_markup=InlineKeyboardMarkup( [[ InlineKeyboardButton("𝙲𝙻𝙸𝙲𝙺 𝙷𝙴𝚁𝙴", url=f"https://telegram.dog/{temp.Bot_Username}?start=muhammedrk-mo-tech-group-{file_id}") ]] ))
await asyncio.sleep(30)
await dellogs.delete()
elif update.data.startswith("luciferPM"):
mrk, file_id = update.data.split("#")
# if not await db.is_user_exist(update.from_user.id):
# dellogs=await update.message.reply_text(f"""<b>𝙷𝙴𝚈 {update.from_user.id} 𝚈𝙾𝚄𝚁 𝚁𝙴𝚀𝚄𝙴𝚂𝚃 𝙵𝙸𝙻𝙴 𝙸𝚂 𝚁𝙴𝙰𝙳𝚈<b>\n\n• **𝚃𝙸𝚃𝙻𝙴** : <code>{title}</code>\n\n• **𝚂𝙸𝚉𝙴** : {size} """, reply_markup=InlineKeyboardMarkup( [[ InlineKeyboardButton("𝙲𝙻𝙸𝙲𝙺 𝙷𝙴𝚁𝙴", url=f"https://telegram.dog/{temp.Bot_Username}?start=muhammedrk-mo-tech-group-{file_id}") ]] ))
# await asyncio.sleep(30)
# await dellogs.delete()
# return
if AUTH_CHANNEL and not await is_subscribed(bot, update):
await update.answer(NOT_SUB, show_alert=True)
# dellogs=await update.message.reply_text(f"""<b>𝙷𝙴𝚈 {update.from_user.id} 𝚈𝙾𝚄𝚁 𝚁𝙴𝚀𝚄𝙴𝚂𝚃 𝙵𝙸𝙻𝙴 𝙸𝚂 𝚁𝙴𝙰𝙳𝚈<b>\n\n• **𝚃𝙸𝚃𝙻𝙴** : <code>{title}</code>\n\n• **𝚂𝙸𝚉𝙴** : {size} """, reply_markup=InlineKeyboardMarkup( [[ InlineKeyboardButton("𝙲𝙻𝙸𝙲𝙺 𝙷𝙴𝚁𝙴", url=f"https://telegram.dog/{temp.Bot_Username}?start=muhammedrk-mo-tech-group-{file_id}") ]] ))
# await asyncio.sleep(30)
# await dellogs.delete()
return
file_details_pr0fess0r99 = await get_file_details(file_id)
if not file_details_pr0fess0r99:
return await update.answer('𝙵𝙸𝙻𝙴 𝙽𝙾𝚃 𝙵𝙾𝚄𝙽𝙳...!')
files = file_details_pr0fess0r99[0]
title = files.file_name
size = get_size(files.file_size)
caption = CUSTOM_FILE_CAPTION.format(mention=update.from_user.mention, file_name=title, size=size, caption=files.caption)
buttons = [[ InlineKeyboardButton("⚜️ 𝚂𝙷𝙰𝚁𝙴 𝙼𝙴 𝚆𝙸𝚃𝙷 𝚈𝙾𝚄𝚁 𝙵𝚁𝙸𝙴𝙽𝙳𝚂 ⚜️", url=f"https://t.me/share/url?url=Best%20AutoFilter%20Bot%20%0A%40LuciferMoringstar_Robot%0A@{temp.Bot_Username}") ]]
try:
await bot.send_cached_media(chat_id=update.from_user.id, file_id=file_id, caption=caption, reply_markup=InlineKeyboardMarkup(buttons), protect_content=SAVE_FILES)
except Exception as e:
print(f"{e}")
dellogs=await update.message.reply_text(f"""<b>𝙷𝙴𝚈 {update.from_user.id} 𝚈𝙾𝚄𝚁 𝚁𝙴𝚀𝚄𝙴𝚂𝚃 𝙵𝙸𝙻𝙴 𝙸𝚂 𝚁𝙴𝙰𝙳𝚈<b>\n\n• **𝚃𝙸𝚃𝙻𝙴** : <code>{title}</code>\n\n• **𝚂𝙸𝚉𝙴** : {size} """, reply_markup=InlineKeyboardMarkup( [[ InlineKeyboardButton("𝙲𝙻𝙸𝙲𝙺 𝙷𝙴𝚁𝙴", url=f"https://telegram.dog/{temp.Bot_Username}?start=muhammedrk-mo-tech-group-{file_id}") ]] ))
await asyncio.sleep(30)
await dellogs.delete()
return
elif update.data == "start":
buttons = [[ InlineKeyboardButton("× 𝙰𝙳𝙳 𝙼𝙴 𝚃𝙾 𝚈𝙾𝚄𝚁 𝙶𝚁𝙾𝚄𝙿 ×", url=f"http://t.me/{temp.Bot_Username}?startgroup=true") ],
[ InlineKeyboardButton("𝚂𝚄𝙿𝙿𝙾𝚁𝚃 💬", url=f"t.me/{SUPPORT}"), InlineKeyboardButton("𝚄𝙿𝙳𝙰𝚃𝙴𝚂 📢", url="t.me/Mo_Tech_YT") ],
[ InlineKeyboardButton("ℹ️ 𝙷𝙴𝙻𝙿", callback_data="help"), InlineKeyboardButton("𝙰𝙱𝙾𝚄𝚃 🤠", callback_data="about") ]]
await update.message.edit(START_MESSAGE.format(mention=update.from_user.mention, name=temp.Bot_Name, username=temp.Bot_Username), reply_markup=InlineKeyboardMarkup(buttons))
elif update.data == "help":
try:
buttons = [[ InlineKeyboardButton("𝙰𝚄𝚃𝙾𝙵𝙸𝙻𝚃𝙴𝚁𝚂", callback_data="autofilter"), InlineKeyboardButton("𝙲𝙾𝙽𝙽𝙴𝙲𝚃𝙸𝙾𝙽𝚂", callback_data="connection"), InlineKeyboardButton("𝙲𝙰𝙿𝚃𝙸𝙾𝙽", callback_data="filecaption") ],
[ InlineKeyboardButton("𝚆𝙴𝙻𝙲𝙾𝙼𝙴", callback_data="welcome"), InlineKeyboardButton("𝚂𝙿𝙴𝙻𝙻𝙲𝙷𝙴𝙲𝙺", callback_data="spellcheck"), InlineKeyboardButton("𝙼𝙸𝚂𝙲", callback_data="misc") ],
[ InlineKeyboardButton("𝚂𝚃𝙰𝚃𝚄𝚂", callback_data="status"), InlineKeyboardButton("𝙷𝙾𝙼𝙴", callback_data="start") ]]
await update.message.edit(HELP_MESSAGE.format(mention=update.from_user.mention, name=temp.Bot_Name, username=temp.Bot_Username), reply_markup=InlineKeyboardMarkup(buttons))
except MessageNotModified:
pass
elif update.data == "about":
try:
buttons = [[ InlineKeyboardButton("📦 𝚂𝙾𝚄𝚁𝙲𝙴 📦", url="https://github.com/PR0FESS0R-99/LuciferMoringstar-Robot")],
[ InlineKeyboardButton("𝙷𝙾𝙼𝙴", callback_data="start"), InlineKeyboardButton("𝙷𝙾𝚆 𝚃𝙾 𝚄𝚂𝙴", callback_data="usage"), InlineKeyboardButton("𝙲𝙻𝙾𝚂𝙴", callback_data="close") ]]
await | |
# Copyright 2004-2010 <NAME>.
# See license.txt for terms.
"""
x86inst.py
----------
This contains the core functionality to handle x86 instructions. We need to
build definition tables, so that instructions can be converted to/from
mnemonics during assembly/disassembly phases.
At some point we'll need to deal with forward references and symbols, but
not today.
"""
import logging
import struct
from pickle import decode_long, encode_long
from x86tokenizer import (tokenizeInstDef,tokenizeInst,
REGISTER,OPCODE,COMMA,OPERAND,
LBRACKET, RBRACKET,NUMBER,SYMBOL)
# Raised when a byte sequence is only a strict prefix of a longer multi-byte
# opcode; the caller should fetch more bytes and retry the lookup.
class OpcodeTooShort(Exception):pass
# Raised when an opcode cannot be resolved to a single instruction form
# without consulting the ModR/M byte.
class OpcodeNeedsModRM(Exception):pass # for /d info and SIB calculation
# Generic error type for failures within this module.
class x86instError(Exception):pass
# Flag tokens that may appear in an opcode-definition string (Intel-manual
# notation): /digit forms, /r (ModRM reg), code-offset sizes (cb..cp),
# immediate sizes (ib/iw/id), and register-in-opcode shorthands (+r*).
opcodeFlags = ['/0','/1','/2','/3','/4','/5','/6','/7',
               '/r',
               'cb','cw','cd','cp',
               'ib','iw','id',
               '+rb','+rw','+rd',
               '+i'
               ]
# Operand tokens whose presence in an instruction template implies a ModRM byte.
instModRM = ['r/m8','r/m16','r/m32','r8','r16','r32']
# Immediate and displacement operand tokens, by size.
immediate = ['imm8','imm16','imm32']
displacement = ['rel8','rel16','rel32']
# Register names indexed by the 3-bit register number (byte/word/dword).
rb = ['AL','CL','DL','BL','AH','CH','DH','BH']
rw = ['AX','CX','DX','BX','SP','BP','SI','DI']
rd = ['EAX','ECX','EDX','EBX','ESP','EBP','ESI','EDI']
# Lookup of register-name tables by operand-type keyword.
regOpcode = {
    'r8':['AL','CL','DL','BL','AH','CH','DH','BH'],
    'r16':['AX','CX','DX','BX','SP','BP','SI','DI'],
    'r32':['EAX','ECX','EDX','EBX','ESP','EBP','ESI','EDI'],
    'mm':['MM0','MM1','MM2','MM3','MM4','MM5','MM6','MM7'],
    'xmm':['XMM0','XMM1','XMM2','XMM3','XMM4','XMM5','XMM6','XMM7'],
    '/digit':[0,1,2,3,4,5,6,7],
    'REG':[0,1,2,3,4,5,6,7],
    }
# r/m operand text per ModRM mode, indexed by the 3-bit r/m field.
# '[--][--]' marks the presence of an SIB byte; 'disp32' (mode 0, r/m 5)
# is an absolute 32-bit address.
mode1 = ['[EAX]','[ECX]','[EDX]','[EBX]','[--][--]','disp32','[ESI]','[EDI]']
mode2 = ['[EAX+disp8]','[ECX+disp8]','[EDX+disp8]','[EBX+disp8]',
         '[--][--]+disp8','[EBP+disp8]','[ESI+disp8]','[EDI+disp8]']
# BUGFIX: ModRM mode 2 uses a 32-bit displacement for every r/m value;
# entries 5-7 previously (and wrongly) said "+disp8".
mode3 = ['[EAX+disp32]','[ECX+disp32]','[EDX+disp32]','[EBX+disp32]',
         '[--][--]+disp32','[EBP+disp32]','[ESI+disp32]','[EDI+disp32]']
#
# These could be 16, but that doesn't make sense for Windows;
# they are overridden with opcode prefixes.
#
DefaultOperandSize = 32
class OpcodeDict(dict):
    """
    Maps opcode byte tuples to lists of matching instruction definitions.

    Multi-byte opcodes also store ``None`` sentinels under every strict
    prefix of their key; looking one of those up raises ``OpcodeTooShort``
    so the caller knows to fetch more bytes and retry.
    """
    def __getitem__(self, key):
        retVal = dict.__getitem__(self, key)
        if retVal is None:
            # ``key`` is only a prefix of a longer opcode.
            raise OpcodeTooShort()
        return retVal
    def __setitem__(self, key, value):
        # Accumulate all instruction forms that share the same opcode bytes.
        # (Was ``self.has_key(key)``, which no longer exists in Python 3.)
        if key in self:
            dict.__getitem__(self, key).append(value)
        else:
            dict.__setitem__(self, key, [value])
        # Sentinel for multi-byte opcodes
        if len(key) > 1:
            for i in range(1, len(key)):
                tmpKey = key[:i]
                dict.__setitem__(self, tmpKey, None)
    def GetOp(self, opcode, modRM=None):
        """
        Return the unique instruction definition for ``opcode``.

        ``modRM``, when given, narrows /digit forms by the ModRM reg field.
        Raises ``OpcodeNeedsModRM`` when disambiguation requires a ModRM
        byte the caller did not supply.
        """
        lst = self.__getitem__(opcode)
        if modRM is not None:
            # Narrow by the /digit encoded in the ModRM reg/opcode field.
            mrm = ModRM(modRM)
            digit = "/%s" % mrm.RegOp
            lst = [item for item in lst if digit in item.OpcodeFlags]
        # Discard operand-size variants that do not apply under the current
        # default operand size (16-bit forms in 32-bit mode and vice versa).
        if DefaultOperandSize == 16:
            excluded_tokens = ('r32', 'r/m32', 'imm32', 'rel32', 'm32')
            excluded_flag = 'rd'
        elif DefaultOperandSize == 32:
            excluded_tokens = ('r16', 'r/m16', 'imm16', 'rel16', 'm16')
            excluded_flag = 'rw'
        else:
            raise RuntimeError("Invalid DefaultOperandSize")
        for token in excluded_tokens:
            lst = [item for item in lst if token not in item.InstructionString]
        lst = [item for item in lst if excluded_flag not in item.OpcodeFlags]
        if len(lst) == 0:
            raise RuntimeError("Invalid/Unimplemented Opcode [%s]" % opcode)
        elif len(lst) > 1:
            # Still ambiguous: /digit candidates mean the caller must
            # supply a ModRM byte to pick the right form.
            op = lst[0]
            for flag in op.OpcodeFlags:
                if flag in ('/0','/1','/2','/3','/4','/5','/6','/7'):
                    raise OpcodeNeedsModRM("Opcode %s" % op.Opcode)
            for x in lst:
                logging.error(x.Description)
            raise RuntimeError("Shouldn't get here")
        else:
            return lst[0]
class MnemonicDict(dict):
    """
    Maps a tokenized instruction definition to its instruction object.

    Unlike ``OpcodeDict``, each key may have exactly one definition;
    re-registering a mnemonic is a programming error.
    """
    def __setitem__(self, key, val):
        # Was ``self.has_key(key)`` with a backtick repr -- both are
        # Python-2-only constructs; ``in`` and ``repr()`` are equivalent.
        if key in self:
            raise RuntimeError("Duplicate mnemonic def %s" % repr(key))
        dict.__setitem__(self, key, val)
# Module-level registries populated as each ``instruction`` is constructed:
# lookup by opcode bytes (disassembly) and by mnemonic (assembly).
opcodeDict = OpcodeDict()
mnemonicDict = MnemonicDict()
def longToBytes(long, bytes=4):
    """
    Convert an integer to a little-endian tuple of byte values.

    The value is sign-extended (0x00 padding for non-negative values,
    0xFF for negative) out to ``bytes`` elements.  Raises ``x86instError``
    if the value does not fit in ``bytes`` bytes.

    NOTE: the parameter names shadow the builtins ``long``/``bytes``; they
    are kept unchanged for backward compatibility with existing callers.
    """
    raw = encode_long(long)
    # pickle.encode_long returns str in Python 2 but bytes in Python 3;
    # iterating the latter yields ints, so normalize both representations.
    retVal = [b if isinstance(b, int) else ord(b) for b in raw]
    pad = 0 if long >= 0 else 0xFF  # sign-extension byte
    while len(retVal) < bytes:
        retVal.append(pad)
    if len(retVal) > bytes:
        # The value only fits if every excess high-order byte is zero.
        good, bad = retVal[:bytes], retVal[bytes:]
        for x in bad:
            if x:
                raise x86instError("Failed converting long '%s' to bytes '%s'" % \
                    (long, retVal))
        retVal = good
    return tuple(retVal)
def longToString(long, bytes=4):
    """Render an integer as a little-endian string of raw characters."""
    return ''.join(map(chr, longToBytes(long, bytes)))
def longToBytesRepr(long, bytes=4):
    """Hex-dump an integer little-endian, e.g. 0x1234 -> '34 12 00 00 '."""
    return ''.join("%02X " % b for b in longToBytes(long, bytes))
class ModRM:
    """
    The x86 ModR/M byte: mod (bits 6-7), reg/opcode (bits 3-5) and
    r/m (bits 0-2), with helpers to render the operands they select.
    """
    def __init__(self, byte=None):
        self.Mode = 0x0
        self.RegOp = 0x0
        self.RM = 0x0
        if byte:
            self.LoadFromByte(byte)
    def LoadFromByte(self, byte):
        """Unpack the three bit fields from a raw ModR/M byte."""
        self.Mode = (byte >> 6) & 0x3
        self.RegOp = (byte >> 3) & 0x7
        self.RM = byte & 0x7
    def SaveToByte(self):
        """Repack the three fields into a single byte value."""
        return (self.Mode << 6) + (self.RegOp << 3) + self.RM
    def HasSIB(self):
        """True when an SIB byte follows (r/m == 100b in any memory mode)."""
        return self.Mode in (0, 1, 2) and self.RM == 4
    def RegOpString(self, typ):
        """Name of the register selected by the reg/opcode field for ``typ``."""
        return regOpcode[typ][self.RegOp]
    def RMString(self, typ=None):
        """
        Operand text for the r/m field.  ``typ`` selects the register set
        ('r/m8'/'r/m16'/'r/m32') when Mode == 3 (register-direct).
        """
        memory_tables = {0: mode1, 1: mode2, 2: mode3}
        if self.Mode in memory_tables:
            return memory_tables[self.Mode][self.RM]
        if self.Mode == 3:
            register_sets = {'r/m8': 'r8', 'r/m16': 'r16', 'r/m32': 'r32'}
            if typ not in register_sets:
                raise RuntimeError("Invalid r/m type")
            return regOpcode[register_sets[typ]][self.RM]
        raise RuntimeError("Invalid Mode")
    def GetDisplacementSize(self):
        """Displacement size in bytes implied by mode/r/m (known only at runtime)."""
        if self.Mode == 1:
            return 1
        if self.Mode == 2 or (self.Mode == 0 and self.RM == 5):
            return 4
        return 0
class instruction:
    """
    One form of an x86 instruction, built from an opcode-definition string
    (e.g. "81 /0 id"), an instruction template (e.g. "ADD r/m32,imm32"),
    and a human-readable description.

    Construction parses both strings, records which suffix components
    (ModRM, immediate, displacement) the form carries, and registers the
    result in the module-level ``opcodeDict`` and ``mnemonicDict``.
    Forms using the +rb/+rw/+rd register-in-opcode shorthand expand into
    eight concrete single-register forms instead of registering themselves.
    """
    def __init__(self, opstr, inststr, desc):
        self.OpcodeString = opstr
        self.InstructionString = inststr
        self.Description = desc
        self.Opcode = []            # tuple of literal opcode bytes after parsing
        self.OpcodeSize = 0
        self.OpcodeFlags = []       # non-byte tokens from the opcode string
        self.InstructionDef = tokenizeInstDef(self.InstructionString)
        self.HasImmediate = False
        self.ImmediateSize = 0      # no of bytes
        self.HasModRM = False
        self.ModRM = None
        self.HasPrefixes = False
        self.Prefixes = None
        self.HasDisplacement = False
        self.DisplacementSize = 0
        self.setOpcodeAndFlags()
        self.setHasFlags()
        if '+rb' in self.OpcodeFlags:
            self.loadRBWD('+rb', 'r8')
        elif '+rw' in self.OpcodeFlags:
            self.loadRBWD('+rw', 'r16')
        elif '+rd' in self.OpcodeFlags:
            self.loadRBWD('+rd', 'r32')
        else:
            opcodeDict[self.Opcode] = self
            mnemonicDict[self.InstructionDef] = self
    def loadRBWD(self, plus, reg):
        """
        Expand a +rb/+rw/+rd form into eight concrete instructions, one per
        register, by bumping the opcode byte and substituting the register
        name into the template.
        """
        for i in range(8):
            OS = self.OpcodeString
            IS = self.InstructionString
            ID = self.Description
            OS = OS.replace(plus, plus[1:])
            OS = OS.replace("%X" % self.Opcode[0], '%X' % (self.Opcode[0] + i))
            IS = IS.replace(reg, regOpcode[reg][i])
            # Constructing the instruction registers it as a side effect.
            instruction(OS, IS, ID)
    def setOpcodeAndFlags(self):
        """Split the opcode string into literal bytes and encoding flags."""
        parts = self.OpcodeString.split()
        for part in parts:
            if len(part) == 2 and part[0] in "ABCDEF0123456789" \
               and part[1] in "ABCDEF0123456789":
                # A literal opcode byte in hex.  (Previously done with
                # eval("0x%s" % part); int(part, 16) is equivalent and safer.)
                self.Opcode.append(int(part, 16))
            else:
                self.OpcodeFlags.append(part)
        self.Opcode = tuple(self.Opcode)
        self.OpcodeSize = len(self.Opcode)
    def setHasFlags(self):
        """
        Inspect the instruction template and record which suffix pieces
        (ModRM, immediate, displacement) this form carries, plus their sizes.
        """
        for i in instModRM:
            if i in self.InstructionString:
                # +r forms encode the register in the opcode byte itself,
                # so no ModRM byte is emitted.
                if "+rb" in self.OpcodeFlags: break
                if "+rw" in self.OpcodeFlags: break
                if "+rd" in self.OpcodeFlags: break
                self.HasModRM = True
                break
        for i in immediate:
            if i in self.InstructionString:
                self.HasImmediate = True
                # NOTE(review): the "i == 'mo'" test can never be true for
                # the current ``immediate`` table; kept for fidelity.
                if i.endswith('8') or i == 'mo':
                    self.ImmediateSize = 1
                elif i.endswith('16'):
                    self.ImmediateSize = 2
                elif i.endswith('32'):
                    self.ImmediateSize = 4
                else:
                    raise RuntimeError("Invalid Immediate Value")
                break
        for i in displacement:
            if i in self.InstructionString:
                self.HasDisplacement = True
                if i.endswith('8'):
                    self.DisplacementSize = 1
                elif i.endswith('16'):
                    self.DisplacementSize = 2
                elif i.endswith('32'):
                    self.DisplacementSize = 4
                else:
                    raise RuntimeError("Invalid Displacement Value")
                break
        # %TODO: figure out logic for SIB, and prefixes
    def GetInstance(self):
        """Return a fresh ``instructionInstance`` wrapping this definition."""
        return instructionInstance(self)
    def __str__(self):
        retVal = ""
        retVal += self.OpcodeString + "\n"
        retVal += self.InstructionString + "\n"
        retVal += self.Description + "\n"
        retVal += "OP: %s, OP flag: %s\n" % (self.Opcode, self.OpcodeFlags)
        return retVal
# Short alias used by the bulk instruction-table definitions.
i = instruction
# Symbol-patch kinds: RELATIVE (1) for rel/displacement fixups,
# DIRECT (2) for absolute immediate fixups.
RELATIVE,DIRECT = range(1,3)
class instructionInstance:
"""
An instructionInstance is an instruction + the data for an instance's
prefixes and suffixes
"""
def __init__(self,inst):
self.Address = 0x0
self.Instruction = inst
self.Prefixes = []
self.ModRM = None
self.SIB = None
self.Displacement = None
self.DisplacementSymbol = None
self.Immediate = None
self.ImmediateSymbol = None
def GetSymbolPatchins(self,modrm=None):
"locations of symbols if they exist"
retVal = []
size = len(self.Instruction.Opcode)
if self.Instruction.HasModRM:
size += 1
if self.ModRM:
mrm = self.ModRM
elif modrm == None:
raise OpcodeNeedsModRM()
else:
mrm = ModRM(modrm)
if mrm.HasSIB():
size += 1
if mrm.GetDisplacementSize():
if self.DisplacementSymbol:
retVal.append( (self.DisplacementSymbol,self.Address + size, RELATIVE) )
size += mrm.GetDisplacementSize()
if self.Instruction.HasDisplacement:
if self.DisplacementSymbol:
retVal.append( (self.DisplacementSymbol,self.Address + size, RELATIVE) )
size += self.Instruction.DisplacementSize
if self.Instruction.HasImmediate:
if self.ImmediateSymbol:
retVal.append( (self.ImmediateSymbol, self.Address + size, DIRECT))
size += self.Instruction.ImmediateSize
return retVal
def GetSuffixSize(self,modrm=None):
"Size for everything after Opcode"
size = 0
if self.Instruction.HasModRM:
size += 1
if self.ModRM:
mrm = self.ModRM
elif modrm == None:
raise OpcodeNeedsModRM()
else:
mrm = ModRM(modrm)
if mrm.HasSIB():
size += 1
if mrm.GetDisplacementSize():
size += mrm.GetDisplacementSize()
if self.Instruction.HasDisplacement:
size += self.Instruction.DisplacementSize
if self.Instruction.HasImmediate:
size += self.Instruction.ImmediateSize
return size
def GetInstructionSize(self):
return len(self.Instruction.Opcode) + self.GetSuffixSize()
def NextInstructionLoc(self):
return self.GetInstructionSize() + self.Address
def LoadData(self, data):
first,rest = '',data
if self.Instruction.HasModRM:
first,rest = rest[0],rest[1:]
self.ModRM = ModRM(struct.unpack("<b",first)[0])
if self.ModRM.HasSIB():
first,rest = rest[0],rest[1:]
self.SIB = struct.unpack("<b",first)[0]
if self.Instruction.HasDisplacement:
if self.Instruction.DisplacementSize == 1:
first,rest = rest[0],rest[1:]
self.Displacement = struct.unpack("<b",first)[0]
elif self.Instruction.DisplacementSize == 2:
first,rest = rest[:2],rest[2:]
self.Displacement = struct.unpack("<s",first)[0]
elif self.Instruction.DisplacementSize == 4:
first,rest = rest[:4],rest[4:]
self.Displacement = struct.unpack('<l',first)[0]
else:
raise RuntimeError("Invalid Displacement size")
if self.Instruction.HasModRM:
dispSize = self.ModRM.GetDisplacementSize()
if dispSize == 0:
pass
elif dispSize == 1:
first,rest = rest[0],rest[1:]
self.Displacement = struct.unpack("<b",first)[0]
elif dispSize == 4:
first,rest = rest[:4],rest[4:]
self.Displacement | |
of code written under
DendroPy 3.x. It will be removed in future versions. All new code should
be written using |TaxonNamespace|. Old code needs to be updated to use
|TaxonNamespace|.
"""
def __init__(self, *args, **kwargs):
deprecate.dendropy_deprecation_warning(
message="Deprecated since DendroPy 4: 'TaxonSet' will no longer be supported in future releases; use 'TaxonNamespace' instead",
stacklevel=3)
TaxonNamespace.__init__(self, *args, **kwargs)
##############################################################################
## Taxon
class Taxon(
        basemodel.DataObject,
        basemodel.Annotable):
    """
    A taxon associated with a sequence or a node on a tree.

    Identity semantics: equality and hashing are by object identity, so
    two distinct Taxon objects are never equal even if their labels match.
    """
    def __init__(self, label=None):
        """
        Parameters
        ----------
        label : string or |Taxon| object
            Label or name of this operational taxonomic unit concept. If a
            string, then the ``label`` attribute of ``self`` is set to this value.
            If a |Taxon| object, then the ``label`` attribute of ``self`` is
            set to the same value as the ``label`` attribute the other
            |Taxon| object and all annotations/metadata are copied.
        """
        if isinstance(label, Taxon):
            # Copy-construction: clone the other taxon's state and
            # annotations, mapping the source object to ``self`` so
            # internal references are rewired to the new object.
            other_taxon = label
            label = other_taxon.label
            memo={id(other_taxon):self}
            for k in other_taxon.__dict__:
                if k != "_annotations":
                    self.__dict__[k] = copy.deepcopy(other_taxon.__dict__[k], memo=memo)
            self.deep_copy_annotations_from(other_taxon, memo=memo)
            # self.copy_annotations_from(other_taxon, attribute_object_mapper=memo)
        else:
            basemodel.DataObject.__init__(self, label=label)
            # Lazily-computed cache for ``lower_cased_label``; invalidated
            # whenever the label is reassigned.
            self._lower_cased_label = None
            self.comments = []
    def _get_label(self):
        return self._label
    def _set_label(self, v):
        # Invalidate the cached lower-cased form on every label change.
        self._label = v
        self._lower_cased_label = None
    label = property(_get_label, _set_label)
    def _get_lower_cased_label(self):
        # Compute on demand; cached until the label is reassigned.
        if self._label is None:
            return None
        if self._lower_cased_label is None:
            self._lower_cased_label = str(self._label).lower()
        return self._lower_cased_label
    lower_cased_label = property(_get_lower_cased_label)
    def __copy__(self):
        # Shallow copies are disallowed: taxon identity is object identity.
        raise TypeError("Cannot shallow-copy Taxon")
        # return self
    def taxon_namespace_scoped_copy(self, memo=None):
        # Within a single taxon namespace a taxon is never duplicated:
        # record the identity mapping and return self unchanged.
        if memo is not None:
            memo[id(self)] = self
        return self
    def __deepcopy__(self, memo=None):
        # Full clone (everything except annotations is deep-copied field by
        # field; annotations are copied via deep_copy_annotations_from).
        if memo is None:
            memo = {}
        try:
            o = memo[id(self)]
        except KeyError:
            # o = type(self).__new__(self.__class__)
            o = self.__class__.__new__(self.__class__)
            memo[id(self)] = o
            for k in self.__dict__:
                if k != "_annotations":
                    o.__dict__[k] = copy.deepcopy(self.__dict__[k], memo)
            o.deep_copy_annotations_from(self, memo)
            # o.copy_annotations_from(self, attribute_object_mapper=memo)
        return o
    def __hash__(self):
        # Identity hash pairs with identity equality below.
        return id(self)
    def __eq__(self, other):
        return self is other
    def __lt__(self, other):
        # Ordering compares labels (used for sorted listings).
        return self.label < other.label
    def __str__(self):
        "String representation of self = taxon name."
        return "'{}'".format(self._label)
    def __repr__(self):
        return "<{} {} '{}'>".format(self.__class__.__name__, hex(id(self)), self._label)
    def description(self, depth=1, indent=0, itemize="", output=None, **kwargs):
        """
        Returns description of object, up to level ``depth``.
        """
        if depth is None or depth < 0:
            return ""
        output_strio = StringIO()
        if self._label is None:
            label = "<Unnamed Taxon>"
        else:
            label = "'{}'".format(self._label)
        output_strio.write('{}{} Taxon object at {}: {}'.format(indent*' ', itemize, hex(id(self)), label))
        s = output_strio.getvalue()
        if output is not None:
            output.write(s)
        return s
##############################################################################
## TaxonNamespacePartition
class TaxonNamespacePartition(TaxonNamespaceAssociated):
"""
Manages a partition of a TaxonNamespace (i.e., a set of mutually-exclusive
and exhaustive subsets of a TaxonNamespace).
"""
    def __init__(self, taxon_namespace, **kwargs):
        """
        __init__ uses one of the following keyword arguments:
        - ``membership_fn``
            A function that takes a |Taxon| object as an argument and
            returns a a population membership identifier or flag
            (e.g., a string, an integer) .
        - ``membership_attr_name``
            Name of an attribute of |Taxon| objects that serves as an
            identifier for subset membership.
        - ``membership_dict``
            A dictionary with |Taxon| objects as keys and population
            membership identifier or flag as values (e.g., a string,
            an integer).
        - ``membership_lists``
            A container of containers of |Taxon| objects, with every
            |Taxon| object in ``taxon_namespace`` represented once and only
            once in the sub-containers.
        If none of these are specified, defaults to a partition consisting of
        a single subset with all the objects in ``taxon_namespace``.
        """
        TaxonNamespaceAssociated.__init__(self,
                taxon_namespace=taxon_namespace)
        # Maps subset identifier/label -> TaxonNamespace holding its members.
        self.subset_map = {}
        if taxon_namespace is not None:
            if len(kwargs) > 0:
                self.apply(**kwargs)
            else:
                # No partitioning scheme given: default to one subset
                # containing every taxon in the namespace.
                ss = TaxonNamespace(self.taxon_namespace)
                self.subset_map = { self.taxon_namespace.label : ss}
def subsets(self):
"""
Return subsets of partition.
"""
return set(self.subset_map.values())
def __len__(self):
"""
Number of subsets.
"""
return len(self.subset_map)
def __iter__(self):
"""
Iterate over subsets.
"""
for k, v in self.subset_map.items():
yield v
def __getitem__(self, label):
"""
Get subset with specified label.
"""
return self.subset_map[label]
def apply(self, **kwargs):
"""
Builds the subsets of the linked TaxonNamespace resulting from the
partitioning scheme specified by one of the following keyword arguments:
``membership_fn``
A function that takes a |Taxon| object as an argument and
returns a a population membership identifier or flag
(e.g., a string, an integer).
``membership_attr_name``
Name of an attribute of |Taxon| objects that serves as an
identifier for subset membership.
``membership_dict``
A dictionary with |Taxon| objects as keys and population
membership identifier or flag as values (e.g., a string,
an integer).
``membership_lists``
A container of containers of |Taxon| objects, with every
|Taxon| object in ``taxon_namespace`` represented once and only
once in the sub-containers.
"""
if "membership_fn" in kwargs:
self.apply_membership_fn(kwargs["membership_fn"])
elif "membership_attr_name" in kwargs:
self.apply_membership_attr_name(kwargs["membership_attr_name"])
elif "membership_dict" in kwargs:
self.apply_membership_dict(kwargs["membership_dict"])
elif "membership_lists" in kwargs:
self.apply_membership_lists(kwargs["membership_lists"])
else:
raise TypeError("Must specify partitioning scheme using one of: " \
+ "'membership_fn', 'membership_dict', or 'membership_lists'")
def apply_membership_fn(self, mfunc):
"""
Constructs subsets based on function ``mfunc``, which should take a
|Taxon| object as an argument and return a population membership
identifier or flag (e.g., a string, an integer).
"""
self.subset_map = {}
for t in self.taxon_namespace:
subset_id = mfunc(t)
if subset_id not in self.subset_map:
self.subset_map[subset_id] = TaxonNamespace(label=subset_id)
self.subset_map[subset_id].add_taxon(t)
return self.subsets()
def apply_membership_attr_name(self, attr_name):
"""
Constructs subsets based on attribute ``attr_name`` of each
|Taxon| object.
"""
return self.apply_membership_fn(lambda x: getattr(x, attr_name))
def apply_membership_dict(self, mdict):
"""
Constructs subsets based on dictionary ``mdict``, which should be
dictionary with |Taxon| objects as keys and population membership
identifier or flag as values (e.g., a string, an integer).
"""
return self.apply_membership_fn(lambda x: mdict[x])
def apply_membership_lists(self, mlists, subset_labels=None):
"""
Constructs subsets based on list ``mlists``, which should be an interable
of iterables of |Taxon| objects, with every |Taxon| object in
``taxon_namespace`` represented once and only once in the sub-containers.
"""
if subset_labels is not None:
if len(subset_labels) != len(mlists):
raise ValueError('Length of subset label list must equal to number of subsets')
else:
subset_labels = range(len(mlists))
self.subset_map = {}
for lidx, mlist in enumerate(mlists):
subset_id = subset_labels[lidx]
self.subset_map[subset_id] = TaxonNamespace(label=subset_id)
for i, t in enumerate(mlist):
self.subset_map[subset_id].add_taxon(t)
return self.subsets()
##############################################################################
## TaxonNamespaceMapping
class TaxonNamespaceMapping(
basemodel.DataObject,
basemodel.Annotable):
"""
A many-to-one mapping of |Taxon| objects (e.g., gene taxa to population/species taxa).
"""
@staticmethod
def create_contained_taxon_mapping(containing_taxon_namespace,
num_contained,
contained_taxon_label_prefix=None,
contained_taxon_label_separator=' ',
contained_taxon_label_fn=None):
"""
Creates and returns a TaxonNamespaceMapping object that maps multiple
"contained" Taxon objects (e.g., genes) to Taxon objects in
``containing_taxon_namespace`` (e.g., populations or species).
``containing_taxon_namespace``
A TaxonNamespace object that defines a Taxon for each population or
species.
``num_contained``
The number of genes per population of species. The value of
this attribute can be a scalar integer, in which case each
species or population taxon will get the same fixed number
of genes. Or it can be a list, in which case the list has
to have as many elements as there are members in
``containing_taxon_namespace``, and each element will specify the
number of genes that the corresponding species or population
Taxon will get.
``contained_taxon_label_prefix``
If specified, then each gene Taxon label will begin with this.
Otherwise, each gene Taxon label will begin with the same label
as its corresponding species/population taxon label.
``contained_taxon_label_separator``
String used to separate gene Taxon label prefix from its index.
``contained_taxon_label_fn``
If specified, should be a function that takes two arguments: a
Taxon object from ``containing_taxon_namespace`` and an integer
specifying the contained gene index. It should return a string
which will be used as the label for the corresponding gene
taxon. If not None, this will bypass the
``contained_taxon_label_prefix`` and
``contained_taxon_label_separator`` arguments.
"""
if isinstance(num_contained, int):
_num_contained = [num_contained] * len(containing_taxon_namespace)
else:
_num_contained = num_contained
contained_to_containing = {}
contained_taxa = TaxonNamespace()
for cidx, containing_taxon in enumerate(containing_taxon_namespace):
num_new = _num_contained[cidx]
for new_idx in range(num_new):
if contained_taxon_label_fn is not None:
label = contained_taxon_label_fn(containing_taxon,
new_idx)
else:
label = "%s%s%d" % (containing_taxon.label,
contained_taxon_label_separator,
new_idx+1)
contained_taxon = Taxon(label=label)
contained_to_containing[contained_taxon] = containing_taxon
contained_taxa.append(contained_taxon)
contained_to_containing_map = TaxonNamespaceMapping(domain_taxon_namespace=contained_taxa,
range_taxon_namespace=containing_taxon_namespace,
mapping_dict=contained_to_containing)
return contained_to_containing_map
def __init__(self, **kwargs):
"""
__init__ uses one of the following keyword arguments:
- ``mapping_fn``
A function that takes a |Taxon| object from the domain taxa
as an argument and returns the corresponding |Taxon| object
from the range taxa. If this argument is given, then a
|TaxonNamespace| or some other container of |Taxon| objects | |
return jsonify_request(HttpResponseBadRequest("Error"))
try:
position = int(position)
except (TypeError, ValueError):
position = 0
try:
site = Site.objects.get(id=int(site_id))
except (TypeError, ValueError, MultipleObjectsReturned,
ObjectDoesNotExist):
site = get_current_site(request)
if target is None:
# Special case: If «target» is not provided, it means to create the
# new page as a root node.
try:
tb_target = Page.get_draft_root_node(position=position, site=site)
tb_position = "left"
except IndexError:
# New page to become the last root node.
tb_target = Page.get_draft_root_node(site=site)
tb_position = "right"
else:
try:
tb_target = self.model.objects.get(pk=int(target), site=site)
assert tb_target.has_add_permission(request.user)
except (TypeError, ValueError, self.model.DoesNotExist,
AssertionError):
return jsonify_request(HttpResponseBadRequest("Error"))
if position == 0:
# This is really the only possible value for position.
tb_position = "first-child"
else:
# But, just in case...
try:
tb_target = tb_target.get_children().filter(
publisher_is_draft=True, site=site)[position]
tb_position = "left"
except IndexError:
tb_position = "last-child"
try:
new_page = page.copy_page(tb_target, site, tb_position,
copy_permissions=copy_permissions)
results = {"id": new_page.pk}
return HttpResponse(
json.dumps(results), content_type='application/json')
except ValidationError:
exc = sys.exc_info()[1]
return jsonify_request(HttpResponseBadRequest(exc.messages))
@require_POST
@transaction.atomic
def revert_to_live(self, request, page_id, language):
"""
Resets the draft version of the page to match the live one
"""
page = get_object_or_404(
self.model,
pk=page_id,
publisher_is_draft=True,
title_set__language=language,
)
# ensure user has permissions to publish this page
if not self.has_revert_to_live_permission(request, language, obj=page):
return HttpResponseForbidden(force_text(_("You do not have permission to revert this page.")))
translation = page.get_title_obj(language=language)
operation_token = self._send_pre_page_operation(
request,
operation=operations.REVERT_PAGE_TRANSLATION_TO_LIVE,
obj=page,
translation=translation,
)
page.revert_to_live(language)
# Fetch updated translation
translation.refresh_from_db()
self._send_post_page_operation(
request,
operation=operations.REVERT_PAGE_TRANSLATION_TO_LIVE,
token=operation_token,
obj=page,
translation=translation,
)
messages.info(request, _('"%s" was reverted to the live version.') % page)
path = page.get_absolute_url(language=language)
path = '%s?%s' % (path, get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
return HttpResponseRedirect(path)
    @require_POST
    @transaction.atomic
    def publish_page(self, request, page_id, language):
        """
        Publish the draft translation of a page and/or the static
        placeholders listed in the ``statics`` GET parameter, then redirect
        the user (or return 204 when triggered from the page tree).
        """
        all_published = True
        try:
            page = Page.objects.get(
                pk=page_id,
                publisher_is_draft=True,
                title_set__language=language,
            )
        except Page.DoesNotExist:
            page = None
        # Comma-separated StaticPlaceholder pks to publish alongside the page.
        statics = request.GET.get('statics', '')
        if not statics and not page:
            raise Http404("No page or static placeholder found for publishing.")
        # ensure user has permissions to publish this page
        if page and not self.has_publish_permission(request, obj=page):
            return HttpResponseForbidden(force_text(_("You do not have permission to publish this page")))
        if page:
            operation_token = self._send_pre_page_operation(
                request,
                operation=operations.PUBLISH_PAGE_TRANSLATION,
                obj=page,
                translation=page.get_title_obj(language=language),
            )
            all_published = page.publish(language)
            # Work with a freshly reloaded instance after publish().
            page = page.reload()
            self._send_post_page_operation(
                request,
                operation=operations.PUBLISH_PAGE_TRANSLATION,
                token=operation_token,
                obj=page,
                translation=page.get_title_obj(language=language),
                successful=all_published,
            )
        if statics:
            static_ids = statics.split(',')
            static_placeholders = StaticPlaceholder.objects.filter(pk__in=static_ids)
            for static_placeholder in static_placeholders.iterator():
                # TODO: Maybe only send one signal...
                # this would break the obj signal format though
                operation_token = self._send_pre_page_operation(
                    request,
                    operation=operations.PUBLISH_STATIC_PLACEHOLDER,
                    obj=static_placeholder,
                    target_language=language,
                )
                published = static_placeholder.publish(request, language)
                self._send_post_page_operation(
                    request,
                    operation=operations.PUBLISH_STATIC_PLACEHOLDER,
                    token=operation_token,
                    obj=static_placeholder,
                    target_language=language,
                )
                if not published:
                    all_published = False
        # Report the outcome to the user via the messages framework.
        if page:
            if all_published:
                if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
                    messages.warning(request, _("Page not published! A parent page is not published yet."))
                else:
                    messages.info(request, _('The content was successfully published.'))
                LogEntry.objects.log_action(
                    user_id=request.user.id,
                    content_type_id=ContentType.objects.get_for_model(Page).pk,
                    object_id=page_id,
                    object_repr=page.get_title(language),
                    action_flag=CHANGE,
                )
            else:
                if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
                    messages.warning(request, _("Page not published! A parent page is not published yet."))
                else:
                    messages.warning(request, _("There was a problem publishing your content"))
        if 'node' in request.GET or 'node' in request.POST:
            # if request comes from tree..
            # 204 -> request was successful but no response returned.
            return HttpResponse(status=204)
        if 'redirect' in request.GET:
            return HttpResponseRedirect(request.GET['redirect'])
        referrer = request.META.get('HTTP_REFERER', '')
        path = admin_reverse("cms_page_changelist")
        if request.GET.get('redirect_language'):
            path = "%s?language=%s&page_id=%s" % (path, request.GET.get('redirect_language'), request.GET.get('redirect_page_id'))
        # When the request did not come from the admin index, prefer sending
        # the user back to a frontend URL with the toolbar edit mode off.
        if admin_reverse('index') not in referrer:
            if all_published:
                if page:
                    if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
                        path = page.get_absolute_url(language, fallback=True)
                    else:
                        public_page = Page.objects.get(publisher_public=page.pk)
                        path = '%s?%s' % (public_page.get_absolute_url(language, fallback=True), get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
                else:
                    path = '%s?%s' % (referrer, get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
            else:
                path = '/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')
        return HttpResponseRedirect(path)
@require_POST
@transaction.atomic
def unpublish(self, request, page_id, language):
"""
Publish or unpublish a language of a page
"""
site = Site.objects.get_current()
page = get_object_or_404(self.model, pk=page_id)
if not self.has_publish_permission(request, obj=page):
return HttpResponseForbidden(force_text(_("You do not have permission to unpublish this page")))
if not page.publisher_public_id:
return HttpResponseForbidden(force_text(_("This page was never published")))
try:
page.unpublish(language)
message = _('The %(language)s page "%(page)s" was successfully unpublished') % {
'language': get_language_object(language, site)['name'], 'page': page}
messages.info(request, message)
LogEntry.objects.log_action(
user_id=request.user.id,
content_type_id=ContentType.objects.get_for_model(Page).pk,
object_id=page_id,
object_repr=page.get_title(),
action_flag=CHANGE,
change_message=message,
)
except RuntimeError:
exc = sys.exc_info()[1]
messages.error(request, exc.message)
except ValidationError:
exc = sys.exc_info()[1]
messages.error(request, exc.message)
path = admin_reverse("cms_page_changelist")
if request.GET.get('redirect_language'):
path = "%s?language=%s&page_id=%s" % (path, request.GET.get('redirect_language'), request.GET.get('redirect_page_id'))
return HttpResponseRedirect(path)
    def delete_translation(self, request, object_id, extra_context=None):
        """
        Confirmation view and handler for deleting a single language
        translation (Title) of a page together with its plugins.

        GET renders the delete-confirmation template; POST performs the
        deletion. The last remaining translation can never be deleted.
        """
        if 'language' in request.GET:
            language = request.GET['language']
        else:
            language = get_language_from_request(request)
        opts = Page._meta
        titleopts = Title._meta
        app_label = titleopts.app_label
        pluginopts = CMSPlugin._meta
        try:
            obj = self.get_queryset(request).get(pk=unquote(object_id))
        except self.model.DoesNotExist:
            # Don't raise Http404 just yet, because we haven't checked
            # permissions yet. We don't want an unauthenticated user to be able
            # to determine whether a given object exists.
            obj = None
        if not self.has_delete_translation_permission(request, language, obj):
            return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
        if obj is None:
            raise Http404(
                _('%(name)s object with primary key %(key)r does not exist.') % {
                    'name': force_text(opts.verbose_name),
                    'key': escape(object_id)
                })
        # Refuse to delete the only translation of the page.
        if not len(list(obj.get_languages())) > 1:
            raise Http404(_('There only exists one translation for this page'))
        titleobj = get_object_or_404(Title, page__id=object_id, language=language)
        saved_plugins = CMSPlugin.objects.filter(placeholder__page__id=object_id, language=language)
        using = router.db_for_read(self.model)
        kwargs = {
            'admin_site': self.admin_site,
            'user': request.user,
            'using': using
        }
        # Collect what would be deleted (and any missing permissions) for
        # both the title object and its plugins.
        deleted_objects, __, perms_needed = get_deleted_objects(
            [titleobj],
            titleopts,
            **kwargs
        )[:3]
        to_delete_plugins, __, perms_needed_plugins = get_deleted_objects(
            saved_plugins,
            pluginopts,
            **kwargs
        )[:3]
        deleted_objects.append(to_delete_plugins)
        perms_needed = set(list(perms_needed) + list(perms_needed_plugins))
        if request.method == 'POST':
            if perms_needed:
                raise PermissionDenied
            operation_token = self._send_pre_page_operation(
                request,
                operation=operations.DELETE_PAGE_TRANSLATION,
                obj=obj,
                translation=titleobj,
            )
            message = _('Title and plugins with language %(language)s was deleted') % {
                'language': force_text(get_language_object(language)['name'])
            }
            self.log_change(request, titleobj, message)
            messages.success(request, message)
            titleobj.delete()
            for p in saved_plugins:
                p.delete()
            public = obj.publisher_public
            if public:
                # Re-save the public counterpart so its state is refreshed
                # after the translation removal.
                public.save()
            self._send_post_page_operation(
                request,
                operation=operations.DELETE_PAGE_TRANSLATION,
                token=operation_token,
                obj=obj,
                translation=titleobj,
            )
            if not self.has_change_permission(request, None):
                return HttpResponseRedirect(admin_reverse('index'))
            return HttpResponseRedirect(admin_reverse('cms_page_changelist'))
        # GET: render the standard admin delete-confirmation template chain.
        context = {
            "title": _("Are you sure?"),
            "object_name": force_text(titleopts.verbose_name),
            "object": titleobj,
            "deleted_objects": deleted_objects,
            "perms_lacking": perms_needed,
            "opts": opts,
            "root_path": admin_reverse('index'),
            "app_label": app_label,
        }
        context.update(extra_context or {})
        request.current_app = self.admin_site.name
        return render(request, self.delete_confirmation_template or [
            "admin/%s/%s/delete_confirmation.html" % (app_label, titleopts.object_name.lower()),
            "admin/%s/delete_confirmation.html" % app_label,
            "admin/delete_confirmation.html"
        ], context)
def preview_page(self, request, object_id, language):
"""
Redirecting preview function based on draft_id
"""
page = get_object_or_404(self.model, id=object_id)
can_see_page = page_permissions.user_can_view_page(request.user, page)
if can_see_page and not self.has_change_permission(request, obj=page):
can_see_page = page.is_published(language)
if not can_see_page:
message = ugettext('You don\'t have permissions to see page "%(title)s"')
message = message % {'title': force_text(page)}
self.message_user(request, message, level=messages.ERROR)
return HttpResponseRedirect('/en/admin/cms/page/')
attrs = "?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
attrs += "&language=" + language
with force_language(language):
url = page.get_absolute_url(language) + attrs
site = get_current_site(request)
if not site == page.site:
url = "http%s://%s%s" % ('s' if request.is_secure() else '',
page.site.domain, url)
return HttpResponseRedirect(url)
@require_POST
def change_innavigation(self, request, page_id):
"""
Switch the in_navigation of a page
"""
page = get_object_or_404(self.model, pk=page_id)
if self.has_change_permission(request, obj=page):
page.toggle_in_navigation()
# 204 -> request was successful but no response returned.
return HttpResponse(status=204)
return HttpResponseForbidden(force_text(_("You do not have permission to change this page's in_navigation status")))
def get_tree(self, request):
"""
Get html for the descendants (only) of given page or if no page_id is
provided, all the root nodes.
Used for lazy loading pages in cms.pagetree.js
Permission checks is done in admin_utils.get_admin_menu_item_context
which is called by admin_utils.render_admin_menu_item.
"""
page_id = request.GET.get('pageId', None)
site_id = request.GET.get('site', None)
try:
site_id = int(site_id)
site = Site.objects.get(id=site_id)
except (TypeError, ValueError, MultipleObjectsReturned,
ObjectDoesNotExist):
site = get_current_site(request)
if page_id:
page = get_object_or_404(self.model, pk=int(page_id))
pages = page.get_children()
else:
pages = Page.get_root_nodes().filter(site=site, publisher_is_draft=True)
pages = (
pages
.select_related('parent', 'publisher_public', 'site')
.prefetch_related('children')
)
response = render_admin_rows(request, pages, site=site, filtered=False)
return HttpResponse(response)
def add_page_type(self, request):
site = Site.objects.get_current()
language = request.GET.get('language') or get_language()
target = request.GET.get('copy_target')
type_root, created = self.model.objects.get_or_create(reverse_id=PAGE_TYPES_ID, publisher_is_draft=True, site=site,
defaults={'in_navigation': False})
type_title, created = Title.objects.get_or_create(page=type_root, language=language, slug=PAGE_TYPES_ID,
defaults={'title': _('Page Types')})
url = add_url_parameters(admin_reverse('cms_page_add'), target=type_root.pk, position='first-child',
add_page_type=1, copy_target=target, language=language)
return HttpResponseRedirect(url)
def resolve(self, request):
if not request.user.is_staff:
return HttpResponse('/', content_type='text/plain')
obj = False
url = False
if request.session.get('cms_log_latest', False):
log = LogEntry.objects.get(pk=request.session['cms_log_latest'])
try:
obj = log.get_edited_object()
except (ObjectDoesNotExist, ValueError):
obj = None
del request.session['cms_log_latest']
if obj and obj.__class__ in toolbar_pool.get_watch_models() and hasattr(obj, 'get_absolute_url'):
# This is a test if the object url can be retrieved
# In case it can't, object it's not taken into account
try:
force_text(obj.get_absolute_url())
except:
obj = None
else:
obj = None
if not obj:
pk = request.GET.get('pk', False) or request.POST.get('pk', False)
full_model = request.GET.get('model') or request.POST.get('model', False)
if pk and full_model:
| |
filter_types=False, needs_verb=False):
deps = self.find_deps(index, dir="parents", filter_types=filter_types)
if needs_verb:
deps = [d for d in deps if self.gov_is_verb(d) or self.dep_is_verb(d)]
return [d["governor"] for d in deps]
def find_children(self, index, filter_types=False, exclude_types=False, needs_verb=False):
deps = self.find_deps(
index,
dir="children",
filter_types=filter_types,
exclude_types=exclude_types
)
if needs_verb:
deps = [d for d in deps if self.dep_is_verb(d)]
# print(deps)
return [d["dependent"] for d in deps]
def is_verb(self, index):
pos = self.token(index)["pos"]
if pos[0] == "V":
return True
else:
cop_relations = self.find_deps(index, dir="children", filter_types="cop")
has_cop_relation = len(cop_relations)>0
if has_cop_relation:
return True
else:
return False
def gov_is_verb(self, d):
index = d["governor"]
return self.is_verb(index)
def dep_is_verb(self, d):
index = d["dependent"]
return self.is_verb(index)
def find_deps(self, index, dir=None, filter_types=False, exclude_types=False):
deps = []
if dir=="parents" or dir==None:
deps += [d for d in self.dependencies if d['dependent']==index]
if dir=="children" or dir==None:
deps += [d for d in self.dependencies if d['governor']==index]
if filter_types:
deps = [d for d in deps if d["dep"] in filter_types]
if exclude_types:
deps = [d for d in deps if not d["dep"] in exclude_types]
return deps
def find_dep_types(self, index, dir=None, filter_types=False):
deps = self.find_deps(index, dir=dir, filter_types=filter_types)
return [d["dep"] for d in deps]
def __str__(self):
return " ".join([t["word"] for t in self.tokens])
def get_subordinate_indices(self, acc, explore, depth=0, exclude_indices=[], exclude_types=[]):
# print("acc: {}\nexplore: {}\ndepth: {}\nexclude_indices: {}".format(acc, explore, depth, exclude_indices))
exclude_indices.sort()
acc.sort()
explore.sort()
# print("exclude: " + " ".join([self.tokens[t_ind-1]["word"] for t_ind in exclude_indices]))
# print("acc: " + " ".join([self.tokens[t_ind-1]["word"] for t_ind in acc]))
# print("explore: " + " ".join([self.tokens[t_ind-1]["word"] for t_ind in explore]))
# print("*****")
children = [c for i in explore for c in self.find_children(i, exclude_types=exclude_types) if not c in exclude_indices]
if len(children)==0:
return acc
else:
return self.get_subordinate_indices(
acc=acc + children,
explore=children,
depth=depth+1,
exclude_indices=exclude_indices,
exclude_types=exclude_types
)
def get_phrase_from_head(self, head_index, exclude_indices=[], exclude_types=[]):
# given an index,
# grab every index that's a child of it in the dependency graph
subordinate_indices = self.get_subordinate_indices(
acc=[head_index],
explore=[head_index],
exclude_indices=exclude_indices,
exclude_types=exclude_types
)
if not subordinate_indices:
return None
subordinate_indices.sort()
# make string of subordinate phrase from parse
parse_subordinate_string = " ".join([self.word(i) for i in subordinate_indices])
# correct subordinate phrase from parsed version to wikitext version
# (tokenization systems are different)
orig_words = self.original_sentence.split()
parsed_words = [t["word"] for t in self.tokens]
subordinate_phrase = extract_subphrase(orig_words, parsed_words, subordinate_indices)
# make a string from this to return
return subordinate_phrase
def get_valid_marker_indices(self, marker):
pos = dependency_patterns[marker]["POS"]
return [i for i in self.indices(marker) if pos == self.token(i)["pos"] ]
def get_candidate_S2_indices(self, marker, marker_index, needs_verb=False):
connection_type = dependency_patterns[marker]["S2"]
# Look for S2
return self.find_parents(marker_index, filter_types=[connection_type], needs_verb=needs_verb)
def get_candidate_S1_indices(self, marker, s2_head_index, needs_verb=False):
valid_connection_types = dependency_patterns[marker]["S1"]
return self.find_parents(
s2_head_index,
filter_types=valid_connection_types,
needs_verb=needs_verb
) + self.find_children(
s2_head_index,
filter_types=valid_connection_types,
needs_verb=needs_verb
)
        def find_pair(self, marker, order, previous_sentence):
            """
            Try to extract an (S1, S2) pair of clauses connected by
            ``marker`` from this parsed sentence, using the marker's
            dependency patterns. Returns ``(S1, S2)`` or None.
            """
            assert(order in ["s2 discourse_marker s1", "any"])
            # fix me
            # (this won't quite work if there are multiple matching connections)
            # (which maybe never happens)
            S1 = None
            S2 = None
            s1_ind = 1000
            s2_ind = 0
            for marker_index in self.get_valid_marker_indices(marker):
                for s2_head_index in self.get_candidate_S2_indices(marker, marker_index, needs_verb=True):
                    s2_ind = s2_head_index
                    possible_S1s = []
                    for s1_head_index in self.get_candidate_S1_indices(marker, s2_head_index, needs_verb=True):
                        # store S1 if we have one
                        S1 = self.get_phrase_from_head(
                            s1_head_index,
                            exclude_indices=[s2_head_index]
                        )
                        # we'll lose some stuff here because of alignment between
                        # wikitext tokenization and corenlp tokenization.
                        # if we can't get a phrase, reject this pair
                        if not S1:
                            break
                        # if we are only checking for the "reverse" order, reject anything else
                        if order=="s2 discourse_marker s1":
                            if s1_ind < s2_ind:
                                break
                        possible_S1s.append((s1_head_index, S1))
                    # to do: fix this. it is wrong. we're just grabbing the first if there are multiple matches for the S1 pattern rather than choosing in a principled way
                    if len(possible_S1s) > 0:
                        s1_ind, S1 = possible_S1s[0]
                    # store S2 if we have one
                    S2 = self.get_phrase_from_head(
                        s2_head_index,
                        exclude_indices=[marker_index, s1_ind],
                        # exclude_types=dependency_patterns[marker]["S1"]
                    )
                    # we'll lose some stuff here because of alignment between
                    # wikitext tokenization and corenlp tokenization.
                    # if we can't get a phrase, reject this pair
                    # update: we fixed some of these with the @ correction
                    if not S2:
                        return None
            # if S2 is the whole sentence *and* we're missing S1, let S1 be the previous sentence
            # NOTE(review): ``sentence`` below is not ``self`` — it appears to
            # resolve to the enclosing scope's variable, which looks to be
            # rebound to this same Sentence instance before find_pair is
            # called; confirm ``self.tokens`` wasn't intended here.
            if S2 and not S1:
                words_in_sentence = len(sentence.tokens)
                words_in_s2 = len(S2.split())
                if words_in_sentence - 1 == words_in_s2:
                    S1 = previous_sentence
            if S1 and S2:
                return S1, S2
            else:
                return None
parse = get_parse(sentence)
# print(json.dumps(parse, indent=4))
sentence = Sentence(parse, sentence)
return(sentence.find_pair(marker, "any", previous_sentence))
def collect_raw_sentences(source_dir, dataset, caching):
    """
    Scan a tokenized corpus for sentences containing any discourse marker in
    DISCOURSE_MARKERS and write, per marker, one file with the matching
    sentences and one with each match's preceding sentence.

    Output goes to <source_dir>/markers_<TAG>/files/; a VERSION.txt with
    per-marker counts is written next to that directory.
    Only "wikitext-103" is supported as ``dataset``.
    """
    markers_dir = pjoin(source_dir, "markers_" + DISCOURSE_MARKER_SET_TAG)
    output_dir = pjoin(markers_dir, "files")
    if not os.path.exists(markers_dir):
        os.makedirs(markers_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if dataset == "wikitext-103":
        filenames = [
            "wiki.train.tokens",
            "wiki.valid.tokens",
            "wiki.test.tokens"
        ]
    else:
        raise Exception("not implemented")
    sentences = {marker: {"sentence": [], "previous": []} for marker in DISCOURSE_MARKERS}
    for filename in filenames:
        print("reading {}".format(filename))
        file_path = pjoin(source_dir, "orig", filename)
        with io.open(file_path, 'rU', encoding="utf-8") as f:
            # tokenize sentences (cached on disk, since tokenizing is slow)
            sentences_cache_file = file_path + ".CACHE_SENTS"
            if caching and os.path.isfile(sentences_cache_file):
                # BUG FIX: context-manage cache file handles instead of
                # leaking them via bare open(...) calls.
                with open(sentences_cache_file, "rb") as cache_file:
                    sent_list = pickle.load(cache_file)
            else:
                tokens = f.read().replace("\n", ". ")
                print("tokenizing")
                sent_list = nltk.sent_tokenize(tokens)
                if caching:
                    with open(sentences_cache_file, "wb") as cache_file:
                        pickle.dump(sent_list, cache_file)
            # check each sentence for discourse markers
            previous_sentence = ""
            for sentence in sent_list:
                words = rephrase(sentence).split() # replace "for example"
                # lower-case once per sentence instead of once per marker
                lowered = [w.lower() for w in words]
                for marker in DISCOURSE_MARKERS:
                    if marker == "for example":
                        proxy_marker = "for_example"
                    else:
                        proxy_marker = marker
                    if proxy_marker in lowered:
                        sentences[marker]["sentence"].append(sentence)
                        sentences[marker]["previous"].append(previous_sentence)
                previous_sentence = sentence
    print('writing files')
    statistics_lines = []
    for marker in sentences:
        sentence_path = pjoin(output_dir, "{}_s.txt".format(marker))
        previous_path = pjoin(output_dir, "{}_prev.txt".format(marker))
        n_sentences = len(sentences[marker]["sentence"])
        statistics_lines.append("{}\t{}".format(marker, n_sentences))
        with open(sentence_path, "w") as sentence_file:
            for s in sentences[marker]["sentence"]:
                sentence_file.write(s + "\n")
        with open(previous_path, "w") as previous_file:
            for s in sentences[marker]["previous"]:
                previous_file.write(s + "\n")
    statistics_report = "\n".join(statistics_lines)
    # BUG FIX: close VERSION.txt deterministically instead of leaking the
    # handle from an unassigned open(...).write(...) call.
    with open(pjoin(markers_dir, "VERSION.txt"), "w") as version_file:
        version_file.write(
            "commit: \n\ncommand: \n\nmarkers:\n" + statistics_report
        )
def split_raw(source_dir, train_size):
    """
    Shuffle each marker's collected sentences and divide them into train /
    valid / test splits, writing per-(split, marker) sentence and
    previous-sentence files plus a VERSION.txt statistics report.

    ``train_size`` is the training fraction (0 < train_size < 1); the
    remainder is divided evenly between valid and test.
    """
    assert(train_size < 1 and train_size > 0)
    markers_dir = pjoin(source_dir, "markers_" + DISCOURSE_MARKER_SET_TAG)
    input_dir = pjoin(markers_dir, "files")
    split_dir = pjoin(markers_dir, "split_train{}".format(train_size))
    output_dir = pjoin(split_dir, "files")
    if not os.path.exists(split_dir):
        os.makedirs(split_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    statistics_lines = []
    for marker in DISCOURSE_MARKERS:
        # BUG FIX: context-manage input files instead of leaking handles.
        with open(pjoin(input_dir, "{}_s.txt".format(marker)), "rU") as f:
            sentences = f.readlines()
        with open(pjoin(input_dir, "{}_prev.txt".format(marker)), "rU") as f:
            previous_sentences = f.readlines()
        assert(len(sentences)==len(previous_sentences))
        # BUG FIX: np.random.shuffle requires a mutable sequence; shuffling
        # a bare ``range`` raises TypeError on Python 3.
        indices = list(range(len(sentences)))
        np.random.shuffle(indices)
        test_proportion = (1-train_size)/2
        n_test = round(len(indices) * test_proportion)
        n_valid = n_test
        n_train = len(indices) - (n_test + n_valid)
        splits = {split: {"s": [], "prev": []} for split in ["train", "valid", "test"]}
        # Walk the shuffled order, assigning the first n_test to test, the
        # next n_valid to valid, and the rest to train.
        for i in range(len(indices)):
            sentence_index = indices[i]
            sentence = sentences[sentence_index]
            previous = previous_sentences[sentence_index]
            if i<n_test:
                split="test"
            elif i<(n_test + n_valid):
                split="valid"
            else:
                split="train"
            splits[split]["s"].append(sentence)
            splits[split]["prev"].append(previous)
        for split in splits:
            n_sentences = len(splits[split]["s"])
            statistics_lines.append("{}\t{}\t{}".format(split, marker, n_sentences))
            for sentence_type in ["s", "prev"]:
                write_path = pjoin(output_dir, "{}_{}_{}.txt".format(split, marker, sentence_type))
                with open(write_path, "w") as write_file:
                    for sentence in splits[split][sentence_type]:
                        write_file.write(sentence)
    statistics_report = "\n".join(statistics_lines)
    # BUG FIX: close VERSION.txt deterministically.
    with open(pjoin(split_dir, "VERSION.txt"), "w") as version_file:
        version_file.write(
            "commit: \n\ncommand: \n\nstatistics:\n" + statistics_report
        )
def ssplit(method, source_dir, train_size):
    """
    Apply one of the registered sentence-splitting strategies to every
    (split, marker) file pair produced by split_raw, then write shuffled
    label / s1 / s2 files for each of train, valid and test.
    """
    methods = {
        "string_ssplit_int_init": string_ssplit_int_init,
        "string_ssplit_clean_markers": string_ssplit_clean_markers,
        "depparse_ssplit_v1": depparse_ssplit_v1
    }
    # BUG FIX: validate the ``method`` parameter; the original referenced
    # the undefined name ``args`` here (assert(args.method in methods)).
    assert(method in methods)
    markers_dir = pjoin(source_dir, "markers_" + DISCOURSE_MARKER_SET_TAG)
    split_dir = pjoin(markers_dir, "split_train{}".format(train_size))
    input_dir = pjoin(split_dir, "files")
    ssplit_dir = pjoin(split_dir, "ssplit_" + method)
    output_dir = pjoin(ssplit_dir, "files")
    if not os.path.exists(ssplit_dir):
        os.makedirs(ssplit_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # BUG FIX: close VERSION.txt deterministically instead of leaking the handle.
    with open(pjoin(ssplit_dir, "VERSION.txt"), "w") as version_file:
        version_file.write("commit: \n\ncommand: \n\n")

    def get_data(split, marker, sentence_type):
        # Read the raw lines of one (split, marker, type) input file.
        filename = "{}_{}_{}.txt".format(split, marker, sentence_type)
        file_path = pjoin(input_dir, filename)
        with open(file_path, "rU") as data_file:
            return data_file.readlines()

    # (a dictionary {train: {...}, valid: {...}, test: {...}})
    splits = {}
    for split in ["train", "valid", "test"]:
        print("extracting {}".format(split))
        data = {"s1": [], "s2": [], "label": []}
        for marker in DISCOURSE_MARKERS:
            sentences = get_data(split, marker, "s")
            previous = get_data(split, marker, "prev")
            assert(len(sentences) == len(previous))
            for i in range(len(sentences)):
                sentence = sentences[i]
                previous_sentence = previous[i]
                s1, s2, label = methods[method](sentence, previous_sentence, marker)
                data["label"].append(marker)
                data["s1"].append(s1)
                data["s2"].append(s2)
        splits[split] = data
    for split in splits:
        print("randomizing {}".format(split))
        # randomize the order at this point
        labels = splits[split]["label"]
        s1 = splits[split]["s1"]
        s2 = splits[split]["s2"]
        assert(len(labels) == len(s1) and len(s1) == len(s2))
        # BUG FIX: np.random.shuffle needs a mutable sequence; a bare
        # ``range`` raises TypeError on Python 3.
        indices = list(range(len(labels)))
        np.random.shuffle(indices)
        print("writing {}".format(split))
        for element_type in ["label", "s1", "s2"]:
            filename = "{}_{}_{}.txt".format(method, split, element_type)
            file_path = pjoin(output_dir, filename)
            with open(file_path, "w") as write_file:
                for index in indices:
                    element = splits[split][element_type][index]
                    write_file.write(element + "\n")
def filtering(source_dir, args):
args.min_ratio = 1/args.max_ratio
marker_dir = pjoin(source_dir, "markers_" | |
import pygame
import math
import random
from pygame.locals import *
def main():
    """Entry point: initialize state, load assets, then run the game."""
    game_init()       # reset all mutable game state and open the window
    load_resources()  # images, sounds, background music
    game_loop()       # run until the player wins or loses
def game_init():
    """Initialize pygame, the window, and all mutable game state.

    Every piece of game state lives in module-level globals so the other
    draw/update functions can reach it.
    """
    global screen, width, height, keys, playerpos, accuracy, arrows
    global badtimer, badtimer1, badguys, healthvalue

    pygame.init()

    width, height = 640, 480            # window dimensions
    keys = [False, False, False, False]  # held-key flags, WASD order
    playerpos = [100, 100]               # player position on screen
    accuracy = [0, 0]                    # [arrows that hit, arrows fired]
    arrows = []                          # in-flight arrows: [angle, x, y]
    badtimer = 100                       # countdown until next bad-guy spawn
    badtimer1 = 0                        # spawn-rate ramp, grows over time
    badguys = [[640, 100]]               # active bad guys' [x, y] positions
    healthvalue = 194                    # castle health remaining

    screen = pygame.display.set_mode((width, height))
def load_resources():
    """Load every image and sound the game uses, and start the music."""
    global player, grass, castle, arrow, gameover
    global badguyimg, badguyimg1, healthbar, health, youwin
    global shoot, hit, enemy

    pygame.mixer.init()

    # --- images ---
    player = pygame.image.load("resources/images/dude.png")
    grass = pygame.image.load("resources/images/grass.png")
    castle = pygame.image.load("resources/images/castle.png")
    arrow = pygame.image.load("resources/images/bullet.png")
    healthbar = pygame.image.load("resources/images/healthbar.png")
    health = pygame.image.load("resources/images/health.png")
    badguyimg1 = pygame.image.load("resources/images/badguy.png")
    gameover = pygame.image.load("resources/images/gameover.png")
    youwin = pygame.image.load("resources/images/youwin.png")
    badguyimg = badguyimg1  # current bad-guy sprite starts as the base image

    # --- sound effects, all quiet relative to the music ---
    hit = pygame.mixer.Sound("resources/audio/explode.wav")
    enemy = pygame.mixer.Sound("resources/audio/enemy.wav")
    shoot = pygame.mixer.Sound("resources/audio/shoot.wav")
    for effect in (hit, enemy, shoot):
        effect.set_volume(0.05)

    # --- background music, looped forever ---
    pygame.mixer.music.load('resources/audio/moonlight.wav')
    pygame.mixer.music.play(-1, 0.0)
    pygame.mixer.music.set_volume(0.25)
def draw_grass():
    """Tile the grass texture across the whole screen.

    Fix: the tile counts used true division (``/``), which returns a
    float on Python 3 and makes ``range()`` raise TypeError.  Floor
    division (``//``) yields an int on both Python 2 and 3 with the
    same value, so behavior is unchanged where it already worked.
    """
    # referencing global variables
    global width, height, grass, screen
    # enough columns/rows to cover the screen, plus one for the edge
    for x in range(width // grass.get_width() + 1):
        for y in range(height // grass.get_height() + 1):
            # NOTE(review): blit spacing is a hard-coded 100 px while the
            # tile counts use the image size -- presumably the grass image
            # is 100x100; confirm, otherwise gaps/overlap appear.
            screen.blit(grass, (x * 100, y * 100))
def draw_castle():
    """Draw four castles down the left edge of the screen."""
    # referencing global variable(s)
    global castle, screen
    # castles are stacked vertically, 105 px apart, starting at y = 30
    for i in range(4):
        screen.blit(castle, (0, 30 + i * 105))
def draw_player():
    """Rotate the player sprite to face the mouse cursor and draw it.

    Side effect: stores the rotated sprite's top-left corner in the
    global ``playerpos1``, which the firing code uses as the arrow's
    launch point.
    """
    global player, playerpos, playerpos1
    mouse_x, mouse_y = pygame.mouse.get_pos()
    # angle (radians) from the player's sprite offset point to the cursor
    aim = math.atan2(mouse_y - (playerpos[1] + 32),
                     mouse_x - (playerpos[0] + 26))
    # pygame rotates counter-clockwise in degrees; 57.29 ~= 180/pi
    rotated = pygame.transform.rotate(player, 360 - aim * 57.29)
    # re-centre the (possibly larger) rotated surface on the player position
    playerpos1 = (
        playerpos[0] - rotated.get_rect().width / 2,
        playerpos[1] - rotated.get_rect().height / 2)
    screen.blit(rotated, playerpos1)
def draw_arrows():
    """Advance every in-flight arrow, cull off-screen ones, and draw them.

    Each arrow is ``[angle, x, y]``; it moves 10 px per frame along its
    angle.

    Fix: the original reset ``index = 0`` inside the loop, so
    ``arrows.pop(index)`` always removed element 0 regardless of which
    arrow went off-screen, and it mutated ``arrows`` while iterating it
    (which skips elements).  We now build the list of surviving arrows
    and assign it back in place.
    """
    # referencing global variables
    global arrow, arrows
    survivors = []
    for bullet in arrows:
        # velocity components: (cos(angle), sin(angle)) * speed
        velx = math.cos(bullet[0]) * 10
        vely = math.sin(bullet[0]) * 10
        bullet[1] += velx
        bullet[2] += vely
        # keep only arrows still within (a margin of) the screen
        if -64 <= bullet[1] <= 640 and -64 <= bullet[2] <= 480:
            survivors.append(bullet)
    arrows[:] = survivors  # in-place so other references stay valid
    # drawing arrows on screen, rotated to their flight angle
    for projectile in arrows:
        arrow1 = pygame.transform.rotate(arrow, 360 - projectile[0] * 57.29)
        screen.blit(arrow1, (projectile[1], projectile[2]))
def draw_bad_guys():
    """Spawn, move, collide, and draw the bad guys.

    Responsibilities:
      * spawn a new bad guy at the right edge when ``badtimer`` hits 0,
        and ramp the spawn rate via ``badtimer1``;
      * move each bad guy 5 px left per frame;
      * remove bad guys that leave the screen, reach the castle (which
        costs health), or are hit by an arrow (which scores a hit);
      * draw the survivors.

    Fix: the original popped from ``badguys`` and ``arrows`` while
    iterating over them, with index counters that went stale after each
    pop -- so the wrong elements were removed, elements were skipped,
    and already-removed bad guys kept being processed.  We now collect
    survivors into new lists and assign back in place.
    """
    # referencing global variables
    global badtimer, badtimer1, badguys, badguyimg
    global healthvalue, accuracy, arrows, hit, enemy

    # time to add a new bad guy?
    if badtimer == 0:
        # spawn at the right edge, random y within the playfield
        badguys.append([640, random.randint(50, 430)])
        # shorten the delay before the next spawn as badtimer1 ramps up
        badtimer = 100 - (badtimer1 * 2)
        if badtimer1 >= 35:
            badtimer1 = 35  # cap the ramp
        else:
            badtimer1 += 5

    remaining_badguys = []
    for badguy in badguys:
        badguy[0] -= 5  # movement speed: 5 px left per frame
        if badguy[0] < -64:
            continue  # drifted off the left edge: drop it
        badrect = pygame.Rect(badguyimg.get_rect())
        badrect.top = badguy[1]
        badrect.left = badguy[0]
        if badrect.left < 64:
            # reached the castle: damage it and drop the bad guy
            hit.play()
            healthvalue -= random.randint(5, 20)
            continue
        # check arrows for a collision with this bad guy
        shot_down = False
        for bullet_index, bullet in enumerate(arrows):
            bullrect = pygame.Rect(arrow.get_rect())
            bullrect.left = bullet[1]
            bullrect.top = bullet[2]
            if badrect.colliderect(bullrect):
                enemy.play()
                accuracy[0] += 1        # one more hit scored
                arrows.pop(bullet_index)  # the arrow is spent
                shot_down = True
                break
        if not shot_down:
            remaining_badguys.append(badguy)
    badguys[:] = remaining_badguys  # in-place so other references stay valid

    # drawing bad guys
    for badguy in badguys:
        screen.blit(badguyimg, badguy)
def draw_clock():
    """Draw the remaining time as M:SS in the top-right corner.

    The game lasts 90000 ms; the countdown is derived from
    ``pygame.time.get_ticks()``.

    Fix: the minute/second arithmetic used true division (``/``), which
    on Python 3 yields floats and renders e.g. ``1.483:27.0``.  Floor
    division (``//``) gives the same integers Python 2 produced.
    """
    # creating a font with size
    font = pygame.font.Font(None, 24)
    remaining_ms = 90000 - pygame.time.get_ticks()
    # rendering "minutes:seconds" with zero-padded seconds
    survivedtext = font.render(
        (str(remaining_ms // 60000) +
         ":" + str(remaining_ms // 1000 % 60).zfill(2)),
        True, (0, 0, 0))
    # positioning text in the top-right corner
    textRect = survivedtext.get_rect()
    textRect.topright = [635, 5]
    # drawing text onto the screen
    screen.blit(survivedtext, textRect)
def draw_health():
    """Draw the health-bar frame, filled to the current health value."""
    # referencing global variables
    global healthbar, health, healthvalue
    # the frame first, then one copy of the fill image per health point
    screen.blit(healthbar, (5, 5))
    for offset in range(healthvalue):
        screen.blit(health, (offset + 8, 8))
def check_for_end():
    """Decide whether the game is over and compute the final accuracy.

    Sets ``running``/``exitcode`` (exitcode 1 = time survived = win,
    0 = castle destroyed = loss) and ``accuracy_str`` = hit percentage.
    """
    global running, exitcode, accuracy, gameover, accuracy_str
    # win condition: the 90-second clock has run out
    if pygame.time.get_ticks() >= 90000:
        running = 0
        exitcode = 1
    # loss condition: the castle's health is gone
    if healthvalue <= 0:
        running = 0
        exitcode = 0
    # hit percentage, guarding against division by zero when no shot fired
    if accuracy[1] != 0:
        accuracy_str = accuracy[0] * 1.0 / accuracy[1] * 100
    else:
        accuracy_str = 0
def end_game():
    """Show the win/lose screen with the final accuracy, then wait for quit.

    ``exitcode`` selects the outcome: 0 = loss (red text over the
    game-over image), otherwise win (green text over the you-win image).

    Fix (refactor): the two branches duplicated every rendering step,
    differing only in text color and background image; the shared code
    is now written once.  Behavior is identical.
    """
    # referencing global variables
    global accuracy_str, gameover, youwin
    # only the color and background differ between the two outcomes
    if exitcode == 0:
        text_color = (255, 0, 0)   # red: player lost
        background = gameover
    else:
        text_color = (0, 255, 0)   # green: player won
        background = youwin
    pygame.font.init()
    font = pygame.font.Font(None, 24)
    text = font.render("Accuracy: " + str(accuracy_str) + "%", True, text_color)
    textRect = text.get_rect()
    # centre the accuracy line slightly below the middle of the screen
    textRect.centerx = screen.get_rect().centerx
    textRect.centery = screen.get_rect().centery + 24
    screen.blit(background, (0, 0))
    screen.blit(text, textRect)
    pygame.display.flip()
    # block here until the player closes the window
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit(0)
def game_events():
    """Poll the pygame event queue and apply player input.

    Handles window close, WASD key-state tracking, and mouse clicks
    (arrow firing), then moves the player according to the held keys.
    """
    # referencing global variables
    global keys, playerpos, accuracy, arrows, playerpos1, shoot
    # map each movement key to its slot in the `keys` state list
    key_slots = {K_w: 0, K_a: 1, K_s: 2, K_d: 3}
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            # window close button: shut everything down
            pygame.quit()
            exit(0)
        elif event.type == pygame.KEYDOWN:
            if event.key in key_slots:
                keys[key_slots[event.key]] = True
        elif event.type == pygame.KEYUP:
            if event.key in key_slots:
                keys[key_slots[event.key]] = False
        elif event.type == pygame.MOUSEBUTTONDOWN:
            # a click fires an arrow toward the cursor
            shoot.play()
            position = pygame.mouse.get_pos()
            accuracy[1] += 1  # one more shot attempted
            # arrow record is [angle, x, y]; the angle points from the
            # rotated player sprite (playerpos1) toward the cursor
            arrows.append(
                [math.atan2(
                    position[1] - (playerpos1[1] + 32),
                    position[0] - (playerpos1[0] + 26)),
                 playerpos1[0] + 32, playerpos1[1] + 32])
    # vertical movement: 'w' (up) takes precedence over 's' (down)
    if keys[0]:
        playerpos[1] -= 5
    elif keys[2]:
        playerpos[1] += 5
    # horizontal movement: 'a' (left) takes precedence over 'd' (right)
    if keys[1]:
        playerpos[0] -= 5
    elif keys[3]:
        playerpos[0] += 5
def game_loop():
"""Infinite game loop
"""
# referencing global variables
global screen, badtimer
# initializing global variables
global running, exitcode
running = 1 # use to determine if player wins or loses
exitcode = 0 # use to determine if game should be finished
# keeping looping through game
while running:
# clear screen before drawing it again
screen.fill(0)
draw_grass() # drawing grass
draw_castle() # drawing castle(s)
draw_player() # drawing player
draw_arrows() # drawing arrows
draw_bad_guys() # drawing bad guys
| |
_internals.ERROR_CONNECT_FAILED
# Re-export of the C-layer error codes so callers can use them without
# importing the generated _internals module directly.
# (SWIG-generated binding code -- do not edit by hand.)
ERROR_ILLEGAL_STATE = _internals.ERROR_ILLEGAL_STATE
ERROR_CODEC_FAILURE = _internals.ERROR_CODEC_FAILURE
ERROR_INDEX_OUT_OF_RANGE = _internals.ERROR_INDEX_OUT_OF_RANGE
ERROR_INVALID_CONVERSION = _internals.ERROR_INVALID_CONVERSION
ERROR_ITEM_NOT_FOUND = _internals.ERROR_ITEM_NOT_FOUND
ERROR_IO_ERROR = _internals.ERROR_IO_ERROR
ERROR_CORRELATION_NOT_FOUND = _internals.ERROR_CORRELATION_NOT_FOUND
ERROR_SERVICE_NOT_FOUND = _internals.ERROR_SERVICE_NOT_FOUND
ERROR_LOGON_LOOKUP_FAILED = _internals.ERROR_LOGON_LOOKUP_FAILED
ERROR_DS_LOOKUP_FAILED = _internals.ERROR_DS_LOOKUP_FAILED
ERROR_UNSUPPORTED_OPERATION = _internals.ERROR_UNSUPPORTED_OPERATION
ERROR_DS_PROPERTY_NOT_FOUND = _internals.ERROR_DS_PROPERTY_NOT_FOUND
ERROR_MSG_TOO_LARGE = _internals.ERROR_MSG_TOO_LARGE
# SWIG-generated pass-through: the `def` forwards to the C layer and is
# immediately shadowed by a direct rebind to the _internals callable, so
# the Python-level def exists only as an introspectable stub.
def blpapi_getLastErrorDescription(resultCode):
    return _internals.blpapi_getLastErrorDescription(resultCode)
blpapi_getLastErrorDescription = _internals.blpapi_getLastErrorDescription
# --- SessionOptions wrappers (SWIG-generated; do not edit by hand) ----------
# Pattern throughout: each `def` forwards to _internals and is immediately
# shadowed by a module-level rebind to the raw _internals function, so the
# Python defs serve only as introspectable stubs.  The misspelled parameter
# name `paramaters` in some stubs is preserved from the generator output.
def blpapi_SessionOptions_create():
    return _internals.blpapi_SessionOptions_create()
blpapi_SessionOptions_create = _internals.blpapi_SessionOptions_create
def blpapi_SessionOptions_destroy(parameters):
    return _internals.blpapi_SessionOptions_destroy(parameters)
blpapi_SessionOptions_destroy = _internals.blpapi_SessionOptions_destroy
def blpapi_SessionOptions_setServerHost(parameters, serverHost):
    return _internals.blpapi_SessionOptions_setServerHost(parameters, serverHost)
blpapi_SessionOptions_setServerHost = _internals.blpapi_SessionOptions_setServerHost
def blpapi_SessionOptions_setServerPort(parameters, serverPort):
    return _internals.blpapi_SessionOptions_setServerPort(parameters, serverPort)
blpapi_SessionOptions_setServerPort = _internals.blpapi_SessionOptions_setServerPort
def blpapi_SessionOptions_setServerAddress(parameters, serverHost, serverPort, index):
    return _internals.blpapi_SessionOptions_setServerAddress(parameters, serverHost, serverPort, index)
blpapi_SessionOptions_setServerAddress = _internals.blpapi_SessionOptions_setServerAddress
def blpapi_SessionOptions_removeServerAddress(parameters, index):
    return _internals.blpapi_SessionOptions_removeServerAddress(parameters, index)
blpapi_SessionOptions_removeServerAddress = _internals.blpapi_SessionOptions_removeServerAddress
def blpapi_SessionOptions_setConnectTimeout(parameters, timeoutInMilliseconds):
    return _internals.blpapi_SessionOptions_setConnectTimeout(parameters, timeoutInMilliseconds)
blpapi_SessionOptions_setConnectTimeout = _internals.blpapi_SessionOptions_setConnectTimeout
def blpapi_SessionOptions_setDefaultServices(parameters, defaultServices):
    return _internals.blpapi_SessionOptions_setDefaultServices(parameters, defaultServices)
blpapi_SessionOptions_setDefaultServices = _internals.blpapi_SessionOptions_setDefaultServices
def blpapi_SessionOptions_setDefaultSubscriptionService(parameters, serviceIdentifier):
    return _internals.blpapi_SessionOptions_setDefaultSubscriptionService(parameters, serviceIdentifier)
blpapi_SessionOptions_setDefaultSubscriptionService = _internals.blpapi_SessionOptions_setDefaultSubscriptionService
def blpapi_SessionOptions_setDefaultTopicPrefix(parameters, prefix):
    return _internals.blpapi_SessionOptions_setDefaultTopicPrefix(parameters, prefix)
blpapi_SessionOptions_setDefaultTopicPrefix = _internals.blpapi_SessionOptions_setDefaultTopicPrefix
def blpapi_SessionOptions_setAllowMultipleCorrelatorsPerMsg(parameters, allowMultipleCorrelatorsPerMsg):
    return _internals.blpapi_SessionOptions_setAllowMultipleCorrelatorsPerMsg(parameters, allowMultipleCorrelatorsPerMsg)
blpapi_SessionOptions_setAllowMultipleCorrelatorsPerMsg = _internals.blpapi_SessionOptions_setAllowMultipleCorrelatorsPerMsg
def blpapi_SessionOptions_setClientMode(parameters, clientMode):
    return _internals.blpapi_SessionOptions_setClientMode(parameters, clientMode)
blpapi_SessionOptions_setClientMode = _internals.blpapi_SessionOptions_setClientMode
def blpapi_SessionOptions_setMaxPendingRequests(parameters, maxPendingRequests):
    return _internals.blpapi_SessionOptions_setMaxPendingRequests(parameters, maxPendingRequests)
blpapi_SessionOptions_setMaxPendingRequests = _internals.blpapi_SessionOptions_setMaxPendingRequests
def blpapi_SessionOptions_setAutoRestartOnDisconnection(parameters, autoRestart):
    return _internals.blpapi_SessionOptions_setAutoRestartOnDisconnection(parameters, autoRestart)
blpapi_SessionOptions_setAutoRestartOnDisconnection = _internals.blpapi_SessionOptions_setAutoRestartOnDisconnection
def blpapi_SessionOptions_setSessionIdentityOptions(parameters, authOptions, cid):
    return _internals.blpapi_SessionOptions_setSessionIdentityOptions(parameters, authOptions, cid)
blpapi_SessionOptions_setSessionIdentityOptions = _internals.blpapi_SessionOptions_setSessionIdentityOptions
def blpapi_SessionOptions_setAuthenticationOptions(parameters, authOptions):
    return _internals.blpapi_SessionOptions_setAuthenticationOptions(parameters, authOptions)
blpapi_SessionOptions_setAuthenticationOptions = _internals.blpapi_SessionOptions_setAuthenticationOptions
def blpapi_SessionOptions_setNumStartAttempts(parameters, numStartAttempts):
    return _internals.blpapi_SessionOptions_setNumStartAttempts(parameters, numStartAttempts)
blpapi_SessionOptions_setNumStartAttempts = _internals.blpapi_SessionOptions_setNumStartAttempts
def blpapi_SessionOptions_setMaxEventQueueSize(parameters, maxEventQueueSize):
    return _internals.blpapi_SessionOptions_setMaxEventQueueSize(parameters, maxEventQueueSize)
blpapi_SessionOptions_setMaxEventQueueSize = _internals.blpapi_SessionOptions_setMaxEventQueueSize
def blpapi_SessionOptions_setSlowConsumerWarningHiWaterMark(parameters, hiWaterMark):
    return _internals.blpapi_SessionOptions_setSlowConsumerWarningHiWaterMark(parameters, hiWaterMark)
blpapi_SessionOptions_setSlowConsumerWarningHiWaterMark = _internals.blpapi_SessionOptions_setSlowConsumerWarningHiWaterMark
def blpapi_SessionOptions_setSlowConsumerWarningLoWaterMark(parameters, loWaterMark):
    return _internals.blpapi_SessionOptions_setSlowConsumerWarningLoWaterMark(parameters, loWaterMark)
blpapi_SessionOptions_setSlowConsumerWarningLoWaterMark = _internals.blpapi_SessionOptions_setSlowConsumerWarningLoWaterMark
def blpapi_SessionOptions_setDefaultKeepAliveInactivityTime(parameters, inactivityMsecs):
    return _internals.blpapi_SessionOptions_setDefaultKeepAliveInactivityTime(parameters, inactivityMsecs)
blpapi_SessionOptions_setDefaultKeepAliveInactivityTime = _internals.blpapi_SessionOptions_setDefaultKeepAliveInactivityTime
def blpapi_SessionOptions_setDefaultKeepAliveResponseTimeout(parameters, timeoutMsecs):
    return _internals.blpapi_SessionOptions_setDefaultKeepAliveResponseTimeout(parameters, timeoutMsecs)
blpapi_SessionOptions_setDefaultKeepAliveResponseTimeout = _internals.blpapi_SessionOptions_setDefaultKeepAliveResponseTimeout
def blpapi_SessionOptions_setKeepAliveEnabled(parameters, isEnabled):
    return _internals.blpapi_SessionOptions_setKeepAliveEnabled(parameters, isEnabled)
blpapi_SessionOptions_setKeepAliveEnabled = _internals.blpapi_SessionOptions_setKeepAliveEnabled
def blpapi_SessionOptions_setRecordSubscriptionDataReceiveTimes(parameters, shouldRecord):
    return _internals.blpapi_SessionOptions_setRecordSubscriptionDataReceiveTimes(parameters, shouldRecord)
blpapi_SessionOptions_setRecordSubscriptionDataReceiveTimes = _internals.blpapi_SessionOptions_setRecordSubscriptionDataReceiveTimes
def blpapi_SessionOptions_setServiceCheckTimeout(paramaters, timeoutMsecs):
    return _internals.blpapi_SessionOptions_setServiceCheckTimeout(paramaters, timeoutMsecs)
blpapi_SessionOptions_setServiceCheckTimeout = _internals.blpapi_SessionOptions_setServiceCheckTimeout
def blpapi_SessionOptions_setServiceDownloadTimeout(paramaters, timeoutMsecs):
    return _internals.blpapi_SessionOptions_setServiceDownloadTimeout(paramaters, timeoutMsecs)
blpapi_SessionOptions_setServiceDownloadTimeout = _internals.blpapi_SessionOptions_setServiceDownloadTimeout
def blpapi_SessionOptions_setTlsOptions(paramaters, tlsOptions):
    return _internals.blpapi_SessionOptions_setTlsOptions(paramaters, tlsOptions)
blpapi_SessionOptions_setTlsOptions = _internals.blpapi_SessionOptions_setTlsOptions
def blpapi_SessionOptions_setFlushPublishedEventsTimeout(paramaters, timeoutMsecs):
    return _internals.blpapi_SessionOptions_setFlushPublishedEventsTimeout(paramaters, timeoutMsecs)
blpapi_SessionOptions_setFlushPublishedEventsTimeout = _internals.blpapi_SessionOptions_setFlushPublishedEventsTimeout
def blpapi_SessionOptions_setBandwidthSaveModeDisabled(parameters, disableBandwidthSaveMode):
    return _internals.blpapi_SessionOptions_setBandwidthSaveModeDisabled(parameters, disableBandwidthSaveMode)
blpapi_SessionOptions_setBandwidthSaveModeDisabled = _internals.blpapi_SessionOptions_setBandwidthSaveModeDisabled
# --- SessionOptions getters -------------------------------------------------
def blpapi_SessionOptions_serverHost(parameters):
    return _internals.blpapi_SessionOptions_serverHost(parameters)
blpapi_SessionOptions_serverHost = _internals.blpapi_SessionOptions_serverHost
def blpapi_SessionOptions_serverPort(parameters):
    return _internals.blpapi_SessionOptions_serverPort(parameters)
blpapi_SessionOptions_serverPort = _internals.blpapi_SessionOptions_serverPort
def blpapi_SessionOptions_numServerAddresses(parameters):
    return _internals.blpapi_SessionOptions_numServerAddresses(parameters)
blpapi_SessionOptions_numServerAddresses = _internals.blpapi_SessionOptions_numServerAddresses
def blpapi_SessionOptions_getServerAddress(parameters, index):
    return _internals.blpapi_SessionOptions_getServerAddress(parameters, index)
blpapi_SessionOptions_getServerAddress = _internals.blpapi_SessionOptions_getServerAddress
def blpapi_SessionOptions_connectTimeout(parameters):
    return _internals.blpapi_SessionOptions_connectTimeout(parameters)
blpapi_SessionOptions_connectTimeout = _internals.blpapi_SessionOptions_connectTimeout
def blpapi_SessionOptions_defaultServices(parameters):
    return _internals.blpapi_SessionOptions_defaultServices(parameters)
blpapi_SessionOptions_defaultServices = _internals.blpapi_SessionOptions_defaultServices
def blpapi_SessionOptions_defaultSubscriptionService(parameters):
    return _internals.blpapi_SessionOptions_defaultSubscriptionService(parameters)
blpapi_SessionOptions_defaultSubscriptionService = _internals.blpapi_SessionOptions_defaultSubscriptionService
def blpapi_SessionOptions_defaultTopicPrefix(parameters):
    return _internals.blpapi_SessionOptions_defaultTopicPrefix(parameters)
blpapi_SessionOptions_defaultTopicPrefix = _internals.blpapi_SessionOptions_defaultTopicPrefix
def blpapi_SessionOptions_allowMultipleCorrelatorsPerMsg(parameters):
    return _internals.blpapi_SessionOptions_allowMultipleCorrelatorsPerMsg(parameters)
blpapi_SessionOptions_allowMultipleCorrelatorsPerMsg = _internals.blpapi_SessionOptions_allowMultipleCorrelatorsPerMsg
def blpapi_SessionOptions_clientMode(parameters):
    return _internals.blpapi_SessionOptions_clientMode(parameters)
blpapi_SessionOptions_clientMode = _internals.blpapi_SessionOptions_clientMode
def blpapi_SessionOptions_maxPendingRequests(parameters):
    return _internals.blpapi_SessionOptions_maxPendingRequests(parameters)
blpapi_SessionOptions_maxPendingRequests = _internals.blpapi_SessionOptions_maxPendingRequests
def blpapi_SessionOptions_autoRestartOnDisconnection(parameters):
    return _internals.blpapi_SessionOptions_autoRestartOnDisconnection(parameters)
blpapi_SessionOptions_autoRestartOnDisconnection = _internals.blpapi_SessionOptions_autoRestartOnDisconnection
def blpapi_SessionOptions_authenticationOptions(parameters):
    return _internals.blpapi_SessionOptions_authenticationOptions(parameters)
blpapi_SessionOptions_authenticationOptions = _internals.blpapi_SessionOptions_authenticationOptions
def blpapi_SessionOptions_numStartAttempts(parameters):
    return _internals.blpapi_SessionOptions_numStartAttempts(parameters)
blpapi_SessionOptions_numStartAttempts = _internals.blpapi_SessionOptions_numStartAttempts
def blpapi_SessionOptions_maxEventQueueSize(parameters):
    return _internals.blpapi_SessionOptions_maxEventQueueSize(parameters)
blpapi_SessionOptions_maxEventQueueSize = _internals.blpapi_SessionOptions_maxEventQueueSize
def blpapi_SessionOptions_slowConsumerWarningHiWaterMark(parameters):
    return _internals.blpapi_SessionOptions_slowConsumerWarningHiWaterMark(parameters)
blpapi_SessionOptions_slowConsumerWarningHiWaterMark = _internals.blpapi_SessionOptions_slowConsumerWarningHiWaterMark
def blpapi_SessionOptions_slowConsumerWarningLoWaterMark(parameters):
    return _internals.blpapi_SessionOptions_slowConsumerWarningLoWaterMark(parameters)
blpapi_SessionOptions_slowConsumerWarningLoWaterMark = _internals.blpapi_SessionOptions_slowConsumerWarningLoWaterMark
def blpapi_SessionOptions_defaultKeepAliveInactivityTime(parameters):
    return _internals.blpapi_SessionOptions_defaultKeepAliveInactivityTime(parameters)
blpapi_SessionOptions_defaultKeepAliveInactivityTime = _internals.blpapi_SessionOptions_defaultKeepAliveInactivityTime
def blpapi_SessionOptions_defaultKeepAliveResponseTimeout(parameters):
    return _internals.blpapi_SessionOptions_defaultKeepAliveResponseTimeout(parameters)
blpapi_SessionOptions_defaultKeepAliveResponseTimeout = _internals.blpapi_SessionOptions_defaultKeepAliveResponseTimeout
def blpapi_SessionOptions_keepAliveEnabled(parameters):
    return _internals.blpapi_SessionOptions_keepAliveEnabled(parameters)
blpapi_SessionOptions_keepAliveEnabled = _internals.blpapi_SessionOptions_keepAliveEnabled
def blpapi_SessionOptions_recordSubscriptionDataReceiveTimes(parameters):
    return _internals.blpapi_SessionOptions_recordSubscriptionDataReceiveTimes(parameters)
blpapi_SessionOptions_recordSubscriptionDataReceiveTimes = _internals.blpapi_SessionOptions_recordSubscriptionDataReceiveTimes
def blpapi_SessionOptions_serviceCheckTimeout(parameters):
    return _internals.blpapi_SessionOptions_serviceCheckTimeout(parameters)
blpapi_SessionOptions_serviceCheckTimeout = _internals.blpapi_SessionOptions_serviceCheckTimeout
def blpapi_SessionOptions_serviceDownloadTimeout(parameters):
    return _internals.blpapi_SessionOptions_serviceDownloadTimeout(parameters)
blpapi_SessionOptions_serviceDownloadTimeout = _internals.blpapi_SessionOptions_serviceDownloadTimeout
def blpapi_SessionOptions_flushPublishedEventsTimeout(parameters):
    return _internals.blpapi_SessionOptions_flushPublishedEventsTimeout(parameters)
blpapi_SessionOptions_flushPublishedEventsTimeout = _internals.blpapi_SessionOptions_flushPublishedEventsTimeout
def blpapi_SessionOptions_bandwidthSaveModeDisabled(parameters):
    return _internals.blpapi_SessionOptions_bandwidthSaveModeDisabled(parameters)
blpapi_SessionOptions_bandwidthSaveModeDisabled = _internals.blpapi_SessionOptions_bandwidthSaveModeDisabled
# --- TlsOptions wrappers (SWIG-generated; same def-then-rebind pattern) -----
def blpapi_TlsOptions_destroy(parameters):
    return _internals.blpapi_TlsOptions_destroy(parameters)
blpapi_TlsOptions_destroy = _internals.blpapi_TlsOptions_destroy
def blpapi_TlsOptions_createFromFiles(clientCredentialsFileName, clientCredentialsPassword, trustedCertificatesFileName):
    return _internals.blpapi_TlsOptions_createFromFiles(clientCredentialsFileName, clientCredentialsPassword, trustedCertificatesFileName)
blpapi_TlsOptions_createFromFiles = _internals.blpapi_TlsOptions_createFromFiles
def blpapi_TlsOptions_createFromBlobs(clientCredentialsRawData, clientCredentialsPassword, trustedCertificatesRawData):
    return _internals.blpapi_TlsOptions_createFromBlobs(clientCredentialsRawData, clientCredentialsPassword, trustedCertificatesRawData)
blpapi_TlsOptions_createFromBlobs = _internals.blpapi_TlsOptions_createFromBlobs
def blpapi_TlsOptions_setTlsHandshakeTimeoutMs(paramaters, tlsHandshakeTimeoutMs):
    return _internals.blpapi_TlsOptions_setTlsHandshakeTimeoutMs(paramaters, tlsHandshakeTimeoutMs)
blpapi_TlsOptions_setTlsHandshakeTimeoutMs = _internals.blpapi_TlsOptions_setTlsHandshakeTimeoutMs
def blpapi_TlsOptions_setCrlFetchTimeoutMs(paramaters, crlFetchTimeoutMs):
    return _internals.blpapi_TlsOptions_setCrlFetchTimeoutMs(paramaters, crlFetchTimeoutMs)
blpapi_TlsOptions_setCrlFetchTimeoutMs = _internals.blpapi_TlsOptions_setCrlFetchTimeoutMs
# --- Name wrappers (SWIG-generated; same def-then-rebind pattern) -----------
def blpapi_Name_create(nameString):
    return _internals.blpapi_Name_create(nameString)
blpapi_Name_create = _internals.blpapi_Name_create
def blpapi_Name_destroy(name):
    return _internals.blpapi_Name_destroy(name)
blpapi_Name_destroy = _internals.blpapi_Name_destroy
def blpapi_Name_equalsStr(name, string):
    return _internals.blpapi_Name_equalsStr(name, string)
blpapi_Name_equalsStr = _internals.blpapi_Name_equalsStr
def blpapi_Name_string(name):
    return _internals.blpapi_Name_string(name)
blpapi_Name_string = _internals.blpapi_Name_string
def blpapi_Name_length(name):
    return _internals.blpapi_Name_length(name)
blpapi_Name_length = _internals.blpapi_Name_length
def blpapi_Name_findName(nameString):
    return _internals.blpapi_Name_findName(nameString)
blpapi_Name_findName = _internals.blpapi_Name_findName
# --- SubscriptionList wrappers (SWIG-generated; def-then-rebind pattern) ----
# NOTE: the parameter named `list` shadows the builtin; preserved as
# generated since the def is shadowed by the rebind anyway.
def blpapi_SubscriptionList_create():
    return _internals.blpapi_SubscriptionList_create()
blpapi_SubscriptionList_create = _internals.blpapi_SubscriptionList_create
def blpapi_SubscriptionList_destroy(list):
    return _internals.blpapi_SubscriptionList_destroy(list)
blpapi_SubscriptionList_destroy = _internals.blpapi_SubscriptionList_destroy
def blpapi_SubscriptionList_addResolved(list, subscriptionString, correlationId):
    return _internals.blpapi_SubscriptionList_addResolved(list, subscriptionString, correlationId)
blpapi_SubscriptionList_addResolved = _internals.blpapi_SubscriptionList_addResolved
def blpapi_SubscriptionList_clear(list):
    return _internals.blpapi_SubscriptionList_clear(list)
blpapi_SubscriptionList_clear = _internals.blpapi_SubscriptionList_clear
def blpapi_SubscriptionList_append(dest, src):
    return _internals.blpapi_SubscriptionList_append(dest, src)
blpapi_SubscriptionList_append = _internals.blpapi_SubscriptionList_append
def blpapi_SubscriptionList_size(list):
    return _internals.blpapi_SubscriptionList_size(list)
blpapi_SubscriptionList_size = _internals.blpapi_SubscriptionList_size
def blpapi_SubscriptionList_correlationIdAt(list, index):
    return _internals.blpapi_SubscriptionList_correlationIdAt(list, index)
blpapi_SubscriptionList_correlationIdAt = _internals.blpapi_SubscriptionList_correlationIdAt
def blpapi_SubscriptionList_topicStringAt(list, index):
    return _internals.blpapi_SubscriptionList_topicStringAt(list, index)
blpapi_SubscriptionList_topicStringAt = _internals.blpapi_SubscriptionList_topicStringAt
def blpapi_SubscriptionList_isResolvedAt(list, index):
    return _internals.blpapi_SubscriptionList_isResolvedAt(list, index)
blpapi_SubscriptionList_isResolvedAt = _internals.blpapi_SubscriptionList_isResolvedAt
class blpapi_TimePoint(_object):
    """SWIG proxy for the C struct ``blpapi_TimePoint`` (single field
    ``d_value``).  Generated code -- do not edit by hand."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, blpapi_TimePoint, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, blpapi_TimePoint, name)
    __repr__ = _swig_repr
    __swig_setmethods__["d_value"] = _internals.blpapi_TimePoint_d_value_set
    __swig_getmethods__["d_value"] = _internals.blpapi_TimePoint_d_value_get
    if _newclass:
        d_value = _swig_property(_internals.blpapi_TimePoint_d_value_get, _internals.blpapi_TimePoint_d_value_set)
    def __init__(self):
        # allocate the underlying C struct and attach it to this proxy
        this = _internals.new_blpapi_TimePoint()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    __swig_destroy__ = _internals.delete_blpapi_TimePoint
    __del__ = lambda self: None
# register the proxy class with the SWIG runtime
blpapi_TimePoint_swigregister = _internals.blpapi_TimePoint_swigregister
blpapi_TimePoint_swigregister(blpapi_TimePoint)
# SWIG-generated pass-through (def immediately shadowed by the rebind below).
def blpapi_TimePointUtil_nanosecondsBetween(start, end):
    return _internals.blpapi_TimePointUtil_nanosecondsBetween(start, end)
blpapi_TimePointUtil_nanosecondsBetween = _internals.blpapi_TimePointUtil_nanosecondsBetween
class blpapi_Datetime_tag(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, blpapi_Datetime_tag, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, blpapi_Datetime_tag, name)
__repr__ = _swig_repr
__swig_setmethods__["parts"] = _internals.blpapi_Datetime_tag_parts_set
__swig_getmethods__["parts"] = _internals.blpapi_Datetime_tag_parts_get
if _newclass:
parts = _swig_property(_internals.blpapi_Datetime_tag_parts_get, _internals.blpapi_Datetime_tag_parts_set)
__swig_setmethods__["hours"] = _internals.blpapi_Datetime_tag_hours_set
__swig_getmethods__["hours"] = _internals.blpapi_Datetime_tag_hours_get
if _newclass:
hours = _swig_property(_internals.blpapi_Datetime_tag_hours_get, _internals.blpapi_Datetime_tag_hours_set)
__swig_setmethods__["minutes"] = _internals.blpapi_Datetime_tag_minutes_set
__swig_getmethods__["minutes"] = _internals.blpapi_Datetime_tag_minutes_get
if _newclass:
minutes = _swig_property(_internals.blpapi_Datetime_tag_minutes_get, _internals.blpapi_Datetime_tag_minutes_set)
__swig_setmethods__["seconds"] = _internals.blpapi_Datetime_tag_seconds_set
__swig_getmethods__["seconds"] = _internals.blpapi_Datetime_tag_seconds_get
if _newclass:
seconds = _swig_property(_internals.blpapi_Datetime_tag_seconds_get, _internals.blpapi_Datetime_tag_seconds_set)
__swig_setmethods__["milliSeconds"] = _internals.blpapi_Datetime_tag_milliSeconds_set
__swig_getmethods__["milliSeconds"] = _internals.blpapi_Datetime_tag_milliSeconds_get
if _newclass:
milliSeconds = _swig_property(_internals.blpapi_Datetime_tag_milliSeconds_get, _internals.blpapi_Datetime_tag_milliSeconds_set)
__swig_setmethods__["month"] = _internals.blpapi_Datetime_tag_month_set
__swig_getmethods__["month"] = _internals.blpapi_Datetime_tag_month_get
if _newclass:
month = _swig_property(_internals.blpapi_Datetime_tag_month_get, _internals.blpapi_Datetime_tag_month_set)
__swig_setmethods__["day"] = _internals.blpapi_Datetime_tag_day_set
__swig_getmethods__["day"] = _internals.blpapi_Datetime_tag_day_get
if _newclass:
day = _swig_property(_internals.blpapi_Datetime_tag_day_get, _internals.blpapi_Datetime_tag_day_set)
__swig_setmethods__["year"] = _internals.blpapi_Datetime_tag_year_set
__swig_getmethods__["year"] = _internals.blpapi_Datetime_tag_year_get
if _newclass:
year = _swig_property(_internals.blpapi_Datetime_tag_year_get, _internals.blpapi_Datetime_tag_year_set)
__swig_setmethods__["offset"] = _internals.blpapi_Datetime_tag_offset_set
__swig_getmethods__["offset"] = _internals.blpapi_Datetime_tag_offset_get
if _newclass:
offset = _swig_property(_internals.blpapi_Datetime_tag_offset_get, _internals.blpapi_Datetime_tag_offset_set)
    def __init__(self):
        """Allocate a new C ``blpapi_Datetime_tag`` struct and bind it to this proxy."""
        this = _internals.new_blpapi_Datetime_tag()
        try:
            # SWIG proxies may keep a list of owned C pointers in self.this.
            self.this.append(this)
        except __builtin__.Exception:
            # First assignment: self.this not set yet (or not appendable).
            self.this = this
__swig_destroy__ = _internals.delete_blpapi_Datetime_tag
__del__ = lambda self: None
# Register the proxy class with the SWIG runtime so C-level structs are
# returned to Python wrapped as blpapi_Datetime_tag instances.
blpapi_Datetime_tag_swigregister = _internals.blpapi_Datetime_tag_swigregister
blpapi_Datetime_tag_swigregister(blpapi_Datetime_tag)
class blpapi_HighPrecisionDatetime_tag(_object):
    """SWIG proxy for the C struct ``blpapi_HighPrecisionDatetime_tag``.

    Exposes two struct fields through generated getter/setter tables:
    ``datetime`` (the embedded blpapi_Datetime_tag) and ``picoseconds``
    (sub-millisecond precision).
    """
    # Attribute access is routed through SWIG's set/get method tables.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, blpapi_HighPrecisionDatetime_tag, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, blpapi_HighPrecisionDatetime_tag, name)
    __repr__ = _swig_repr
    # Field: datetime (embedded blpapi_Datetime_tag struct).
    __swig_setmethods__["datetime"] = _internals.blpapi_HighPrecisionDatetime_tag_datetime_set
    __swig_getmethods__["datetime"] = _internals.blpapi_HighPrecisionDatetime_tag_datetime_get
    if _newclass:
        datetime = _swig_property(_internals.blpapi_HighPrecisionDatetime_tag_datetime_get, _internals.blpapi_HighPrecisionDatetime_tag_datetime_set)
    # Field: picoseconds.
    __swig_setmethods__["picoseconds"] = _internals.blpapi_HighPrecisionDatetime_tag_picoseconds_set
    __swig_getmethods__["picoseconds"] = _internals.blpapi_HighPrecisionDatetime_tag_picoseconds_get
    if _newclass:
        picoseconds = _swig_property(_internals.blpapi_HighPrecisionDatetime_tag_picoseconds_get, _internals.blpapi_HighPrecisionDatetime_tag_picoseconds_set)
    def __init__(self):
        """Allocate a new C struct instance and bind it to this proxy."""
        this = _internals.new_blpapi_HighPrecisionDatetime_tag()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # C destructor kept for the SWIG runtime; instance __del__ is a no-op.
    __swig_destroy__ = _internals.delete_blpapi_HighPrecisionDatetime_tag
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime so C-level structs are
# returned to Python wrapped as blpapi_HighPrecisionDatetime_tag instances.
blpapi_HighPrecisionDatetime_tag_swigregister = _internals.blpapi_HighPrecisionDatetime_tag_swigregister
blpapi_HighPrecisionDatetime_tag_swigregister(blpapi_HighPrecisionDatetime_tag)
# SWIG-generated flat wrappers for the C blpapi_HighPrecisionDatetime_* API.
# NOTE: each def is immediately shadowed by rebinding its name straight to the
# _internals C entry point, so callers invoke native code directly.
def blpapi_HighPrecisionDatetime_compare(lhs, rhs):
    return _internals.blpapi_HighPrecisionDatetime_compare(lhs, rhs)
blpapi_HighPrecisionDatetime_compare = _internals.blpapi_HighPrecisionDatetime_compare
def blpapi_HighPrecisionDatetime_print(datetime, streamWriter, stream, level, spacesPerLevel):
    return _internals.blpapi_HighPrecisionDatetime_print(datetime, streamWriter, stream, level, spacesPerLevel)
blpapi_HighPrecisionDatetime_print = _internals.blpapi_HighPrecisionDatetime_print
def blpapi_HighPrecisionDatetime_fromTimePoint(datetime, offset):
    return _internals.blpapi_HighPrecisionDatetime_fromTimePoint(datetime, offset)
blpapi_HighPrecisionDatetime_fromTimePoint = _internals.blpapi_HighPrecisionDatetime_fromTimePoint
# SWIG-generated flat wrappers for the C blpapi_Constant_* accessors
# (name/description/status/datatype plus typed value getters).
# NOTE: each def is immediately shadowed by rebinding its name straight to the
# _internals C entry point, so callers invoke native code directly.
def blpapi_Constant_name(constant):
    return _internals.blpapi_Constant_name(constant)
blpapi_Constant_name = _internals.blpapi_Constant_name
def blpapi_Constant_description(constant):
    return _internals.blpapi_Constant_description(constant)
blpapi_Constant_description = _internals.blpapi_Constant_description
def blpapi_Constant_status(constant):
    return _internals.blpapi_Constant_status(constant)
blpapi_Constant_status = _internals.blpapi_Constant_status
def blpapi_Constant_datatype(constant):
    return _internals.blpapi_Constant_datatype(constant)
blpapi_Constant_datatype = _internals.blpapi_Constant_datatype
def blpapi_Constant_getValueAsChar(constant):
    return _internals.blpapi_Constant_getValueAsChar(constant)
blpapi_Constant_getValueAsChar = _internals.blpapi_Constant_getValueAsChar
def blpapi_Constant_getValueAsInt32(constant):
    return _internals.blpapi_Constant_getValueAsInt32(constant)
blpapi_Constant_getValueAsInt32 = _internals.blpapi_Constant_getValueAsInt32
def blpapi_Constant_getValueAsInt64(constant):
    return _internals.blpapi_Constant_getValueAsInt64(constant)
blpapi_Constant_getValueAsInt64 = _internals.blpapi_Constant_getValueAsInt64
def blpapi_Constant_getValueAsFloat32(constant):
    return _internals.blpapi_Constant_getValueAsFloat32(constant)
blpapi_Constant_getValueAsFloat32 = _internals.blpapi_Constant_getValueAsFloat32
def blpapi_Constant_getValueAsFloat64(constant):
    return _internals.blpapi_Constant_getValueAsFloat64(constant)
blpapi_Constant_getValueAsFloat64 = _internals.blpapi_Constant_getValueAsFloat64
def blpapi_Constant_getValueAsDatetime(constant):
    return _internals.blpapi_Constant_getValueAsDatetime(constant)
blpapi_Constant_getValueAsDatetime = _internals.blpapi_Constant_getValueAsDatetime
def blpapi_Constant_getValueAsString(constant):
    return _internals.blpapi_Constant_getValueAsString(constant)
blpapi_Constant_getValueAsString = _internals.blpapi_Constant_getValueAsString
# SWIG-generated flat wrappers for the C blpapi_ConstantList_* accessors.
# NOTE: parameter names such as `list` shadow builtins; this is generated
# code and is kept verbatim. Each def is immediately shadowed by rebinding
# its name straight to the _internals C entry point.
def blpapi_ConstantList_name(list):
    return _internals.blpapi_ConstantList_name(list)
blpapi_ConstantList_name = _internals.blpapi_ConstantList_name
def blpapi_ConstantList_description(list):
    return _internals.blpapi_ConstantList_description(list)
blpapi_ConstantList_description = _internals.blpapi_ConstantList_description
def blpapi_ConstantList_numConstants(list):
    return _internals.blpapi_ConstantList_numConstants(list)
blpapi_ConstantList_numConstants = _internals.blpapi_ConstantList_numConstants
def blpapi_ConstantList_datatype(constant):
    return _internals.blpapi_ConstantList_datatype(constant)
blpapi_ConstantList_datatype = _internals.blpapi_ConstantList_datatype
def blpapi_ConstantList_status(list):
    return _internals.blpapi_ConstantList_status(list)
blpapi_ConstantList_status = _internals.blpapi_ConstantList_status
def blpapi_ConstantList_getConstant(constant, nameString, name):
    return _internals.blpapi_ConstantList_getConstant(constant, nameString, name)
blpapi_ConstantList_getConstant = _internals.blpapi_ConstantList_getConstant
def blpapi_ConstantList_getConstantAt(constant, index):
    return _internals.blpapi_ConstantList_getConstantAt(constant, index)
blpapi_ConstantList_getConstantAt = _internals.blpapi_ConstantList_getConstantAt
# SWIG-generated flat wrappers for the C blpapi_SchemaElementDefinition_*
# accessors. Each def is immediately shadowed by rebinding its name straight
# to the _internals C entry point, so callers invoke native code directly.
def blpapi_SchemaElementDefinition_name(field):
    return _internals.blpapi_SchemaElementDefinition_name(field)
blpapi_SchemaElementDefinition_name = _internals.blpapi_SchemaElementDefinition_name
def blpapi_SchemaElementDefinition_description(field):
    return _internals.blpapi_SchemaElementDefinition_description(field)
blpapi_SchemaElementDefinition_description = _internals.blpapi_SchemaElementDefinition_description
def blpapi_SchemaElementDefinition_status(field):
    return _internals.blpapi_SchemaElementDefinition_status(field)
blpapi_SchemaElementDefinition_status = _internals.blpapi_SchemaElementDefinition_status
def blpapi_SchemaElementDefinition_type(field):
    return _internals.blpapi_SchemaElementDefinition_type(field)
blpapi_SchemaElementDefinition_type = _internals.blpapi_SchemaElementDefinition_type
def blpapi_SchemaElementDefinition_numAlternateNames(field):
    return _internals.blpapi_SchemaElementDefinition_numAlternateNames(field)
blpapi_SchemaElementDefinition_numAlternateNames = _internals.blpapi_SchemaElementDefinition_numAlternateNames
def blpapi_SchemaElementDefinition_getAlternateName(field, index):
    return _internals.blpapi_SchemaElementDefinition_getAlternateName(field, index)
blpapi_SchemaElementDefinition_getAlternateName = _internals.blpapi_SchemaElementDefinition_getAlternateName
def blpapi_SchemaElementDefinition_minValues(field):
    return _internals.blpapi_SchemaElementDefinition_minValues(field)
blpapi_SchemaElementDefinition_minValues = _internals.blpapi_SchemaElementDefinition_minValues
def blpapi_SchemaElementDefinition_maxValues(field):
    return _internals.blpapi_SchemaElementDefinition_maxValues(field)
blpapi_SchemaElementDefinition_maxValues = _internals.blpapi_SchemaElementDefinition_maxValues
# SWIG-generated flat wrappers for the C blpapi_SchemaTypeDefinition_*
# accessors. NOTE: the parameter name `type` shadows the builtin; this is
# generated code and is kept verbatim. Each def is immediately shadowed by
# rebinding its name straight to the _internals C entry point.
def blpapi_SchemaTypeDefinition_name(type):
    return _internals.blpapi_SchemaTypeDefinition_name(type)
blpapi_SchemaTypeDefinition_name = _internals.blpapi_SchemaTypeDefinition_name
def blpapi_SchemaTypeDefinition_description(type):
    return _internals.blpapi_SchemaTypeDefinition_description(type)
blpapi_SchemaTypeDefinition_description = _internals.blpapi_SchemaTypeDefinition_description
def blpapi_SchemaTypeDefinition_status(type):
    return _internals.blpapi_SchemaTypeDefinition_status(type)
blpapi_SchemaTypeDefinition_status = _internals.blpapi_SchemaTypeDefinition_status
def blpapi_SchemaTypeDefinition_datatype(type):
    return _internals.blpapi_SchemaTypeDefinition_datatype(type)
blpapi_SchemaTypeDefinition_datatype = _internals.blpapi_SchemaTypeDefinition_datatype
def blpapi_SchemaTypeDefinition_isComplexType(type):
    return _internals.blpapi_SchemaTypeDefinition_isComplexType(type)
blpapi_SchemaTypeDefinition_isComplexType = _internals.blpapi_SchemaTypeDefinition_isComplexType
def blpapi_SchemaTypeDefinition_isSimpleType(type):
    return _internals.blpapi_SchemaTypeDefinition_isSimpleType(type)
blpapi_SchemaTypeDefinition_isSimpleType = _internals.blpapi_SchemaTypeDefinition_isSimpleType
def blpapi_SchemaTypeDefinition_isEnumerationType(type):
    return _internals.blpapi_SchemaTypeDefinition_isEnumerationType(type)
blpapi_SchemaTypeDefinition_isEnumerationType = _internals.blpapi_SchemaTypeDefinition_isEnumerationType
def blpapi_SchemaTypeDefinition_numElementDefinitions(type):
    return _internals.blpapi_SchemaTypeDefinition_numElementDefinitions(type)
blpapi_SchemaTypeDefinition_numElementDefinitions = _internals.blpapi_SchemaTypeDefinition_numElementDefinitions
def blpapi_SchemaTypeDefinition_getElementDefinition(type, nameString, name):
    return _internals.blpapi_SchemaTypeDefinition_getElementDefinition(type, nameString, name)
blpapi_SchemaTypeDefinition_getElementDefinition = _internals.blpapi_SchemaTypeDefinition_getElementDefinition
def blpapi_SchemaTypeDefinition_getElementDefinitionAt(type, index):
    return _internals.blpapi_SchemaTypeDefinition_getElementDefinitionAt(type, index)
blpapi_SchemaTypeDefinition_getElementDefinitionAt = _internals.blpapi_SchemaTypeDefinition_getElementDefinitionAt
def blpapi_SchemaTypeDefinition_enumeration(element):
    return _internals.blpapi_SchemaTypeDefinition_enumeration(element)
blpapi_SchemaTypeDefinition_enumeration = _internals.blpapi_SchemaTypeDefinition_enumeration
# SWIG-generated flat wrappers for the C blpapi_Request_* / blpapi_RequestTemplate_*
# lifecycle and accessor functions. Each def is immediately shadowed by rebinding
# its name straight to the _internals C entry point.
def blpapi_Request_destroy(request):
    return _internals.blpapi_Request_destroy(request)
blpapi_Request_destroy = _internals.blpapi_Request_destroy
def blpapi_Request_elements(request):
    return _internals.blpapi_Request_elements(request)
blpapi_Request_elements = _internals.blpapi_Request_elements
def blpapi_Request_setPreferredRoute(request, correlationId):
    return _internals.blpapi_Request_setPreferredRoute(request, correlationId)
blpapi_Request_setPreferredRoute = _internals.blpapi_Request_setPreferredRoute
def blpapi_Request_getRequestId(request):
    return _internals.blpapi_Request_getRequestId(request)
blpapi_Request_getRequestId = _internals.blpapi_Request_getRequestId
def blpapi_RequestTemplate_release(requestTemplate):
    return _internals.blpapi_RequestTemplate_release(requestTemplate)
blpapi_RequestTemplate_release = _internals.blpapi_RequestTemplate_release
# SWIG-generated flat wrappers for the C blpapi_Operation_* accessors.
# Each def is immediately shadowed by rebinding its name straight to the
# _internals C entry point, so callers invoke native code directly.
def blpapi_Operation_name(operation):
    return _internals.blpapi_Operation_name(operation)
blpapi_Operation_name = _internals.blpapi_Operation_name
def blpapi_Operation_description(operation):
    return _internals.blpapi_Operation_description(operation)
blpapi_Operation_description = _internals.blpapi_Operation_description
def blpapi_Operation_requestDefinition(operation):
    return _internals.blpapi_Operation_requestDefinition(operation)
blpapi_Operation_requestDefinition = _internals.blpapi_Operation_requestDefinition
def blpapi_Operation_numResponseDefinitions(operation):
    return _internals.blpapi_Operation_numResponseDefinitions(operation)
blpapi_Operation_numResponseDefinitions = _internals.blpapi_Operation_numResponseDefinitions
def blpapi_Operation_responseDefinition(operation, index):
    return _internals.blpapi_Operation_responseDefinition(operation, index)
blpapi_Operation_responseDefinition = _internals.blpapi_Operation_responseDefinition
def blpapi_Operation_responseDefinitionFromName(operation, name):
    return _internals.blpapi_Operation_responseDefinitionFromName(operation, name)
blpapi_Operation_responseDefinitionFromName = _internals.blpapi_Operation_responseDefinitionFromName
# SWIG-generated flat wrappers for the C blpapi_Service_* API: metadata
# accessors, refcounting (addRef/release), operation/event-definition lookup,
# and request/event factories. Each def is immediately shadowed by rebinding
# its name straight to the _internals C entry point.
def blpapi_Service_name(service):
    return _internals.blpapi_Service_name(service)
blpapi_Service_name = _internals.blpapi_Service_name
def blpapi_Service_description(service):
    return _internals.blpapi_Service_description(service)
blpapi_Service_description = _internals.blpapi_Service_description
def blpapi_Service_numOperations(service):
    return _internals.blpapi_Service_numOperations(service)
blpapi_Service_numOperations = _internals.blpapi_Service_numOperations
def blpapi_Service_numEventDefinitions(service):
    return _internals.blpapi_Service_numEventDefinitions(service)
blpapi_Service_numEventDefinitions = _internals.blpapi_Service_numEventDefinitions
def blpapi_Service_addRef(service):
    return _internals.blpapi_Service_addRef(service)
blpapi_Service_addRef = _internals.blpapi_Service_addRef
def blpapi_Service_release(service):
    return _internals.blpapi_Service_release(service)
blpapi_Service_release = _internals.blpapi_Service_release
def blpapi_Service_authorizationServiceName(service):
    return _internals.blpapi_Service_authorizationServiceName(service)
blpapi_Service_authorizationServiceName = _internals.blpapi_Service_authorizationServiceName
def blpapi_Service_getOperation(service, nameString, name):
    return _internals.blpapi_Service_getOperation(service, nameString, name)
blpapi_Service_getOperation = _internals.blpapi_Service_getOperation
def blpapi_Service_getOperationAt(service, index):
    return _internals.blpapi_Service_getOperationAt(service, index)
blpapi_Service_getOperationAt = _internals.blpapi_Service_getOperationAt
def blpapi_Service_getEventDefinition(service, nameString, name):
    return _internals.blpapi_Service_getEventDefinition(service, nameString, name)
blpapi_Service_getEventDefinition = _internals.blpapi_Service_getEventDefinition
def blpapi_Service_getEventDefinitionAt(service, index):
    return _internals.blpapi_Service_getEventDefinitionAt(service, index)
blpapi_Service_getEventDefinitionAt = _internals.blpapi_Service_getEventDefinitionAt
def blpapi_Service_createRequest(service, operation):
    return _internals.blpapi_Service_createRequest(service, operation)
blpapi_Service_createRequest = _internals.blpapi_Service_createRequest
def blpapi_Service_createAuthorizationRequest(service, operation):
    return _internals.blpapi_Service_createAuthorizationRequest(service, operation)
blpapi_Service_createAuthorizationRequest = _internals.blpapi_Service_createAuthorizationRequest
def blpapi_Service_createPublishEvent(service):
    return _internals.blpapi_Service_createPublishEvent(service)
blpapi_Service_createPublishEvent = _internals.blpapi_Service_createPublishEvent
def blpapi_Service_createAdminEvent(service):
    return _internals.blpapi_Service_createAdminEvent(service)
blpapi_Service_createAdminEvent = _internals.blpapi_Service_createAdminEvent
def blpapi_Service_createResponseEvent(service, correlationId):
    return _internals.blpapi_Service_createResponseEvent(service, correlationId)
blpapi_Service_createResponseEvent = _internals.blpapi_Service_createResponseEvent
# SWIG-generated flat wrappers for the C blpapi_Message_* accessors and
# refcounting. Each def is immediately shadowed by rebinding its name straight
# to the _internals C entry point, so callers invoke native code directly.
def blpapi_Message_messageType(message):
    return _internals.blpapi_Message_messageType(message)
blpapi_Message_messageType = _internals.blpapi_Message_messageType
def blpapi_Message_topicName(message):
    return _internals.blpapi_Message_topicName(message)
blpapi_Message_topicName = _internals.blpapi_Message_topicName
def blpapi_Message_service(message):
    return _internals.blpapi_Message_service(message)
blpapi_Message_service = _internals.blpapi_Message_service
def blpapi_Message_numCorrelationIds(message):
    return _internals.blpapi_Message_numCorrelationIds(message)
blpapi_Message_numCorrelationIds = _internals.blpapi_Message_numCorrelationIds
def blpapi_Message_correlationId(message, index):
    return _internals.blpapi_Message_correlationId(message, index)
blpapi_Message_correlationId = _internals.blpapi_Message_correlationId
def blpapi_Message_getRequestId(message):
    return _internals.blpapi_Message_getRequestId(message)
blpapi_Message_getRequestId = _internals.blpapi_Message_getRequestId
def blpapi_Message_elements(message):
    return _internals.blpapi_Message_elements(message)
blpapi_Message_elements = _internals.blpapi_Message_elements
def blpapi_Message_fragmentType(message):
    return _internals.blpapi_Message_fragmentType(message)
blpapi_Message_fragmentType = _internals.blpapi_Message_fragmentType
def blpapi_Message_recapType(message):
    return _internals.blpapi_Message_recapType(message)
blpapi_Message_recapType = _internals.blpapi_Message_recapType
def blpapi_Message_addRef(message):
    return _internals.blpapi_Message_addRef(message)
blpapi_Message_addRef = _internals.blpapi_Message_addRef
def blpapi_Message_release(message):
    return _internals.blpapi_Message_release(message)
blpapi_Message_release = _internals.blpapi_Message_release
def blpapi_Message_timeReceived(message):
    return _internals.blpapi_Message_timeReceived(message)
blpapi_Message_timeReceived = _internals.blpapi_Message_timeReceived
# SWIG-generated flat wrappers for the C blpapi_Event_* API. Each def is
# immediately shadowed by rebinding its name straight to the _internals
# C entry point.
def blpapi_Event_eventType(event):
    return _internals.blpapi_Event_eventType(event)
blpapi_Event_eventType = _internals.blpapi_Event_eventType
def blpapi_Event_release(event):
    return _internals.blpapi_Event_release(event)
blpapi_Event_release = _internals.blpapi_Event_release
# SWIG-generated flat wrappers for the C blpapi_EventQueue_* API (create,
# destroy, blocking/non-blocking event retrieval, purge). Each def is
# immediately shadowed by rebinding its name straight to the _internals
# C entry point.
def blpapi_EventQueue_create():
    return _internals.blpapi_EventQueue_create()
blpapi_EventQueue_create = _internals.blpapi_EventQueue_create
def blpapi_EventQueue_destroy(eventQueue):
    return _internals.blpapi_EventQueue_destroy(eventQueue)
blpapi_EventQueue_destroy = _internals.blpapi_EventQueue_destroy
def blpapi_EventQueue_nextEvent(eventQueue, timeout):
    return _internals.blpapi_EventQueue_nextEvent(eventQueue, timeout)
blpapi_EventQueue_nextEvent = _internals.blpapi_EventQueue_nextEvent
def blpapi_EventQueue_purge(eventQueue):
    return _internals.blpapi_EventQueue_purge(eventQueue)
blpapi_EventQueue_purge = _internals.blpapi_EventQueue_purge
def blpapi_EventQueue_tryNextEvent(eventQueue):
    return _internals.blpapi_EventQueue_tryNextEvent(eventQueue)
blpapi_EventQueue_tryNextEvent = _internals.blpapi_EventQueue_tryNextEvent
# SWIG-generated flat wrapper: build a C-level message iterator over *event*.
# The def is immediately shadowed by rebinding the name to the _internals
# C entry point.
def blpapi_MessageIterator_create(event):
    return _internals.blpapi_MessageIterator_create(event)
blpapi_MessageIterator_create = _internals.blpapi_MessageIterator_create
def | |
import Storage
import BplusTree
import tkinter
from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
from tkinter.filedialog import askopenfile
import os
def show_data_bases():
    """Render the "Databases" B+ tree to a PNG and show it in a scrollable window.

    Opens a Toplevel over the module-global ``main_window`` (which is minimized
    while the viewer is open), with one button per database on the left;
    clicking a button drills down into that database's tables via show_tables().
    """
    data_bases = Storage.showDatabases()
    # Re-read the persisted tree and regenerate ./Data/DataBases.png before display.
    database_tree = Storage.serializable.Read('./Data/', 'Databases')
    database_tree.graph("Databases")
    tree_window = Toplevel(main_window)
    main_window.iconify()
    tree_window.geometry('950x580+200+75')
    tree_window.title('Bases de datos')
    main_tree = Frame(tree_window)
    main_tree.pack(fill=BOTH, expand=1)
    main_tree.configure(background='black')
    # Right-hand canvas: scrollable view of the rendered tree image.
    canvas_tree = Canvas(tree_window, width=750, height=530)
    canvas_tree.place(x=170, y=15)
    scroll = Scrollbar(main_tree, orient=VERTICAL, command=canvas_tree.yview)
    scroll.pack(side=RIGHT, fill=Y)
    canvas_tree.configure(yscrollcommand=scroll.set)
    scroll_tree = Scrollbar(main_tree, orient=HORIZONTAL, command=canvas_tree.xview)
    scroll_tree.pack(side=BOTTOM, fill=X)
    canvas_tree.configure(xscrollcommand=scroll_tree.set)
    # BUG FIX: the original bound '<Configure>' twice on canvas_tree; tkinter's
    # bind() replaces the previous handler, so the first lambda (which also
    # queried the wrong canvas, canvas_buttons) was dead code. One bind with
    # the correct bbox is sufficient.
    canvas_tree.bind('<Configure>', lambda e: canvas_tree.configure(scrollregion=canvas_tree.bbox('all')))
    frame_tree = Frame(canvas_tree)
    canvas_tree.create_window((0, 0), width=10000, height=5000, window=frame_tree, anchor='nw')
    # Keep a reference on the canvas so the PhotoImage is not garbage-collected.
    canvas_tree.image = PhotoImage(file='./Data/DataBases.png')
    Label(frame_tree, image=canvas_tree.image).place(x=50, y=50)
    # Left-hand canvas: scrollable column of one button per database.
    canvas_buttons = Canvas(tree_window, background='red', width=130, height=530)
    canvas_buttons.place(x=25, y=15)
    scroll_buttons = Scrollbar(main_tree, orient=VERTICAL, command=canvas_buttons.yview)
    scroll_buttons.pack(side=LEFT, fill=Y)
    canvas_buttons.configure(yscrollcommand=scroll_buttons.set)
    canvas_buttons.bind('<Configure>', lambda e: canvas_buttons.configure(scrollregion=canvas_buttons.bbox('all')))
    buttons_frame = Frame(canvas_buttons)
    canvas_buttons.create_window((15, 0), width=130, height=5000, window=buttons_frame, anchor='nw')
    Button(buttons_frame, text='Regresar', font='Helvetica 8 bold italic', command=lambda: close_table_window(tree_window, main_window), bg='red', padx=15, pady=3).place(x=0, y=0)
    yview = 30
    for x in data_bases:
        # Bind the loop variable as a default argument so each button opens
        # its own database (avoids the late-binding closure pitfall).
        Button(buttons_frame, text=x, font='Helvetica 8 bold italic', fg='white', bg='black', command=lambda database=x: show_tables(tree_window, database), padx=15, pady=5).place(x=0, y=yview)
        yview += 35
def show_tables(parent_window, database):
    """Render *database*'s table tree to a PNG and show it with one button per table.

    Parameters:
        parent_window: the Toplevel/Tk window to minimize while this view is open.
        database: name of the database whose tables are listed.
    """
    tables = Storage.showTables(database)
    db = Storage.serializable.Read(f"./Data/{database}/", database)
    db.graph(database)
    # BUG FIX: the original called parent_window.iconify() both before and
    # after creating the Toplevel; once is enough.
    table_window = Toplevel(parent_window)
    parent_window.iconify()
    table_window.geometry('950x580+200+75')
    table_window.title(database)
    main_tree = Frame(table_window)
    main_tree.pack(fill=BOTH, expand=1)
    main_tree.configure(background='black')
    # Right-hand canvas: scrollable view of the rendered tree image.
    canvas_tree = Canvas(table_window, width=750, height=530)
    canvas_tree.place(x=170, y=15)
    scroll = Scrollbar(main_tree, orient=VERTICAL, command=canvas_tree.yview)
    scroll.pack(side=RIGHT, fill=Y)
    canvas_tree.configure(yscrollcommand=scroll.set)
    scroll_tree = Scrollbar(main_tree, orient=HORIZONTAL, command=canvas_tree.xview)
    scroll_tree.pack(side=BOTTOM, fill=X)
    canvas_tree.configure(xscrollcommand=scroll_tree.set)
    # BUG FIX: single '<Configure>' bind; the original bound twice (tkinter's
    # bind() replaces the handler, so the second won) and the discarded first
    # lambda queried the wrong canvas (canvas_buttons).
    canvas_tree.bind('<Configure>', lambda e: canvas_tree.configure(scrollregion=canvas_tree.bbox('all')))
    frame_tree = Frame(canvas_tree)
    canvas_tree.create_window((0, 0), width=10000, height=5000, window=frame_tree, anchor='nw')
    # Keep a reference on the canvas so the PhotoImage is not garbage-collected.
    canvas_tree.image = PhotoImage(file=f'./Data/{database}/{database}.png')
    Label(frame_tree, image=canvas_tree.image).place(x=150, y=50)
    # Left-hand canvas: scrollable column of one button per table.
    canvas_buttons = Canvas(table_window, width=120, height=530)
    canvas_buttons.place(x=25, y=15)
    scroll_buttons = Scrollbar(main_tree, orient=VERTICAL, command=canvas_buttons.yview)
    scroll_buttons.pack(side=LEFT, fill=Y)
    canvas_buttons.configure(yscrollcommand=scroll_buttons.set)
    canvas_buttons.bind('<Configure>', lambda e: canvas_buttons.configure(scrollregion=canvas_buttons.bbox('all')))
    buttons_frame = Frame(canvas_buttons)
    canvas_buttons.create_window((15, 0), width=120, height=6000, window=buttons_frame, anchor='nw')
    Button(buttons_frame, text='Regresar', font='Helvetica 8 bold italic', command=lambda: close_table_window(table_window, parent_window), bg='red', padx=15, pady=3).place(x=0, y=0)
    yview = 30
    for x in tables:
        # Default-argument binding avoids the late-binding closure pitfall.
        Button(buttons_frame, text=x, font='Helvetica 8 bold italic', fg='white', bg='black', command=lambda table=x: extract_table(database, table, table_window), padx=15, pady=5).place(x=0, y=yview)
        yview += 35
def extract_table(database, table, parent_window):
    """Show *table*'s B+ tree image with one button per key to inspect its row.

    Parameters:
        database: name of the owning database.
        table: name of the table to display.
        parent_window: window to minimize while this view is open.
    """
    rows = Storage.extractTable(database, table)
    keys = get_keys(database, table)
    # ROBUSTNESS: get_keys() returns None when the database/table is missing;
    # the original crashed on len(list(None)). Show an empty button column instead.
    if keys is None:
        keys = []
    table_window = Toplevel(parent_window)
    parent_window.iconify()
    table_window.geometry('950x580+200+75')
    table_window.title(database + ' :' + table)
    main_tree = Frame(table_window)
    main_tree.pack(fill=BOTH, expand=1)
    main_tree.configure(background='black')
    # Right-hand canvas: scrollable view of the rendered tree image.
    canvas_tree = Canvas(table_window, width=750, height=530)
    canvas_tree.place(x=170, y=15)
    scroll = Scrollbar(main_tree, orient=VERTICAL, command=canvas_tree.yview)
    scroll.pack(side=RIGHT, fill=Y)
    canvas_tree.configure(yscrollcommand=scroll.set)
    scroll_tree = Scrollbar(main_tree, orient=HORIZONTAL, command=canvas_tree.xview)
    scroll_tree.pack(side=BOTTOM, fill=X)
    canvas_tree.configure(xscrollcommand=scroll_tree.set)
    # BUG FIX: single '<Configure>' bind; the original bound twice (the second
    # replaced the first) and the discarded first lambda queried the wrong
    # canvas (canvas_buttons).
    canvas_tree.bind('<Configure>', lambda e: canvas_tree.configure(scrollregion=canvas_tree.bbox('all')))
    frame_tree = Frame(canvas_tree)
    canvas_tree.create_window((0, 0), width=10000, height=5000, window=frame_tree, anchor='nw')
    # Keep a reference on the canvas so the PhotoImage is not garbage-collected.
    canvas_tree.image = PhotoImage(file=f'./Data/{database}/{table}/{table}.png')
    Label(frame_tree, image=canvas_tree.image).place(x=50, y=50)
    # Left-hand canvas: scrollable column of one button per key.
    canvas_buttons = Canvas(table_window, width=120, height=530)
    canvas_buttons.place(x=25, y=15)
    scroll_buttons = Scrollbar(main_tree, orient=VERTICAL, command=canvas_buttons.yview)
    scroll_buttons.pack(side=LEFT, fill=Y)
    canvas_buttons.configure(yscrollcommand=scroll_buttons.set)
    canvas_buttons.bind('<Configure>', lambda e: canvas_buttons.configure(scrollregion=canvas_buttons.bbox('all')))
    buttons_frame = Frame(canvas_buttons)
    canvas_buttons.create_window((15, 0), width=120, height=5000, window=buttons_frame, anchor='nw')
    Button(buttons_frame, text='Regresar', font='Helvetica 8 bold italic', command=lambda: close_table_window(table_window, parent_window), bg='red', padx=15, pady=3).place(x=0, y=0)
    yview = 30
    # NOTE: removed a leftover debug print(rows) from the original.
    # rows[idx] is assumed to be the tuple stored under keys[idx] — the two
    # sequences come from the same table, TODO confirm against Storage.
    for idx, key in enumerate(keys):
        Button(buttons_frame, text=key, font='Helvetica 8 bold italic', fg='white', bg='black', command=lambda info=rows[idx], key=key: table_graph(info, key, table, database), padx=15, pady=5).place(x=0, y=yview)
        yview += 35
def get_keys(database, table):
    """Return the list of keys stored in *table* of *database*, or None.

    Also regenerates the table's Graphviz image as a side effect (graficar).
    Returns None when either the database or the table does not exist.
    """
    Storage.checkData()
    # Look the database up in the top-level "Databases" tree.
    databases_tree = Storage.serializable.Read('./Data/', "Databases")
    if not databases_tree.search(databases_tree.getRoot(), database):
        return None
    # Look the table up in that database's own tree.
    tables_tree = Storage.serializable.Read(f"./Data/{database}/", database)
    if not tables_tree.search(tables_tree.getRoot(), table):
        return None
    # Load the table's tree, redraw its image, and collect its keys.
    table_tree = Storage.serializable.Read(f'./Data/{database}/{table}/', table)
    table_tree.graficar(database, table)
    return list(table_tree.lista())
def table_graph(tupla, key, table, database):
    """Draw *tupla* (the row stored under *key*) as a one-row Graphviz HTML
    table, render it to PNG with `dot`, and display it in a new window.

    Parameters:
        tupla: iterable of column values for the row.
        key: the row's key (used only in the window title).
        table / database: locate the Data/<database>/<table>/ output directory.
    """
    # One <td> per column value; whitespace inside the HTML-like label is
    # insignificant to Graphviz.
    cells = "".join("<td>" + str(x) + "</td>" for x in tupla)
    tabla = ("<<table cellspacing='0' cellpadding='20' border='0' cellborder='1'>"
             "<tr>" + cells + "</tr></table> >")
    # BUG FIX: use a context manager so the .dot handle is closed even if a
    # write raises (the original leaked the handle on error paths).
    with open(f'Data/{database}/{table}/tupla.dot', 'w', encoding='utf-8') as f:
        f.write("digraph dibujo{\n")
        f.write('graph [ordering="out"];')
        f.write('rankdir=TB;\n')
        f.write('node [shape = box];\n')
        f.write('table [label = ' + tabla + ', fontsize="30", shape = plaintext ];\n')
        f.write('}')
    # Shell out to Graphviz to produce the PNG we are about to display.
    Storage.os.system(f'dot -Tpng Data/{database}/{table}/tupla.dot -o ./Data/{database}/{table}/tupla.png')
    info_window = Toplevel()
    info_window.title('Llave: ' + key)
    info_window.geometry('700x200+300+100')
    tupla_frame = Frame(info_window)
    tupla_frame.pack(fill=BOTH, expand=1)
    tupla_canvas = Canvas(info_window, width=700, height=300)
    tupla_canvas.place(x=0, y=0)
    scroll = Scrollbar(info_window, orient=HORIZONTAL, command=tupla_canvas.xview)
    scroll.pack(side=BOTTOM, fill=X)
    tupla_canvas.configure(xscrollcommand=scroll.set)
    tupla_canvas.bind('<Configure>', lambda e: tupla_canvas.configure(scrollregion=tupla_canvas.bbox('all')))
    photo_frame = Frame(tupla_canvas)
    tupla_canvas.create_window((0, 0), width=3000, height=300, window=photo_frame, anchor='nw')
    # Keep a reference on the canvas so the PhotoImage is not garbage-collected.
    tupla_canvas.image = PhotoImage(file=f'./Data/{database}/{table}/tupla.png')
    Label(photo_frame, image=tupla_canvas.image).place(x=0, y=0)
def close_table_window(window, parent):
    """Destroy the child *window* and restore (deiconify) its *parent*."""
    window.destroy()
    parent.deiconify()
#--------------Functions----------------------------
def show_functions():
main_window.iconify()
function_window = Toplevel(main_window)
function_window.title('Funciones de las bases de datos')
function_window.geometry('675x620+300+50')
main_frame = Frame(function_window)
main_frame.pack(fill=BOTH, expand=1)
main_canvas = Canvas(function_window, width=655, height=600)
main_canvas.place(x=0, y=0)
scroll = Scrollbar(main_frame, orient=VERTICAL, command=main_canvas.yview)
scroll.pack(side=RIGHT, fill=Y)
main_canvas.configure(yscrollcommand=scroll.set)
main_canvas.bind('<Configure>', lambda e: main_canvas.configure(scrollregion=main_canvas.bbox('all')))
funtion_frame = Frame(main_canvas)
Button(function_window, text='Regresar', padx=20, pady=5, font='Helvetica 8 bold italic', fg='black',bg='red', command=lambda: close_table_window(function_window, main_window)).place(x=10, y=5)
funtion_frame.configure(bg='black')
main_canvas.create_window((10, 10), width=655, height=2000, window=funtion_frame, anchor='nw')
database_canvas = Canvas(funtion_frame, width=630, height=300)
database_canvas.configure(bg='white')
database_canvas.place(x=10, y=40)
tkinter.Label(database_canvas, text='Database Functions',font='Helvetica 16 bold italic').place(x=225, y=10)
tkinter.Label(database_canvas,text='Create Database',font='Helvetica 10 bold italic', width=20).place(x=110, y=70)
database_name = Entry(database_canvas, width=20)
database_name.place(x=250,y=70)
tkinter.Button(database_canvas, text='Create',font='Helvetica 10 bold italic', width=10, command= lambda : create_database(database_name.get(),database_name)).place(x=320, y=65)
tkinter.Label(database_canvas, text='Alter Database', font='Helvetica 10 bold italic', width=20).place(x=100, y=120)
old_data_base = Entry(database_canvas, width=20)
old_data_base.place(x=250, y=120)
new_data_base = Entry(database_canvas, width=20)
new_data_base.place(x=320, y=120)
tkinter.Button(database_canvas, text='Alter', font='Helvetica 10 bold italic', width=10, command=lambda: alter_database(old_data_base.get(), new_data_base.get(),old_data_base,new_data_base)).place(x=390, y=115)
tkinter.Label(database_canvas, text='Show Databases', font='Helvetica 10 bold italic', width=20).place(x=100,y=170)
tkinter.Button(database_canvas, text='Show', font='Helvetica 10 bold italic', width=10, command=function_show_databases).place(x=250, y=165)
tkinter.Label(database_canvas, text='Drop Database', font='Helvetica 10 bold italic', width=20).place(x=100,y=220)
drop_data_base = Entry(database_canvas, width=20)
drop_data_base.place(x=250, y=220)
tkinter.Button(database_canvas, text='Drop', font='Helvetica 10 bold italic', width=10, command=lambda: drop_database(drop_data_base.get(), drop_data_base)).place(x=390, y=215)
tables_canvas = Canvas(funtion_frame, width=630, height=700)
tables_canvas.configure(bg='white')
tables_canvas.place(x=10, y=360)
tkinter.Label(tables_canvas, text='Table Functions', font='Helvetica 16 bold italic').place(x=225 ,y=10)
tkinter.Label(tables_canvas, text='Create Table', font='Helvetica 10 bold italic', width=20).place(x=10, y=70)
data_base = Entry(tables_canvas, width=12)
data_base.place(x=150, y=70)
table_name = Entry(tables_canvas, width=12)
table_name.place(x=225, y=70)
columns = Entry(tables_canvas, width=12)
columns.place(x=300, y=70)
tkinter.Button(tables_canvas, text='Create', font='Helvetica 10 bold italic', width=10,command=lambda: create_data_table(data_base.get(), table_name.get(), columns.get(), data_base, table_name, columns)).place(x=360, y=65)
tkinter.Label(tables_canvas, text='Show tables', font='Helvetica 10 bold italic', width=20).place(x=10,y=120)
show_table = Entry(tables_canvas, width=20)
show_table.place(x=150, y=120)
tkinter.Button(tables_canvas, text='Show', font='Helvetica 10 bold italic', width=10,command=lambda: function_show_tables(show_table.get(), show_table)).place(x=250, y=115)
tkinter.Label(tables_canvas, text='Extract Table', font='Helvetica 10 bold italic', width=20).place(x=10, y=170)
data_base_name = Entry(tables_canvas, width=20)
data_base_name.place(x=150, y=170)
table_name_extract = Entry(tables_canvas, width=20)
table_name_extract.place(x=225, y=170)
tkinter.Button(tables_canvas, text='Extract', font='Helvetica 10 bold italic', width=10, command=lambda: extract_table_function(data_base_name.get(), table_name_extract.get(),data_base_name,table_name_extract)).place(x=300, y=165)
tkinter.Label(tables_canvas, text='Extract Range Table', font='Helvetica 10 bold italic', width=20).place(x=10, y=220)
database_range_name = Entry(tables_canvas, width=15)
database_range_name.place(x=170, y=220)
table_range_name = Entry(tables_canvas, width=15)
table_range_name.place(x=245, y=220)
column_range = Entry(tables_canvas, width=15)
column_range.place(x=320, y=220)
lower_range = Entry(tables_canvas, width=15)
lower_range.place(x=395, y=220)
upper_range = Entry(tables_canvas, width=15)
upper_range.place(x=470, y=220)
tkinter.Button(tables_canvas, text='Extract', font='Helvetica 10 bold italic', width=10, command=lambda: extract_range_table(database_range_name.get(), table_range_name.get(),
column_range.get(), lower_range.get(), upper_range.get(), database_range_name, table_range_name, column_range, lower_range, upper_range)).place(x=535, y=215)
tkinter.Label(tables_canvas, text='Alter AddPK', font='Helvetica 10 bold italic', width=20).place(x=10, y=270)
data_base_pk = Entry(tables_canvas, width=20)
data_base_pk.place(x=150, y=270)
table_name_pk = Entry(tables_canvas, width=20)
table_name_pk.place(x=225, y=270)
column_pk = Entry(tables_canvas, width=20)
column_pk.place(x=300, y=270)
tkinter.Button(tables_canvas, text='Alter', font='Helvetica 10 bold italic', width=10, command=lambda:
alter_addPK(data_base_pk.get(), table_name_pk.get(), column_pk.get(), data_base_pk, table_name_pk, column_pk)).place(x=375, y=265)
tkinter.Label(tables_canvas, text='Alter DropPK', font='Helvetica 10 bold italic', width=20).place(x=10, y=320)
database_drop_pk = Entry(tables_canvas, width=20)
database_drop_pk.place(x=150, y=320)
table_drop_pk = Entry(tables_canvas, width=20)
table_drop_pk.place(x=225, y=320)
tkinter.Button(tables_canvas, text='Alter', font='Helvetica 10 bold italic', width=10,command=lambda:
alter_dropPK(database_drop_pk.get(), table_drop_pk.get(), database_drop_pk, table_drop_pk)).place(x=300, y=315)
tkinter.Label(tables_canvas, text='Alter AddFK', font='Helvetica 10 bold italic', width=20).place(x=10, y=370)
database_fk = Entry(tables_canvas, width=20)
database_fk.place(x=150, y=370)
table_fk = Entry(tables_canvas, width=20)
table_fk.place(x=225, y=370)
references = Entry(tables_canvas, width=20)
references.place(x=300, y=370)
tkinter.Button(tables_canvas, text='Alter', font='Helvetica 10 bold italic', width=10).place(x=390, y=365)
tkinter.Label(tables_canvas, text='Alter AddIndex', font='Helvetica 10 bold italic', width=20).place(x=10, y=420)
database_index = Entry(tables_canvas, width=20)
database_index.place(x=150, y=420)
table_index = Entry(tables_canvas, width=20)
table_index.place(x=225, y=420)
references_index = Entry(tables_canvas, width=20)
references_index.place(x=300, y=420)
tkinter.Button(tables_canvas, text='Alter', font='Helvetica 10 bold italic', width=10).place(x=390, y=415)
tkinter.Label(tables_canvas, text='Alter Table', font='Helvetica 10 bold italic', width=20).place(x=10, y=470)
database_alter = Entry(tables_canvas, width=20)
database_alter.place(x=150, y=470)
table_old = Entry(tables_canvas, width=20)
table_old.place(x=225, y=470)
table_new = Entry(tables_canvas, width=20)
table_new.place(x=300, y=470)
tkinter.Button(tables_canvas, text='Alter', font='Helvetica 10 bold italic', width=10, command=lambda:
alter_table(database_alter.get(), table_old.get(), table_new.get(), database_alter, table_old, table_new)).place(x=375, y=465)
tkinter.Label(tables_canvas, text='Alter AddColumn', font='Helvetica 10 bold italic', width=20).place(x=10, y=520)
database_add_column = Entry(tables_canvas, width=20)
database_add_column.place(x=150, y=520)
table_add_column = Entry(tables_canvas, width=20)
table_add_column.place(x=225, y=520)
default = Entry(tables_canvas, width=20)
default.place(x=300, y=520)
tkinter.Button(tables_canvas, text='Alter', font='Helvetica 10 bold italic', width=10, command=lambda:
alter_addColumn(database_add_column.get(), table_add_column.get(), default.get(),database_add_column, table_add_column, default)).place(x=375, y=515)
tkinter.Label(tables_canvas, text='Alter DropColumn', font='Helvetica 10 bold italic', width=20).place(x=10, y=570)
database_drop_column = Entry(tables_canvas, width=20)
database_drop_column.place(x=150, y=570)
table_drop_column = Entry(tables_canvas, width=20)
table_drop_column.place(x=225, y=570)
column_number = Entry(tables_canvas, width=20)
column_number.place(x=300, y=570)
tkinter.Button(tables_canvas, text='Alter', font='Helvetica 10 bold italic', width=10,command=lambda:
alter_dropColumn(database_drop_column.get(), table_drop_column.get(), column_number.get(), database_drop_column, table_drop_column, column_number)).place(x=375, y=565)
# -------------------------------
tkinter.Label(tables_canvas, text='Drop Table', font='Helvetica 10 bold italic', width=20).place(x=10,y=620)
database_drop_table = Entry(tables_canvas, width=20)
database_drop_table.place(x=150, y=620)
table_drop = Entry(tables_canvas, width=20)
table_drop.place(x=225, y=620)
tkinter.Button(tables_canvas, text='Alter', font='Helvetica 10 bold italic', width=10,command=lambda:
drop_table(database_drop_table.get(), table_drop.get(), database_drop_table,table_drop)).place(x=300, y=615)
# -------------------------------
row_canvas = Canvas(funtion_frame, width=630, height=700)
row_canvas.configure(bg='white')
row_canvas.place(x=10, y=1080)
tkinter.Label(row_canvas, text='Row Functions', font='Helvetica 16 bold italic').place(x=225, y=10)
tkinter.Label(row_canvas, text='Insert', font='Helvetica 10 bold italic', width=20).place(x=10, y=70)
database_insert = Entry(row_canvas, width=12)
database_insert.place(x=150, y=70)
table_insert = Entry(row_canvas, width=12)
| |
<filename>Parsing/ParseBCSL.py<gh_stars>1-10
import collections
import json
from numpy import inf
import numpy as np
from copy import deepcopy
from lark import Lark, Transformer, Tree, Token
from lark import UnexpectedCharacters, UnexpectedToken
from lark.load_grammar import _TERMINAL_NAMES
import regex
from sortedcontainers import SortedList
from Core.Atomic import AtomicAgent
from Core.Complex import Complex
import Core.Model
from Core.Rate import Rate
from Core.Rule import Rule
from Core.Structure import StructureAgent
from Regulations.ConcurrentFree import ConcurrentFree
from Regulations.Conditional import Conditional
from Regulations.Ordered import Ordered
from Regulations.Programmed import Programmed
from Regulations.Regular import Regular
from TS.State import State, Memory, Vector
from TS.TransitionSystem import TransitionSystem
from TS.Edge import edge_from_dict
from Core.Side import Side
def load_TS_from_json(json_file: str) -> TransitionSystem:
    """
    Loads given JSON and interprets it as a TransitionSystem.

    :param json_file: path to a file containing a TS serialised as JSON
    :return: resulting TransitionSystem
    """
    complex_parser = Parser("rate_complex")
    # do not shadow the path argument with the open file handle
    with open(json_file) as file:
        data = json.load(file)

    ordering = SortedList(map(lambda agent: complex_parser.parse(agent).data.children[0], data['ordering']))
    ts = TransitionSystem(ordering, data['bound'])
    ts.states_encoding = dict()
    for node_id in data['nodes']:
        # NOTE(security): eval is required because serialised vectors may contain a
        # bare `inf` (resolved via the module-level `from numpy import inf`), which
        # ast.literal_eval cannot parse -- only load trusted files.
        vector = np.array(eval(data['nodes'][node_id]))
        # a "hell" state is marked by an infinite first component;
        # np.inf is the supported spelling (np.math was a private alias, removed in NumPy >= 1.25)
        is_hell = bool(vector[0] == np.inf)
        ts.states_encoding[int(node_id)] = State(Vector(vector), Memory(0), is_hell)

    ts.edges = {edge_from_dict(edge) for edge in data['edges']}
    ts.init = data['initial']
    if 'parameters' in data:
        ts.params = data['parameters']
    ts.unprocessed = {State(Vector(np.array(eval(state))), Memory(0)) for state in data.get('unprocessed', list())}
    ts.states = set(ts.states_encoding.values()) - ts.unprocessed
    return ts
class Result:
    """
    Class to represent output from the Parser.
    """
    def __init__(self, success, data):
        # success: True when parsing succeeded, False otherwise
        # data: presumably the parse tree on success and error details on
        # failure -- confirm against Parser (defined elsewhere in this file)
        self.success = success
        self.data = data
class SideHelper:
    """
    Accumulator used while assembling one side of a rule.

    Holds the flat agent sequence, the per-agent compartments, the
    (first, last) index pairs delimiting each complex, and a running
    counter of agents seen so far.
    """
    def __init__(self):
        self.seq = []
        self.comp = []
        self.complexes = []
        self.counter = 0

    def __str__(self):
        return f"{self.seq} | {self.comp} | {self.complexes} | {self.counter}"

    def __repr__(self):
        return str(self)

    def to_side(self):
        """Materialise the accumulated data as a Side of Complex objects."""
        built = [Complex(self.seq[start:end + 1], self.comp[start])
                 for start, end in self.complexes]
        return Side(built)
GRAMMAR = r"""
model: rules inits (definitions)? (complexes)? (regulation)?
rules: RULES_START (rule|COMMENT)+
inits: INITS_START (init|COMMENT)+
definitions: DEFNS_START (definition|COMMENT)+
complexes: COMPLEXES_START (cmplx_dfn|COMMENT)+
regulation: REGULATION_START regulation_def
init: const? rate_complex (COMMENT)?
definition: def_param "=" number (COMMENT)?
rule: (label)? side ARROW side ("@" rate)? (";" variable)? (COMMENT)?
cmplx_dfn: cmplx_name "=" value (COMMENT)?
side: (const? complex "+")* (const? complex)?
complex: (abstract_sequence|value|cmplx_name) DOUBLE_COLON compartment
!rate : fun "/" fun | fun
!fun: const | param | rate_agent | fun "+" fun | fun "-" fun | fun "*" fun | fun POW const | "(" fun ")"
!rate_agent: "[" rate_complex "]"
COMMENT: "//" /[^\n]/*
COM: "//"
POW: "**"
ARROW: "=>"
RULES_START: "#! rules"
INITS_START: "#! inits"
DEFNS_START: "#! definitions"
COMPLEXES_START: "#! complexes"
REGULATION_START: "#! regulation"
!label: CNAME "~"
param: CNAME
def_param : CNAME
number: NUMBER
const: (INT|DECIMAL)
%import common.WORD
%import common.NUMBER
%import common.INT
%import common.DECIMAL
%import common.WS
%ignore WS
%ignore COMMENT
"""
EXTENDED_GRAMMAR = """
abstract_sequence: atomic_complex | atomic_structure_complex | structure_complex
atomic_complex: atomic ":" (cmplx_name|VAR)
atomic_structure_complex: atomic ":" structure ":" (cmplx_name|VAR)
structure_complex: structure ":" (cmplx_name|VAR)
variable: VAR "=" "{" cmplx_name ("," cmplx_name)+ "}"
VAR: "?"
"""
COMPLEX_GRAMMAR = """
rate_complex: (value|cmplx_name) DOUBLE_COLON compartment
value: (agent ".")* agent
agent: atomic | structure
structure: s_name "(" composition ")"
composition: (atomic ",")* atomic?
atomic : a_name "{" state "}"
a_name: CNAME
s_name: CNAME
compartment: CNAME
cmplx_name: CNAME
!state: (DIGIT|LETTER|"+"|"-"|"*"|"_")+
DOUBLE_COLON: "::"
%import common.CNAME
%import common.LETTER
%import common.DIGIT
"""
REGULATIONS_GRAMMAR = """
regulation_def: "type" ( regular | programmed | ordered | concurrent_free | conditional )
!regular: "regular" (DIGIT|LETTER| "+" | "*" | "(" | ")" | "[" | "]" | "_" | "|" | "&")+
programmed: "programmed" successors+
successors: CNAME ":" "{" CNAME ("," CNAME)* "}"
ordered: "ordered" order ("," order)*
order: ("(" CNAME "," CNAME ")")
concurrent_free: "concurrent-free" order ("," order)*
conditional: "conditional" context+
context: CNAME ":" "{" rate_complex ("," rate_complex)* "}"
"""
class TransformRegulations(Transformer):
    """Lark transformer converting the parsed ``regulation`` subtree into the
    corresponding Regulations object (Regular, Programmed, Ordered,
    ConcurrentFree or Conditional)."""

    def regulation(self, matches):
        # matches[0] is the REGULATION_START token, matches[1] the definition
        return {'regulation': matches[1]}

    def regulation_def(self, matches):
        return matches[0]

    def regular(self, matches):
        expression = "".join(matches[1:])
        # raises if the expression is not a valid regular expression
        regex.compile(expression)
        return Regular(expression)

    def programmed(self, matches):
        successors = dict()
        for mapping in matches:
            successors.update(mapping)
        return Programmed(successors)

    def successors(self, matches):
        source, *targets = matches
        return {str(source): set(map(str, targets))}

    def ordered(self, matches):
        return Ordered(set(matches))

    def order(self, matches):
        return str(matches[0]), str(matches[1])

    def conditional(self, matches):
        context_fun = dict()
        for mapping in matches:
            context_fun.update(mapping)
        return Conditional(context_fun)

    def context(self, matches):
        label, *complexes = matches
        return {str(label): {item.children[0] for item in complexes}}

    def concurrent_free(self, matches):
        return ConcurrentFree(set(matches))
class ReplaceVariables(Transformer):
    """
    Substitutes every variable token (marked by ?) in a rule by the
    given cmplx_name (so far limited only to that).
    """
    def __init__(self, to_replace):
        super().__init__()
        self.to_replace = to_replace

    def VAR(self, matches):
        # each occurrence gets its own deep copy so later in-place
        # transformations stay independent of one another
        return deepcopy(self.to_replace)
class ExtractComplexNames(Transformer):
    """
    Collects cmplx_name definitions from the #! complexes section and
    expands each rule containing a variable into one concrete rule per
    variable option (via the ReplaceVariables transformer).
    """
    def __init__(self):
        super().__init__()
        self.complex_defns = dict()

    def cmplx_dfn(self, matches):
        name, value = matches[0], matches[1]
        self.complex_defns[str(name.children[0])] = value

    def rules(self, matches):
        # matches[0] is the RULES_START token; keep it as-is
        expanded = [matches[0]]
        for rule in matches[1:]:
            last_child = rule.children[-1]
            if last_child.data == 'variable':
                # one rule instance per option listed after the "?" variable
                for option in last_child.children[1:]:
                    replacer = ReplaceVariables(option)
                    stripped = Tree('rule', deepcopy(rule.children[:-1]))
                    expanded.append(replacer.transform(stripped))
            else:
                expanded.append(rule)
        return Tree('rules', expanded)
class TransformAbstractSyntax(Transformer):
    """
    Transformer to remove "zooming" syntax.
    Divided to three special cases (declared below).
    Based on replacing subtrees in parent trees.
    """
    def __init__(self, complex_defns):
        # complex_defns: mapping cmplx_name -> its defining subtree
        super(TransformAbstractSyntax, self).__init__()
        self.complex_defns = complex_defns

    def cmplx_name(self, matches):
        # expand a named complex to a deep copy of its definition so the
        # in-place edits below never corrupt the shared definition
        return deepcopy(self.complex_defns[str(matches[0])])

    def abstract_sequence(self, matches):
        return matches[0]

    def atomic_structure_complex(self, matches):
        """
        Case atomic:structure:complex -- first push the atomic into the
        structure, then push the structure into the complex.
        """
        structure = self.insert_atomic_to_struct(matches[0], matches[1])
        sequence = self.insert_struct_to_complex(structure, matches[2])
        return sequence

    def atomic_complex(self, matches):
        """
        Case atomic:complex.
        """
        sequence = self.insert_atomic_to_complex(matches[0], matches[1])
        return sequence

    def structure_complex(self, matches):
        """
        Case structure:complex.
        """
        sequence = self.insert_struct_to_complex(matches[0], matches[1])
        return sequence

    def insert_atomic_to_struct(self, atomic, struct):
        """
        Adds or replaces atomic subtree in struct tree.
        """
        if len(struct.children) == 2:
            # structure already has a composition -> append the atomic to it
            struct.children[1].children.append(atomic)
        else:
            # empty structure -> create a composition holding just the atomic
            struct.children.append(Tree('composition', [atomic]))
        return struct

    def insert_struct_to_complex(self, struct, complex):
        """
        Adds or replaces struct subtree in complex tree.
        Only the first agent with a matching name is replaced.
        """
        for i in range(len(complex.children)):
            if self.get_name(struct) == self.get_name(complex.children[i].children[0]):
                complex.children[i] = Tree('agent', [struct])
                break
        return complex

    def insert_atomic_to_complex(self, atomic, complex):
        """
        Adds or replaces atomic subtree in complex tree.
        Only the first agent with a matching name is replaced.
        """
        for i in range(len(complex.children)):
            if self.get_name(atomic) == self.get_name(complex.children[i].children[0]):
                complex.children[i] = Tree('agent', [atomic])
                break
        return complex

    def get_name(self, agent):
        # name token of an agent subtree (a_name / s_name)
        return str(agent.children[0].children[0])
class TreeToComplex(Transformer):
    """
    Creates actual Complexes in rates of the rules - there it is safe,
    order is not important. Does not apply to the rest of the rule!
    """
    def state(self, matches):
        return "".join(str(token) for token in matches)

    def atomic(self, matches):
        name = str(matches[0].children[0])
        state = matches[1]
        return AtomicAgent(name, state)

    def structure(self, matches):
        name = str(matches[0].children[0])
        if len(matches) == 1:
            # no composition given -> structure with an empty interior
            return StructureAgent(name, set())
        return StructureAgent(name, set(matches[1].children))

    def rate_complex(self, matches):
        # matches[0] holds the agents, matches[2] the compartment name
        sequence = [subtree.children[0] for subtree in matches[0].children]
        compartment = matches[2]
        return Tree("agent", [Complex(sequence, compartment)])

    def compartment(self, matches):
        return str(matches[0])
class TreeToObjects(Transformer):
def __init__(self):
super(TreeToObjects, self).__init__()
self.params = set()
"""
A transformer which is called on a tree in a bottom-up manner and transforms all subtrees/tokens it encounters.
Note the defined methods have the same name as elements in the grammar above.
Creates the actual Model object after all the above transformers were applied.
"""
def const(self, matches):
    # numeric literal (INT or DECIMAL) -> float
    return float(matches[0])

def def_param(self, matches):
    # parameter name in a definition -> plain string
    return str(matches[0])

def label(self, matches):
    # rule label (CNAME before "~") -> plain string
    return str(matches[0])

def number(self, matches):
    # NUMBER token -> float
    return float(matches[0])
def side(self, matches):
    """
    Builds a SideHelper from one parsed side of a rule.

    Children alternate between an optional stoichiometric constant and a
    complex subtree; a constant multiplies only the complex that follows it.
    """
    helper = SideHelper()
    stochio = 1
    for item in matches:
        if type(item) in [int, float]:
            # stoichiometric coefficient preceding the next complex
            stochio = int(item)
        else:
            agents = item.children[0]
            compartment = item.children[2]
            # replicate the complex `stochio` times
            for i in range(stochio):
                start = helper.counter
                for agent in agents.children:
                    helper.seq.append(deepcopy(agent.children[0]))
                    helper.comp.append(compartment)
                    helper.counter += 1
                # remember the (first, last) sequence positions of this complex
                helper.complexes.append((start, helper.counter - 1))
            # the coefficient applies only to the complex right after it
            stochio = 1
    return helper
def rule(self, matches):
label = None
rate = None
if len(matches) == 5:
label, lhs, arrow, rhs, rate = matches
elif len(matches) == 4:
if type(matches[0]) == str:
label, lhs, arrow, rhs = matches
else:
lhs, arrow, rhs, rate = matches
else:
lhs, arrow, rhs = matches
agents = tuple(lhs.seq + rhs.seq)
mid = lhs.counter
compartments = lhs.comp + rhs.comp
complexes = lhs.complexes + list(
map(lambda item: (item[0] + lhs.counter, item[1] + lhs.counter), rhs.complexes))
pairs = [(i, i + lhs.counter) for i in range(min(lhs.counter, rhs.counter))]
if lhs.counter > rhs.counter:
pairs += [(i, None) for i in range(rhs.counter, lhs.counter)]
elif lhs.counter < rhs.counter:
for i in range(lhs.counter, rhs.counter):
replication = False
if lhs.counter == 1 and rhs.counter > 1:
if lhs.seq[pairs[-1][0]] == rhs.seq[pairs[-1][1] - lhs.counter]:
if rhs.seq[pairs[-1][1] - lhs.counter] == rhs.seq[i]:
pairs += [(pairs[-1][0], i + lhs.counter)]
replication = True
if not replication:
pairs += [(None, i + lhs.counter)]
return | |
curr_value / network_weight[layer][scale][1]
curr_result.pop()
#print('end0-0')
curr_value = curr_value * network_weight[layer][scale][2]
curr_result.append([scale, 1])
_parse(network_weight, layer+1, curr_value, curr_result, last=1)
curr_value = curr_value / network_weight[layer][scale][2]
curr_result.pop()
#print('end0-1')
scale = 1
if last == scale:
curr_value = curr_value * network_weight[layer][scale][0]
curr_result.append([scale, 0])
_parse(network_weight, layer+1, curr_value, curr_result, last=0)
curr_value = curr_value / network_weight[layer][scale][0]
curr_result.pop()
#print('end1-0')
curr_value = curr_value * network_weight[layer][scale][1]
curr_result.append([scale, 1])
_parse(network_weight, layer+1, curr_value, curr_result, last=1)
curr_value = curr_value / network_weight[layer][scale][1]
curr_result.pop()
#print('end1-1')
curr_value = curr_value * network_weight[layer][scale][2]
curr_result.append([scale, 2])
_parse(network_weight, layer+1, curr_value, curr_result, last=2)
curr_value = curr_value / network_weight[layer][scale][2]
curr_result.pop()
#print('end1-2')
elif layer == 2:
#print('begin layer 2')
scale = 0
if last == scale:
curr_value = curr_value * network_weight[layer][scale][1]
curr_result.append([scale, 0])
_parse(network_weight, layer+1, curr_value, curr_result, last=0)
curr_value = curr_value / network_weight[layer][scale][1]
curr_result.pop()
#print('end0-0')
curr_value = curr_value * network_weight[layer][scale][2]
curr_result.append([scale, 1])
_parse(network_weight, layer+1, curr_value, curr_result, last=1)
curr_value = curr_value / network_weight[layer][scale][2]
curr_result.pop()
#print('end0-1')
scale = 1
if last == scale:
curr_value = curr_value * network_weight[layer][scale][0]
curr_result.append([scale, 0])
_parse(network_weight, layer+1, curr_value, curr_result, last=0)
curr_value = curr_value / network_weight[layer][scale][0]
curr_result.pop()
#print('end1-0')
curr_value = curr_value * network_weight[layer][scale][1]
curr_result.append([scale, 1])
_parse(network_weight, layer+1, curr_value, curr_result, last=1)
curr_value = curr_value / network_weight[layer][scale][1]
curr_result.pop()
#print('end1-1')
curr_value = curr_value * network_weight[layer][scale][2]
curr_result.append([scale, 2])
_parse(network_weight, layer+1, curr_value, curr_result, last=2)
curr_value = curr_value / network_weight[layer][scale][2]
curr_result.pop()
#print('end1-2')
scale = 2
if last == scale:
curr_value = curr_value * network_weight[layer][scale][0]
curr_result.append([scale, 1])
_parse(network_weight, layer+1, curr_value, curr_result, last=1)
curr_value = curr_value / network_weight[layer][scale][0]
curr_result.pop()
#print('end2-1')
curr_value = curr_value * network_weight[layer][scale][1]
curr_result.append([scale, 2])
_parse(network_weight, layer+1, curr_value, curr_result, last=2)
curr_value = curr_value / network_weight[layer][scale][1]
curr_result.pop()
#print('end2-2')
curr_value = curr_value * network_weight[layer][scale][2]
curr_result.append([scale, 3])
_parse(network_weight, layer+1, curr_value, curr_result, last=3)
curr_value = curr_value / network_weight[layer][scale][2]
curr_result.pop()
#print('end2-3')
else:
#print('begin layer {}'.format(layer))
scale = 0
if last == scale:
curr_value = curr_value * network_weight[layer][scale][1]
curr_result.append([scale, 0])
_parse(network_weight, layer+1, curr_value, curr_result, last=0)
curr_value = curr_value / network_weight[layer][scale][1]
curr_result.pop()
#print('end0-0')
curr_value = curr_value * network_weight[layer][scale][2]
curr_result.append([scale, 1])
_parse(network_weight, layer+1, curr_value, curr_result, last=1)
curr_value = curr_value / network_weight[layer][scale][2]
curr_result.pop()
#print('end0-1')
scale = 1
if last == scale:
curr_value = curr_value * network_weight[layer][scale][0]
curr_result.append([scale, 0])
_parse(network_weight, layer+1, curr_value, curr_result, last=0)
curr_value = curr_value / network_weight[layer][scale][0]
curr_result.pop()
#print('end1-0')
curr_value = curr_value * network_weight[layer][scale][1]
curr_result.append([scale, 1])
_parse(network_weight, layer+1, curr_value, curr_result, last=1)
curr_value = curr_value / network_weight[layer][scale][1]
curr_result.pop()
#print('end1-1')
curr_value = curr_value * network_weight[layer][scale][2]
curr_result.append([scale, 2])
_parse(network_weight, layer+1, curr_value, curr_result, last=2)
curr_value = curr_value / network_weight[layer][scale][2]
curr_result.pop()
#print('end1-2')
scale = 2
if last == scale:
curr_value = curr_value * network_weight[layer][scale][0]
curr_result.append([scale, 1])
_parse(network_weight, layer+1, curr_value, curr_result, last=1)
curr_value = curr_value / network_weight[layer][scale][0]
curr_result.pop()
#print('end2-1')
curr_value = curr_value * network_weight[layer][scale][1]
curr_result.append([scale, 2])
_parse(network_weight, layer+1, curr_value, curr_result, last=2)
curr_value = curr_value / network_weight[layer][scale][1]
curr_result.pop()
#print('end2-2')
curr_value = curr_value * network_weight[layer][scale][2]
curr_result.append([scale, 3])
_parse(network_weight, layer+1, curr_value, curr_result, last=3)
curr_value = curr_value / network_weight[layer][scale][2]
curr_result.pop()
#print('end2-3')
scale = 3
if last == scale:
curr_value = curr_value * network_weight[layer][scale][0]
curr_result.append([scale, 2])
_parse(network_weight, layer+1, curr_value, curr_result, last=2)
curr_value = curr_value / network_weight[layer][scale][0]
curr_result.pop()
#print('end3-2')
curr_value = curr_value * network_weight[layer][scale][1]
curr_result.append([scale, 3])
_parse(network_weight, layer+1, curr_value, curr_result, last=3)
curr_value = curr_value / network_weight[layer][scale][1]
curr_result.pop()
#print('end3-3')
network_weight = F.softmax(self.fabric_path_alpha, dim=-1) * 5
network_weight = network_weight.data.cpu().numpy()
_parse(network_weight, 0, 1, [], 0)
print('\tDecode Network max_prob:', max_prob)
return best_result
def viterbi_search(self):
    """
    Viterbi decoding over the network-level architecture weights.

    Builds per-layer transition probabilities from ``fabric_path_alpha``
    and backtracks the most probable sequence of scales.

    :return: (actual_path, encoded_space) -- actual_path[i] is the scale
             after layer i; encoded_space is network_layer_to_space(actual_path).
    """
    # network_space[layer][scale][rate]: probability of leaving `scale` with
    # transition `rate` (0 up, 1 same, 2 down); border scales only have two
    # options, hence the 2/3 renormalisation of their softmax.
    network_space = torch.zeros((self.nb_layers, 4, 3))
    for layer in range(self.nb_layers):
        if layer == 0:
            # only scale 0 exists; it can stay (rate 1) or go down (rate 2)
            network_space[layer][0][1:] = F.softmax(self.fabric_path_alpha.data[layer][0][1:], dim=-1) * (
                    2 / 3)
        elif layer == 1:
            network_space[layer][0][1:] = F.softmax(self.fabric_path_alpha.data[layer][0][1:], dim=-1) * (
                    2 / 3)
            network_space[layer][1] = F.softmax(self.fabric_path_alpha.data[layer][1], dim=-1)
        elif layer == 2:
            network_space[layer][0][1:] = F.softmax(self.fabric_path_alpha.data[layer][0][1:], dim=-1) * (
                    2 / 3)
            network_space[layer][1] = F.softmax(self.fabric_path_alpha.data[layer][1], dim=-1)
            network_space[layer][2] = F.softmax(self.fabric_path_alpha.data[layer][2], dim=-1)
        else:
            network_space[layer][0][1:] = F.softmax(self.fabric_path_alpha.data[layer][0][1:], dim=-1) * (
                    2 / 3)
            network_space[layer][1] = F.softmax(self.fabric_path_alpha.data[layer][1], dim=-1)
            network_space[layer][2] = F.softmax(self.fabric_path_alpha.data[layer][2], dim=-1)
            # scale 3 is the lowest; it can only go up (rate 0) or stay (rate 1)
            network_space[layer][3][:2] = F.softmax(self.fabric_path_alpha.data[layer][3][:2], dim=-1) * (
                    2 / 3)
    prob_space = np.zeros((network_space.shape[:2]))
    path_space = np.zeros((network_space.shape[:2])).astype('int8')
    # prob_space[layer, sample]: best probability of reaching scale `sample` after `layer`
    # network space: rate 0 ↗, 1 →, 2 ↘
    # path_space stores 1 / 0 / -1, i.e. (1 - rate), for backtracking
    for layer in range(network_space.shape[0]):
        if layer == 0:
            prob_space[layer][0] = network_space[layer][0][1]  # 0-layer go to next 0-scale prob
            prob_space[layer][1] = network_space[layer][0][2]  # 0-layer go to next 1-scale prob
            path_space[layer][0] = 0
            path_space[layer][1] = -1
        else:
            for sample in range(network_space.shape[1]):
                if sample > layer + 1:  # control valid sample in each layer
                    continue
                local_prob = []
                for rate in range(network_space.shape[2]):
                    if (sample == 0 and rate == 2) or (sample == 3 and rate == 0):
                        # scale 0 cannot be reached by rate 2 (reduction);
                        # scale 3 cannot be reached by rate 0 (up)
                        continue
                    else:
                        # sample is the target scale; sample + (1 - rate) is the current one:
                        # rate = 0 (up):   current = target + 1, i.e. 1 - rate = 1
                        # rate = 1 (same): current = target,     i.e. 1 - rate = 0
                        # rate = 2 (down): current = target - 1, i.e. 1 - rate = -1
                        local_prob.append(prob_space[layer - 1][sample + 1 - rate] *
                                          network_space[layer][sample + 1 - rate][rate])
                prob_space[layer][sample] = np.max(local_prob, axis=0)
                rate = np.argmax(local_prob, axis=0)
                # for sample == 3 the rate-0 entry was skipped, so shift the index
                path = 1 - rate if sample != 3 else -rate
                path_space[layer][sample] = path
    output_sample = np.argmax(prob_space[-1, :], axis=-1)
    # NOTE(review): hard-coded length 12 -- presumably equals self.nb_layers; confirm
    actual_path = np.zeros(12).astype('uint8')
    actual_path[-1] = output_sample  # the last scale is now known
    for i in range(1, self.nb_layers):  # backtrack the scale path through path_space
        actual_path[-i - 1] = actual_path[-i] + path_space[self.nb_layers - i, actual_path[-i]]
    return actual_path, network_layer_to_space(actual_path)
def genotype_decode(self):
    # Deprecated: superseded by cell_arch_decode.
    raise NotImplementedError
def cell_arch_decode(self):
    """
    Decode, for every cell, which edge and which operation to keep.

    :return: array of shape [nb_cells, nb_steps, 2] where each entry is
             [edge_index, operation_index].
    """
    genes = []  # [nb_cells, nb_edges, edge_index, best_op]
    # TODO: confirm nb_choices
    nb_choices = 7

    def _parse(alphas, steps, has_none):
        # TODO: just include None edge, probs of all operation are all zero, it will never be selected
        # NOTE(review): has_none is currently unused inside _parse -- confirm intent
        gene = []
        start = 0
        n = 2  # offset
        for i in range(steps):
            end = start + n
            # rank the candidate edges of this step; column 0 (Zero op) is ignored
            # TODO: reconfirm Zero operation index
            edges = sorted(range(start, end), key=lambda x: -np.max(alphas[x, 1:]))
            top1edge = edges[0]  # edge index
            best_op_index = np.argmax(alphas[top1edge])  # operation index on that edge
            gene.append([top1edge, best_op_index])
            start = end
            n += 1  # move offset
        # len(gene) equals steps: each step keeps exactly one path
        # shape as [nb_steps, operation_index]
        return np.array(gene)

    # alphas is AP_path_alpha for all the paths in each cell, not a single node
    # TODO: nb_edges in cells
    nb_edges = 2
    for cell in self.cells:
        alpha = np.zeros((nb_edges, nb_choices))
        has_none = False
        for index, op in enumerate(cell.ops):
            # each op is a MobileInvertedResidual whose MixedEdge is
            # op.mobile_inverted_conv. An op can be None when prev_prev_c is
            # None and edge_index == 0, in which case its alpha row stays zero
            # (so the cell_arch of each cell in fabric would otherwise raise a
            # size mismatch error).
            # TODO: each cell_arch list or array, cannot use concatenate
            if op is None:
                assert index == 0, 'invalid edge_index, {} is None'.format(index)
                has_none = True
            elif op is not None:
                mixededge = op.mobile_inverted_conv
                assert mixededge.__str__().startswith('MixedEdge'), 'Error in cell_arch_decode'
                alpha[index] = mixededge.AP_path_alpha.data.cpu().numpy()
        # alpha holds one row of operation weights per edge of this cell
        gene = _parse(alpha, self.run_config.steps, has_none)
        genes.append(gene)
    # genes selects which edge and which operation to keep in each cell
    # [path_index, operation_index]
    return np.array(genes)
    # shape as [nb_cells, nb_steps, operation_index]
def architecture_path_parameters(self):
    """Yield only the cell-level architecture weights (``AP_path_alpha``)."""
    yield from (param for name, param in self.named_parameters()
                if 'AP_path_alpha' in name)
def architecture_network_parameters(self):
    """Yield only the network-level (fabric) architecture weights."""
    yield from (param for name, param in self.named_parameters()
                if 'fabric_path_alpha' in name)
def cell_binary_gates(self):
    """Yield only the cell-level binary gates (``AP_path_wb``)."""
    yield from (param for name, param in self.named_parameters()
                if 'AP_path_wb' in name)
def network_binary_gates(self):
    """Yield only the network-level binary gates (``fabric_path_wb``)."""
    yield from (param for name, param in self.named_parameters()
                if 'fabric_path_wb' in name)
def binary_gates(self):
    """Yield every binary gate, network-level and cell-level alike."""
    gate_tags = ('fabric_path_wb', 'AP_path_wb')
    yield from (param for name, param in self.named_parameters()
                if any(tag in name for tag in gate_tags))
def | |
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
file_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', False
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['unit_id'] = \
unit_id
return self.call_with_http_info(**kwargs)
# Generated endpoint descriptor: binds __get_calculation_unit_info_by_id to
# the shared HTTP machinery (auth schemes, path template, parameter names,
# locations and OpenAPI types).
self.get_calculation_unit_info_by_id = _Endpoint(
    settings={
        # HTTP 200 deserializes into file_type
        'response_type': dict({ 200:(file_type,), }),
        'auth': [
            'Basic',
            'Bearer'
        ],
        'endpoint_path': '/analytics/engines/quant/v3/calculations/{id}/units/{unitId}/info',
        'operation_id': 'get_calculation_unit_info_by_id',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'id',
            'unit_id',
        ],
        'required': [
            'id',
            'unit_id',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'id':
                (str,),
            'unit_id':
                (str,),
        },
        # python name -> wire name for serialization
        'attribute_map': {
            'id': 'id',
            'unit_id': 'unitId',
        },
        # both parameters are substituted into the URL path
        'location_map': {
            'id': 'path',
            'unit_id': 'path',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'application/x-protobuf',
            'application/octet-stream'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_calculation_unit_info_by_id
)
def __get_calculation_unit_result_by_id(
    self,
    id,
    unit_id,
    **kwargs
):
    """Get Quant Engine calculation result by id  # noqa: E501

    This is the endpoint to get the result of a previously requested calculation. If the calculation has finished computing, the body of the response will contain the requested document in JSON.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_calculation_unit_result_by_id(id, unit_id, async_req=True)
    >>> result = thread.get()

    Args:
        id (str): from url, provided from the location header in the Get Quant Engine calculation status by id endpoint
        unit_id (str): from url, provided from the location header in the Get Quant Engine calculation status by id endpoint

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is False.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done one the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done one the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        file_type
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in framework-level defaults without clobbering caller-supplied
    # values.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', False),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
    ):
        kwargs[option] = kwargs.get(option, default)
    kwargs['_host_index'] = kwargs.get('_host_index')
    # Path parameters travel through kwargs to the shared HTTP machinery.
    kwargs['id'] = id
    kwargs['unit_id'] = unit_id
    return self.call_with_http_info(**kwargs)
# Generated endpoint descriptor: binds __get_calculation_unit_result_by_id
# to the shared HTTP machinery (auth schemes, path template, parameter
# names, locations and OpenAPI types).
self.get_calculation_unit_result_by_id = _Endpoint(
    settings={
        # HTTP 200 deserializes into file_type
        'response_type': dict({ 200:(file_type,), }),
        'auth': [
            'Basic',
            'Bearer'
        ],
        'endpoint_path': '/analytics/engines/quant/v3/calculations/{id}/units/{unitId}/result',
        'operation_id': 'get_calculation_unit_result_by_id',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': [
            'id',
            'unit_id',
        ],
        'required': [
            'id',
            'unit_id',
        ],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'id':
                (str,),
            'unit_id':
                (str,),
        },
        # python name -> wire name for serialization
        'attribute_map': {
            'id': 'id',
            'unit_id': 'unitId',
        },
        # both parameters are substituted into the URL path
        'location_map': {
            'id': 'path',
            'unit_id': 'path',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'application/x-protobuf',
            'application/octet-stream'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__get_calculation_unit_result_by_id
)
def __post_and_calculate(
    self,
    **kwargs
):
    """Create and Run Quant Engine calculation  # noqa: E501

    This endpoint runs the Quant Engine calculation specified in the POST body parameters. It can take one or more calculation units as input.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.post_and_calculate(async_req=True)
    >>> result = thread.get()

    Keyword Args:
        cache_control (str): Standard HTTP header. Accepts no-store, max-age, max-stale.. [optional]
        quant_calculation_parameters_root (QuantCalculationParametersRoot): [optional]
        _return_http_data_only (bool): response data without head status
            code and headers. Default is False.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done one the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done one the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        (For 202 status - CalculationStatusRoot)(For 201 status - ObjectRoot)(For 200 status - CalculationStatusRoot)
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in framework-level defaults without clobbering caller-supplied
    # values.
    for option, default in (
        ('async_req', False),
        ('_return_http_data_only', False),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
    ):
        kwargs[option] = kwargs.get(option, default)
    kwargs['_host_index'] = kwargs.get('_host_index')
    return self.call_with_http_info(**kwargs)
# Generated endpoint descriptor: binds __post_and_calculate to the shared
# HTTP machinery. Response type depends on the HTTP status (202/200 ->
# CalculationStatusRoot, 201 -> ObjectRoot).
self.post_and_calculate = _Endpoint(
    settings={
        'response_type': dict({ 202:(CalculationStatusRoot,), 201:(ObjectRoot,), 200:(CalculationStatusRoot,), }),
        'auth': [
            'Basic',
            'Bearer'
        ],
        'endpoint_path': '/analytics/engines/quant/v3/calculations',
        'operation_id': 'post_and_calculate',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': [
            'cache_control',
            'quant_calculation_parameters_root',
        ],
        # both parameters are optional
        'required': [],
        'nullable': [
        ],
        'enum': [
        ],
        'validation': [
        ]
    },
    root_map={
        'validations': {
        },
        'allowed_values': {
        },
        'openapi_types': {
            'cache_control':
                (str,),
            'quant_calculation_parameters_root':
                (QuantCalculationParametersRoot,),
        },
        # python name -> wire name for serialization
        'attribute_map': {
            'cache_control': 'Cache-Control',
        },
        # cache_control goes in the headers; the parameters root is the
        # JSON request body
        'location_map': {
            'cache_control': 'header',
            'quant_calculation_parameters_root': 'body',
        },
        'collection_format_map': {
        }
    },
    headers_map={
        'accept': [
            'application/json',
            'application/x-protobuf'
        ],
        'content_type': [
            'application/json'
        ]
    },
    api_client=api_client,
    callable=__post_and_calculate
)
def __put_and_calculate(
self,
id,
**kwargs
):
"""Create or update Quant Engine calculation and run it. # noqa: E501
This endpoint updates and runs the Quant Engine calculation specified in the PUT body parameters. This also allows creating new Quant Engine calculations with custom ids. It can take one or more calculation units as input. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.put_and_calculate(id, async_req=True)
>>> result = thread.get()
Args:
id (str): from url, provided from the location header in the Create and Run Quant Engine calculation endpoint
Keyword Args:
cache_control (str): Standard HTTP header. Accepts no-store, max-age, max-stale.. [optional]
quant_calculation_parameters_root (QuantCalculationParametersRoot): Calculation Parameters. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is False.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
(For 202 status - CalculationStatusRoot)(For 200 status - CalculationStatusRoot)(For 201 status - ObjectRoot)
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', False
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
| |
# Headless-plotting setup: the Agg backend must be selected before pylab
# is imported so no display is required.
import sys
import matplotlib
matplotlib.use("Agg")
from pylab import *
ioff()  # interactive plotting off; figures are only written to disk

# Make the CERES utility packages importable relative to this script.
base = '../'
sys.path.append(base+"utils/Continuum/")
sys.path.append(base+"utils/Correlation/")
sys.path.append(base+"utils/GLOBALutils/")
sys.path.append(base+"utils/OptExtract/")
baryc_dir= base+'utils/SSEphem/'
sys.path.append(baryc_dir)
ephemeris='DEc403'  # JPL ephemeris name (presumably for barycentric work -- confirm)
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

# ceres modules
import dupontutils
import continuum
import correlation
import GLOBALutils
import Marsh

# other useful modules
import argparse
import ephem
import glob
import jplephem
import os
import pickle
from astropy.io import fits as pyfits
import scipy
import scipy.interpolate
from scipy import optimize
from scipy import interpolate
from numpy import radians as rad
import statsmodels.api as sm
lowess = sm.nonparametric.lowess  # shorthand for the LOWESS smoother
# Receive input parameters from the command line.
parser = argparse.ArgumentParser()
parser.add_argument('directorio')  # raw-data directory (positional)
parser.add_argument('-avoid_plot', action="store_true", default=False)
parser.add_argument('-dirout',default='default')
parser.add_argument('-do_class', action="store_true", default=False)
parser.add_argument('-just_extract', action="store_true", default=False)
parser.add_argument('-no_dark_sub', action = "store_true", default = False)
parser.add_argument('-npools', default=1)
parser.add_argument('-oblaze', default = 'same')
parser.add_argument('-ofind', default = 'last')
parser.add_argument('-o2do',default='all')
parser.add_argument('-reffile',default='default')
parser.add_argument('-resolution',default='40000')
args = parser.parse_args()

# Unpack the parsed options into plain script-level variables.
dirin = args.directorio
avoid_plot = args.avoid_plot
dirout = args.dirout
DoClass = args.do_class
JustExtract = args.just_extract
no_dark_substraction = args.no_dark_sub
npools = int(args.npools)
object2do = args.o2do
reffile = args.reffile
stst = args.ofind
stblaze = args.oblaze
resolution = float(args.resolution)
dark_substraction = not no_dark_substraction
# Normalise the input directory and derive the output directory
# ('<input>_red/' by default).
if dirin[-1] != '/':
    dirin = dirin + '/'
if dirout == 'default':
    dirout = dirin[:-1]+'_red/'
if not os.access(dirout,os.F_OK):
    os.system('mkdir '+dirout)
# Start each run with a clean 'proc' products directory.
if os.access(dirout+'proc',os.F_OK):
    os.system('rm -r '+dirout+'proc')
os.system('mkdir '+dirout+'proc')
f_res = open(dirout+'proc/'+'results.txt','w')
if reffile == 'default':
    reffile = dirin+'reffile.txt'
# Only the three supported resolutions are accepted.
if resolution != 40000 and resolution != 50000 and resolution != 60000:
    raise ValueError("Input Resolution not permited. try 40000, 50000 or 60000\n")
####### GLOBAL VARIABLES #####
# 'force_*' flags re-run the corresponding reduction step even when its
# products already exist on disk.
force_pre_process = False
force_bl = False #
force_bkg = False
force_P = False
force_thar_extract = False
force_thar_wavcal = False
force_sci_extract = False
force_sci_proc = True #
force_RV = False #
force_stellar_pars = False
force_corr = False
use_ref = False  # if True, re-use order traces from the reference night below
ref_traces = '/data/echelle/duPont/red/20120630/trace.pkl'
bad_colummn = True  # apply the bad-column repair (dupontutils.b_col)
have_skyF = False
Inverse_m = True
use_cheby = True  # use Chebyshev polynomials in the wavelength solution
MRMS = 200  # NOTE(review): presumably max allowed wavelength-solution rms -- confirm units
trace_degree = 5  # polynomial degree used when tracing the orders
Marsh_alg = 0
ext_aperture = 6  # extraction half-aperture (pixels)
NSigma_Marsh = 5
NCosmic_Marsh = 5
S_Marsh = 0.4
N_Marsh = 3
min_extract_col = 50
max_extract_col = 2000
#npar_wsol = 27 #number of parameters of wavelength solution
ncoef_x = 5
ncoef_m = 6
# number of parameters of the 2D wavelength solution
# (Python-2 integer division is intended here)
npar_wsol = (min(ncoef_x,ncoef_m) + 1) * (2*max(ncoef_x,ncoef_m) - min(ncoef_x,ncoef_m) + 2) / 2
n_last = 55 # last echelle order as defined in the reference wavelength files
ntotal = 64  # total number of echelle orders
oro0 = 36
order_dir = base+'dupont/wavcals/'  # per-order reference ThAr line lists
models_path = base+'data/COELHO_MODELS/R_40000b/'  # stellar model grid for classification
print "\n\n\tEchelle Du Pont 2.5m PIPELINE\n"
print "\tRAW data is in ",dirin
print "\tProducts of reduction will be in",dirout
print '\n'
# file containing the log
log = dirout+'night.log'
biases, milkyflats, skyflats, objects, ThAr_ref, darks = dupontutils.FileClassify(dirin,log)
ThAr_ref = ThAr_ref[:2]
if dark_substraction == True and len(darks)<3:
dark_substraction = False
f = open(log,'r')
lines = f.readlines()
print '\tThese are all the images to proccess:'
for bias in biases:
hd = pyfits.getheader(bias)
print '\tbias', hd['EXPTYPE'], hd['EXPTIME'], hd['UT-DATE'],hd['UT-TIME'],bias
print '\n'
for dark in darks:
hd = pyfits.getheader(dark)
print '\tdark', hd['EXPTYPE'], hd['EXPTIME'], hd['UT-DATE'],hd['UT-TIME'],dark
print '\n'
for milky in milkyflats:
hd = pyfits.getheader(milky)
print '\tmilky', hd['EXPTYPE'], hd['EXPTIME'], hd['UT-DATE'],hd['UT-TIME'],milky
print '\n'
for line in lines:
print '\t'+line[:-1]
if stst == 'last':
if os.access(dirout+'findstar.txt',os.F_OK):
fst = open(dirout+'findstar.txt','r')
stst = fst.readline()
fst.close()
else:
raise ValueError("There is not a previously defined standard star file!!! You have to enter one (i.e. -ofind ccd0001.fits).\n")
else:
fst = open(dirout+'findstar.txt','w')
fst.write(stst+'\n')
fst.close()
if stblaze == 'same':
print '\n\tThe pipeline will use the image that traces the orders to derive the blaze function ...'
stblaze = stst
# Pre-processing is required when any master calibration product (flat,
# traces, master bias) is missing, or when explicitly forced.
if ( (os.access(dirout+'Flat.fits',os.F_OK) == False) or\
     (os.access(dirout+'trace.pkl',os.F_OK) == False) or \
     (os.access(dirout+'MasterBias.fits',os.F_OK) == False) or \
     (force_pre_process) ):
    print "\tNo previous pre-processing files or found"
    pre_process = 1
else:
    print "\tPre-processing files found, going straight to extraction"
    pre_process = 0

if (pre_process == 1):
    print "\n\tGenerating Master calibration frames..."
    # --- Master bias: median-combine all bias frames ---
    MasterBias, RO_bias, GA_bias = dupontutils.MedianCombine(biases, zero_bo=False, dark_bo=False, flat_bo=False)
    hdu = pyfits.PrimaryHDU( MasterBias )
    if (os.access(dirout+'MasterBias.fits',os.F_OK)):
        os.remove(dirout+'MasterBias.fits')
    hdu.writeto(dirout+'MasterBias.fits')
    print "\t\t-> Masterbias: done!"
    # --- Master darks: one combined dark per exposure time ---
    MDARKS = []
    dark_times = []
    if dark_substraction:
        # Collect the distinct dark exposure times.
        for dark in darks:
            hd = pyfits.getheader(dark)
            if len(dark_times) == 0:
                dark_times.append(hd['EXPTIME'])
            else:
                if dark_times.count(hd['EXPTIME']) == 0:
                    dark_times.append(hd['EXPTIME'])
        # Group the darks by exposure time; only groups with more than
        # two frames are kept (a median needs >2 frames to be useful).
        dark_groups = []
        ndark_times = []
        for time in dark_times:
            group = []
            for dark in darks:
                hd = pyfits.getheader(dark)
                if hd['EXPTIME'] == time:
                    group.append(dark)
            if len(group)>2:
                dark_groups.append(group)
                ndark_times.append(hd['EXPTIME'])
        dark_times = ndark_times
        # Median-combine each group into a bias-subtracted master dark
        # and write it to disk.
        i = 0
        while i < len(dark_times):
            DARK, RON, GAIN = dupontutils.MedianCombine(dark_groups[i], zero_bo=True, zero=dirout+'MasterBias.fits',dark_bo=False)
            hdu = pyfits.PrimaryHDU( DARK )
            hdu = GLOBALutils.update_header(hdu,'EXPTIME',dark_times[i])
            if os.access(dirout+'DARK_'+str(int(dark_times[i]))+'s.fits',os.F_OK):
                os.remove(dirout+'DARK_'+str(int(dark_times[i]))+'s.fits')
            hdu.writeto( dirout+'DARK_'+str(int(dark_times[i]))+'s.fits' )
            MDARKS.append(dirout+'DARK_'+str(int(dark_times[i]))+'s.fits')
            i+=1
        print "\t\t-> Masterdarks: done!"
    # --- Master flat from the milky flats ---
    force_flat_comb = True
    if force_flat_comb or os.access(dirout+'Flat.fits',os.F_OK) == False:
        Flat, RON, GAIN = dupontutils.milk_comb(milkyflats, MDARKS, zero=dirout+'MasterBias.fits')
        hdu = pyfits.PrimaryHDU( Flat )
        hdu = GLOBALutils.update_header(hdu,'RON',RON)
        hdu = GLOBALutils.update_header(hdu,'GAIN',GAIN)
        if (os.access(dirout+'Flat_or.fits',os.F_OK)):
            os.remove(dirout+'Flat_or.fits')
        hdu.writeto(dirout+'Flat_or.fits')
        # Divide out the low-frequency shape (15x15 median filter) so the
        # stored flat keeps only the pixel-to-pixel response.
        Flat = Flat/scipy.signal.medfilt(Flat,[15,15])
        hdu = pyfits.PrimaryHDU( Flat )
        hdu = GLOBALutils.update_header(hdu,'RON',RON)
        hdu = GLOBALutils.update_header(hdu,'GAIN',GAIN)
        if (os.access(dirout+'Flat.fits',os.F_OK)):
            os.remove(dirout+'Flat.fits')
        hdu.writeto(dirout+'Flat.fits')
    else:
        h = pyfits.open(dirout+'Flat.fits')
        Flat = h[0].data
        RON = h[0].header['RON']
        GAIN = h[0].header['GAIN']
    print "\t\t-> Masterflat: done!"
    ftra = True
    median_filter = False
    # Find orders & traces
    print "\tTracing echelle orders..."
    if ftra or os.access(dirout+'trace.pkl',os.F_OK) == False:
        # Trace on the chosen standard-star frame after overscan trim,
        # bias subtraction and flat-fielding.
        h = pyfits.open(dirin+stst)[0]
        hth = pyfits.getheader(dirin+stst)
        d = h.data
        d = dupontutils.OverscanTrim(d,hth['BIASSEC'])
        d -= MasterBias
        d /= Flat
        if bad_colummn:
            d = dupontutils.b_col(d)
        d = d[:1420,:]
        if use_ref:
            # Re-use a reference night's traces, shifted to this frame.
            c_comp = pickle.load( open( ref_traces, 'r' ) )['c_all']
            nord = pickle.load( open( ref_traces, 'r' ) )['nord']
            lim = pickle.load( open( ref_traces, 'r' ) )['lims']
            c_all,pshift = GLOBALutils.retrace( d, c_comp )
        else:
            c_all, nord = GLOBALutils.get_them(d,5,trace_degree,mode=1)
            print '\t\t'+str(nord)+' orders found...'
    else:
        trace_dict = pickle.load( open( dirout+"trace.pkl", 'r' ) )
        c_all = trace_dict['c_all']
        nord = trace_dict['nord']
        GAIN = trace_dict['GA_ob']
        RON = trace_dict['RO_ob']
    # Persist traces plus gain/read-noise and master-dark bookkeeping.
    trace_dict = {'c_all':c_all, 'nord':nord, 'GA_ob': GAIN, 'RO_ob': RON, 'DARKS':MDARKS, 'dtimes':dark_times}
    pickle.dump( trace_dict, open( dirout+"trace.pkl", 'w' ) )
else:
    # Pre-processing products exist: reload everything from disk.
    trace_dict = pickle.load( open( dirout+"trace.pkl", 'r' ) )
    c_all = trace_dict['c_all']
    nord = trace_dict['nord']
    GAIN = trace_dict['GA_ob']
    RON = trace_dict['RO_ob']
    h = pyfits.open(dirout+'MasterBias.fits')
    MasterBias = h[0].data
    MDARKS = trace_dict['DARKS']
    dark_times = trace_dict['dtimes']
    h = pyfits.open(dirout+'Flat.fits')
    Flat = h[0].data
print '\n\tExtraction of ThAr calibration frames:'
# Simple-extract every ThAr lamp frame (unless already extracted) so the
# wavelength solution can be computed in the next section.
for fsim in ThAr_ref:
    hth = pyfits.getheader(fsim)
    thmjd,mjd0 = dupontutils.mjd_fromheader(hth)
    dth = pyfits.getdata(fsim)
    dth = dupontutils.OverscanTrim(dth,hth['BIASSEC'])
    dth -= MasterBias
    dth /= Flat
    # Output file name encodes the observation date and UT time.
    thar_fits_simple = dirout+'ThAr_'+hth['DATE-OBS']+'_'+hth['UT-TIME'][:2]+ \
        '-'+hth['UT-TIME'][3:5]+'-'+hth['UT-TIME'][6:]+'.spec.simple.fits'
    if ( os.access(thar_fits_simple,os.F_OK) == False ) or (force_thar_extract):
        print "\t\tNo previous extraction or extraction forced for ThAr file", fsim, "extracting..."
        # Re-trace on the closest science frame (by time/pointing --
        # see dupontutils.get_close) so the lamp is extracted with
        # up-to-date order positions.
        close_spec = dupontutils.get_close(thmjd,hth['RA-D'],hth['DEC-D'],objects)
        temp_spec = pyfits.getdata(close_spec)
        temp_hd = pyfits.getheader(close_spec)
        temp_spec = dupontutils.OverscanTrim(temp_spec,temp_hd['BIASSEC'])
        temp_spec -= MasterBias
        if bad_colummn:
            temp_spec = dupontutils.b_col(temp_spec)
        temp_spec = temp_spec/Flat
        c_alls, pshift = GLOBALutils.retrace( temp_spec, c_all )
        thar_Ss = GLOBALutils.simple_extraction(dth,c_alls,ext_aperture,min_extract_col,max_extract_col,npools)
        # reverse the order axis -- presumably to match the reference
        # order numbering (TODO confirm)
        thar_Ss = thar_Ss[::-1]
        if (os.access(thar_fits_simple,os.F_OK)):
            os.remove( thar_fits_simple )
        hdu = pyfits.PrimaryHDU( thar_Ss )
        hdu.writeto( thar_fits_simple )
    else:
        print "\t\tThAr file", fsim, "all ready extracted, loading..."
print "\n\tWavelength solution of ThAr calibration spectra:"
# compute wavelength calibration files
thtimes = []
thnames = []
bad_thars = []
thRA = []
thDEC = []
ntr = 0
counter = 0
for thar in ThAr_ref:
if ntr > 0:
force_corr = False
hth = pyfits.getheader(thar)
thmjd,mjd0 = dupontutils.mjd_fromheader(hth)
thar_fits_simple = dirout+'ThAr_'+hth['DATE-OBS']+'_'+hth['UT-TIME'][:2]+'-'+\
hth['UT-TIME'][3:5]+'-'+hth['UT-TIME'][6:]+'.spec.simple.fits'
wavsol_pkl = dirout+'ThAr' +hth['DATE-OBS']+'_'+hth['UT-TIME'][:2]+'-'+\
hth['UT-TIME'][3:5]+'-'+hth['UT-TIME'][6:]+'.wavsolpars.pkl'
thar_Ss = pyfits.getdata(thar_fits_simple)
if ( os.access(wavsol_pkl,os.F_OK) == False ) or (force_thar_wavcal):
print " \t\tWorking on ThAr file", thar
RON = hth['ENOISE']
GAIN = hth['EGAIN']
lines_thar = thar_Ss[:,:]
if os.access(dirout+'id_orders.pkl',os.F_OK) == False or force_corr:
maxes = 0
or32 = 0
for order in range(len(lines_thar)):
ccf_max, shift = GLOBALutils.cor_thar(lines_thar[order],filename=order_dir+'order_32o.iwdat',span=50)
if ccf_max > maxes:
maxes = ccf_max
rough_shift = shift
or32 = order
or0 = or32 - 32
if or0 >= 0:
orwa = 0
else:
orwa = - or0
or0 = 0
#print 'n_lasts',n_last,or0 + nord
if n_last > or0 + nord:
n_last = or0 + nord
if or32-32 >0:
nn = n_last + 1
else:
nn = n_last + 1 + or32 -32
#print 'NN',nn
if os.access(dirout+'id_orders.pkl',os.F_OK):
os.remove(dirout+'id_orders.pkl')
pdict = {'or0':or0, 'orwa':orwa, 'n_last':n_last, 'rough_shift':rough_shift,'nn':nn}
pickle.dump( pdict, open(dirout+'id_orders.pkl', 'w' ) )
ntr += 1
else:
pdict = pickle.load(open(dirout+'id_orders.pkl', 'r'))
or0 = pdict['or0']
orwa = pdict['orwa']
n_last = pdict['n_last']
nn = pdict['nn']
rough_shift = pdict['rough_shift']
iv_thar = 1/((lines_thar/GAIN) + (RON**2/GAIN**2))
All_Pixel_Centers = np.array([])
All_Wavelengths = np.array([])
All_Orders = np.array([])
All_Centroids = np.array([])
All_Sigmas = np.array([])
All_Intensities = np.array([])
All_Residuals = np.array([])
All_Sigmas = np.array([])
orre = or0
order = orwa
OK = []
OW = []
while order < n_last:
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar[orre,:]
IV = iv_thar[orre,:]
wei = np.sqrt( IV )
bkg = GLOBALutils.Lines_mBack(thar_order_orig, IV, thres_rel=3, line_w=10)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms,\
residuals, centroids, sigmas, intensities = GLOBALutils.Initial_Wav_Calibration( \
order_dir+'order_'+order_s+'o.iwdat', thar_order, order, wei, rmsmax=1000, \
minlines=10, FixEnds=False, Dump_Argon=False, Dump_AllLines=True, Cheby=use_cheby, \
rough_shift = rough_shift)
fwhms_lns = sigmas*2.355
inis_lns = pixel_centers - fwhms_lns*0.5
fins_lns = pixel_centers + fwhms_lns*0.5
inis_wvs = GLOBALutils.Cheby_eval(coeffs_pix2wav,inis_lns,float(len(thar_order)))
fins_wvs = GLOBALutils.Cheby_eval(coeffs_pix2wav,fins_lns,float(len(thar_order)))
fwhms_wvs = inis_wvs - fins_wvs
resolution2 = wavelengths / fwhms_wvs
if wavelengths.max() > 5500 and wavelengths.min()<5500:
print "\t\t\tmedian Resolution of order", order, '=', np.around(np.median(resolution2))
if (order == 32):
if (use_cheby):
Global_ZP = GLOBALutils.Cheby_eval( coeffs_pix2wav, 1023, len(thar_order) )
else:
Global_ZP = scipy.polyval( coeffs_pix2wav, 0.0 )
All_Pixel_Centers = np.append( All_Pixel_Centers, pixel_centers )
All_Wavelengths = np.append( All_Wavelengths, wavelengths )
All_Orders = | |
be in [msc, nx, radioss, optistruct]")
def saves(self):
    """Return this object serialized to a pickle byte string."""
    pickled = dumps(self)
    return pickled
def __getstate__(self):
    """clears out a few variables in order to pickle the object"""
    # Copy the object's state from self.__dict__ which contains
    # all our instance attributes. Always use the dict.copy()
    # method to avoid modifying the original state.
    state = self.__dict__.copy()
    # Remove the unpicklable entries (the logger holds handles/locks).
    del state['log']
    #if hasattr(self, '_card_parser_b'):
        #del state['_card_parser_b']
    #if hasattr(self, '_card_parser_prepare'):
        #del state['_card_parser_prepare']

    # this block let's us identify the objects that are problematic
    # we just play with the value of i to delete all objects past
    # some threshold.  Once we find where the code breaks, we dig
    # into the objects further
    if 0:  # pragma: no cover
        # debugging scaffolding, intentionally disabled
        i = 0
        for key, value in sorted(state.items()):
            if isinstance(value, dict) and len(value) == 0:
                continue
            #if not isinstance(value, (str, int, float)):
            if i > 100:
                #print('deleting', key)
                del state[key]
            else:
                #print('***', key, value)
                i += 1
            #else:
                #print(key, type(value), value)
                #break
            #i += 1
    return state
def save(self, obj_filename='model.obj', unxref=True):
    # type: (str, bool) -> None
    """Saves a pickleable object"""
    # NOTE(review): unxref is currently unused in this method -- confirm
    # whether callers are expected to cross-reference beforehand.
    #del self.log
    #del self._card_parser, self._card_parser_prepare
    # These attributes are dropped before dumping -- presumably they are
    # unpicklable or not worth persisting (TODO confirm).
    del self.generalized_tables
    del self.op2_reader
    #print(object_attributes(self, mode="all", keys_to_skip=[]))
    with open(obj_filename, 'wb') as obj_file:
        dump(self, obj_file)
def load(self, obj_filename='model.obj'):
    # type: (str) -> None
    """Loads a pickleable object"""
    with open(obj_filename, 'rb') as obj_file:
        obj = load(obj_file)

    # NOTE(review): these result-matrix attributes are deliberately not
    # copied over -- presumably rebuilt elsewhere; confirm.
    keys_to_skip = [
        'total_effective_mass_matrix',
        'effective_mass_matrix',
        'rigid_body_mass_matrix',
        'modal_effective_mass_fraction',
        'modal_participation_factors',
        'modal_effective_mass',
        'modal_effective_weight',
    ]
    # Copy every remaining public attribute from the unpickled object
    # onto this instance (dunders are skipped).
    for key in object_attributes(self, mode="all", keys_to_skip=keys_to_skip):
        if key.startswith('__') and key.endswith('__'):
            continue
        val = getattr(obj, key)
        #print(key)
        #if isinstance(val, types.FunctionType):
            #continue
        try:
            setattr(self, key, val)
        except AttributeError:
            # properties/read-only attributes cannot be set; surface which
            # one failed before re-raising
            print('key=%r val=%s' % (key, val))
            raise
    #self.case_control_deck = CaseControlDeck(self.case_control_lines, log=self.log)
    self.log.debug('done loading!')
#def _set_ask_vectorized(self, ask=False):
#"""
#Enables vectorization
#The code will degenerate to dictionary based results when
#a result does not support vectorization.
#Vectorization is always True here.
#Parameters
#----------
#ask: bool
#Do you want to see a GUI of result types.
#+--------+---------------+---------+------------+
#| Case # | Vectorization | Ask | Read Modes |
#+========+===============+=========+============+
#| 1 | True | True | 1, 2 |
#+--------+---------------+---------+------------+
#| 2 | True | False | 1, 2 |
#+--------+---------------+---------+------------+
#| 3 | False | True | 1, 2 |
#+--------+---------------+---------+------------+
#| 4 | False | False | 0 |
#+--------+---------------+---------+------------+
#Definitions
#===========
#Vectorization - A storage structure that allows for faster read/access
#speeds and better memory usage, but comes with a more
#difficult to use data structure.
#It limits the node IDs to all be integers (e.g. element
#centroid). Composite plate elements (even for just CTRIA3s)
#with an inconsistent number of layers will have a more
#difficult data structure.
#Scanning - a quick check used to figure out how many results to process
#that takes almost no time
#Reading - process the op2 data
#Build - call the __init__ on a results object (e.g. RealDisplacementArray)
#Start Over - Go to the start of the op2 file
#Ask - launch a GUI dialog to let the user click which results to load
#Read Mode Definitions
#=====================
#0. The default OP2 dictionary based-approach with no asking GUI (removed)
#1. The first read of a result to get the shape of the data
#2. The second read of a result to get the results
#Cases
#======
#1. Scan the block to get the size, build the object (read_mode=1),
#ask the user, start over, fill the objects (read_mode=2).
#Degenerate to read_mode=0 when read_mode=2 cannot be used based
#upon the value of ask.
#2. Same as case #1, but don't ask the user.
#Scan the block to get the size, build the object (read_mode=1),
#start over, fill the objects (read_mode=2).
#3. Scan the block to get the object types (read_mode=1), ask the user,
#build the object & fill it (read_mode=2)
#4. Read the block to get the size, build the object & fill it (read_mode=0; removed)
#"""
#self.ask = ask
def read_op2(self, op2_filename=None, combine=True, build_dataframe=None,
             skip_undefined_matrices=False, encoding=None):
    """
    Starts the OP2 file reading

    Parameters
    ----------
    op2_filename : str (default=None -> popup)
        the op2_filename
    combine : bool; default=True
        True : objects are isubcase based
        False : objects are (isubcase, subtitle) based;
                will be used for superelements regardless of the option
    build_dataframe : bool (default=None -> True if in iPython, False otherwise)
        builds a pandas DataFrame for op2 objects
    skip_undefined_matrices : bool; default=False
         True : prevents matrix reading crashes
    encoding : str
        the unicode encoding (default=None; system default)
    """
    # Resolve the DataFrame default: only auto-enable inside iPython.
    if build_dataframe is None:
        build_dataframe = False
        if ipython_info():
            build_dataframe = True
    if encoding is None:
        encoding = sys.getdefaultencoding()
    self.encoding = encoding
    self.skip_undefined_matrices = skip_undefined_matrices
    assert self.ask in [True, False], self.ask
    self.is_vectorized = True
    self.log.debug('combine=%s' % combine)

    # Two-pass read: pass 1 sizes the arrays, pass 2 fills them.
    self.log.debug('-------- reading op2 with read_mode=1 (array sizing) --------')
    self.read_mode = 1
    self._close_op2 = False  # keep the file open between the two passes

    # get GUI object names, build objects, but don't read data
    OP2_Scalar.read_op2(self, op2_filename=op2_filename)

    # TODO: stuff to figure out objects
    # TODO: stuff to show gui of table names
    # TODO: clear out objects the user doesn't want
    self.read_mode = 2
    self._close_op2 = True
    self.log.debug('-------- reading op2 with read_mode=2 (array filling) --------')
    OP2_Scalar.read_op2(self, op2_filename=self.op2_filename)

    self._finalize()
    if build_dataframe:
        self.build_dataframe()
    self.create_objects_from_matrices()
    self.combine_results(combine=combine)
    self.log.debug('finished reading op2')
def create_objects_from_matrices(self):
    """
    Builds higher-level monitor-point objects out of raw OP2 matrices:
     - monitor3 : MONPNT3 object from the MP3F matrix
     - monitor1 : MONPNT1 object from the PMRF, PERF, PFRF, AGRF, PGRF, AFRF matrices
    """
    #assert len(self._frequencies) > 0, self._frequencies
    matrices = self.matrices
    if 'MP3F' in matrices:
        self.monitor3 = MONPNT3(self._frequencies, matrices['MP3F'])

    # NOTE(original author): this grouping is totally wrong... it doesn't
    # go by component; it goes by inertial, external, flexibility, etc.
    if 'PERF' in matrices:
        matrix_names = ['PMRF', 'PERF', 'PFRF', 'AGRF', 'PGRF', 'AFRF', ]
        self.monitor1 = MONPNT1(self._frequencies, matrices, matrix_names)
def _finalize(self):
    """Runs ``finalize()`` on every result object that defines it, then
    drops the struct caches.  Raises RuntimeError for a SORT2 result
    object that never implemented finalize."""
    for table_type in self.get_table_types():
        table_result = self.get_result(table_type)
        for res_obj in table_result.values():
            if hasattr(res_obj, 'finalize'):
                res_obj.finalize()
            elif hasattr(res_obj, 'tCode') and not res_obj.is_sort1:
                msg = 'object has not implemented finalize\n%s' % (
                    ''.join(res_obj.get_stats()))
                raise RuntimeError(msg)
    self.del_structs()
def build_dataframe(self):
    """
    Converts the OP2 objects into pandas DataFrames.

    Each matrix/result object that supports it gains a ``.dataframe``
    attribute; unsupported matrices raise NotImplementedError and
    SORT2 results are skipped with a warning.

    .. todo:: fix issues with:
       - RealDisplacementArray
       - RealPlateStressArray (???)
       - RealPlateStrainArray (???)
       - RealCompositePlateStrainArray (???)
    """
    # These eigenvalue tables have no SORT1/SORT2 distinction; they get
    # their own code path below.
    no_sort2_classes = ['RealEigenvalues', 'ComplexEigenvalues', 'BucklingEigenvalues']
    result_types = self.get_table_types()

    if len(self.matrices):
        for key, matrix in sorted(self.matrices.items()):
            if hasattr(matrix, 'build_dataframe'):
                matrix.build_dataframe()
            else:
                # NOTE(review): this warns and then raises, so the warning
                # is the last thing logged before the crash; the commented
                # 'continue' suggests skipping was once intended.
                self.log.warning('pandas: build_dataframe is not supported for key=%s type=%s' % (key, str(type(matrix))))
                raise NotImplementedError()
                #continue

    for result_type in result_types:
        result = self.get_result(result_type)
        for obj in result.values():
            class_name = obj.__class__.__name__
            #print('working on %s' % class_name)

            # smoke-test the introspection helpers; results are discarded,
            # so these calls only matter if they raise
            obj.object_attributes()
            obj.object_methods()

            if class_name in no_sort2_classes:
                try:
                    obj.build_dataframe()
                    obj.object_methods()
                except MemoryError:
                    raise
                # was a bare 'except:'; narrowed so Ctrl-C/SystemExit
                # still propagate without the misleading error log
                except Exception:
                    self.log.error(obj)
                    self.log.error('build_dataframe is broken for %s' % class_name)
                    raise
                continue
            if obj.is_sort2:
                #self.log.warning(obj)
                self.log.warning('build_dataframe is not supported for %s - SORT2' % class_name)
                continue
            try:
                obj.build_dataframe()
            except MemoryError:
                raise
            except NotImplementedError:
                self.log.warning(obj)
                self.log.warning('build_dataframe is broken for %s' % class_name)
                raise
            # was a bare 'except:'; see note above
            except Exception:
                self.log.error(obj)
                self.log.error('build_dataframe is broken for %s' % class_name)
                raise
def load_hdf5(self, hdf5_filename, combine=True):
    """loads an h5 file into an OP2 object"""
    check_path(hdf5_filename, 'hdf5_filename')
    from pyNastran.op2.op2_interface.hdf5_interface import load_op2_from_hdf5_file
    import h5py

    self.op2_filename = hdf5_filename
    self.log.info('hdf5_op2_filename = %r' % hdf5_filename)
    # read-only open; debug output is disabled
    with h5py.File(hdf5_filename, 'r') as h5_file:
        load_op2_from_hdf5_file(self, h5_file, self.log, debug=False)
    self.combine_results(combine=combine)
def export_to_hdf5(self, hdf5_filename):
    """
    Converts the OP2 objects into an hdf5 file.

    Parameters
    ----------
    hdf5_filename : str
        the path of the HDF5 file to write

    TODO: doesn't support:
      - matrices
      - BucklingEigenvalues
    """
    from pyNastran.op2.op2_interface.hdf5_interface import export_op2_to_hdf5_file
    export_op2_to_hdf5_file(hdf5_filename, self)
def combine_results(self, combine=True):
"""
we want the data to be in the same format and grouped by subcase, so
we take
.. code-block:: python
stress = {
# isubcase, analysis_code, sort_method, count, superelement_adaptivity_index, pval_step
(1, 2, 1, 0, 'SUPERELEMENT 0', '') : result1,
(1, 2, 1, 0, 'SUPERELEMENT 10', '') : result2,
(1, 2, 1, 0, 'SUPERELEMENT 20', '') : result3,
(2, 2, 1, 0, 'SUPERELEMENT 0', '') : result4,
}
and convert it to:
.. code-block:: | |
params[name] = []
params[name].append((token.section_number, token))
for name, parts in params.items():
parts = sorted(parts, key=itemgetter(0))
first_param = parts[0][1]
charset = first_param.charset
if not first_param.extended and len(parts) > 1:
if parts[1][0] == 0:
parts[1][1].defects.append(errors.InvalidHeaderDefect(
'duplicate parameter name; duplicate(s) ignored'))
parts = parts[:1]
value_parts = []
i = 0
for section_number, param in parts:
if section_number != i:
if not param.extended:
param.defects.append(errors.InvalidHeaderDefect(
'duplicate parameter name; duplicate ignored'))
continue
else:
param.defects.append(errors.InvalidHeaderDefect(
'inconsistent RFC2231 parameter numbering'))
i += 1
value = param.param_value
if param.extended:
try:
value = urllib.parse.unquote_to_bytes(value)
except UnicodeEncodeError:
value = urllib.parse.unquote(value, encoding='latin-1')
else:
try:
value = value.decode(charset, 'surrogateescape')
except LookupError:
value = value.decode('us-ascii', 'surrogateescape')
if utils._has_surrogates(value):
param.defects.append(errors.
UndecodableBytesDefect())
value_parts.append(value)
value = ''.join(value_parts)
yield name, value
def __str__(self):
    """Render the parameters as '; '-separated ``name`` or
    ``name=value`` pairs, prefixed by one space when non-empty."""
    rendered = [
        '{}={}'.format(name, quote_string(value)) if value else name
        for name, value in self.params
    ]
    joined = '; '.join(rendered)
    if joined:
        return ' ' + joined
    return ''
class ParameterizedHeaderValue(TokenList):
    """Base for header values that carry MIME parameters
    (Content-Type, Content-Disposition)."""

    @property
    def params(self):
        # The mime-parameters token, if any, sits at/near the end of the
        # token list, so scan backwards; no parameters -> empty dict.
        for token in reversed(self):
            if token.token_type == 'mime-parameters':
                return token.params
        return {}

    @property
    def parts(self):
        # Expose the mime-parameters' own subtokens as top-level parts;
        # otherwise defer to the inherited TokenList.parts.
        if self and self[-1].token_type == 'mime-parameters':
            return TokenList(self[:-1] + self[-1])
        return TokenList(self).parts
class ContentType(ParameterizedHeaderValue):
    """Parsed value of a Content-Type header.

    maintype/subtype default to text/plain; presumably the parser
    overwrites them when the header names an explicit media type
    (the parser is outside this view -- confirm there).
    """
    token_type = 'content-type'
    maintype = 'text'
    subtype = 'plain'
class ContentDisposition(ParameterizedHeaderValue):
    """Parsed value of a Content-Disposition header.

    content_disposition is None until set; presumably the parser fills
    in the disposition token (confirm against the parser).
    """
    token_type = 'content-disposition'
    content_disposition = None
class ContentTransferEncoding(TokenList):
    """Parsed value of a Content-Transfer-Encoding header.

    cte defaults to '7bit', the RFC 2045 default encoding.
    """
    token_type = 'content-transfer-encoding'
    cte = '7bit'
class HeaderLabel(TokenList):
    """Token list for the header-name portion of a header line."""
    token_type = 'header-label'
class Header(TokenList):
    """A whole header line: a header-label, optional leading cfws, and
    the value token list."""
    token_type = 'header'

    def _fold(self, folded):
        """Fold this header into *folded*.

        NOTE: destructively pops this token list: first the label, then
        (if present) a leading cfws token, then the value; anything left
        after that means the list was malformed.
        """
        # the label itself is never folded
        folded.append(str(self.pop(0)))
        folded.lastlen = len(folded.current[0])
        # the condition inspects self[0] *before* popping, so the cfws is
        # consumed only when it is actually there
        folded.stickyspace = str(self.pop(0)) if self[0].token_type == 'cfws' else ''
        rest = self.pop(0)
        if self:
            raise ValueError('Malformed Header token list')
        # delegate folding of the value to its own _fold
        rest._fold(folded)
class Terminal(str):
    """A leaf token: a plain string tagged with a ``token_type`` and
    carrying its own ``defects`` list."""

    def __new__(cls, value, token_type):
        inst = super().__new__(cls, value)
        inst.token_type = token_type
        inst.defects = []
        return inst

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, super().__repr__())

    def __getnewargs__(self):
        # pickle support: rebuild via __new__(value, token_type)
        return str(self), self.token_type

    @property
    def all_defects(self):
        # shallow copy so callers cannot mutate our list
        return list(self.defects)

    def _pp(self, indent=''):
        defects_part = '' if not self.defects else ' {}'.format(self.defects)
        return ['{}{}/{}({}){}'.format(
            indent,
            self.__class__.__name__,
            self.token_type,
            super().__repr__(),
            defects_part)]

    def cte_encode(self, charset, policy):
        # pure-ASCII text passes through untouched; anything else becomes
        # an encoded word
        text = str(self)
        try:
            text.encode('us-ascii')
        except UnicodeEncodeError:
            return _ew.encode(text, charset)
        return text

    def pop_trailing_ws(self):
        return None

    def pop_leading_fws(self):
        return None

    @property
    def comments(self):
        return []

    def has_leading_comment(self):
        return False
class WhiteSpaceTerminal(Terminal):
    """A Terminal for whitespace runs; its semantic ``value`` is always a
    single space, regardless of the source characters."""

    @property
    def value(self):
        return ' '

    def startswith_fws(self):
        return True

    # whitespace is foldable by definition
    has_fws = True
class ValueTerminal(Terminal):
    """A Terminal for non-whitespace literal text; its ``value`` is the
    text itself."""

    @property
    def value(self):
        return self

    def startswith_fws(self):
        return False

    has_fws = False

    def as_encoded_word(self, charset):
        # render this text as an RFC 2047 encoded word
        return _ew.encode(str(self), charset)
class EWWhiteSpaceTerminal(WhiteSpaceTerminal):
    """Whitespace between two adjacent encoded words.

    Such whitespace is semantically invisible (RFC 2047 drops it when
    decoding), so the token stringifies to '' while ``encoded`` keeps
    the original characters.
    """

    @property
    def value(self):
        return ''

    @property
    def encoded(self):
        # the literal whitespace as it appeared in the source
        return self[:]

    def __str__(self):
        return ''

    has_fws = True
# Singleton terminal tokens reused by the parsing routines below.
DOT = ValueTerminal('.', 'dot')
ListSeparator = ValueTerminal(',', 'list-separator')
RouteComponentMarker = ValueTerminal('@', 'route-component-marker')

# Pre-compiled scanners.  Each *_matcher consumes a maximal run of
# characters NOT in the given end-character set; '\' and ']' are escaped
# so they survive inside the regex character class.
_wsp_splitter = re.compile('([{}]+)'.format(''.join(WSP))).split
_non_atom_end_matcher = re.compile('[^{}]+'.format(''.join(ATOM_ENDS).
    replace('\\', '\\\\').replace(']', '\\]'))).match
# finds ASCII control characters and DEL (non-printables)
_non_printable_finder = re.compile('[\\x00-\\x20\\x7F]').findall
_non_token_end_matcher = re.compile('[^{}]+'.format(''.join(TOKEN_ENDS).
    replace('\\', '\\\\').replace(']', '\\]'))).match
_non_attribute_end_matcher = re.compile('[^{}]+'.format(''.join(
    ATTRIBUTE_ENDS).replace('\\', '\\\\').replace(']', '\\]'))).match
_non_extended_attribute_end_matcher = re.compile('[^{}]+'.format(''.join(
    EXTENDED_ATTRIBUTE_ENDS).replace('\\', '\\\\').replace(']', '\\]'))).match
def _validate_xtext(xtext):
    """If input token contains ASCII non-printables or surrogates,
    register the corresponding defect(s) on the token."""
    bad_chars = _non_printable_finder(xtext)
    if bad_chars:
        xtext.defects.append(errors.NonPrintableDefect(bad_chars))
    if utils._has_surrogates(xtext):
        xtext.defects.append(
            errors.UndecodableBytesDefect(
                'Non-ASCII characters found in header token'))
def _get_ptext_to_endchars(value, endchars):
    """Scan printables/quoted-pairs until endchars and return unquoted ptext.

    This function turns a run of qcontent, ccontent-without-comments, or
    dtext-with-quoted-printables into a single string by unquoting any
    quoted printables.  It returns the string, the remaining value, and
    a flag that is True iff there were any quoted printables decoded.
    """
    fragment, *remainder = _wsp_splitter(value, 1)
    vchars = []
    escape = False
    had_qp = False
    # BUGFIX: the loop's else-clause also runs when fragment is empty,
    # which previously read an unbound 'pos' (UnboundLocalError on
    # value == ''); seed it so the else-clause yields pos == 0 then.
    pos = -1
    for pos in range(len(fragment)):
        if fragment[pos] == '\\':
            if escape:
                # '\\\\' -> a literal backslash (falls through and is
                # appended below)
                escape = False
                had_qp = True
            else:
                escape = True
                continue
        if escape:
            # any escaped character is taken literally
            escape = False
        elif fragment[pos] in endchars:
            # stop *at* the end character; it stays in the remainder
            break
        vchars.append(fragment[pos])
    else:
        # ran off the end without hitting an endchar: consume everything
        pos = pos + 1
    return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp
def get_fws(value):
    """FWS = 1*WSP

    This isn't the RFC definition.  We're using fws to represent tokens
    where folding can be done, but when we are parsing the *un*folding has
    already been done so we don't need to watch out for CRLF.
    """
    stripped = value.lstrip()
    ws_len = len(value) - len(stripped)
    fws = WhiteSpaceTerminal(value[:ws_len], 'fws')
    return fws, stripped
def get_encoded_word(value):
    """ encoded-word = "=?" charset "?" encoding "?" encoded-text "?="

    Parse one RFC 2047 encoded word from the front of *value*; return the
    EncodedWord token and the unconsumed remainder.  Raises
    HeaderParseError when *value* does not start with a complete
    encoded word.
    """
    ew = EncodedWord()
    if not value.startswith('=?'):
        raise errors.HeaderParseError('expected encoded word but found {}'.
            format(value))
    # split off everything up to the first '?=' terminator
    tok, *remainder = value[2:].split('?=', 1)
    if tok == value[2:]:
        # no '?=' terminator at all
        raise errors.HeaderParseError('expected encoded word but found {}'.
            format(value))
    remstr = ''.join(remainder)
    if len(remstr) > 1 and remstr[0] in hexdigits and remstr[1] in hexdigits:
        # the '?=' we split on is apparently part of a quoted-printable
        # '=XX' escape, not the terminator; re-join and split on the
        # next '?='
        rest, *remainder = remstr.split('?=', 1)
        tok = tok + '?=' + rest
    if len(tok.split()) > 1:
        ew.defects.append(errors.InvalidHeaderDefect(
            'whitespace inside encoded word'))
    # keep the raw source (cte) for error reporting before consuming it
    ew.cte = value
    value = ''.join(remainder)
    try:
        text, charset, lang, defects = _ew.decode('=?' + tok + '?=')
    except ValueError:
        raise errors.HeaderParseError("encoded word format invalid: '{}'".
            format(ew.cte))
    ew.charset = charset
    ew.lang = lang
    ew.defects.extend(defects)
    # re-tokenize the decoded text into fws / vtext children
    while text:
        if text[0] in WSP:
            token, text = get_fws(text)
            ew.append(token)
            continue
        chars, *remainder = _wsp_splitter(text, 1)
        vtext = ValueTerminal(chars, 'vtext')
        _validate_xtext(vtext)
        ew.append(vtext)
        text = ''.join(remainder)
    return ew, value
def get_unstructured(value):
    """unstructured = (*([FWS] vchar) *WSP) / obs-unstruct
       obs-unstruct = *((*LF *CR *(obs-utext) *LF *CR)) / FWS)
       obs-utext = %d0 / obs-NO-WS-CTL / LF / CR

    obs-NO-WS-CTL is control characters except WSP/CR/LF.

    So, basically, we have printable runs, plus control characters or nulls in
    the obsolete syntax, separated by whitespace.  Since RFC 2047 uses the
    obsolete syntax in its specification, but requires whitespace on either
    side of the encoded words, I can see no reason to need to separate the
    non-printable-non-whitespace from the printable runs if they occur, so we
    parse this into xtext tokens separated by WSP tokens.

    Because an 'unstructured' value must by definition constitute the entire
    value, this 'get' routine does not return a remaining value, only the
    parsed TokenList.
    """
    unstructured = UnstructuredTokenList()
    while value:
        if value[0] in WSP:
            token, value = get_fws(value)
            unstructured.append(token)
            continue
        if value.startswith('=?'):
            try:
                token, value = get_encoded_word(value)
            except errors.HeaderParseError:
                # not actually an encoded word; fall through and treat
                # the '=?' as ordinary text
                pass
            else:
                have_ws = True
                if len(unstructured) > 0:
                    if unstructured[-1].token_type != 'fws':
                        # RFC 2047 requires whitespace before an
                        # encoded word
                        unstructured.defects.append(
                            errors.InvalidHeaderDefect(
                                'missing whitespace before encoded word'))
                        have_ws = False
                if have_ws and len(unstructured) > 1:
                    if unstructured[-2].token_type == 'encoded-word':
                        # whitespace between two encoded words is
                        # semantically invisible; retag it so it renders
                        # as '' (see EWWhiteSpaceTerminal)
                        unstructured[-1] = EWWhiteSpaceTerminal(
                            unstructured[-1], 'fws')
                unstructured.append(token)
                continue
        # a run of ordinary non-whitespace text
        tok, *remainder = _wsp_splitter(value, 1)
        vtext = ValueTerminal(tok, 'vtext')
        _validate_xtext(vtext)
        unstructured.append(vtext)
        value = ''.join(remainder)
    return unstructured
def get_qp_ctext(value):
    """ctext = <printable ascii except \\ ( )>

    This is not the RFC ctext, since we are handling nested comments in
    comment and unquoting quoted-pairs here.  We allow anything except the
    '()' characters, but if we find any ASCII other than the RFC defined
    printable ASCII, a NonPrintableDefect is added to the token's defects
    list.  Since quoted pairs are converted to their unquoted values, what
    is returned is a 'ptext' token.  In this case it is a
    WhiteSpaceTerminal, so it's value is ' '.
    """
    scanned, rest, _had_qp = _get_ptext_to_endchars(value, '()')
    token = WhiteSpaceTerminal(scanned, 'ptext')
    _validate_xtext(token)
    return token, rest
def get_qcontent(value):
    """qcontent = qtext / quoted-pair

    We allow anything except the DQUOTE character, but if we find any ASCII
    other than the RFC defined printable ASCII, a NonPrintableDefect is
    added to the token's defects list.  Any quoted pairs are converted to
    their unquoted values, so what is returned is a 'ptext' token.  In this
    case it is a ValueTerminal.
    """
    scanned, rest, _had_qp = _get_ptext_to_endchars(value, '"')
    token = ValueTerminal(scanned, 'ptext')
    _validate_xtext(token)
    return token, rest
def get_atext(value):
    """atext = <matches _atext_matcher>

    We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to
    the token's defects list if we find non-atext characters.
    """
    match = _non_atom_end_matcher(value)
    if match is None:
        raise errors.HeaderParseError("expected atext but found '{}'".
            format(value))
    raw = match.group()
    atext = ValueTerminal(raw, 'atext')
    _validate_xtext(atext)
    return atext, value[len(raw):]
def get_bare_quoted_string(value):
    """bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE

    A quoted-string without the leading or trailing white space.  Its
    value is the text between the quote marks, with whitespace
    preserved and quoted pairs decoded.
    """
    if value[0] != '"':
        raise errors.HeaderParseError('expected \'"\' but found \'{}\''.
            format(value))
    bare_quoted_string = BareQuotedString()
    value = value[1:]
    while value and value[0] != '"':
        if value[0] in WSP:
            token, value = get_fws(value)
        elif value[:2] == '=?':
            # encoded words are not allowed inside quoted strings, but
            # we tolerate one, recording a defect; if it fails to parse,
            # fall back to treating the '=?' as ordinary qcontent
            try:
                token, value = get_encoded_word(value)
                bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
                    'encoded word inside quoted string'))
            except errors.HeaderParseError:
                token, value = get_qcontent(value)
        else:
            token, value = get_qcontent(value)
        bare_quoted_string.append(token)
    if not value:
        # ran off the end of the header before the closing quote
        bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
            'end of header inside quoted string'))
        return bare_quoted_string, value
    # drop the closing quote from the remainder
    return bare_quoted_string, value[1:]
def get_comment(value):
"""comment = "(" *([FWS] ccontent) [FWS] ")"
ccontent = ctext / quoted-pair / comment
We handle nested | |
🤍.jpg', params='', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://user@pass:www.example.org:8443/Příliš žluťoučký/kůň 🤍.jpg?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='') # noqa
),
(
'https://user@pass:www.example.org:8443/Příliš žluťoučký/kůň 🤍.jpg?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://user@pass:www.example.org:8443/Příliš žluťoučký/kůň 🤍.jpg;Příliš žluťoučký kůň=🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='Příliš žluťoučký kůň=🤍', query='', fragment='') # noqa
),
(
'https://user@pass:www.example.org:8443/Příliš žluťoučký/kůň 🤍.jpg;Příliš žluťoučký kůň=🤍#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='Příliš žluťoučký kůň=🤍', query='', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://user@pass:www.example.org:8443/Příliš žluťoučký/kůň 🤍.jpg;Příliš žluťoučký kůň=🤍?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='Příliš žluťoučký kůň=🤍', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='') # noqa
),
(
'https://user@pass:www.example.org:8443/Příliš žluťoučký/kůň 🤍.jpg;Příliš žluťoučký kůň=🤍?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='Příliš žluťoučký kůň=🤍', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://user@pass:www.example.org:8443/Příliš žluťoučký/kůň 🤍.jpg;Příliš žluťoučký kůň=🤍?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='Příliš žluťoučký kůň=🤍', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='') # noqa
),
(
'https://user@pass:www.example.org:8443/Příliš žluťoučký/kůň 🤍.jpg;Příliš žluťoučký kůň=🤍?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='Příliš žluťoučký kůň=🤍', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://user@pass:www.example.org:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='', query='', fragment='') # noqa
),
(
'https://user@pass:www.example.org:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='', query='', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://user@pass:www.example.org:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='') # noqa
),
(
'https://user@pass:www.example.org:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://user@pass:www.example.org:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='') # noqa
),
(
'https://user@pass:www.example.org:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://user@pass:www.example.org:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt;Příliš žluťoučký kůň=🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='Příliš žluťoučký kůň=🤍', query='', fragment='') # noqa
),
(
'https://user@pass:www.example.org:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt;Příliš žluťoučký kůň=🤍#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='Příliš žluťoučký kůň=🤍', query='', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://user@pass:www.example.org:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt;Příliš žluťoučký kůň=🤍?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='Příliš žluťoučký kůň=🤍', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='') # noqa
),
(
'https://user@pass:www.example.org:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt;Příliš žluťoučký kůň=🤍?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='Příliš žluťoučký kůň=🤍', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://user@pass:www.example.org:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt;Příliš žluťoučký kůň=🤍?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='Příliš žluťoučký kůň=🤍', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='') # noqa
),
(
'https://user@pass:www.example.org:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt;Příliš žluťoučký kůň=🤍?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='user@pass:www.example.org:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='Příliš žluťoučký kůň=🤍', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/', params='', query='', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/', params='', query='', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/', params='', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/', params='', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/', params='', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/', params='', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/;Příliš žluťoučký kůň=🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/', params='Příliš žluťoučký kůň=🤍', query='', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/;Příliš žluťoučký kůň=🤍#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/', params='Příliš žluťoučký kůň=🤍', query='', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/;Příliš žluťoučký kůň=🤍?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/', params='Příliš žluťoučký kůň=🤍', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/;Příliš žluťoučký kůň=🤍?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/', params='Příliš žluťoučký kůň=🤍', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/;Příliš žluťoučký kůň=🤍?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/', params='Příliš žluťoučký kůň=🤍', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/;Příliš žluťoučký kůň=🤍?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/', params='Příliš žluťoučký kůň=🤍', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/Příliš žluťoučký/kůň 🤍.jpg',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='', query='', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/Příliš žluťoučký/kůň 🤍.jpg#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='', query='', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/Příliš žluťoučký/kůň 🤍.jpg?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/Příliš žluťoučký/kůň 🤍.jpg?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/Příliš žluťoučký/kůň 🤍.jpg?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/Příliš žluťoučký/kůň 🤍.jpg?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/Příliš žluťoučký/kůň 🤍.jpg;Příliš žluťoučký kůň=🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='Příliš žluťoučký kůň=🤍', query='', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/Příliš žluťoučký/kůň 🤍.jpg;Příliš žluťoučký kůň=🤍#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='Příliš žluťoučký kůň=🤍', query='', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/Příliš žluťoučký/kůň 🤍.jpg;Příliš žluťoučký kůň=🤍?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='Příliš žluťoučký kůň=🤍', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/Příliš žluťoučký/kůň 🤍.jpg;Příliš žluťoučký kůň=🤍?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='Příliš žluťoučký kůň=🤍', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/Příliš žluťoučký/kůň 🤍.jpg;Příliš žluťoučký kůň=🤍?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='Příliš žluťoučký kůň=🤍', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/Příliš žluťoučký/kůň 🤍.jpg;Příliš žluťoučký kůň=🤍?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/Příliš žluťoučký/kůň 🤍.jpg', params='Příliš žluťoučký kůň=🤍', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='', query='', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='', query='', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt;Příliš žluťoučký kůň=🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='Příliš žluťoučký kůň=🤍', query='', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt;Příliš žluťoučký kůň=🤍#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='Příliš žluťoučký kůň=🤍', query='', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt;Příliš žluťoučký kůň=🤍?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='Příliš žluťoučký kůň=🤍', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt;Příliš žluťoučký kůň=🤍?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='Příliš žluťoučký kůň=🤍', query='abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt;Příliš žluťoučký kůň=🤍?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='Příliš žluťoučký kůň=🤍', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='') # noqa
),
(
'https://uživatel@heslo:příklad.cz:8443/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt;Příliš žluťoučký kůň=🤍?test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:příklad.cz:8443', path='/\ufffftest/somedir\u202f\u2029abc/\r\nsome \x00\t file.txt', params='Příliš žluťoučký kůň=🤍', query='test=test\x00abc&speciální=abc\uffff\u2028\r\n\t \u202f\x01test', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:[2001:fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b]:8443/',
ParseResult(scheme='https', netloc='uživatel@heslo:[2001:dbfd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b]:8443', path='/', params='', query='', fragment='') # noqa
),
(
'https://uživatel@heslo:[2001:fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b]:8443/#Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:[2001:db8::1234]:8443', path='/', params='', query='', fragment='Příliš\r\n\x01\x00 žluťoučký\uffff kůň 🤍') # noqa
),
(
'https://uživatel@heslo:[2001:fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b]:8443/?abc=def&možnost=Příliš žluťoučký kůň&další možnost=🤍',
ParseResult(scheme='https', netloc='uživatel@heslo:[2001:db8::1234]:8443', path='/', params='', query='abc=def&možnost=Příliš žluťoučký | |
_ = call.mock_calls[0]
assert args[0] == cloud_sql.SqlInstancesTruncateLogRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloud_sql_resources.Operation)
assert response.kind == 'kind_value'
assert response.target_link == 'target_link_value'
assert response.status == cloud_sql_resources.Operation.SqlOperationStatus.PENDING
assert response.user == 'user_value'
assert response.operation_type == cloud_sql_resources.Operation.SqlOperationType.IMPORT
assert response.name == 'name_value'
assert response.target_id == 'target_id_value'
assert response.self_link == 'self_link_value'
assert response.target_project == 'target_project_value'
@pytest.mark.asyncio
async def test_truncate_log_async_from_dict():
    # Re-run the async truncate_log test with a plain dict request to
    # exercise the client's dict -> proto coercion path.
    await test_truncate_log_async(request_type=dict)
def test_update(transport: str = 'grpc', request_type=cloud_sql.SqlInstancesUpdateRequest):
    """Verify that `update` issues exactly one RPC carrying the request and
    surfaces every field of the stubbed Operation response unchanged."""
    client = SqlInstancesServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked out,
    # so an empty request message is sufficient.
    request = request_type()

    # Field values the stubbed RPC returns and the client must pass through.
    expected = {
        'kind': 'kind_value',
        'target_link': 'target_link_value',
        'status': cloud_sql_resources.Operation.SqlOperationStatus.PENDING,
        'user': 'user_value',
        'operation_type': cloud_sql_resources.Operation.SqlOperationType.IMPORT,
        'name': 'name_value',
        'target_id': 'target_id_value',
        'self_link': 'self_link_value',
        'target_project': 'target_project_value',
    }

    # Stub the underlying gRPC callable and invoke the client method.
    with mock.patch.object(type(client.transport.update), '__call__') as call:
        call.return_value = cloud_sql_resources.Operation(**expected)
        response = client.update(request)

    # Exactly one RPC was issued, carrying the (empty) request message.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_sql.SqlInstancesUpdateRequest()

    # The response is the expected type with every field passed through.
    assert isinstance(response, cloud_sql_resources.Operation)
    for field, value in expected.items():
        assert getattr(response, field) == value
def test_update_from_dict():
    # Re-run the update test with a plain dict request to exercise the
    # client's dict -> proto coercion path.
    test_update(request_type=dict)
def test_update_empty_call():
    """Coverage failsafe: `update()` invoked with no request and no flattened
    fields must still issue an RPC carrying a default request message."""
    client = SqlInstancesServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Stub the underlying gRPC callable, then call with no arguments at all.
    stub_type = type(client.transport.update)
    with mock.patch.object(stub_type, '__call__') as call:
        client.update()

    call.assert_called()
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_sql.SqlInstancesUpdateRequest()
@pytest.mark.asyncio
async def test_update_async(transport: str = 'grpc_asyncio', request_type=cloud_sql.SqlInstancesUpdateRequest):
    """Async counterpart of test_update: verify `update` issues exactly one
    RPC and surfaces every field of the stubbed Operation response."""
    client = SqlInstancesServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.update),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloud_sql_resources.Operation(
            kind='kind_value',
            target_link='target_link_value',
            status=cloud_sql_resources.Operation.SqlOperationStatus.PENDING,
            user='user_value',
            operation_type=cloud_sql_resources.Operation.SqlOperationType.IMPORT,
            name='name_value',
            target_id='target_id_value',
            self_link='self_link_value',
            target_project='target_project_value',
        ))
        response = await client.update(request)

        # Establish that the underlying gRPC stub method was called exactly
        # once.  (Was a bare truthiness check, unlike the sync counterpart.)
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_sql.SqlInstancesUpdateRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, cloud_sql_resources.Operation)
    assert response.kind == 'kind_value'
    assert response.target_link == 'target_link_value'
    assert response.status == cloud_sql_resources.Operation.SqlOperationStatus.PENDING
    assert response.user == 'user_value'
    assert response.operation_type == cloud_sql_resources.Operation.SqlOperationType.IMPORT
    assert response.name == 'name_value'
    assert response.target_id == 'target_id_value'
    assert response.self_link == 'self_link_value'
    assert response.target_project == 'target_project_value'
@pytest.mark.asyncio
async def test_update_async_from_dict():
    # Re-run the async update test with a plain dict request to exercise
    # the client's dict -> proto coercion path.
    await test_update_async(request_type=dict)
def test_create_ephemeral(transport: str = 'grpc', request_type=cloud_sql.SqlInstancesCreateEphemeralCertRequest):
    """Verify that `create_ephemeral` issues exactly one RPC carrying the
    request and surfaces every field of the stubbed SslCert response."""
    client = SqlInstancesServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked out,
    # so an empty request message is sufficient.
    request = request_type()

    # Field values the stubbed RPC returns and the client must pass through.
    expected = {
        'kind': 'kind_value',
        'cert_serial_number': 'cert_serial_number_value',
        'cert': 'cert_value',
        'common_name': 'common_name_value',
        'sha1_fingerprint': 'sha1_fingerprint_value',
        'instance': 'instance_value',
        'self_link': 'self_link_value',
    }

    # Stub the underlying gRPC callable and invoke the client method.
    with mock.patch.object(type(client.transport.create_ephemeral), '__call__') as call:
        call.return_value = cloud_sql_resources.SslCert(**expected)
        response = client.create_ephemeral(request)

    # Exactly one RPC was issued, carrying the (empty) request message.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_sql.SqlInstancesCreateEphemeralCertRequest()

    # The response is the expected type with every field passed through.
    assert isinstance(response, cloud_sql_resources.SslCert)
    for field, value in expected.items():
        assert getattr(response, field) == value
def test_create_ephemeral_from_dict():
    # Re-run the create_ephemeral test with a plain dict request to
    # exercise the client's dict -> proto coercion path.
    test_create_ephemeral(request_type=dict)
def test_create_ephemeral_empty_call():
    """Coverage failsafe: `create_ephemeral()` invoked with no request and no
    flattened fields must still issue an RPC with a default request message."""
    client = SqlInstancesServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Stub the underlying gRPC callable, then call with no arguments at all.
    stub_type = type(client.transport.create_ephemeral)
    with mock.patch.object(stub_type, '__call__') as call:
        client.create_ephemeral()

    call.assert_called()
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_sql.SqlInstancesCreateEphemeralCertRequest()
@pytest.mark.asyncio
async def test_create_ephemeral_async(transport: str = 'grpc_asyncio', request_type=cloud_sql.SqlInstancesCreateEphemeralCertRequest):
    """Async counterpart of test_create_ephemeral: verify `create_ephemeral`
    issues exactly one RPC and surfaces every field of the stubbed SslCert."""
    client = SqlInstancesServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_ephemeral),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloud_sql_resources.SslCert(
            kind='kind_value',
            cert_serial_number='cert_serial_number_value',
            cert='cert_value',
            common_name='common_name_value',
            sha1_fingerprint='sha1_fingerprint_value',
            instance='instance_value',
            self_link='self_link_value',
        ))
        response = await client.create_ephemeral(request)

        # Establish that the underlying gRPC stub method was called exactly
        # once.  (Was a bare truthiness check, unlike the sync counterpart.)
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_sql.SqlInstancesCreateEphemeralCertRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, cloud_sql_resources.SslCert)
    assert response.kind == 'kind_value'
    assert response.cert_serial_number == 'cert_serial_number_value'
    assert response.cert == 'cert_value'
    assert response.common_name == 'common_name_value'
    assert response.sha1_fingerprint == 'sha1_fingerprint_value'
    assert response.instance == 'instance_value'
    assert response.self_link == 'self_link_value'
@pytest.mark.asyncio
async def test_create_ephemeral_async_from_dict():
    # Re-run the async create_ephemeral test with a plain dict request to
    # exercise the client's dict -> proto coercion path.
    await test_create_ephemeral_async(request_type=dict)
def test_reschedule_maintenance(transport: str = 'grpc', request_type=cloud_sql.SqlInstancesRescheduleMaintenanceRequest):
    """Verify that `reschedule_maintenance` issues exactly one RPC carrying
    the request and surfaces every field of the stubbed Operation response."""
    client = SqlInstancesServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 fields are all optional at runtime and the API is mocked out,
    # so an empty request message is sufficient.
    request = request_type()

    # Field values the stubbed RPC returns and the client must pass through.
    expected = {
        'kind': 'kind_value',
        'target_link': 'target_link_value',
        'status': cloud_sql_resources.Operation.SqlOperationStatus.PENDING,
        'user': 'user_value',
        'operation_type': cloud_sql_resources.Operation.SqlOperationType.IMPORT,
        'name': 'name_value',
        'target_id': 'target_id_value',
        'self_link': 'self_link_value',
        'target_project': 'target_project_value',
    }

    # Stub the underlying gRPC callable and invoke the client method.
    with mock.patch.object(type(client.transport.reschedule_maintenance), '__call__') as call:
        call.return_value = cloud_sql_resources.Operation(**expected)
        response = client.reschedule_maintenance(request)

    # Exactly one RPC was issued, carrying the (empty) request message.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_sql.SqlInstancesRescheduleMaintenanceRequest()

    # The response is the expected type with every field passed through.
    assert isinstance(response, cloud_sql_resources.Operation)
    for field, value in expected.items():
        assert getattr(response, field) == value
def test_reschedule_maintenance_from_dict():
    # Re-run the reschedule_maintenance test with a plain dict request to
    # exercise the client's dict -> proto coercion path.
    test_reschedule_maintenance(request_type=dict)
def test_reschedule_maintenance_empty_call():
    """Coverage failsafe: `reschedule_maintenance()` invoked with no request
    and no flattened fields must still issue an RPC with a default message."""
    client = SqlInstancesServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Stub the underlying gRPC callable, then call with no arguments at all.
    stub_type = type(client.transport.reschedule_maintenance)
    with mock.patch.object(stub_type, '__call__') as call:
        client.reschedule_maintenance()

    call.assert_called()
    _, args, _ = call.mock_calls[0]
    assert args[0] == cloud_sql.SqlInstancesRescheduleMaintenanceRequest()
@pytest.mark.asyncio
async def test_reschedule_maintenance_async(transport: str = 'grpc_asyncio', request_type=cloud_sql.SqlInstancesRescheduleMaintenanceRequest):
    """Async counterpart of test_reschedule_maintenance: verify the method
    issues exactly one RPC and surfaces every field of the stubbed Operation."""
    client = SqlInstancesServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.reschedule_maintenance),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloud_sql_resources.Operation(
            kind='kind_value',
            target_link='target_link_value',
            status=cloud_sql_resources.Operation.SqlOperationStatus.PENDING,
            user='user_value',
            operation_type=cloud_sql_resources.Operation.SqlOperationType.IMPORT,
            name='name_value',
            target_id='target_id_value',
            self_link='self_link_value',
            target_project='target_project_value',
        ))
        response = await client.reschedule_maintenance(request)

        # Establish that the underlying gRPC stub method was called exactly
        # once.  (Was a bare truthiness check, unlike the sync counterpart.)
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == cloud_sql.SqlInstancesRescheduleMaintenanceRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, cloud_sql_resources.Operation)
    assert response.kind == 'kind_value'
    assert response.target_link == 'target_link_value'
    assert response.status == cloud_sql_resources.Operation.SqlOperationStatus.PENDING
    assert response.user == 'user_value'
    assert response.operation_type == cloud_sql_resources.Operation.SqlOperationType.IMPORT
    assert response.name == 'name_value'
    assert response.target_id == 'target_id_value'
    assert response.self_link == 'self_link_value'
    assert response.target_project == 'target_project_value'
@pytest.mark.asyncio
async def test_reschedule_maintenance_async_from_dict():
    # Re-run the async reschedule_maintenance test with a plain dict request
    # to exercise the client's dict -> proto coercion path.
    await test_reschedule_maintenance_async(request_type=dict)
def test_verify_external_sync_settings(transport: str = 'grpc', request_type=cloud_sql.SqlInstancesVerifyExternalSyncSettingsRequest):
client = SqlInstancesServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.verify_external_sync_settings),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloud_sql_resources.SqlInstancesVerifyExternalSyncSettingsResponse(
kind='kind_value',
)
response = client.verify_external_sync_settings(request)
# Establish that the underlying gRPC | |
<filename>speechdetector/speech_detect.py
import os
import re
from collections import defaultdict
import numpy as np
#from numpy import mean, std, array, min, max, inf,sqrt, exp, pi
import numpy as np
import pylab as P
from acousticsim.representations.mfcc import Mfcc
from acousticsim.representations.amplitude_envelopes import Envelopes
from acousticsim.representations.gammatone import Gammatone
from acousticsim.representations.pitch import Pitch, Harmonicity
#Features:
#Amplitude (relative?), Spectral slope?
# Phone-label groupings for the TIMIT corpus, two-way-plus-silence variant:
# C = obstruent consonants, V = all sonorants (nasals, liquids, glides and
# vowels folded together), SIL = pauses and stop closures.
TIMIT_SEGMENT_SET_VCSIL = {'C':{'b','d','g','p','t','k','q','jh','ch','s','sh',
                                'z','zh','f','th','v','dh','hh','hv','ax-h'},
                           'V':{'m','n','ng','em','en','eng','nx','l','r','w','y','el',
                                'iy','ih','eh','ey','ae','aa','aw','ay','ah','ao','oy','ow','uh',
                                'uw','ux','er','ax','ix','axr','dx'},
                           'SIL':{'pau','epi','h#','bcl','dcl','gcl','pcl','tcl','kcl'}}

# Four-way TIMIT grouping: C = obstruents, SIL = pauses/closures,
# S = sonorant consonants (nasals, liquids, glides, flaps), V = vowels.
TIMIT_SEGMENT_SET = {'C':{'b','d','g','p','t','k','q','jh','ch','s','sh',
                          'z','zh','f','th','v','dh','hh'},
                     'SIL':{'pau','epi',
                            'h#','bcl','dcl','gcl','pcl','tcl','kcl'},
                     'S':{'m','n','ng','em','en','eng','nx','l','r','w','y',
                          'el','hv','ax-h','dx'},
                     'V':{
                          'iy','ih','eh','ey','ae','aa','aw','ay','ah','ao','oy','ow','uh',
                          'uw','ux','er','ax','ix','axr'}}

# Four-way grouping for the Buckeye corpus; the 'Xn' vowel labels are
# Buckeye's nasalized vowel variants.
BUCKEYE_SEGMENT_SET = {'C':{'p','t','k','b','d','g','ch','jh','th','dh',
                            's','z','sh','zh','f','v','hh','tq'},
                       'SIL':{'SIL'},
                       'S':{'m','n','ng','l','w','r','y','nx','en','em',
                            'eng','el','dx'},
                       'V':{'aa','ae','ay','aw','ao','oy','ow','eh','ey',
                            'er','ah','uw','uh','ih','iy',
                            'aan',
                            'aen','ayn','awn','aon','oyn','own','ehn','eyn',
                            'ern','ahn','uwn','ihn','iyn','uhn'}}

# Buckeye grouping with sonorant consonants folded into V (VCSIL variant).
BUCKEYE_SEGMENT_SET_VCSIL = {'C':{'p','t','k','b','d','g','ch','jh','th','dh',
                                  's','z','sh','zh','f','v','hh','tq',},
                             'V':{'aa','ae','ay','aw','ao','oy','ow','eh','ey',
                                  'er','ah','uw','uh','ih','iy','m','n','ng','l',
                                  'w','r','y','nx','en','em','eng','el','aan',
                                  'aen','ayn','awn','aon','oyn','own','ehn','eyn',
                                  'ern','ahn','uwn','ihn','iyn','uhn'},
                             'SIL':{'SIL'}}

# NOTE(review): hard-coded, machine-specific Windows corpus locations —
# these must be adjusted (or made configurable) on any other machine.
TIMIT_DIR = r'C:\Users\michael\Documents\Data\TIMIT_fixed'
BUCKEYE_DIR = r'C:\Users\michael\Documents\Data\VIC\Speakers'

# TIMIT dialect-region subdirectory names.
TIMIT_DRS = ['DR1','DR2','DR3','DR4','DR5','DR6','DR7','DR8']
def align_dialog_info(words, phones, wavs):
    """Group word, phone and wav file paths by their shared base name.

    Parameters
    ----------
    words, phones, wavs : iterable of str
        Paths to the word-annotation, phone-annotation and audio files.

    Returns
    -------
    dict
        Maps each base name (file name without directory or extension) to a
        dict with 'words', 'phones' and 'wav' path entries.  A phone or wav
        file whose base name has no matching word file raises KeyError.
    """
    def base_name(path):
        return os.path.splitext(os.path.split(path)[1])[0]

    dialogs = {base_name(word_path): {'words': word_path} for word_path in words}
    for phone_path in phones:
        dialogs[base_name(phone_path)]['phones'] = phone_path
    for wav_path in wavs:
        dialogs[base_name(wav_path)]['wav'] = wav_path
    return dialogs
def read_phones(path, dialect = 'timit', sr = None):
    """Read a phone-annotation file into a list of [label, begin, end].

    Parameters
    ----------
    path : str
        Path to the annotation file.
    dialect : str
        'timit' — one "start end label" line per phone, offsets in samples;
        'buckeye' — end-only timestamps following a '#' header line.
    sr : int or None
        Sampling rate; for 'timit', divides the sample offsets into seconds.
        Ignored for 'buckeye' (its timestamps are already seconds).

    Returns
    -------
    list of [str, float, float]

    Raises
    ------
    NotImplementedError
        If *dialect* is not 'timit' or 'buckeye'.
    """
    output = list()
    with open(path,'r') as file_handle:
        if dialect == 'timit':
            for line in file_handle:
                l = line.strip().split(' ')
                start = float(l[0])
                end = float(l[1])
                phone = l[2]
                if sr is not None:
                    # TIMIT stores sample offsets; convert to seconds.
                    start /= sr
                    end /= sr
                output.append([phone, start, end])
        elif dialect == 'buckeye':
            # Everything before the first lone '#' line is header; discard it.
            # (Raw strings: these patterns previously relied on invalid
            # escape sequences like '\s' in plain strings.)
            f = re.split(r"#\r{0,1}\n", file_handle.read())[1]
            flist = f.splitlines()
            begin = 0.0
            for l in flist:
                # Each line is "<end-time> <3-digit color code> <label...>".
                line = re.split(r"\s+\d{3}\s+", l.strip())
                end = float(line[0])
                # Strip trailing ';'/'+' annotations from the label.
                label = re.split(r" {0,1};| {0,1}\+", line[1])[0]
                output.append([label, begin, end])
                begin = end
        else:
            raise NotImplementedError(f"unknown dialect: {dialect!r}")
    return output
class SpeechClassifierLandmarks(object):
    """Experimental landmark-style classifier built on amplitude envelopes.

    NOTE(review): appears unfinished — train() plots the envelope bands of
    the first classifiable phone of each dialog (blocking pylab windows)
    and never fits any model; `values` is allocated but never filled.
    """
    # Frequency limits (Hz) and band count for the amplitude envelopes.
    _freq_lims = (80,4000)
    _num_bands = 4

    def __init__(self, parameters = 'timit'):
        # No per-instance state yet; `parameters` is currently unused.
        pass

    def train(self,dialect = 'timit'):
        """Walk the training corpus and plot band envelopes per phone class.

        Parameters
        ----------
        dialect : str
            'timit' or 'buckeye'; selects corpus location, segment grouping
            and annotation-file extensions.  Any other value leaves
            `train_dir` unbound and raises NameError below — TODO confirm.
        """
        #assume training on TIMIT
        if dialect == 'timit':
            path = TIMIT_DIR
            train_dir = os.path.join(path, 'TRAIN')
            segment_set = TIMIT_SEGMENT_SET
            wrdExt = '.wrd'
            phnExt = '.phn'
        elif dialect == 'buckeye':
            train_dir = BUCKEYE_DIR
            segment_set = BUCKEYE_SEGMENT_SET
            wrdExt = '.words'
            phnExt = '.phones'
        # Collect annotation and audio paths from the corpus tree.
        words = []
        phones = []
        wavs = []
        for root, subdirs, files in os.walk(train_dir):
            for f in files:
                if f.lower().endswith(wrdExt):
                    words.append(os.path.join(root,f))
                elif f.lower().endswith(phnExt):
                    phones.append(os.path.join(root,f))
                elif f.lower().endswith('.wav'):
                    wavs.append(os.path.join(root,f))
        dialogs = align_dialog_info(words, phones,wavs)
        # Per-band accumulators; NOTE(review): never written to below.
        values = [defaultdict(list) for i in range(self._num_bands)]
        for (i,(d,info)) in enumerate(dialogs.items()):
            print(i,len(dialogs), d)
            ampenvs = Envelopes(info['wav'],self._freq_lims, self._num_bands)
            #self.update_range(ampenvs)
            phones = read_phones(info['phones'],sr=ampenvs.sampling_rate,dialect=dialect)
            previous = None
            for p in phones:
                #if p[0] in segment_set['V']:
                #    phone_class = 'V'
                #else:
                #    phone_class = 'NV'
                # Map the phone label to its class; the for/else skips
                # phones belonging to no class.
                for k,v in segment_set.items():
                    if p[0] in v:
                        phone_class = k
                        break
                else:
                    continue
                print(p)
                # Plot this phone's band envelopes, then stop scanning this
                # dialog (debugging aid; blocks on the pylab window).
                bands = ampenvs[p[1],p[2]]
                P.plot(bands)
                P.show()
                break
class SpeechClassifier(object):
    """
    Classify a wav file into speech components. Currently doesn't have
    high accuracy

    Parameters
    ----------
    parameters : str
        Parameters to use for the classifier, can be 'timit', 'buckeye'
        or 'new'
    """
    # Number of base MFCC coefficients; with deltas the feature vector has
    # _num_coeffs*3 MFCC values plus pitch and harmonicity (= *3+2 total).
    _num_coeffs = 3
    # Whether calc_prob multiplies in the class prior (vs. uniform).
    _use_priors = True
    # Analysis window length and hop (seconds).
    win_len = 0.025
    time_step = 0.01
    # Frequency limits (Hz) for the MFCC filterbank.
    freq_lims = (80,4000)
    def __init__(self, parameters = 'timit'):
        """Initialise the classifier with pretrained parameter tables.

        Parameters
        ----------
        parameters : str
            'timit' or 'buckeye' load pretrained Gaussian means/standard
            deviations, feature ranges, priors, initial-state and transition
            probabilities for the four classes V/C/SIL/S; any other value
            zero-initialises everything for training via train().

        Notes
        -----
        `_guass_values` (sic — typo preserved, other methods index it by this
        name) stores, per class, a flat list of (mean, sd) pairs, one pair per
        feature dimension.  For 'timit' that is 11 pairs (9 MFCC+delta dims,
        pitch, harmonicity); the 'buckeye' tables hold only 3 pairs and
        3 ranges, which does not match the _num_coeffs*3+2 dimensions the
        other methods iterate over — TODO confirm the buckeye tables were
        trained with the same feature set.
        """
        # Class labels: Vowel, Consonant (obstruent), SILence, Sonorant.
        self._states = ['V','C','SIL', 'S']
        if parameters == 'timit':
            # Per-class (mean, sd) pairs for each feature dimension.
            self._guass_values = {'S': [106.69858295806942, 13.788101941585037,
                                        -2.7157271472866578, 10.436144308052297,
                                        -4.7651752344952119, 16.40640775462192,
                                        -0.20527335955567133, 10.849061304730101,
                                        -0.47118579058647031, 8.2687453916999427,
                                        0.37763342640775416, 9.433485919731508,
                                        1.7238440536507622, 12.415721048856815,
                                        -4.1644962118506568, 11.289866989968807,
                                        -1.087066518341647, 12.346358683908502,
                                        122.57544878814136, 98.409504759980436,
                                        5.5296286574132241, 4.825418319490506],
                                  'C': [98.669101415828138, 12.969681154558939,
                                        -31.383989078115722, 15.356287596393775,
                                        -3.6024466565312148, 13.052742055994578,
                                        2.8805856059435517, 14.843254671285834,
                                        1.8735038149092653, 13.428119472685694,
                                        0.20547684191851148, 10.865016028080774,
                                        -1.1094621990978419, 21.253209202475396,
                                        4.5868033781563868, 16.109931025613381,
                                        -0.30797358082648352, 17.285745939222192,
                                        27.162001644890676, 86.296812928620383,
                                        -0.0012759997855903513, 3.6931402207970461],
                                  'SIL': [67.260125656339383, 16.364258978070932,
                                        -23.994604651768732, 11.8282996527255,
                                        -4.1936611952894509, 12.377333899724508,
                                        0.1952864254960883, 20.186446606791733,
                                        -1.7874594107406285, 10.183650016903522,
                                        -1.24398059928302, 11.261290783446572,
                                        9.3859576265900202, 23.711655267287838,
                                        0.0062866497091458437, 14.195477763466464,
                                        -1.247745911984363, 17.131753916206851,
                                        25.216211813260664, 85.720055043911827,
                                        -0.94090835139375628, 4.0782367578024195],
                                  'V': [121.79361965642315, 12.147622531365982,
                                        -10.568844151240489, 10.181818034234762,
                                        -9.0190738510271338, 18.72513361330407,
                                        -1.8221555596816867, 10.023815472882168,
                                        0.1786402424590863, 6.5160231749873745,
                                        0.52604413889141155, 8.1725722837946346,
                                        -5.9174582469826369, 10.497004578953135,
                                        -1.4707037149398738, 9.7730479831706152,
                                        1.2984439177165692, 9.7936713622437921,
                                        134.39679444095819, 109.7964618588956,
                                        5.8068054070370332, 4.6899801286927882]}
            # Observed [min, max] per feature dimension (from update_range).
            self._ranges = [[30.120333629226856, 159.66897139827836],
                            [-77.835593153485163, 32.870167900828804],
                            [-69.726366723368699, 62.529506799142993],
                            [-77.474592423943136, 89.850673096664636],
                            [-62.107330271953956, 63.23772724196985],
                            [-74.249433084205918, 65.588485849292454],
                            [-127.00926808091104, 132.30046306413362],
                            [-87.65436477838243, 89.160571452999051],
                            [-113.44799223511505, 110.40706447575778],
                            [0.0, 592.59259259259261],
                            [-39.999565683801926, 26.048043267524864]]
            # Class priors, initial-state probabilities and the
            # class-to-class transition matrix (rows sum to 1).
            self._priors = {'SIL': 0.23807641179745634,
                            'S': 0.15020121253448,
                            'V': 0.37767202810265377,
                            'C': 0.23405034756540993}
            self._initial_probs = {'SIL': 1.0, 'S': 0.0, 'V': 0.0, 'C': 0.0}
            self._transitions = {'C': {'C': 0.07291775798847565,
                                       'V': 0.6127815610267155,
                                       'SIL': 0.15914091147197484,
                                       'S': 0.15515976951283394},
                                 'V': {'C': 0.2669533725667723,
                                       'V': 0.06088727931190584,
                                       'SIL': 0.27451335445903124,
                                       'S': 0.39764599366229064},
                                 'SIL': {'C': 0.8465709994994159,
                                       'V': 0.03153679292507926,
                                       'SIL': 0.019189053896212248,
                                       'S': 0.1027031536792925},
                                 'S': {'C': 0.11725506876506452,
                                       'V': 0.6145611796398696,
                                       'SIL': 0.1881468878491422,
                                       'S': 0.08003686374592373}}
        elif parameters == 'buckeye':
            # NOTE(review): only 3 (mean, sd) pairs / 3 ranges here — see
            # the dimensionality caveat in the docstring above.
            self._guass_values = {'V': [128.90264866566679, 15.14317575840515,
                                        1.4871098383116084, 10.323758763127435,
                                        -6.3106813016323215, 14.045080900462775],
                                  'SIL': [86.76749073542328, 12.076001681199791,
                                        -18.785544623133003, 7.3577038261119636,
                                        4.9374633668260719, 10.317154208644],
                                  'C': [112.54976709996274, 17.433637984244346,
                                        -22.861301032841272, 19.124160819690392,
                                        6.1901419921157217, 14.163979962615953],
                                  'S': [115.88482662589307, 14.987730620486317,
                                        4.0720428025866937, 10.516924467062324,
                                        5.2798333599796852, 13.508389353927729]}
            self._ranges = [[42.310066393516529, 188.05789726778681],
                            [-79.370596330143442, 48.209406532240891],
                            [-75.96161099579318, 82.987855827814656]]
            self._priors = {'V': 0.4063921598826558,
                            'SIL': 0.16116159722743895,
                            'C': 0.2656405574321356,
                            'S': 0.16680568545776964}
            self._initial_probs = {'V': 0.2627450980392157,
                                   'SIL': 0.35294117647058826,
                                   'C': 0.15294117647058825,
                                   'S': 0.23137254901960785}
            self._transitions = {'V': {'V': 0.08651136312395726,
                                       'SIL': 0.024600374261622402,
                                       'C': 0.4763550074401407,
                                       'S': 0.41253325517427963},
                                 'SIL': {'V': 0.3940983122207039,
                                       'SIL': 0.06355947852420873,
                                       'C': 0.29848210658492447,
                                       'S': 0.24386010267016286},
                                 'C': {'V': 0.5984959206381872,
                                       'SIL': 0.034218503106175024,
                                       'C': 0.21540913958428531,
                                       'S': 0.1518764366713524},
                                 'S': {'V': 0.6420108304718991,
                                       'SIL': 0.03064562098777161,
                                       'C': 0.21645109775021149,
                                       'S': 0.11089245079011777}}
        else:
            # Untrained: zeroed tables sized for _num_coeffs*3+2 features,
            # to be filled in by train().
            self._guass_values = {x: [0 for i in range((self._num_coeffs*3+2)*2)]
                                  for x in self._states}
            self._ranges = [[np.inf,-np.inf] for x in range((self._num_coeffs*3+2))]
            self._priors = {x: 0 for x in self._states}
            self._transitions = {x: {y: 0 for y in self._states} for x in self._states}
            self._initial_probs = {x: 0 for x in self._states}
def train_range(self, train_dir):
wavs = []
for root, subdirs, files in os.walk(train_dir):
for f in files:
if f.lower().endswith('.wav'):
wavs.append(os.path.join(root,f))
for f in wavs:
mfcc, pitch, harmonicity = self.get_features(f)
self.update_range(mfcc, pitch, harmonicity)
def get_features(self, path):
mfcc = Mfcc(path, self.freq_lims, self._num_coeffs, 0.025,
0.01, num_filters = 26, use_power = True, deltas = True)
pitch = Pitch(path, self.time_step, (75,600))
pitch.process()
harmonicity = Harmonicity(path, self.time_step, 75)
harmonicity.process()
return mfcc, pitch, harmonicity
def update_range(self, mfcc, pitch, harmonicity):
num_cc = self._num_coeffs
if mfcc._deltas:
num_cc *= 3
coeffs = mfcc.to_array()
for i in range(num_cc):
t = coeffs[:,i]
try:
mini = np.min(t)
if mini < self._ranges[i][0]:
self._ranges[i][0] = mini
except:
continue
maxi = np.max(t)
if maxi > self._ranges[i][1]:
self._ranges[i][1] = maxi
pitches = pitch.to_array()
mini = np.min(pitches)
if mini < self._ranges[num_cc][0]:
self._ranges[num_cc][0] = mini
maxi = np.max(pitches)
if maxi > self._ranges[num_cc][1]:
self._ranges[num_cc][1] = maxi
harms = harmonicity.to_array()
mini = np.min(harms)
if mini < self._ranges[num_cc+1][0]:
self._ranges[num_cc+1][0] = mini
maxi = np.max(harms)
if maxi > self._ranges[num_cc+1][1]:
self._ranges[num_cc+1][1] = maxi
    def train(self,dialect = 'timit'):
        """Fit Gaussian parameters, priors, initial-state and transition
        counts from a labelled corpus on disk, then normalise them to
        probabilities.

        Parameters
        ----------
        dialect : str
            'timit' or 'buckeye'; selects corpus location, segment grouping
            and annotation-file extensions.  Any other value leaves
            `train_dir` unbound and raises NameError below — TODO confirm.
        """
        #assume training on TIMIT
        if dialect == 'timit':
            path = TIMIT_DIR
            train_dir = os.path.join(path, 'TRAIN')
            segment_set = TIMIT_SEGMENT_SET
            wrdExt = '.wrd'
            phnExt = '.phn'
        elif dialect == 'buckeye':
            train_dir = BUCKEYE_DIR
            segment_set = BUCKEYE_SEGMENT_SET
            wrdExt = '.words'
            phnExt = '.phones'
        # Collect annotation and audio paths from the corpus tree; sorted()
        # makes the traversal (and hence training order) deterministic.
        words = []
        phones = []
        wavs = []
        for root, subdirs, files in sorted(os.walk(train_dir)):
            for f in files:
                if f.lower().endswith(wrdExt):
                    words.append(os.path.join(root,f))
                elif f.lower().endswith(phnExt):
                    phones.append(os.path.join(root,f))
                elif f.lower().endswith('.wav'):
                    wavs.append(os.path.join(root,f))
        dialogs = align_dialog_info(words, phones,wavs)
        # One accumulator per feature dimension; each maps a phone class to
        # the list of observed values for that dimension.
        values = [defaultdict(list) for i in range(self._num_coeffs*3+2)]
        for (i,(d,info)) in enumerate(dialogs.items()):
            print(i,len(dialogs), d)
            mfcc, pitch, harmonicity = self.get_features(info['wav'])
            self.update_range(mfcc, pitch, harmonicity)
            phones = read_phones(info['phones'],sr=mfcc.sampling_rate,dialect=dialect)
            previous = None
            for p in phones:
                #if p[0] in segment_set['V']:
                #    phone_class = 'V'
                #else:
                #    phone_class = 'NV'
                # Map the phone label to its class; the for/else skips
                # phones belonging to no class.
                for k,v in segment_set.items():
                    if p[0] in v:
                        phone_class = k
                        break
                else:
                    continue
                # Count initial-state and transition occurrences.
                if previous is None:
                    self._initial_probs[phone_class] += 1
                else:
                    self._transitions[previous][phone_class] += 1
                # Accumulate per-frame feature values within this phone's
                # [start, end] span.  NOTE(review): this inner `i` shadows
                # the enumerate index of the dialog loop above.
                coeffs = mfcc[p[1],p[2]]
                for i in range(self._num_coeffs*3):
                    t = [x[i] for x in coeffs]
                    values[i][phone_class].extend(t)
                pitches = pitch[p[1],p[2]]
                t = [x for x in pitches]
                values[self._num_coeffs*3][phone_class].extend(t)
                harms = harmonicity[p[1],p[2]]
                t = [x for x in harms]
                values[self._num_coeffs*3+1][phone_class].extend(t)
                previous = phone_class
            #break
        #Find out the max of each value
        # Turn accumulated samples into per-class (mean, sd) pairs, and the
        # raw counts into normalised probabilities.
        total = 0
        for k in self._guass_values.keys():
            #if k not in ['V','NV']:
            #    continue
            for i in range(self._num_coeffs*3+2):
                ind = i * 2
                self._guass_values[k][ind] = np.mean(values[i][k])
                self._guass_values[k][ind+1] = np.std(values[i][k])
            # Prior = number of frames observed for this class.
            self._priors[k] = len(values[0][k])
            total += self._priors[k]
        for k,v in self._priors.items():
            self._priors[k] = v/total
        initial_total = sum(self._initial_probs.values())
        for k,v in self._initial_probs.items():
            self._initial_probs[k] = v/initial_total
        for k,v in self._transitions.items():
            k_total = sum(v.values())
            for k2, v2 in v.items():
                v[k2] = v2/k_total
def calc_prob(self, feature_values, category,use_pitch_info):
if self._use_priors:
val = self._priors[category]
else:
val = 1 /len(self._states)
for i in range(self._num_coeffs*3+2):
if not use_pitch_info and i == self._num_coeffs*3+2:
break
ind = i * 2
mean = self._guass_values[category][ind]
var = self._guass_values[category][ind+1]**2
val = feature_values[i]
if isinstance(val, list):
val = val[0]
prob = np.exp(-1*(val-mean)**2/(2*var))/(np.sqrt(np.pi*var))
val *= prob
return val
def predict(self,feature_values, use_pitch_info=False):
if len(feature_values) != self._num_coeffs*3+2:
return None
best_value = 0
best_cat = None
for k in self._guass_values.keys():
if k not in self._states:
continue
if self._priors[k] is None:
return None
val = self.calc_prob(feature_values, k,use_pitch_info)
if val > best_value:
best_value = val
best_cat = k
return best_cat
def find_vowels(self, path, speaker_ranges, num_vowels = 1, debug = False):
vnv = self.predict_file(path, norm_amp = True,
alg = | |
<gh_stars>1-10
# Standard library
import os
import sys  # needed by userExit/userFinished (sys.exit)

# Third-party: GTK via GObject introspection.
# require_version must run before importing from gi.repository.
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject, GLib, Gdk

# Local modules
import dataConnection, Installer, installThread
from Functions import functions, systemTools
tr = functions.tr
class userWindow(Gtk.Window):
    """Graphical User Interface. Use GTK3 and glade."""
    # NOTE(review): these are class-level defaults; __init__ rebinds the
    # stateful ones per instance before they are used.
    dataConnect = None      # dataConnection instance (set in __init__)
    builder = None          # Gtk.Builder that loads the glade file
    companyId = None        # id of the currently selected company
    companyName = None      # name of the currently selected company
    codeToSearch = None     # DB row matched by an 8-character company code
    userCodeFlag = False    # True when navigation started from a company code
    lastMessage = ''        # last line appended to the install log buffer
    messagePos = 1          # index of the message text inside a notification entry
    def __init__(self):
        """Build the whole UI: load the glade file, translate labels, wire
        signal handlers, bind widget attributes, check the DB connection and
        show the first window with any pending notifications."""
        self.dataConnect = dataConnection.dataConnection()
        self.installation = Installer.Installer() #Instance of Installer class
        functions.logging.debug(tr('Loading Interface'))
        self.builder = Gtk.Builder()
        path = os.path.dirname(os.path.abspath(__file__))
        # NOTE(review): Windows path is built with a literal backslash;
        # os.path.join would cover both platforms without the branch below.
        gladeFile = path + "\\OrangeInstallerGUI.glade"
        if (self.installation.getCurrentSystem() == 'Linux'):
            gladeFile = path + "/OrangeInstallerGUI.glade"
        self.builder.add_from_file(gladeFile)
        # translate window's labels
        for obj in self.builder.get_objects():
            if obj.find_property("placeholder_text") and obj.get_property("placeholder_text"):
                obj.set_property("placeholder_text", tr(obj.get_property("placeholder_text")))
            elif obj.find_property("label") and obj.get_property("label"):
                obj.set_property("label", tr(obj.get_property("label")))
            elif obj.find_property("text") and obj.get_property("text"):
                obj.set_property("text", tr(obj.get_property("text")))
        # Glade signal name -> bound handler method.
        self.handlers = {
            "delete-event": Gtk.main_quit,
            "userExit": self.userExit,
            "nextWindow1": self.nextWindow,
            "searching": self.search,
            "selectRow": self.selectRow,
            "prevWindow": self.prevWindow,
            "changeInstallDirectory": self.changeInstallDirectory,
            "readyToInstall": self.readyToInstall,
            "startInstall": self.startInstall,
            "userFinish": self.userFinished,
            "messageOk" : self.hideMessage,
            "showAdvOpt": self.showOrHideAdvOpt,
            "initialClick": self.initialClick,
            "insertCode":self.insertCode,
            "shortcutButtonToggled": self.shortcutButtonToggled,
            "companyFolderSetter" : self.companyFolderSetter,
            "nextNotify": self.nextNotify,
            "closeNotify": self.closeNotify,
            "showReport": self.showReport,
            "copyClipboard": self.copyClipboard,
            "saveReport":self.saveReport
        }
        self.builder.connect_signals(self.handlers)
        #load objects for working.
        # Each glade object id below becomes an attribute of the same name.
        objects = ["initialwindow", "window", "window1", "window2", "message", "treeview", "liststore", \
                   "statusbar", "statusbarInstall", "treeview-selection", "companyLabel", "installButton", \
                   "installPathLabel", "folderChooser", "inputSVNUser", "inputSVNPassword", "notebook", \
                   "finishButton", "spinner1", "installLabel", "revadvoptions", "codebox", "initial", \
                   "opt1install", "opt2svn", "opt3report","opt4shortcut", "opt5console", "advoptions", "messagebar", "opt6companyname", "finalwindows", "report", \
                   "reportBuffer", "statusView", "bufferInstall", "notificationRevealer","textNotification", "notifyAnimation","infoBarButton", "showFinalReport", \
                   "copyClipboardReportButton", "saveReportButton"]
        # 'buttton1' is Previus button.
        for obj in objects:
            setattr(self, obj, self.builder.get_object(obj))
        self.advOptInitial()
        self.actualWindowPos = 0 #First window, this is an index for navigator
        functions.logging.debug(tr('GUI Loaded'))
        self.initialwindow.show_all()
        #self.window.show_all()
        #check database Status
        dbcheck = self.dataConnect.testConnection()
        if dbcheck:
            self.communicator(tr("Connection to DB: OK"))
            self.messagebar.set_text(tr("Connection to DB: OK"))
        else:
            self.communicator(tr("Fail to connect with DB"))
            self.messagebar.set_text(tr("Fail to connect with DB"))
        #List all results for search and show up in screen
        renderer = Gtk.CellRendererText()
        column = Gtk.TreeViewColumn("Company", renderer, text=0)
        self.treeview.append_column(column)
        #push initial notifications
        self.actualNotify = 0
        self.installation.checkNotifications() #0 = key
        self.checkAvailableNotifications()
"""User press exit button"""
    def userExit(self, widget):
        """Handler for the user pressing the exit button (exit code 2 = user abort)."""
        functions.exitProgram(2) #End by user
        sys.exit()  # relies on the module-level 'import sys'
"""User press Finish button"""
    def userFinished(self, widget):
        """Handler for the Finish button after a completed install (exit code 0)."""
        functions.exitProgram(0) #Installation Finished
        sys.exit()  # relies on the module-level 'import sys'
"""For next buttons"""
    def nextWindow(self, widget):
        """Advance the wizard one window forward, resolving the company id
        from its name first if only the name is known."""
        #check if company was selected
        if (self.companyId == None and self.companyName != None):
            resultOfSearch = self.dataConnect.getDataSearch('company', 'name', self.companyName, "*")
            self.companyId = resultOfSearch[0][0]
        if (self.companyId == None and self.companyName == None):
            self.communicator(tr("First you must choose a Company"))
        else:
            nextWindowPos = self.actualWindowPos + 1
            if (self.actualWindowPos == 1):
                self.installation.setInstallPath(self.installation.getInstallPath(), self.companyName)
                self.window.hide()
                self.installation.initialization(self.companyId) #If company was picked so we initialize installer
                self.preparateWin1()
                self.window1.show_all()
            if (self.actualWindowPos == 2):
                self.window1.hide()
                self.window2.show_all()
            # NOTE(review): self.window3 is never created anywhere visible;
            # reaching position 3 here would raise AttributeError.
            if (self.actualWindowPos == 3): #THIS WINDOWS IS NOT EXIST YET!!
                self.window2.hide()
                self.window3.show_all()
            self.actualWindowPos = nextWindowPos #is more clearly
"""Show company name in screen and install patch"""
    def preparateWin1(self):
        """Fill window1's header with the company name and the default install path."""
        self.companyLabel.set_text(self.companyName)
        self.installPathLabel.set_text(tr("Default Path: ") + self.installation.getInstallPath())
"""For previous buttons (return buttons)"""
    def prevWindow(self, widget):
        """Step the wizard one window back, restoring the appropriate
        previous screen and resetting the install path when leaving window1."""
        prevWindowPos = self.actualWindowPos - 1
        if (self.actualWindowPos == 2):
            self.window1.hide()
            if self.userCodeFlag == True: #If code user was loaded, the system never shows the search company window
                self.initialwindow.show_all()
            else:
                self.window.show_all()
            self.installation.setInstallPath(self.installation.getInstallPath()) #For reset path if i go to first windows.
        if (self.actualWindowPos == 3):
            self.window2.hide()
            self.window.show_all()
        self.actualWindowPos = prevWindowPos #is more clearly
"""Take one message and show in screen"""
    def communicator(self, message):
        """Show *message* on the status bar of the current screen; on the
        install screen also append it (deduplicated) to the scrolling log."""
        if (self.actualWindowPos == 1):
            self.statusbar.push(1, message)
        if (self.actualWindowPos == 3): #third screen
            self.statusbarInstall.push(1, message)
            # Only append to the textview log when the message changed.
            if self.lastMessage != message:
                self.lastMessage = message
                self.bufferInstall.insert_at_cursor(message + "\n")
                self.statusView.set_buffer(self.bufferInstall)
                # Keep an "end" mark and scroll it on screen so the log follows.
                self.bufferInstall.create_mark("end", self.bufferInstall.get_end_iter(), False)
                mark = self.bufferInstall.get_mark("end")
                self.bufferInstall.move_mark(mark,self.bufferInstall.get_end_iter())
                self.statusView.scroll_mark_onscreen(mark)
"""Engine of search bar. Through this, one company will be selected"""
def search(self, widget):
imputTest = widget.get_text()
resultOfSearch = self.dataConnect.getDataSearch('company', 'name', imputTest, "*")
#clear treeView
self.liststore.clear()
if (len(resultOfSearch) == 0):
self.communicator(tr("Company not Found, try again"))
if (len(resultOfSearch) > 1):
self.communicator(tr("Too many results, choose one from this list"))
for i in range(len(resultOfSearch)):
if (i > 9):
self.communicator(tr("Some result not shown in screen."))
break
self.liststore.append(["id: %i - %s" % (resultOfSearch[i][0], resultOfSearch[i][1])])
if (len(resultOfSearch) == 1):
self.liststore.append(["id: %i - %s" % (resultOfSearch[0][0], resultOfSearch[0][1])])
self.communicator(tr("Company Chosen"))
self.companyId = resultOfSearch[0][0] #[0] for unique row, [0] for Id
self.companyName = resultOfSearch[0][1]
"""For pick company from list on screen"""
def selectRow(self, widget):
model, colum = widget.get_selected()
if model and colum:
if len(model[colum][0].split(" ")) > 1:
self.companyId = model[colum][0].split(" ")[1]
self.companyName = model[colum][0].split(" ")[-1]
else:
self.companyName = model[colum][0]
"""If directory path change, this set the new one"""
    def changeInstallDirectory(self, widget):
        """Adopt the folder picked in the chooser as the new install path."""
        if systemTools.isWindows():
            newPath = self.folderChooser.get_uri().split('file:///')
        else: newPath = self.folderChooser.get_uri().split('file://')
        newPath = newPath[1] #Discard first split
        # NOTE(review): only "%20" is unescaped; other percent-encoded
        # characters in the URI remain encoded — consider urllib.parse.unquote.
        newPath = newPath.replace("%20", " ") #Fix spaces
        self.installation.pathThroughWidget = True
        self.installation.setInstallPath(newPath)
        self.installation.pathThroughWidget = False
        self.installPathLabel.set_text(tr("Install Path: ") + self.installation.getInstallPath())
"""Check if the conditions for starting installation are ready or not"""
def readyToInstall(self, widget):
if (self.inputSVNUser.get_text_length() !=0 and self.inputSVNPassword.get_text_length() != 0):
self.installButton.set_opacity(1)
self.installButton.set_sensitive(True)
else:
if (self.installButton.get_sensitive() == True):
self.installButton.set_opacity(0.5)
self.installButton.set_sensitive(False)
"""Start all installation Engine"""
def startInstall(self, widget):
self.installPathLabel.set_text(tr("Checking Username and Password from SVN"))
self.installation.setSvnControlFromOut()
self.installation.svn.svnUserName = self.inputSVNUser.get_text()
self.installation.svn.svnPassword = self.inputSVNPassword.get_text()
if self.installation.svn.checkCredentials() == True:
self.installPathLabel.set_text(tr("Great Success!"))
self.nextWindow(widget)
if self.userCodeFlag:
self.initialwindow.hide()
self.installation.startInstall()
self.installStatus()
self.checkProgress()
else:
self.installation.setInstallPath(self.installation.getInstallPath()) #For reset path after credentials error
self.installPathLabel.set_text(tr(self.installation.svn.checkCredentials()))
# translate secondary text here
self.message.set_property("secondary_text", tr(self.message.get_property("secondary_text")))
self.message.show_all()
"""Restart refresh timer"""
def installStatus(self):
timeout = GObject.timeout_add(10000, self.imagesSlides)
"""This is for refresh status of installation and show it on screen (DECREPT)"""
    def checkProgress(self):
        """Poll the installer's message buffer and push it to the UI (marked
        DECREPT by the original author)."""
        # Each invocation schedules the next poll; the callback returns None,
        # so the timer that fired it does not repeat on its own.
        GObject.timeout_add(1000, self.checkProgress)
        catchProgress = self.installation.getMsgBuffer()
        self.communicator(catchProgress)
"""Pass images over installation while wait it"""
    def imagesSlides(self):
        """Cycle the notebook images while installing; when the installer
        reports completion, unlock the Finish/report buttons instead of
        re-arming the slide timer."""
        if (self.notebook.get_current_page() == (self.notebook.get_n_pages() - 1) ):
            self.notebook.set_current_page(0) # back to first picture
        else: self.notebook.next_page()
        if(self.installation.checkStatus() == True):
            self.finishButton.set_opacity(1)
            self.finishButton.set_sensitive(True)
            self.showFinalReport.set_opacity(1)
            self.showFinalReport.set_sensitive(True)
            self.spinner1.stop()
            self.installLabel.set_text(tr('Installation Finished'))
            self.installation.finalReportAppend(tr('Installation Finished'))
            ################# New final Report windows
            if self.installation.showReportAfterInstall and self.opt3report.get_active() == True:
                self.showReportWindows()
            #################
        else: self.installStatus()
    def hideMessage(self, widget):
        """Hide the message dialog ("messageOk" signal handler)."""
        self.message.hide()
def showOrHideAdvOpt(self, widget):
if self.revadvoptions.get_reveal_child() == True:
self.revadvoptions.set_reveal_child(False)
else: self.revadvoptions.set_reveal_child(True)
def insertCode(self, widget):
if self.codebox.get_text_length() == 8:
self.codeToSearch = self.dataConnect.getDataSearch('OnlyValidCodes', 'companykey', self.codebox.get_text(), "*")
if self.codeToSearch:
self.companyId = self.codeToSearch[0][3]
search = self.dataConnect.getData('company', self.companyId, "name")
search = search[0][0]
self.companyName = search
#For personal notifications, its mean, when i found a company
notiCount = self.checkAvailableNotifications()
self.installation.checkNotifications(self.companyId)
self.checkAvailableNotifications()
if notiCount != False:
notiCount = notiCount - self.actualNotify
for n in range(notiCount):
self.nextNotify()
##
self.messagebar.set_text(tr("Code for company: ") + "{}".format(self.companyName))
print (tr("Code for company: ")) + "{}".format(self.companyName)
self.initial.set_sensitive(True)
else:
self.messagebar.set_text(tr("Invalid or Expired Code"))
else:
self.initial.set_sensitive(False)
self.messagebar.set_text("")
    def initialClick(self, widget):
        """Leave the initial screen: either open the company-search window
        (the special "OpenCode" name) or start the code-driven flow, pre-filling
        SVN credentials stored with the code when present."""
        if self.companyName == "OpenCode":
            self.userCodeFlag = False
            self.initialwindow.hide()
            self.window.show_all()
            self.actualWindowPos = 1
        else:
            self.installation.setInstallPath(self.installation.getInstallPath(), self.companyName)
            self.userCodeFlag = True
            # Credentials may be attached to the code row (indices 1 and 2).
            if self.codeToSearch[0][1]:
                self.inputSVNUser.set_text(self.codeToSearch[0][1])
            if self.codeToSearch[0][2]:
                self.inputSVNPassword.set_text(self.codeToSearch[0][2])
            self.installation.initialization(self.companyId) #Needed, because this initialization starts when you switch to window1
            self.actualWindowPos = 2 #defined in two because, 'startInstall' make 'nextWindow' if SVN credentials are valid
            if self.advoptions.get_active() == True:
                self.workWithAdvancedOptions(widget)
            else: #this is the behavior standard if not selected advanced options
                if self.userCodeFlag:
                    self.messagebar.set_text(tr("Checking Username and Password from SVN"))
                    self.startInstall(widget)
                else:
                    self.window.show_all()
"""Initializate all values of advanced options by default"""
def advOptInitial(self):
self.opt1install.set_active(False) #Select install path
self.opt2svn.set_active(False) #input svn credentials
self.opt3report.set_active(False) #show report after installation
self.opt3report.set_sensitive(True)
self.opt4shortcut.set_active(True) #create shortcut after install, only windows.
self.opt5console.set_active(False)
self.opt6companyname.set_active(True) #By Default, the last folder must be the company name
    def workWithAdvancedOptions(self, widget):
        """Apply the advanced-option toggles: optionally detour through the
        path/credentials window, and configure report, shortcut, console and
        folder-naming flags before (possibly) starting the install."""
        shouldStartInstall = True
        if self.opt1install.get_active() or self.opt2svn.get_active(): #Select install path
            # Either manual option forces a stop at window1 first.
            self.actualWindowPos = 2
            if self.opt1install.get_active() == False:
                self.folderChooser.set_sensitive(False)
            else:
                self.folderChooser.set_sensitive(True)
            self.preparateWin1()
            self.window1.show_all()
            self.initialwindow.hide()
            shouldStartInstall = False
            if not self.opt2svn.get_active():#input svn credentials
                self.inputSVNUser.set_sensitive(False)
                self.inputSVNPassword.set_sensitive(False)
            else:
                self.inputSVNUser.set_sensitive(True)
                self.inputSVNPassword.set_sensitive(True)
        if self.opt3report.get_active(): #show report after installation
            self.installation.showReportAfterInstall = True
        if self.opt4shortcut.get_active(): #create shortcut after install, only windows.
            self.installation.createShortcut = True
            if self.opt5console.get_active(): #create shortcut with --console
                self.installation.openConsole = True
            else:
                self.installation.openConsole = False
        else:
            self.installation.createShortcut = False
        if self.opt6companyname.get_active(): #It's a copy of def companyFolderSetter().
            self.installation.disableLastFolderAsCompanyName = False
        else:
            self.installation.disableLastFolderAsCompanyName = True
        if shouldStartInstall:
            self.messagebar.set_text(tr("Checking Username and Password from SVN"))
            self.startInstall(widget)
def shortcutButtonToggled(self, widget):
if self.opt4shortcut.get_active():
self.opt5console.set_sensitive(True)
else:
self.opt5console.set_active(False)
self.opt5console.set_sensitive(False)
def companyFolderSetter(self, widget):
if self.opt6companyname.get_active():
self.installation.disableLastFolderAsCompanyName = False
else:
self.installation.disableLastFolderAsCompanyName = True
    def nextNotify(self, widget=None):
        """Collapse the current notification, then reveal the next one after 500 ms."""
        self.notifyAnimation.set_reveal_child(False)
        GObject.timeout_add(500,self.showAgainNotify)
def showAgainNotify(self): #this function is only for animation
totalMsg = len(self.installation.notificationsList)
self.actualNotify += 1
if self.actualNotify >= totalMsg:
self.actualNotify = 0
if totalMsg != 0:
msg = self.installation.notificationsList[self.actualNotify][self.messagePos]
self.textNotification.set_markup(msg)
self.notifyAnimation.set_reveal_child(True)
return False
def closeNotify(self,widget, userDate=1):
myActualList = self.installation.notificationsList
myActualList.remove(myActualList[self.actualNotify]) #Delete from the list actual Notify
self.validateNextNotifyButton() #call the next for move on.
self.nextNotify()
def validateNextNotifyButton(self):
if len(self.installation.notificationsList) <=1:
self.infoBarButton.set_opacity(0.5)
self.infoBarButton.set_sensitive(False)
if len(self.installation.notificationsList) >1:
self.infoBarButton.set_opacity(1)
self.infoBarButton.set_sensitive(True)
if len(self.installation.notificationsList) ==0:
self.notificationRevealer.set_reveal_child(False)
def checkAvailableNotifications(self):
| |
'':
self.addExdata(self._buf, False)
self._bufSize = int(size) << 20
self._buf = ''
return self
    def closeBuffer(self):
        """Disable buffered submission (delegates to openBuffer(0), which
        also flushes any pending buffered data)."""
        return self.openBuffer(0)
    def beginRebuild(self):
        """Enter index-rebuild mode on the server and mark it locally."""
        self.execCommand({'cmd': CMD_INDEX_REBUILD, 'arg1': 0}, CMD_OK_DB_REBUILD)
        self._rebuild = True
        return self
    def endRebuild(self):
        """Finish a rebuild started by beginRebuild (no-op when not rebuilding)."""
        if self._rebuild is True:
            # Clear the flag first so __del__ never retries a failed end.
            self._rebuild = False
            self.execCommand({'cmd': CMD_INDEX_REBUILD, 'arg1': 1}, CMD_OK_DB_REBUILD)
        return self
def stopRebuild(self):
try:
self.execCommand({'cmd': CMD_INDEX_REBUILD, 'arg1': 2}, CMD_OK_DB_REBUILD)
self._rebuild = False
except XSException as e:
if e.getCode() is not CMD_ERR_WRONGPLACE:
raise XSException(*e.args)
return self
def setDb(self, name):
self.execCommand({'cmd': CMD_INDEX_SET_DB, 'buf': name}, CMD_OK_DB_CHANGED)
return self
def flushLogging(self):
try:
self.execCommand(CMD_FLUSH_LOGGING, CMD_OK_LOG_FLUSHED)
except XSException as e:
if e.getCode() is CMD_ERR_BUSY:
return False
raise XSException(*e.args)
return True
def flushIndex(self):
try:
self.execCommand(CMD_INDEX_COMMIT, CMD_OK_DB_COMMITED)
except XSException as e:
if e.getCode() == CMD_ERR_BUSY or e.getCode() == CMD_ERR_RUNNING:
return False
raise XSException(*e.args)
return True
    def getCustomDict(self):
        """Fetch the current custom (user) dictionary content from the server."""
        res = self.execCommand(CMD_INDEX_USER_DICT, CMD_OK_INFO)
        return res.buf
    def setCustomDict(self, content):
        """Upload *content* as the new custom (user) dictionary."""
        cmd = {'cmd': CMD_INDEX_USER_DICT, 'arg1': 1, 'buf': content}
        self.execCommand(cmd, CMD_OK_DICT_SAVED)
    def close(self, ioerr=False):
        """Flush the local submission buffer, then close the connection."""
        self.closeBuffer()
        super(XSIndex, self).close(ioerr)
def _appendBuffer(self, buf):
self._buf += buf
if len(self._buf) >= self._bufSize:
self.addExdata(self._buf, False)
self._buf = ''
    def __del__(self):
        """Best-effort cleanup: leave rebuild mode and close helper connections."""
        if self._rebuild is True:
            try:
                self.endRebuild()
            except Exception as e:
                # A destructor must never raise; swallow cleanup failures.
                pass
        for srv in self._adds:
            srv.close()
        self._adds = []
        super(XSIndex, self).__del__()
class XSSearch(XSServer):
    """Client for the search side of the XS server protocol."""
    PAGE_SIZE = 10          # default page size when no explicit limit was set
    LOG_DB = 'log_db'       # name of the internal search-log database
    # NOTE(review): the mutable class-level defaults below ([] lists) are
    # shared by all instances until open()/search() rebind them per instance
    # — confirm instances are never mutated before open().
    _defaultOp = CMD_QUERY_OP_AND
    _prefix = None          # field-prefix cache, reset in open()
    _fieldSet = None
    _resetScheme = False
    _query = None           # last query installed via setQuery()
    _terms = []             # cached parsed terms for the empty query
    _count = None           # cached total for the empty query
    _lastCount = None       # match count of the most recent search()
    _highlight = None       # raw query kept for highlighting
    _curDb = None           # current database name
    _curDbs = []            # additional databases added via addDb()
    _lastDb = None          # previous database selection
    _lastDbs = []
    _facets = []            # facet counts gathered by the last search()
    _limit = 0              # paging set by setLimit()
    _offset = 0
    _charset = 'UTF-8'
    def open(self, conn):
        """Open the connection and reset per-instance query state."""
        super(XSSearch, self).open(conn)
        self._prefix = {}
        self._fieldSet = False
        self._lastCount = False
def setCharset(self, charset):
self._charset = charset.upper()
if self._charset == 'UTF8':
self._charset = 'UTF-8'
return self
def setFuzzy(self, value=True):
self._defaultOp = CMD_QUERY_OP_OR if value is True else CMD_QUERY_OP_AND
return self
def setCutOff(self, percent, weight=0):
percent = max(0, min(100, int(percent)))
weight = max(0, (int(weight * 10) & 255))
cmd = XSCommand(CMD_SEARCH_SET_CUTOFF, percent, weight)
self.execCommand(cmd)
return self
def setRequireMatchedTerm(self, value=True):
arg1 = CMD_SEARCH_MISC_MATCHED_TERM
arg2 = 1 if value is True else 0
cmd = XSCommand(CMD_SEARCH_SET_MISC, arg1, arg2)
self.execCommand(cmd)
return self
def setAutoSynonyms(self, value=True):
flag = CMD_PARSE_FLAG_BOOLEAN | CMD_PARSE_FLAG_PHRASE | CMD_PARSE_FLAG_LOVEHATE
if value is True:
flag |= CMD_PARSE_FLAG_AUTO_MULTIWORD_SYNONYMS
cmd = {'cmd': CMD_QUERY_PARSEFLAG, 'arg': flag}
self.execCommand(cmd)
return self
def setSynonymScale(self, value):
arg1 = CMD_SEARCH_MISC_SYN_SCALE
arg2 = max(0, (int(value * 100) & 255))
cmd = XSCommand(CMD_SEARCH_SET_MISC, arg1, arg2)
self.execCommand(cmd)
return self
def getAllSynonyms(self, limit=0, offset=0, stemmed=False):
page = pack('II', int(offset), int(limit)) if limit > 0 else ''
cmd = {'cmd': CMD_SEARCH_GET_SYNONYMS, 'buf1': page}
cmd['arg1'] = 1 if stemmed == True else 0
res = self.execCommand(cmd, CMD_OK_RESULT_SYNONYMS)
ret = {}
if res.buf:
for line in res.buf.split("\n"):
value = "\t".join(line)
key = value.pop(0)
ret[key] = value
return ret
def getQuery(self, query =None):
query = '' if query is None else self.preQueryString(query)
cmd = XSCommand(CMD_QUERY_GET_STRING, 0, self._defaultOp, query)
res = self.execCommand(cmd, CMD_OK_QUERY_STRING)
if 'VALUE_RANGE' in res.buf:
regex = '/(VALUE_RANGE) (\d+) (\S+) (\S+?)(?=\))/'
res.buf = re.sub(regex, self.formatValueRange, res.buf)
if 'VALUE_GE' in res.buf or 'VALUE_LE' in res.buf:
regex = '/(VALUE_[GL]E) (\d+) (\S+?)(?=\))/'
res.buf = re.sub(regex, self.formatValueRange, res.buf)
return XS.convert(res.buf, self._charset, 'UTF-8')
def setQuery(self, query):
self.clearQuery()
if query is not None:
self._query = query
self.addQueryString(query)
return self
def setMultiSort(self, fields, reverse=False, relevance_first=False):
if not isinstance(fields, dict):
return self.setSort(fields, not reverse, relevance_first)
buf = ''
for key in fields:
value = fields[key]
if isinstance(value, bool):
vno = self.xs.getField(key, True).vno
asc = value
else:
vno = self.xs.getField(value, True).vno
asc = False
if vno != XSFieldScheme.MIXED_VNO:
buf += '%s%s' % (chr(vno), chr(1 if asc else 0))
if not buf:
_type = CMD_SORT_TYPE_MULTI
if relevance_first:
_type |= CMD_SORT_FLAG_RELEVANCE
if not reverse:
_type |= CMD_SORT_FLAG_ASCENDING
cmd = XSCommand(CMD_SEARCH_SET_SORT, _type, 0, buf)
self.execCommand(cmd)
return self
def setSort(self, field, asc=False, relevance_first=False):
if not isinstance(fields, dict):
return self.setSort(fields, asc, relevance_first)
if field is None:
cmd = XSCommand(CMD_SEARCH_SET_SORT, CMD_SORT_TYPE_RELEVANCE)
else:
_type = CMD_SORT_TYPE_VALUE
if relevance_first:
_type |= CMD_SORT_FLAG_RELEVANCE
if asc:
_type |= CMD_SORT_FLAG_ASCENDING
vno = self.xs.getField(field, True).vno
cmd = XSCommand(CMD_SEARCH_SET_SORT, _type, vno)
self.execCommand(cmd)
return self
def setDocOrder(self, asc=False):
_type = CMD_SORT_TYPE_DOCID | (CMD_SORT_FLAG_ASCENDING if asc else 0)
cmd = XSCommand(CMD_SEARCH_SET_SORT, _type)
self.execCommand(cmd)
return self
def setCollapse(self, field, num=1):
vno = XSFieldScheme.MIXED_VNO if field is None else self.xs.getField(field, True).vno
_max = min(255, int(num))
cmd = XSCommand(CMD_SEARCH_SET_COLLAPSE, _max, vno)
self.execCommand(cmd)
return self
def addRange(self, field, _from, to):
if not _from:
_from = None
if not to:
to = None
if _from is not None or to is not None:
if len(_from) > 255 or len(to) > 255:
raise XSException('Value of range is too long')
vno = self.xs.getField(field).vno
_from = XS.convert(_from, 'UTF-8', self._charset)
to = XS.convert(to, 'UTF-8', self._charset)
if _from is None:
cmd = XSCommand(CMD_QUERY_VALCMP, CMD_QUERY_OP_FILTER, vno, to, chr(CMD_VALCMP_LE))
elif to is None:
cmd = XSCommand(CMD_QUERY_VALCMP, CMD_QUERY_OP_FILTER, vno, _from, chr(CMD_VALCMP_GE))
else:
cmd = XSCommand(CMD_QUERY_RANGE, CMD_QUERY_OP_FILTER, vno, _from, to)
self.execCommand(cmd)
return self
    def addWeight(self, field, term, weight=1):
        """Add *term* on *field* to the query with the AND_MAYBE operator and the given weight."""
        return self.addQueryTerm(field, term, CMD_QUERY_OP_AND_MAYBE, weight)
def setFacets(self, field, exact=False):
buf = ''
if not isinstance(field, (list, tuple)):
field = [field]
for name in field:
ff = self.xs.getField(name)
if ff.type is not XSFieldMeta.TYPE_STRING:
raise XSException("Field `name' cann't be used for facets search, can only be string type")
buf += chr(ff.vno)
cmd = {'cmd': CMD_SEARCH_SET_FACETS, 'buf': buf}
cmd['arg1'] = 1 if exact is True else 0
self.execCommand(cmd)
return self
def getFacets(self, field=None):
if field is None:
return self._facets
return self._facets[field] if field in self._facets else {}
def setScwsMulti(self, level):
level = int(level)
if level >= 0 and level < 16:
cmd = {'cmd': CMD_SEARCH_SCWS_SET, 'arg1': CMD_SCWS_SET_MULTI, 'arg2': level}
self.execCommand(cmd)
return self
def setLimit(self, limit, offset=0):
self._limit = int(limit)
self._offset = int(offset)
return self
def setDb(self, name):
name = str(name) if name else ''
self.execCommand({'cmd': CMD_SEARCH_SET_DB, 'buf': name})
self._lastDb = self._curDb
self._lastDbs = self._curDbs
self._curDb = name
self._curDbs = []
return self
def addDb(self, name):
name = str(name)
self.execCommand({'cmd': CMD_SEARCH_ADD_DB, 'buf': name})
self._curDbs.append(name)
return self
    def markResetScheme(self):
        """Flag that the field scheme must be re-sent before the next request."""
        self._resetScheme = True
    def terms(self, query=None, convert=True):
        """Return the search terms parsed from *query*; results for the empty
        query are cached in self._terms."""
        query = '' if query is None else self.preQueryString(query)
        # NOTE(review): _terms defaults to [] at class level, so an empty
        # cached list counts as a valid cache here — confirm that is intended.
        if query == '' and self._terms is not None:
            ret = self._terms
        else:
            cmd = XSCommand(CMD_QUERY_GET_TERMS, 0, self._defaultOp, query)
            res = self.execCommand(cmd, CMD_OK_QUERY_TERMS)
            ret = []
            tmps = res.buf.split(' ')
            for tmp in tmps:
                # Skip empty fragments and field-prefixed (scoped) terms.
                if not tmp or ':' in tmp:
                    continue
                ret.append(tmp)
            if not query:
                self._terms = ret
        return XS.convert(ret, self._charset, 'UTF-8') if convert else ret
    def count(self, query=None):
        """Return the estimated number of matches for *query*; the result for
        the empty query is cached in self._count."""
        query = '' if query is None else self.preQueryString(query)
        if not query and self._count is not None:
            return self._count
        cmd = XSCommand(CMD_SEARCH_GET_TOTAL, 0, self._defaultOp, query)
        res = self.execCommand(cmd, CMD_OK_SEARCH_TOTAL)
        ret = unpack('I', res.buf)  # 32-bit unsigned total
        if not query:
            self._count = ret[0]
        return ret[0]
def search(self, query=None, saveHighlight=True):
if self._curDb is not self.LOG_DB and saveHighlight:
self._highlight = query
query = '' if query is None else self.preQueryString(query)
page = pack('II', self._offset, self._limit if self._limit > 0 else self.PAGE_SIZE)
cmd = XSCommand(CMD_SEARCH_GET_RESULT, 0, self._defaultOp, query, page)
res = self.execCommand(cmd, CMD_OK_RESULT_BEGIN)
tmp = unpack('I', res.buf)
self._lastCount = tmp[0]
ret = []
self._facets = {}
vnoes = self.xs.getScheme().getVnoMap()
while 1:
res = self.getRespond()
if res.cmd == CMD_SEARCH_RESULT_FACETS:
off = 0
while (off + 6) < len(res.buf):
tmp = unpack('BBI', res.buf[off:off+6])
if tmp[0] in vnoes:
name = vnoes[tmp[0]]
value = res.buf[off+6:off+6+tmp[1]]
if name in self._facets:
self._facets[name] = {}
self._facets[name][value] = tmp[2]
off += tmp[1] + 6
elif res.cmd == CMD_SEARCH_RESULT_DOC:
doc = XSDocument(res.buf, self._charset)
ret.append(doc)
elif res.cmd == CMD_SEARCH_RESULT_FIELD:
try:
name = vnoes[res.arg] if res.arg in vnoes else res.arg
doc.setField(name, res.buf)
except NameError:
pass
elif res.cmd == CMD_SEARCH_RESULT_MATCHED:
try:
doc.setField('matched', explode(' ', res.buf), True)
except NameError:
pass
elif res.cmd == CMD_OK and res.arg == CMD_OK_RESULT_END:
break
else:
msg = 'Unexpected respond in search {CMD:' + res.cmd + ', ARG:' + res.arg + '}'
raise XSException(msg)
if not query:
self._count = self._lastCount
| |
x4 * x4) / 36.0) / (L * L)
tmpVar2 = (x3 / (2 * L)) * np.sqrt(E / (4 * G))
PC = tmpVar * (1 - tmpVar2)
g[0] = tauMax - tau
g[1] = sigmaMax - sigma
g[2] = x4 - x1
g[3] = PC - P
g = np.where(g < 0, -g, 0)
return f, g
class CRE23():
    """Constrained two-objective real-world test problem CRE23.

    Four decision variables, four constraints.  evaluate() returns the
    objective vector and the constraint-violation magnitudes (zero when a
    constraint is satisfied).
    """

    def __init__(self):
        self.problem_name = 'CRE23'
        self.n_objectives = 2
        self.n_variables = 4
        self.n_constraints = 4
        self.ubound = np.zeros(self.n_variables)
        self.lbound = np.zeros(self.n_variables)
        # (low, high) bounds, one pair per decision variable
        for pos, (low, high) in enumerate(((55, 80), (75, 110), (1000, 3000), (11, 20))):
            self.lbound[pos] = low
            self.ubound[pos] = high

    def evaluate(self, x):
        """Return (objectives f, constraint violations g) for decision vector x."""
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_constraints)
        v1, v2, v3, v4 = x[0], x[1], x[2], x[3]
        # First original objective function
        f[0] = 4.9 * 1e-5 * (v2 * v2 - v1 * v1) * (v4 - 1.0)
        # Second original objective function
        f[1] = ((9.82 * 1e6) * (v2 * v2 - v1 * v1)) / (v3 * v4 * (v2 * v2 * v2 - v1 * v1 * v1))
        # Constraint functions
        g[0] = (v2 - v1) - 20.0
        g[1] = 0.4 - (v3 / (3.14 * (v2 * v2 - v1 * v1)))
        g[2] = 1.0 - (2.22 * 1e-3 * v3 * (v2 * v2 * v2 - v1 * v1 * v1)) / np.power((v2 * v2 - v1 * v1), 2)
        g[3] = (2.66 * 1e-2 * v3 * v4 * (v2 * v2 * v2 - v1 * v1 * v1)) / (v2 * v2 - v1 * v1) - 900.0
        # Negative g means violation: report its magnitude, otherwise zero.
        g = np.where(g < 0, -g, 0)
        return f, g
class CRE24():
    """Constrained two-objective real-world test problem CRE24.

    Seven decision variables (the third is integer-valued), eleven
    constraints.  evaluate() returns the objectives and the
    constraint-violation magnitudes.
    """

    def __init__(self):
        self.problem_name = 'CRE24'
        self.n_objectives = 2
        self.n_variables = 7
        self.n_constraints = 11
        self.lbound = np.zeros(self.n_variables)
        self.ubound = np.zeros(self.n_variables)
        # (low, high) bounds, one pair per decision variable
        bounds = ((2.6, 3.6), (0.7, 0.8), (17, 28), (7.3, 8.3), (7.3, 8.3), (2.9, 3.9), (5.0, 5.5))
        for pos, (low, high) in enumerate(bounds):
            self.lbound[pos] = low
            self.ubound[pos] = high

    def evaluate(self, x):
        """Return (objectives f, constraint violations g) for decision vector x."""
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_constraints)
        x1, x2, x4, x5, x6, x7 = x[0], x[1], x[3], x[4], x[5], x[6]
        x3 = np.round(x[2])  # integer-valued variable
        # First original objective function (weight)
        f[0] = 0.7854 * x1 * (x2 * x2) * (((10.0 * x3 * x3) / 3.0) + (14.933 * x3) - 43.0934) - 1.508 * x1 * (x6 * x6 + x7 * x7) + 7.477 * (x6 * x6 * x6 + x7 * x7 * x7) + 0.7854 * (x4 * x6 * x6 + x5 * x7 * x7)
        # Second original objective function (stress)
        tmpVar = np.power((745.0 * x4) / (x2 * x3), 2.0) + 1.69 * 1e7
        f[1] = np.sqrt(tmpVar) / (0.1 * x6 * x6 * x6)
        # Constraint functions
        g[0] = -(1.0 / (x1 * x2 * x2 * x3)) + 1.0 / 27.0
        g[1] = -(1.0 / (x1 * x2 * x2 * x3 * x3)) + 1.0 / 397.5
        g[2] = -(x4 * x4 * x4) / (x2 * x3 * x6 * x6 * x6 * x6) + 1.0 / 1.93
        g[3] = -(x5 * x5 * x5) / (x2 * x3 * x7 * x7 * x7 * x7) + 1.0 / 1.93
        g[4] = -(x2 * x3) + 40.0
        g[5] = -(x1 / x2) + 12.0
        g[6] = -5.0 + (x1 / x2)
        g[7] = -1.9 + x4 - 1.5 * x6
        g[8] = -1.9 + x5 - 1.1 * x7
        g[9] = -f[1] + 1300.0
        tmpVar = np.power((745.0 * x5) / (x2 * x3), 2.0) + 1.575 * 1e8
        g[10] = -np.sqrt(tmpVar) / (0.1 * x7 * x7 * x7) + 1100.0
        # Negative g means violation: report its magnitude, otherwise zero.
        g = np.where(g < 0, -g, 0)
        return f, g
class CRE25():
    """Constrained two-objective real-world test problem CRE25.

    Four integer-valued decision variables, one constraint.
    """

    def __init__(self):
        self.problem_name = 'CRE25'
        self.n_objectives = 2
        self.n_variables = 4
        self.n_constraints = 1
        self.lbound = np.full(self.n_variables, 12)
        self.ubound = np.full(self.n_variables, 60)

    def evaluate(self, x):
        """Return (objectives f, constraint violations g) for decision vector x."""
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_constraints)
        # all four variables must be integer values
        rounded = [np.round(v) for v in (x[0], x[1], x[2], x[3])]
        z1, z2, z3, z4 = rounded
        # First original objective function
        f[0] = np.abs(6.931 - ((z3 / z1) * (z4 / z2)))
        # Second original objective function (largest of the four variables)
        f[1] = max(rounded)
        g[0] = 0.5 - (f[0] / 6.931)
        # Negative g means violation: report its magnitude, otherwise zero.
        g = np.where(g < 0, -g, 0)
        return f, g
class CRE31():
    """CRE31 test problem: 3 objectives, 10 constraints, 7 variables."""

    def __init__(self):
        self.problem_name = 'CRE31'
        self.n_objectives = 3
        self.n_variables = 7
        self.n_constraints = 10
        # Box constraints for the seven design variables.
        self.lbound = np.array([0.5, 0.45, 0.5, 0.5, 0.875, 0.4, 0.4])
        self.ubound = np.array([1.5, 1.35, 1.5, 1.5, 2.625, 1.2, 1.2])

    def evaluate(self, x):
        """Return (objective values, constraint violations) for design vector x."""
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_constraints)

        x1, x2, x3, x4, x5, x6, x7 = x[0], x[1], x[2], x[3], x[4], x[5], x[6]

        # First original objective function
        f[0] = 1.98 + 4.9 * x1 + 6.67 * x2 + 6.98 * x3 + 4.01 * x4 + 1.78 * x5 + 0.00001 * x6 + 2.73 * x7
        # Second original objective function
        f[1] = 4.72 - 0.5 * x4 - 0.19 * x2 * x3
        # Third original objective function: average of the two velocity terms below.
        Vmbp = 10.58 - 0.674 * x1 * x2 - 0.67275 * x2
        Vfd = 16.45 - 0.489 * x3 * x7 - 0.843 * x5 * x6
        f[2] = 0.5 * (Vmbp + Vfd)

        # Constraint functions (g >= 0 means satisfied before clipping below).
        g[0] = 1 - (1.16 - 0.3717 * x2 * x4 - 0.0092928 * x3)
        g[1] = 0.32 - (0.261 - 0.0159 * x1 * x2 - 0.06486 * x1 - 0.019 * x2 * x7 + 0.0144 * x3 * x5 + 0.0154464 * x6)
        g[2] = 0.32 - (0.214 + 0.00817 * x5 - 0.045195 * x1 - 0.0135168 * x1 + 0.03099 * x2 * x6 - 0.018 * x2 * x7 + 0.007176 * x3 + 0.023232 * x3 - 0.00364 * x5 * x6 - 0.018 * x2 * x2)
        g[3] = 0.32 - (0.74 - 0.61 * x2 - 0.031296 * x3 - 0.031872 * x7 + 0.227 * x2 * x2)
        g[4] = 32 - (28.98 + 3.818 * x3 - 4.2 * x1 * x2 + 1.27296 * x6 - 2.68065 * x7)
        g[5] = 32 - (33.86 + 2.95 * x3 - 5.057 * x1 * x2 - 3.795 * x2 - 3.4431 * x7 + 1.45728)
        g[6] = 32 - (46.36 - 9.9 * x2 - 4.4505 * x1)
        g[7] = 4 - f[1]
        g[8] = 9.9 - Vmbp
        g[9] = 15.7 - Vfd

        # Convert signed constraint values to non-negative violation amounts.
        g = np.where(g < 0, -g, 0)
        return f, g
class CRE32():
def __init__(self):
    """CRE32 constructor: 3 objectives, 9 constraints, 6 design variables."""
    # Variable names used in `evaluate` (x_L, x_B, x_D, x_T, x_Vk, x_CB and
    # `displacement`) suggest a ship sizing problem — TODO confirm against the
    # CRE problem suite definition.
    self.problem_name = 'CRE32'
    self.n_objectives = 3
    self.n_variables = 6
    self.n_constraints = 9
    self.lbound = np.zeros(self.n_variables)
    self.ubound = np.zeros(self.n_variables)
    # Lower bounds, one per variable.
    self.lbound[0] = 150.0
    self.lbound[1] = 20.0
    self.lbound[2] = 13.0
    self.lbound[3] = 10.0
    self.lbound[4] = 14.0
    self.lbound[5] = 0.63
    # Upper bounds, one per variable.
    self.ubound[0] = 274.32
    self.ubound[1] = 32.31
    self.ubound[2] = 25.0
    self.ubound[3] = 11.71
    self.ubound[4] = 18.0
    self.ubound[5] = 0.75
def evaluate(self, x):
f = np.zeros(self.n_objectives)
# NOT g
constraintFuncs = np.zeros(self.n_constraints)
x_L = x[0]
x_B = x[1]
x_D = x[2]
x_T = x[3]
x_Vk = x[4]
x_CB = x[5]
displacement = 1.025 * x_L * x_B * x_T * x_CB
V = 0.5144 * x_Vk
g = 9.8065
Fn = V / np.power(g * x_L, 0.5)
a = | |
# 'Linear'
if data[0] is None and data[1] is None:
dx = self.draw_app.x
dy = self.draw_app.y
else:
dx = data[0]
dy = data[1]
geo_list = []
geo = None
self.points = [dx, dy]
for item in range(self.drill_array_size):
if self.drill_axis == 'X':
geo = self.util_shape(((dx + (self.drill_pitch * item)), dy))
if self.drill_axis == 'Y':
geo = self.util_shape((dx, (dy + (self.drill_pitch * item))))
if self.drill_axis == 'A':
x_adj = self.drill_pitch * math.cos(math.radians(self.drill_linear_angle))
y_adj = self.drill_pitch * math.sin(math.radians(self.drill_linear_angle))
geo = self.util_shape(
((dx + (x_adj * item)), (dy + (y_adj * item)))
)
if static is None or static is False:
geo_list.append(affinity.translate(geo, xoff=(dx - self.last_dx), yoff=(dy - self.last_dy)))
else:
geo_list.append(geo)
# self.origin = data
self.last_dx = dx
self.last_dy = dy
return DrawToolUtilityShape(geo_list)
elif self.drill_array == 'circular': # 'Circular'
if data[0] is None and data[1] is None:
cdx = self.draw_app.x
cdy = self.draw_app.y
else:
cdx = data[0]
cdy = data[1]
utility_list = []
try:
radius = distance((cdx, cdy), self.origin)
except Exception:
radius = 0
if radius == 0:
self.draw_app.delete_utility_geometry()
if len(self.pt) >= 1 and radius > 0:
try:
if cdx < self.origin[0]:
radius = -radius
# draw the temp geometry
initial_angle = math.asin((cdy - self.origin[1]) / radius)
temp_circular_geo = self.circular_util_shape(radius, initial_angle)
# draw the line
temp_points = [x for x in self.pt]
temp_points.append([cdx, cdy])
temp_line = LineString(temp_points)
for geo_shape in temp_circular_geo:
utility_list.append(geo_shape.geo)
utility_list.append(temp_line)
return DrawToolUtilityShape(utility_list)
except Exception as e:
log.debug("DrillArray.utility_geometry -- circular -> %s" % str(e))
def circular_util_shape(self, radius, angle):
    """Build the drill shapes of a circular array.

    The original implementation duplicated the whole placement loop for the
    'CW' and non-'CW' cases, the only difference being the sign of the
    angular step; the two loops are unified via a direction factor.

    :param radius: radius of the circular array; may be negative, which
        mirrors the placement around the origin
    :param angle: starting angle (radians) for the first drill
    :return: list of DrawToolShape objects, one per drill in the array
    """
    self.drill_direction = self.draw_app.ui.drill_array_dir_radio.get_value()
    self.drill_angle = self.draw_app.ui.drill_angle_entry.get_value()

    # 'CW' steps clockwise (negative angular increment); anything else steps CCW.
    direction = -1.0 if self.drill_direction == 'CW' else 1.0

    circular_geo = []
    for i in range(self.drill_array_size):
        angle_radians = math.radians(self.drill_angle * i)
        theta = direction * angle_radians + angle
        x = self.origin[0] + radius * math.cos(theta)
        y = self.origin[1] + radius * math.sin(theta)
        geo_sol = self.util_shape((x, y))
        circular_geo.append(DrawToolShape(geo_sol))
    return circular_geo
def util_shape(self, point):
    """Return the drill marker: two crossed lines (a '+') centered on *point*."""
    # Fall back to the editor's current mouse position when no point is given.
    if point[0] is None and point[1] is None:
        cx, cy = self.draw_app.x, self.draw_app.y
    else:
        cx, cy = point[0], point[1]

    half = self.selected_dia / 2
    horizontal = ((cx - half, cy), (cx + half, cy))
    vertical = ((cx, cy - half), (cx, cy + half))
    return MultiLineString([horizontal, vertical])
def make(self):
    """Commit the previewed drill array: register the click points and build
    the final geometry (linear or circular), then finish the tool."""
    self.geometry = []
    geo = None

    # Restore the default mouse cursor (the tool may have overridden it).
    try:
        QtGui.QGuiApplication.restoreOverrideCursor()
    except Exception:
        pass

    # add the point to drills if the diameter is a key in the dict, if not, create it add the drill location
    # to the value, as a list of itself
    if self.selected_dia not in self.draw_app.points_edit:
        self.draw_app.points_edit[self.selected_dia] = []
    for i in range(self.drill_array_size):
        self.draw_app.points_edit[self.selected_dia].append(self.points)

    self.draw_app.current_storage = self.draw_app.storage_dict[self.selected_dia]

    if self.drill_array == 'linear':  # 'Linear'
        # Place one drill per step along X, Y or an arbitrary angle 'A'.
        for item in range(self.drill_array_size):
            if self.drill_axis == 'X':
                geo = self.util_shape(((self.points[0] + (self.drill_pitch * item)), self.points[1]))
            if self.drill_axis == 'Y':
                geo = self.util_shape((self.points[0], (self.points[1] + (self.drill_pitch * item))))
            if self.drill_axis == 'A':
                x_adj = self.drill_pitch * math.cos(math.radians(self.drill_linear_angle))
                y_adj = self.drill_pitch * math.sin(math.radians(self.drill_linear_angle))
                geo = self.util_shape(
                    ((self.points[0] + (x_adj * item)), (self.points[1] + (y_adj * item)))
                )
            self.geometry.append(DrawToolShape(geo))
    else:  # 'Circular'
        # Refuse arrays whose total angular span exceeds a full circle.
        if (self.drill_angle * self.drill_array_size) > 360:
            self.draw_app.app.inform.emit('[WARNING_NOTCL] %s' %
                                          _("Too many items for the selected spacing angle."))
            self.draw_app.app.jump_signal.disconnect()
            return

        radius = distance(self.destination, self.origin)
        if radius == 0:
            self.draw_app.app.inform.emit('[ERROR_NOTCL] %s' % _("Failed."))
            self.draw_app.delete_utility_geometry()
            self.draw_app.select_tool('drill_select')
            return

        # A click left of the origin mirrors the arc; encoded as negative radius.
        if self.destination[0] < self.origin[0]:
            radius = -radius
        initial_angle = math.asin((self.destination[1] - self.origin[1]) / radius)
        circular_geo = self.circular_util_shape(radius, initial_angle)
        self.geometry += circular_geo

    self.complete = True
    self.draw_app.app.inform.emit('[success] %s' % _("Done."))
    self.draw_app.in_action = False
    self.draw_app.ui.array_frame.hide()
    self.draw_app.app.jump_signal.disconnect()
def on_key(self, key):
    """Handle key presses while the drill-array tool is active."""
    modifiers = QtWidgets.QApplication.keyboardModifiers()
    if modifiers == QtCore.Qt.ShiftModifier:
        mod_key = 'Shift'
    elif modifiers == QtCore.Qt.ControlModifier:
        mod_key = 'Control'
    else:
        mod_key = None

    if mod_key == 'Control':
        pass
    elif mod_key is None:
        # SPACE cycles the linear-array axis: X -> Y -> A(ngle) -> X.
        if key == QtCore.Qt.Key_Space:
            axis_radio = self.draw_app.ui.drill_axis_radio
            next_axis = {'X': 'Y', 'Y': 'A', 'A': 'X'}
            current = axis_radio.get_value()
            if current in next_axis:
                axis_radio.set_value(next_axis[current])
        # Redraw the animated utility geometry at the snapped position.
        self.draw_app.update_utility_geometry(data=(self.draw_app.snap_x, self.draw_app.snap_y))
def clean_up(self):
    """Reset editor selection state, redraw the canvas and detach the jump signal."""
    self.draw_app.selected = []
    self.draw_app.ui.tools_table_exc.clearSelection()
    self.draw_app.plot_all()

    # The signal may already be disconnected, or may never have been connected.
    try:
        self.draw_app.app.jump_signal.disconnect()
    except (TypeError, AttributeError):
        pass
class SlotAdd(FCShapeTool):
"""
Resulting type: Polygon
"""
def __init__(self, draw_app):
    """Start the slot-placement tool: select the active tool diameter, set a
    custom cursor and show the initial utility geometry."""
    DrawTool.__init__(self, draw_app)
    self.name = 'slot_add'
    self.draw_app = draw_app

    self.draw_app.ui.slot_frame.show()

    self.selected_dia = None
    try:
        self.draw_app.app.inform.emit(_("Click to place ..."))
        # KeyError here means no tool is selected in the tool table.
        self.selected_dia = self.draw_app.tool2tooldia[self.draw_app.last_tool_selected]

        # as a visual marker, select again in tooltable the actual tool that we are using
        # remember that it was deselected when clicking on canvas
        item = self.draw_app.ui.tools_table_exc.item((self.draw_app.last_tool_selected - 1), 1)
        self.draw_app.ui.tools_table_exc.setCurrentItem(item)
    except KeyError:
        self.draw_app.app.inform.emit('[WARNING_NOTCL] %s' % _("To add a slot first select a tool"))
        self.draw_app.select_tool("drill_select")
        return

    # Swap the mouse cursor for the slot-specific one.
    try:
        QtGui.QGuiApplication.restoreOverrideCursor()
    except Exception:
        pass
    self.cursor = QtGui.QCursor(QtGui.QPixmap(self.draw_app.app.resource_location + '/aero_slot.png'))
    QtGui.QGuiApplication.setOverrideCursor(self.cursor)

    self.steps_per_circ = self.draw_app.app.defaults["geometry_circle_steps"]
    self.half_height = 0.0
    self.half_width = 0.0
    self.radius = float(self.selected_dia / 2.0)

    # Draw the follow-the-mouse preview at the current snapped position.
    geo = self.utility_geometry(data=(self.draw_app.snap_x, self.draw_app.snap_y))
    if isinstance(geo, DrawToolShape) and geo.geo is not None:
        self.draw_app.draw_utility_geometry(geo=geo)
    self.draw_app.app.inform.emit(_("Click on target location ..."))

    self.draw_app.app.jump_signal.connect(lambda x: self.draw_app.update_utility_geometry(data=x))

    # Switch notebook to Properties page
    self.draw_app.app.ui.notebook.setCurrentWidget(self.draw_app.app.ui.properties_tab)
def click(self, point):
    """A single click places the slot immediately (no second click is needed)."""
    self.make()
    return "Done."
def utility_geometry(self, data=None):
    """Build the temporary (follow-the-mouse) slot geometry at *data*."""
    self.points = data
    shape_geo = self.util_shape(data)
    # util_shape() returns None when a UI entry value cannot be parsed.
    return DrawToolUtilityShape(shape_geo) if shape_geo else None
def util_shape(self, point):
    """Build the slot polygon (a stadium / rounded-rectangle shape) centered on
    *point*, honoring the current length/angle/axis UI entries.

    Returns None when *point* is None or when a UI value cannot be parsed.
    """
    if point is None:
        return

    # updating values here allows us to change the aperture on the fly, after the Tool has been started
    self.selected_dia = self.draw_app.tool2tooldia[self.draw_app.last_tool_selected]
    self.radius = float(self.selected_dia / 2.0)
    self.steps_per_circ = self.draw_app.app.defaults["geometry_circle_steps"]

    try:
        slot_length = float(self.draw_app.ui.slot_length_entry.get_value())
    except ValueError:
        # try to convert comma to decimal point. if it's still not working error message and return
        try:
            slot_length = float(self.draw_app.ui.slot_length_entry.get_value().replace(',', '.'))
            self.draw_app.ui.slot_length_entry.set_value(slot_length)
        except ValueError:
            self.draw_app.app.inform.emit('[WARNING_NOTCL] %s' %
                                          _("Value is missing or wrong format. Add it and retry."))
            return

    try:
        slot_angle = float(self.draw_app.ui.slot_angle_spinner.get_value())
    except ValueError:
        self.draw_app.app.inform.emit('[WARNING_NOTCL] %s' %
                                      _("Value is missing or wrong format. Add it and retry."))
        return

    # 'X' lays the long side horizontally; any other value lays it vertically.
    if self.draw_app.ui.slot_axis_radio.get_value() == 'X':
        self.half_width = slot_length / 2.0
        self.half_height = self.radius
    else:
        self.half_width = self.radius
        self.half_height = slot_length / 2.0

    # Fall back to the editor's current mouse position when no point is given.
    if point[0] is None and point[1] is None:
        point_x = self.draw_app.x
        point_y = self.draw_app.y
    else:
        point_x = point[0]
        point_y = point[1]

    geo = []

    if self.half_height > self.half_width:
        # Tall slot: two vertical sides joined by a bottom and a top arc.
        p1 = (point_x - self.half_width, point_y - self.half_height + self.half_width)
        p2 = (point_x + self.half_width, point_y - self.half_height + self.half_width)
        p3 = (point_x + self.half_width, point_y + self.half_height - self.half_width)
        p4 = (point_x - self.half_width, point_y + self.half_height - self.half_width)

        down_center = [point_x, point_y - self.half_height + self.half_width]
        d_start_angle = math.pi
        d_stop_angle = 0.0
        down_arc = arc(down_center, self.half_width, d_start_angle, d_stop_angle, 'ccw', self.steps_per_circ)

        up_center = [point_x, point_y + self.half_height - self.half_width]
        u_start_angle = 0.0
        u_stop_angle = math.pi
        up_arc = arc(up_center, self.half_width, u_start_angle, u_stop_angle, 'ccw', self.steps_per_circ)

        geo.append(p1)
        for pt in down_arc:
            geo.append(pt)
        geo.append(p2)
        geo.append(p3)
        for pt in up_arc:
            geo.append(pt)
        geo.append(p4)

        if self.draw_app.ui.slot_axis_radio.get_value() == 'A':
            return affinity.rotate(geom=Polygon(geo), angle=-slot_angle)
        else:
            return Polygon(geo)
    else:
        # Wide slot: straight top/bottom joined by a left and a right arc.
        # NOTE(review): unlike the branch above, this one never applies the
        # 'A' rotation — confirm this is intended for slots whose length is
        # smaller than their diameter.
        p1 = (point_x - self.half_width + self.half_height, point_y - self.half_height)
        p2 = (point_x + self.half_width - self.half_height, point_y - self.half_height)
        p3 = (point_x + self.half_width - self.half_height, point_y + self.half_height)
        p4 = (point_x - self.half_width + self.half_height, point_y + self.half_height)

        left_center = [point_x - self.half_width + self.half_height, point_y]
        d_start_angle = math.pi / 2
        d_stop_angle = 1.5 * math.pi
        left_arc = arc(left_center, self.half_height, d_start_angle, d_stop_angle, 'ccw', self.steps_per_circ)

        right_center = [point_x + self.half_width - self.half_height, point_y]
        u_start_angle = 1.5 * math.pi
        u_stop_angle = math.pi / 2
        right_arc = arc(right_center, self.half_height, u_start_angle, u_stop_angle, 'ccw', self.steps_per_circ)

        geo.append(p1)
        geo.append(p2)
        for pt in right_arc:
            geo.append(pt)
        geo.append(p3)
        geo.append(p4)
        for pt in left_arc:
            geo.append(pt)

        return Polygon(geo)
def make(self):
    """Finalize the slot: build its polygon, register the click point and
    finish the tool."""
    # Restore the default mouse cursor.
    try:
        QtGui.QGuiApplication.restoreOverrideCursor()
    except Exception:
        pass

    try:
        self.geometry = DrawToolShape(self.util_shape(self.points))
    except Exception as e:
        log.debug("SlotAdd.make() --> %s" % str(e))
    # NOTE(review): if the shape construction above fails, self.geometry is
    # left unset yet the method still proceeds and marks itself complete —
    # confirm this is the intended behavior.

    # add the point to drills/slots if the diameter is a key in the dict, if not, create it add the drill location
    # to the value, as a list of itself
    if self.selected_dia in self.draw_app.slot_points_edit:
        self.draw_app.slot_points_edit[self.selected_dia].append(self.points)
    else:
        self.draw_app.slot_points_edit[self.selected_dia] = [self.points]

    self.draw_app.current_storage = self.draw_app.storage_dict[self.selected_dia]
    self.draw_app.in_action = False
    self.complete = True
    self.draw_app.app.inform.emit('[success] %s' % _("Done."))
    self.draw_app.ui.slot_frame.hide()
    self.draw_app.app.jump_signal.disconnect()
def on_key(self, key):
# Toggle Pad Direction
if key == QtCore.Qt.Key_Space:
if self.draw_app.ui.slot_axis_radio.get_value() == 'X':
self.draw_app.ui.slot_axis_radio.set_value('Y')
elif self.draw_app.ui.slot_axis_radio.get_value() == 'Y':
| |
# repository: clohr/model-service
import csv
import datetime
import io
import json
import os
from collections import OrderedDict
from dataclasses import dataclass
from typing import Any, Dict, List
from uuid import UUID
import boto3
import pytest
from moto import mock_s3
from publish.config import PublishConfig
from publish.models import (
ExportedGraphManifests,
ExportGraphSchema,
FileManifest,
ModelService,
)
from publish.publish import publish_dataset, read_file_manifests, write_graph_manifests
from server.models import ModelProperty
from server.models import datatypes as dt
def to_utf8(encoded):
    """Decode *encoded* as UTF-8 when it is a bytes-like object.

    Returns the input unchanged when it is already a ``str`` or cannot be
    decoded, so callers may pass either form.
    """
    # The original bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # catch only the errors str(..., "utf-8") can raise for non-bytes or
    # malformed input.
    try:
        return str(encoded, "utf-8")
    except (TypeError, UnicodeDecodeError):
        return encoded
@dataclass(frozen=True)
class CSV:
    """A CSV object fetched from S3: its parsed rows plus the object size in bytes."""
    # Each row is a header->value mapping as produced by csv.DictReader.
    rows: List[Dict[str, str]]
    size: int
@dataclass(frozen=True)
class JSON:
    """A JSON object fetched from S3: its parsed content plus the object size in bytes."""
    content: Any
    size: int
def sort_rows(rows: List[Dict[str, str]]) -> List[Dict[str, str]]:
    """
    Sort the rows of a `CSV` by their cell values, so two exports can be
    compared without depending on row order.
    """
    def row_key(row: Dict[str, str]) -> List[str]:
        return list(row.values())

    return sorted(rows, key=row_key)
@pytest.fixture(scope="function")
def s3(aws_credentials):
    """Per-test boto3 S3 client backed by moto's in-memory S3 mock."""
    # `aws_credentials` is a fixture defined elsewhere (presumably conftest)
    # that provides fake AWS credentials so boto3 never hits a real account.
    with mock_s3():
        yield boto3.client("s3", region_name="us-east-1")
@pytest.fixture(scope="session")
def config():
    """Publish configuration shared by all tests: bucket name and publish key prefix."""
    # "10/233" presumably encodes a dataset id/version pair — verify against
    # the PublishConfig definition.
    return PublishConfig("test-embargo-bucket", "10/233")
@pytest.fixture(scope="function")
def read_csv(s3, config):
    """
    Read a CSV file from S3
    """
    def fetch(key) -> CSV:
        response = s3.get_object(Bucket=config.s3_bucket, Key=key)
        body = to_utf8(response["Body"].read())
        rows = list(csv.DictReader(io.StringIO(body)))
        return CSV(rows=rows, size=response["ContentLength"])

    return fetch
@pytest.fixture(scope="function")
def read_json(s3, config):
    """
    Read a JSON file from S3
    """
    def fetch(key) -> JSON:
        response = s3.get_object(Bucket=config.s3_bucket, Key=key)
        parsed = json.loads(to_utf8(response["Body"].read()))
        return JSON(content=parsed, size=response["ContentLength"])

    return fetch
@pytest.fixture(scope="function")
def metadata_key(config):
    """
    Build a key to a file in the 'metadata' directory of the published dataset.
    """
    def func(filename) -> str:
        # BUG FIX: the `filename` argument was being dropped (the f-string
        # contained a literal placeholder instead of {filename}), so every
        # generated key pointed at the same nonexistent object regardless of
        # the file requested.
        return f"{config.s3_publish_key}/metadata/{filename}"

    return func
def test_publish(
    s3, partitioned_db, sample_patient_db, config, read_csv, read_json, metadata_key
):
    """End-to-end publish test: enrich the sample patient graph (linked
    property, package proxies, a second 'attends' relationship), publish it to
    the mock S3 bucket, then verify the exported schema, record CSVs,
    relationship CSVs, proxy-package files and the returned file manifests."""
    # Helpers
    # ==========================================================================
    def id_of(record_name):
        # Shorthand: stringified record id from the sample fixture.
        return str(sample_patient_db["records"][record_name].id)

    s3.create_bucket(Bucket=config.s3_bucket)

    # Setup graph - add more data to the patient DB.
    # ==========================================================================

    # Add a linked property
    best_friend = partitioned_db.create_model_relationship(
        from_model=sample_patient_db["models"]["patient"],
        name="best_friend",
        to_model=sample_patient_db["models"]["patient"],
        display_name="Best friend",
        one_to_many=False,
    )
    partitioned_db.create_record_relationship(id_of("bob"), best_friend, id_of("alice"))

    # Alice has a package proxy
    partitioned_db.create_package_proxy(
        id_of("alice"), package_id=1234, package_node_id="N:package:1234"
    )

    # Bob also has a package proxy. However, this package no longer exists in
    # API. The exporter needs to ignore it.
    # TODO: https://app.clickup.com/t/2c3ec9
    partitioned_db.create_package_proxy(
        id_of("bob"), package_id=4567, package_node_id="N:package:4567"
    )

    # Add another relationship named "attends" The relationship instances for
    # this relationship need to be exported in the same CSV file as the
    # (patient)-[attends]->(visit) relationships, but have a distinct entry in
    # the graph schema.
    event = partitioned_db.create_model("event", display_name="Event", description="")
    partitioned_db.update_properties(
        event,
        ModelProperty(
            name="name", display_name="Name", data_type="String", model_title=True
        ),
    )
    attends = partitioned_db.create_model_relationship(
        sample_patient_db["models"]["patient"], "attends", event
    )
    birthday = partitioned_db.create_records(event, [{"name": "Birthday"}])[0]
    partitioned_db.create_record_relationship(id_of("alice"), attends, birthday)

    # These are the file manifests provided by `discover-publish`
    file_manifests = [
        FileManifest(
            id=UUID("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"),
            path="files/pkg1/file1.txt",
            size=2293,
            file_type="TEXT",
            source_package_id="N:package:1234",
        ),
        FileManifest(
            id=UUID("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"),
            path="files/pkg1/file2.csv",
            size=234443,
            file_type="CSV",
            source_package_id="N:package:1234",
        ),
    ]

    # Publish dataset
    # ==========================================================================
    graph_manifests = publish_dataset(
        partitioned_db, s3, config, file_manifests=file_manifests
    )

    for o in s3.list_objects(Bucket=config.s3_bucket).get("Contents", []):
        # Don't export a CSV for the best_friend linked property.
        assert o["Key"] != "10/233/metadata/relationships/best_friend.csv"

    # Check graph schema
    # ==========================================================================
    schema_json = read_json(metadata_key("schema.json"))

    assert sorted(schema_json.content["models"], key=lambda m: m["name"]) == [
        {
            "name": "event",
            "displayName": "Event",
            "description": "",
            "file": "records/event.csv",
            "properties": [
                {
                    "name": "name",
                    "displayName": "Name",
                    "description": "",
                    "dataType": {"type": "String"},
                }
            ],
        },
        {
            "name": "file",
            "displayName": "File",
            "description": "A file in the dataset",
            "file": "records/file.csv",
            "properties": [
                {
                    "name": "path",
                    "displayName": "Path",
                    "description": "The path to the file from the root of the dataset",
                    "dataType": {"type": "String"},
                }
                # TODO: add sourcePackageId (enhancement)
            ],
        },
        {
            "name": "medication",
            "displayName": "Medication",
            "description": "a medication",
            "file": "records/medication.csv",
            "properties": [
                {
                    "name": "name",
                    "displayName": "Name",
                    "description": "",
                    "dataType": {"type": "String"},
                }
            ],
        },
        {
            "name": "patient",
            "displayName": "Patient",
            "description": "a person",
            "file": "records/patient.csv",
            "properties": [
                {
                    "name": "name",
                    "displayName": "Name",
                    "description": "",
                    "dataType": {"type": "String"},
                },
                {
                    "name": "age",
                    "displayName": "Age",
                    "description": "",
                    "dataType": {"type": "Long"},
                },
                {
                    "name": "best_friend",
                    "displayName": "<NAME>",
                    "description": "",
                    "dataType": {
                        "type": "Model",
                        "to": "patient",
                        "file": "records/patient.csv",
                    },
                },
            ],
        },
        {
            "name": "visit",
            "displayName": "Visit",
            "description": "a visit",
            "file": "records/visit.csv",
            "properties": [
                {
                    "name": "day",
                    "displayName": "Day",
                    "description": "",
                    "dataType": {"type": "String"},
                }
            ],
        },
    ]

    # Both "attends" model relationships share one CSV but keep distinct
    # schema entries; the package-proxy "belongs_to" entry has empty from/to.
    assert sorted(
        schema_json.content["relationships"], key=lambda r: (r["from"], r["to"])
    ) == sorted(
        [
            {
                "name": "attends",
                "from": "patient",
                "to": "visit",
                "file": "relationships/attends.csv",
            },
            {
                "name": "attends",
                "from": "patient",
                "to": "event",
                "file": "relationships/attends.csv",
            },
            {
                "name": "belongs_to",
                "from": "",
                "to": "",
                "file": "relationships/belongs_to.csv",
            },
            {
                "name": "prescribed",
                "from": "visit",
                "to": "medication",
                "file": "relationships/prescribed.csv",
            },
        ],
        key=lambda r: (r["from"], r["to"]),
    )

    # Check records
    # ==========================================================================
    patient_csv = read_csv(metadata_key("records/patient.csv"))
    assert sort_rows(patient_csv.rows) == sort_rows(
        [
            OrderedDict(
                {
                    "id": id_of("alice"),
                    "name": "Alice",
                    "age": "34",
                    "best_friend": None,
                    "best_friend:display": None,
                }
            ),
            OrderedDict(
                {
                    "id": id_of("bob"),
                    "name": "Bob",
                    "age": "20",
                    "best_friend": id_of("alice"),
                    "best_friend:display": "Alice",
                }
            ),
        ]
    )

    visit_csv = read_csv(metadata_key("records/visit.csv"))
    assert sort_rows(visit_csv.rows) == sort_rows(
        [
            OrderedDict({"id": id_of("monday"), "day": "Monday"}),
            OrderedDict({"id": id_of("tuesday"), "day": "Tuesday"}),
        ]
    )

    medication_csv = read_csv(metadata_key("records/medication.csv"))
    assert sort_rows(medication_csv.rows) == sort_rows(
        [
            OrderedDict({"id": id_of("aspirin"), "name": "Aspirin"}),
            OrderedDict({"id": id_of("motrin"), "name": "Motrin"}),
            OrderedDict({"id": id_of("tylenol"), "name": "Tylenol"}),
        ]
    )

    event_csv = read_csv(metadata_key("records/event.csv"))
    assert event_csv.rows == [{"id": str(birthday.id), "name": "Birthday"}]

    # Check relationships
    # ==========================================================================
    attends_csv = read_csv(metadata_key("relationships/attends.csv"))
    assert sort_rows(attends_csv.rows) == sort_rows(
        [
            OrderedDict(
                {
                    "from": id_of("alice"),
                    "to": id_of("monday"),
                    "relationship": "attends",
                }
            ),
            OrderedDict(
                {
                    "from": id_of("bob"),
                    "to": id_of("tuesday"),
                    "relationship": "attends",
                }
            ),
            # Contains relationships from multiple model relationships
            OrderedDict(
                {
                    "from": id_of("alice"),
                    "to": str(birthday.id),
                    "relationship": "attends",
                }
            ),
        ]
    )

    prescribed_csv = read_csv(metadata_key("relationships/prescribed.csv"))
    assert sort_rows(prescribed_csv.rows) == sort_rows(
        [
            OrderedDict(
                {
                    "from": id_of("monday"),
                    "to": id_of("aspirin"),
                    "relationship": "prescribed",
                }
            ),
            OrderedDict(
                {
                    "from": id_of("tuesday"),
                    "to": id_of("aspirin"),
                    "relationship": "prescribed",
                }
            ),
            OrderedDict(
                {
                    "from": id_of("tuesday"),
                    "to": id_of("tylenol"),
                    "relationship": "prescribed",
                }
            ),
        ]
    )

    # Check proxy packages
    # ==========================================================================
    file_csv = read_csv(metadata_key("records/file.csv"))
    assert sort_rows(file_csv.rows) == sort_rows(
        [
            OrderedDict(
                {
                    "id": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
                    "path": "files/pkg1/file1.txt",
                    "sourcePackageId": "N:package:1234",
                }
            ),
            OrderedDict(
                {
                    "id": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
                    "path": "files/pkg1/file2.csv",
                    "sourcePackageId": "N:package:1234",
                }
            ),
        ]
    )

    # Check proxy package relationships
    # ==========================================================================
    belongs_to_csv = read_csv(metadata_key("relationships/belongs_to.csv"))
    assert sort_rows(belongs_to_csv.rows) == sort_rows(
        [
            OrderedDict(
                {
                    "from": id_of("alice"),
                    "to": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
                    "relationship": "belongs_to",
                }
            ),
            OrderedDict(
                {
                    "from": id_of("alice"),
                    "to": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
                    "relationship": "belongs_to",
                }
            ),
        ]
    )

    # Check file manifest output
    # ==========================================================================
    assert sorted(graph_manifests) == sorted(
        [
            FileManifest(
                path="metadata/schema.json", file_type="Json", size=schema_json.size
            ),
            FileManifest(
                path="metadata/records/event.csv", file_type="CSV", size=event_csv.size
            ),
            FileManifest(
                path="metadata/records/file.csv", file_type="CSV", size=file_csv.size
            ),
            FileManifest(
                path="metadata/records/medication.csv",
                file_type="CSV",
                size=medication_csv.size,
            ),
            FileManifest(
                path="metadata/records/patient.csv",
                file_type="CSV",
                size=patient_csv.size,
            ),
            FileManifest(
                path="metadata/records/visit.csv", file_type="CSV", size=visit_csv.size
            ),
            FileManifest(
                path="metadata/relationships/attends.csv",
                file_type="CSV",
                size=attends_csv.size,
            ),
            FileManifest(
                path="metadata/relationships/prescribed.csv",
                file_type="CSV",
                size=prescribed_csv.size,
            ),
            FileManifest(
                path="metadata/relationships/belongs_to.csv",
                file_type="CSV",
                size=belongs_to_csv.size,
            ),
        ]
    )
def test_record_value_serialization(s3, config, read_csv, metadata_key, partitioned_db):
    """Verify every supported property datatype (scalar string/boolean/long/
    double/date, an optional value, and arrays of each) is serialized to the
    expected CSV string representation on publish."""
    s3.create_bucket(Bucket=config.s3_bucket)
    patient = partitioned_db.create_model("patient", "Patient")
    partitioned_db.update_properties(
        patient,
        ModelProperty(
            name="string",
            display_name="String",
            data_type=dt.String(),
            model_title=True,
        ),
        ModelProperty(name="boolean", display_name="Boolean", data_type=dt.Boolean()),
        ModelProperty(name="long", display_name="Long", data_type=dt.Long()),
        ModelProperty(name="double", display_name="Double", data_type=dt.Double()),
        ModelProperty(name="date", display_name="Date", data_type=dt.Date()),
        ModelProperty(name="optional", display_name="Optional", data_type=dt.String()),
        ModelProperty(
            name="string_array",
            display_name="String Array",
            data_type=dt.Array(items=dt.String()),
        ),
        ModelProperty(
            name="boolean_array",
            display_name="Boolean Array",
            data_type=dt.Array(items=dt.Boolean()),
        ),
        ModelProperty(
            name="long_array",
            display_name="Long Array",
            data_type=dt.Array(items=dt.Long()),
        ),
        ModelProperty(
            name="double_array",
            display_name="Double Array",
            data_type=dt.Array(items=dt.Double()),
        ),
        ModelProperty(
            name="date_array",
            display_name="Date Array",
            data_type=dt.Array(items=dt.Date()),
        ),
    )
    record = partitioned_db.create_records(
        patient,
        [
            {
                "string": 'tricky"char,acter"string',
                "boolean": True,
                "long": 12345,
                "double": 3.14159,
                "date": datetime.datetime(year=2004, month=5, day=5),
                "optional": None,
                "string_array": ["red", "green", "semi;colon"],
                "boolean_array": [True, False],
                "long_array": [1, 2, 3],
                "double_array": [1.1, 2.2, 3.3],
                "date_array": [
                    datetime.datetime(year=2004, month=5, day=5),
                    datetime.datetime(year=2014, month=5, day=16),
                ],
            }
        ],
    )[0]

    publish_dataset(partitioned_db, s3, config, file_manifests=[])

    patient_csv = read_csv(metadata_key("records/patient.csv"))
    # Note: the ";" inside the input array element "semi;colon" is replaced
    # with "_" in the output, since ";" is the exported array separator.
    assert patient_csv.rows == [
        OrderedDict(
            {
                "id": str(record.id),
                "string": 'tricky"char,acter"string',
                "boolean": "true",
                "long": "12345",
                "double": "3.14159",
                "date": "2004-05-05T00:00:00",
                "optional": "",
                "string_array": "red;green;semi_colon",
                "boolean_array": "true;false",
                "long_array": "1;2;3",
                "double_array": "1.1;2.2;3.3",
                "date_array": "2004-05-05T00:00:00;2014-05-16T00:00:00",
            }
        )
    ]
def test_proxy_relationships_are_merged_with_record_relationships(
s3, config, read_csv, metadata_key, partitioned_db
):
s3.create_bucket(Bucket=config.s3_bucket)
person = partitioned_db.create_model("person", "Person")
partitioned_db.update_properties(
person,
ModelProperty(
name="name", display_name="String", data_type=dt.String(), model_title=True
),
)
item = partitioned_db.create_model("item", "Item")
partitioned_db.update_properties(
item,
ModelProperty(
name="name", display_name="String", data_type=dt.String(), model_title=True
),
)
# This relationship uses the default "belongs_to" package proxy relationship,
# and should be exported in the same CSV file.
item_belongs_to_person = partitioned_db.create_model_relationship(
item, "belongs_to", person, one_to_many=True
)
person_likes_person = partitioned_db.create_model_relationship(
person, "likes", person, one_to_many=True
)
alice = partitioned_db.create_record(person, {"name": "Alice"})
bob = partitioned_db.create_record(person, {"name": "Bob"})
laptop = partitioned_db.create_record(item, {"name": "Laptop"})
partitioned_db.create_record_relationship(alice, person_likes_person, bob)
partitioned_db.create_record_relationship(laptop, item_belongs_to_person, alice)
# Package proxy using default `belongs_to` relationship
partitioned_db.create_package_proxy(
alice, package_id=1234, package_node_id="N:package:1234"
)
# Package proxy using a non-standard `likes` relationship
partitioned_db.create_package_proxy(
alice,
package_id=4567,
package_node_id="N:package:4567",
legacy_relationship_type="likes",
)
file_manifests = [
FileManifest(
id=UUID("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"),
path="10/233/files/pkg1/file1.txt",
size=2293,
file_type="TEXT",
source_package_id="N:package:1234",
),
FileManifest(
id=UUID("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"),
path="10/233/files/pkg1/file2.csv",
size=234443,
file_type="CSV",
source_package_id="N:package:1234",
),
FileManifest(
id=UUID("cccccccc-cccc-cccc-cccc-cccccccccccc"),
path="10/233/files/pkg2/file3.dcm",
size=338923,
file_type="DICOM",
source_package_id="N:package:4567",
),
]
graph_manifests = publish_dataset(
partitioned_db, s3, config, file_manifests=file_manifests
)
assert sorted([m.path for m in graph_manifests]) == [
"metadata/records/file.csv",
"metadata/records/item.csv",
"metadata/records/person.csv",
"metadata/relationships/belongs_to.csv",
"metadata/relationships/likes.csv",
"metadata/schema.json",
]
belongs_to_csv = read_csv(metadata_key("relationships/belongs_to.csv"))
assert sort_rows(belongs_to_csv.rows) == sort_rows(
[
OrderedDict(
{
"from": str(alice.id),
"to": | |
U = t*X' - X
To get the system:
.. math:: U' = t*(A(t)*U + b(t))
Where $U$ is the vector of dependent variables, $X$ is the vector of dependent
variables in `funcs` and $X'$ is the first order derivative of $X$ with respect to
$t$. It may or may not reduce the system into linear first order system of ODEs.
Then a check is made to determine if the system passed can be reduced or not, if
this substitution works, then the system is reduced and its solved for the new
substitution. After we get the solution for $U$:
.. math:: U = a(t)
We substitute and return the reduced system:
.. math::
a(t) = t*X' - X
Parameters
==========
A: Matrix
Coefficient matrix($A(t)*t$) of the second order system of this form.
b: Matrix
Non-homogeneous term($b(t)$) of the system of ODEs.
funcs: List
List of dependent variables
t: Symbol
Independent variable of the system of ODEs.
Returns
=======
List
"""
U = Matrix([t*func.diff(t) - func for func in funcs])
sol = linodesolve(A, t, t*b)
reduced_eqs = [Eq(u, s) for s, u in zip(sol, U)]
reduced_eqs = canonical_odes(reduced_eqs, funcs, t)[0]
return reduced_eqs
def _second_order_subs_type2(A, funcs, t_):
    r"""
    Returns a second order system based on the coefficient matrix passed.

    Explanation
    ===========

    Builds a system of second order ODEs of the form:

    .. math::
        X'' = A * X

    where $X$ is a vector of freshly created dependent variables (one per
    entry of ``funcs``, named after the original), expressed in the new
    independent variable ``t_``, and $A$ is the coefficient matrix passed.

    Along with the second order system, the new dependent variables are
    also returned.

    Parameters
    ==========

    A: Matrix
        Coefficient matrix of the system
    funcs: List
        List of old dependent variables
    t_: Symbol
        New independent variable

    Returns
    =======

    List, List
    """
    # One fresh dummy function per original dependent variable, carrying
    # the original's name with a trailing underscore.
    replacement_funcs = [
        Function(Dummy("{}_".format(f.func.__name__)))(t_) for f in funcs
    ]
    # Right-hand sides come from multiplying A into the new function vector.
    rhs_vector = A * Matrix(replacement_funcs)
    equations = [
        Eq(new_f.diff(t_, 2), rhs)
        for new_f, rhs in zip(replacement_funcs, rhs_vector)
    ]
    return equations, replacement_funcs
def _is_euler_system(As, t):
    """Return True if the system is an Euler (equidimensional) system.

    A system is Euler if ``t**i * A_i`` simplifies to a constant matrix for
    every coefficient matrix ``A_i`` of derivative order ``i``.
    """
    for power, coeff_matrix in enumerate(As):
        scaled = (coeff_matrix * t**power).applyfunc(ratsimp)
        if not _matrix_is_constant(scaled, t):
            return False
    return True
def _classify_linear_system(eqs, funcs, t, is_canon=False):
    r"""
    Returns a dictionary with details of the eqs if the system passed is linear
    and can be classified by this function else returns None

    Explanation
    ===========

    This function takes the eqs, converts it into a form Ax = b where x is a vector of terms
    containing dependent variables and their derivatives till their maximum order. If it is
    possible to convert eqs into Ax = b, then all the equations in eqs are linear otherwise
    they are non-linear.

    To check if the equations are constant coefficient, we need to check if all the terms in
    A obtained above are constant or not.

    To check if the equations are homogeneous or not, we need to check if b is a zero matrix
    or not.

    Parameters
    ==========

    eqs: List
        List of ODEs
    funcs: List
        List of dependent variables
    t: Symbol
        Independent variable of the equations in eqs
    is_canon: Boolean
        If True, then this function will not try to get the
        system in canonical form. Default value is False

    Returns
    =======

    Dict or list of Dicts or None
        Dict with values for keys:
          1. no_of_equation: Number of equations
          2. eq: The set of equations
          3. func: List of dependent variables
          4. order: A dictionary that gives the order of the
                    dependent variable in eqs
          5. is_linear: Boolean value indicating if the set of
                        equations are linear or not.
          6. is_constant: Boolean value indicating if the set of
                          equations have constant coefficients or not.
          7. is_homogeneous: Boolean value indicating if the set of
                             equations are homogeneous or not.
          8. commutative_antiderivative: Antiderivative of the coefficient
                                         matrix if the coefficient matrix is non-constant
                                         and commutative with its antiderivative. This key
                                         may or may not exist.
          9. is_general: Boolean value indicating if the system of ODEs is
                         solvable using one of the general case solvers or not.
          10. rhs: rhs of the non-homogeneous system of ODEs in Matrix form. This
                   key may or may not exist.
          11. is_higher_order: True if the system passed has an order greater than 1.
                               This key may or may not exist.
          12. is_second_order: True if the system passed is a second order ODE. This
                               key may or may not exist.

        This Dict is the answer returned if the eqs are linear and constant
        coefficient. Otherwise, None is returned.
    """
    # Error for i == 0 can be added but isn't for now

    # A system is only classifiable here when it is square: one equation
    # per dependent variable.
    # Check for len(funcs) == len(eqs)
    if len(funcs) != len(eqs):
        raise ValueError("Number of functions given is not equal to the number of equations %s" % funcs)

    # ValueError when functions have more than one arguments
    for func in funcs:
        if len(func.args) != 1:
            raise ValueError("dsolve() and classify_sysode() work with "
            "functions of one variable only, not %s" % func)

    # Getting the func_dict and order using the helper
    # function
    order = _get_func_order(eqs, funcs)
    system_order = max(order[func] for func in funcs)
    is_higher_order = system_order > 1
    # "Second order" here requires EVERY equation to be exactly order 2.
    is_second_order = system_order == 2 and all(order[func] == 2 for func in funcs)

    # Not adding the check if the len(func.args) for
    # every func in funcs is 1

    # Linearity check
    try:
        canon_eqs = canonical_odes(eqs, funcs, t) if not is_canon else [eqs]

        if len(canon_eqs) == 1:
            As, b = linear_ode_to_matrix(canon_eqs[0], funcs, t, system_order)
        else:
            # Multiple canonical systems means the original system was
            # implicit; return them for the caller to handle one by one.
            match = {
                'is_implicit': True,
                'canon_eqs': canon_eqs
            }

            return match
    # When the system of ODEs is non-linear, an ODENonlinearError is raised.
    # This function catches the error and None is returned.
    except ODENonlinearError:
        return None

    is_linear = True

    # Homogeneous check: b holds the non-derivative, non-function terms.
    is_homogeneous = True if b.is_zero_matrix else False

    # Is general key is used to identify if the system of ODEs can be solved by
    # one of the general case solvers or not.
    match = {
        'no_of_equation': len(eqs),
        'eq': eqs,
        'func': funcs,
        'order': order,
        'is_linear': is_linear,
        'is_homogeneous': is_homogeneous,
        'is_general': True
    }

    if not is_homogeneous:
        match['rhs'] = b

    is_constant = all(_matrix_is_constant(A_, t) for A_ in As)

    # The match['is_linear'] check will be added in the future when this
    # function becomes ready to deal with non-linear systems of ODEs
    if not is_higher_order:
        # First order system: the single coefficient matrix is As[1]
        # (As[0] multiplies the zeroth derivative in the Ax = b form).
        A = As[1]
        match['func_coeff'] = A

        # Constant coefficient check
        is_constant = _matrix_is_constant(A, t)
        match['is_constant'] = is_constant

        try:
            system_info = linodesolve_type(A, t, b=b)
        except NotImplementedError:
            return None

        match.update(system_info)
        antiderivative = match.pop("antiderivative")

        # Only non-constant commutative systems need the antiderivative
        # downstream; for constant systems it is redundant.
        if not is_constant:
            match['commutative_antiderivative'] = antiderivative

        return match
    else:
        match['type_of_equation'] = "type0"

        if is_second_order:
            A1, A0 = As[1:]

            match_second_order = _match_second_order_type(A1, A0, t)
            match.update(match_second_order)

            match['is_second_order'] = True

        # If system is constant, then no need to check if its in euler
        # form or not. It will be easier and faster to directly proceed
        # to solve it.
        if match['type_of_equation'] == "type0" and not is_constant:
            is_euler = _is_euler_system(As, t)
            if is_euler:
                # Euler system: solvable after the substitution t = exp(t_),
                # so record the new independent variable.
                t_ = Symbol('{}_'.format(t))
                match.update({'is_transformed': True, 'type_of_equation': 'type1',
                              't_': t_})
            else:
                # Try the f(t) * A factorization; only usable when A's
                # Jordan form is a genuine decomposition (not already a
                # single Jordan block) and all middle coefficients vanish.
                is_jordan = lambda M: M == Matrix.jordan_block(M.shape[0], M[0, 0])
                terms = _factor_matrix(As[-1], t)
                if all(A.is_zero_matrix for A in As[1:-1]) and terms is not None and not is_jordan(terms[1]):
                    P, J = terms[1].jordan_form()
                    match.update({'type_of_equation': 'type2', 'J': J,
                                  'f(t)': terms[0], 'P': P, 'is_transformed': True})

            # Transformed second order systems are solved as higher order
            # systems, so the second-order marker is dropped.
            if match['type_of_equation'] != 'type0' and is_second_order:
                match.pop('is_second_order', None)

        match['is_higher_order'] = is_higher_order

        return match
def _preprocess_eqs(eqs):
    # Normalize every entry to an Equality; a bare expression is read
    # as "expression = 0".
    return [eq if isinstance(eq, Equality) else Eq(eq, 0) for eq in eqs]
def _eqs2dict(eqs, funcs):
    """Map each equation's LHS function to the dependent functions on its RHS.

    Returns two dicts: function -> set of funcs its RHS depends on, and
    function -> the original equation it came from.
    """
    dependency_map = {}
    originals = {}
    wanted = set(funcs)
    for equation in eqs:
        # Exactly one applied undefined function is expected on the LHS.
        lhs_func, = equation.lhs.atoms(AppliedUndef)
        rhs_funcs = (equation.rhs.atoms(AppliedUndef) - {lhs_func}) & wanted
        dependency_map[lhs_func] = rhs_funcs
        originals[lhs_func] = equation
    return dependency_map, originals
def _dict2graph(d):
nodes = list(d)
edges = [(f1, f2) for f1, f2s in d.items() for f2 in f2s]
G = (nodes, edges)
return G
def _is_type1(scc, t):
eqs, funcs = scc
try:
(A1, A0), b = linear_ode_to_matrix(eqs, funcs, t, | |
= False
return ret
##
# \brief Gets the current FPGA state.
#
# \copydetails CyberRadioDriver::IRadio::getFpgaState()
def getFpgaState(self):
    # Only radios that define an FPGA state command can report a state;
    # all others return None.
    if self.fpgaStateCmd is None:
        return None
    return self.getConfigurationByKeys("fpgaState")
##
# \brief Sets the current FPGA state.
#
# \copydetails CyberRadioDriver::IRadio::setFpgaState()
def setFpgaState(self, state):
    # Radios without an FPGA state command cannot change state; report failure.
    if self.fpgaStateCmd is None:
        return False
    return self.setConfiguration({"fpgaState": state})
# OVERRIDE
##
# \brief Sets whether or not the object is in verbose mode.
#
# \copydetails CyberRadioDriver::IRadio::setVerbose()
def setVerbose(self, verbose):
    # Apply to this object first, then propagate to every component.
    log._logger.setVerbose(self, verbose)
    for component in self.componentList:
        component.setVerbose(verbose)
##
# \brief Sets the log file.
#
# \copydetails CyberRadioDriver::IRadio::setLogFile()
def setLogFile(self, logFile):
    # Apply to this object first, then propagate to every component.
    log._logger.setLogFile(self, logFile)
    for component in self.componentList:
        component.setLogFile(logFile)
##
# \brief Gets the list of connected data port interface indices.
#
# \copydetails CyberRadioDriver::IRadio::getConnectedDataPorts()
def getConnectedDataPorts(self):
    # Only crdd-mediated connections can enumerate connected data ports;
    # direct connections report an empty list.
    if self.isCrddConnection:
        return self._crddGetConnectedDataPortIndices()
    return []
##
# \internal
# \brief Converts a user-specified time string into a number of seconds
# since 1/1/70.
#
# The time string can be either:
# \li Absolute time, in any supported format
# \li Relative time specified as now{-n}, where n is a number of seconds
# \li Relative time specified as now{-[[H:]MM:]SS}
# \li "begin", which is the beginning of known time (1/1/70)
# \li "end", which is the end of trackable time and far beyond the
# useful life of this utility (01/18/2038)
#
# \throws RuntimeException if the time string format cannot be understood.
# \param timestr The time string.
# \param utc Whether or not the user's time string is in UTC time.
# \return The time, in number of seconds since the Epoch
@staticmethod
def timeFromString(timestr, utc=True):
ret = 0
tm = None
tstr = timestr.strip()
if tstr == "":
ret = 0
elif tstr == "begin":
ret = 0
elif tstr == "end":
ret = sys.maxsize
else:
if tstr.find('now') != -1:
tm = datetime.datetime.utcnow() if utc else datetime.datetime.now()
i = tstr.find('-')
if i != -1:
tmp = tstr[i+1:]
tm = tm - datetime.timedelta(seconds=_radio.timeSecsFromString(tmp))
else:
# Replace strings "today" and "yesterday"
tmToday = datetime.datetime.utcnow() if utc else datetime.datetime.now()
tmYesterday = tmToday - datetime.timedelta(days=1)
dateStrToday = tmToday.strftime("%Y%m%d")
dateStrYesterday = tmYesterday.strftime("%Y%m%d")
tstr = tstr.replace("today", dateStrToday).replace("yesterday", dateStrYesterday)
# Try a series of known formats
# -- Formats are 5-tuples: (format string, width, needs year, needs month, needs day)
supportedFmts = [ \
('%Y-%m-%dT%H:%M:%S%z', 24, False, False, False), \
('%Y-%m-%dT%H:%M:%S', 19, False, False, False), \
('%Y%m%d:%H%M%S', 15, False, False, False), \
('%a %b %d %H:%M:%S %Y', 24, False, False, False), \
('%b %d %H:%M:%S', 15, True, False, False), \
('%b %d, %Y %I:%M:%S %p', 24, False, False, False), \
('%Y-%m-%d %H:%M:%S', 19, False, False, False), \
('%Y/%m/%d %H:%M:%S', 19, False, False, False), \
('%Y%m%d_%H%M%S', 15, False, False, False), \
('%m/%d/%Y %H:%M', 16, False, False, False), \
('%m/%d/%y %H:%M:%S', 17, False, False, False), \
('%Y%m%d', 8, False, False, False), \
('%Y-%m-%d', 10, False, False, False), \
('%H:%M:%S', 8, True, True, True), \
('%H%M%S', 6, True, True, True), \
]
for fmt in supportedFmts:
try:
tmp = tstr[:fmt[1]].strip()
#print "[DBG][timeFromString] Convert"
#print "[DBG][timeFromString] -- string:", tmp
#print "[DBG][timeFromString] -- format:", fmt[0]
tm = datetime.datetime.strptime(tmp, fmt[0])
#print "[DBG][timeFromString] -- SUCCESS"
# Replace date items from today's date as needed by the format
# -- Day
if fmt[4]:
tm = tm.replace(day=tmToday.day)
# -- Month
if fmt[3]:
tm = tm.replace(month=tmToday.month)
# -- Year
if fmt[2]:
tm = tm.replace(year=tmToday.year)
# But if the resulting date is in the future, then we need to dial it
# back a year
if tm > tmToday:
tm = tm.replace(year=tmToday.year-1)
break
except:
#print "[DBG][timeFromString] -- FAILURE"
tm = None
if tm is not None:
ret = time.mktime(tm.timetuple())
else:
raise RuntimeError("Improperly formatted time: \"" + tstr + "\"")
return ret
##
# Converts a time string ([+-][[H:]M:]S) to a time in seconds.
#
# \note Hours and minutes are not bounded in any way. These strings provide the
# same result:
# \li "7200"
# \li "120:00"
# \li "2:00:00"
#
# \throws RuntimeError if the time is formatted improperly.
# \param timeStr The time string.
# \return The number of seconds.
@staticmethod
def timeSecsFromString(timeStr):
hrs = 0
mins = 0
secs = 0
sgn = 1
try:
if "-" in timeStr:
sgn = -1
tmp = timeStr.strip().translate(None, " +-")
if tmp != "":
vec = tmp.split(":")
if vec[-1] != "":
secs = int(vec[-1])
else:
raise RuntimeError("Improperly formatted time: \"" + timeStr + "\"")
if len(vec) > 1:
if vec[-2] != "":
mins = int(vec[-2])
else:
raise RuntimeError("Improperly formatted time: \"" + timeStr + "\"")
if len(vec) > 2:
if vec[-3] != "":
hrs = int(vec[-3])
else:
raise RuntimeError("Improperly formatted time: \"" + timeStr + "\"")
except:
raise RuntimeError("Improperly formatted time: \"" + timeStr + "\"")
return ( sgn * (hrs * 3600 + mins * 60 + secs) )
##
# \internal
# \brief Radio handler class that supports nothing more complicated than
#    identifying a connected radio.
#
# Used internally to support radio auto-detection.
#
# This class implements the CyberRadioDriver.IRadio interface.
#
class _radio_identifier(_radio):
    _name = "Radio Identifier"
    # Plain-text (non-JSON) command protocol
    json = False
    ifSpec = _ifSpec
    adcRate = 1.0
    # This handler only identifies the radio, so every component count is
    # zero and every component type is None.
    numTuner = 0
    numTunerBoards = 0
    tunerType = None
    numWbddc = 0
    wbddcType = None
    numNbddc = 0
    nbddcType = None
    numTxs = 0
    txType = None
    numWbduc = 0
    wbducType = None
    numNbduc = 0
    nbducType = None
    numWbddcGroups = 0
    wbddcGroupType = None
    numNbddcGroups = 0
    nbddcGroupType = None
    numTunerGroups = 0
    tunerGroupType = None
    numGigE = 0
    numGigEDipEntries = 0
    # Identification queries are the only commands this handler supports
    idnQry = command.idn
    verQry = command.ver
    hrevQry = command.hrev
    # Every other query/command is disabled
    statQry = None
    tstatQry = None
    tadjCmd = None
    resetCmd = None
    cfgCmd = None
    ppsCmd = None
    utcCmd = None
    refCmd = None
    rbypCmd = None
    sipCmd = None
    dipCmd = None
    smacCmd = None
    dmacCmd = None
    calfCmd = None
    nbssCmd = None
    fnrCmd = None
    gpsCmd = None
    gposCmd = None
    rtvCmd = None
    tempCmd = None
    gpioStaticCmd = None
    gpioSeqCmd = None
    tgfcCmd = None
    # No mode tables or configuration keywords for a bare identifier
    refModes = {}
    rbypModes = {}
    vitaEnableOptions = {}
    # Accept any transport so auto-detection can probe all of them
    connectionModes = ["https", "tcp", "udp", "tty"]
    validConfigurationKeywords = []
    setTimeDefault = False

    # OVERRIDE
    ##
    # \protected
    # \brief Queries hardware to determine the object's current configuration.
    def _queryConfiguration(self):
        # Call the base-class implementation
        configKeys.Configurable._queryConfiguration(self)
        # This radio has nothing further that it can configure
##
# \brief Radio function (mode) command used by JSON-based radios.
#
class funJSON(command._jsonCommandBase):
    # Command mnemonic sent over the JSON protocol
    mnemonic = "fun"
    # Configuration key -> JSON parameter name for queries
    queryParamMap = {
            configKeys.RADIO_FUNCTION: "state",
        }
    # Configuration key -> JSON parameter name for sets
    setParamMap = {
            configKeys.RADIO_FUNCTION: "state",
        }
##
# \internal
# \brief Radio handler class that supports nothing more complicated than
# identifying a connected radio.
#
# Used internally to support radio auto-detection.
#
# This class implements the CyberRadioDriver.IRadio interface.
#
class _radio_identifier_json(_radio):
_name = "Radio Identifier"
json = True
ifSpec = _ifSpec
adcRate = 1.0
numTuner = 0
numTunerBoards = 0
tunerType = None
numWbddc = 0
wbddcType = None
numNbddc = 0
nbddcType = None
numTxs = 0
txType = None
numWbduc = 0
wbducType = None
numNbduc = 0
nbducType = None
numWbddcGroups = 0
wbddcGroupType = None
numNbddcGroups = 0
nbddcGroupType = None
numTunerGroups = 0
tunerGroupType = None
numGigE = 0
numGigEDipEntries = 0
idnQry = None
verQry = None
hrevQry = None
statQry = command.status_json
tstatQry = None
tadjCmd = None
resetCmd = None
cfgCmd = None
ppsCmd = None
utcCmd = None
refCmd = None
rbypCmd = None
sipCmd = None
dipCmd = None
smacCmd = None
dmacCmd = None
calfCmd = None
nbssCmd = None
fnrCmd = None
gpsCmd = None
gposCmd = None
rtvCmd = None
tempCmd = None
gpioStaticCmd = None
gpioSeqCmd = None
tgfcCmd = None
funCmd = funJSON
refModes = {}
rbypModes = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.