input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
#!/usr/bin/env python
import numpy as np
import scipy.io as io
import matplotlib.pyplot as plt
from scipy.stats import norm
import argparse
import copy
import tqdm
from hdphmm.utils import timeseries as ts
def initialize():
    """Build the command-line interface for generating synthetic timeseries.

    :return: configured argument parser
    :rtype: argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(description='Generate timeseries with different underlying models')

    # Fixed: adjacent string literals below previously concatenated without
    # separating spaces, producing garbled help text (e.g. "fullyimplemented").
    parser.add_argument('-t', '--type', default='AR', type=str, help="Underlying dynamical model. Only AR is fully "
                        "implemented.")
    parser.add_argument('-d', '--ndraws', default=2000, type=int, help='Number of time steps to take')
    parser.add_argument('-n', '--ntraj', default=4, type=int, help='Number of trajectories to generate')
    parser.add_argument('-f', '--format', nargs='+', default='npz', type=str, help='Format of output array (mat or npz)')
    parser.add_argument('-nd', '--ndimensions', default=1, type=int, help='Number of dimensions of trajectory.')

    # Define transition matrix. Either provide your own, or let this script generate one
    parser.add_argument('-T', '--transition_matrix', nargs='+', action='append', type=float, help='Define a transition '
                        'matrix. If provided, this will be used to determine the number of states. Each row of the '
                        'transition matrix should be passed with a separate -T flag')
    parser.add_argument('-s', '--nstates', default=3, type=int, help='Number of states to switch between')
    parser.add_argument('-slip', '--slip', default=0.01, type=float, help='Determines how frequently things will '
                        'switch states. A slip of 0 results in a transition matrix with only ones on the diagonal. '
                        'Higher values of slip will give smaller and smaller ratios of diagonals to the rest.')

    # Autoregressive parameters
    parser.add_argument('-r', '--order', default=1, type=int, help='Autoregressive order (number of time lags that '
                        'yt depends on).')
    parser.add_argument('-phis', '--phis', nargs='+', action='append', type=float, help='Define autoregressive '
                        'coefficients for each state. Coefficients for each state should be passed in order with '
                        'separate -phis flags. If this is not specified, the coefficients will be randomly generated '
                        'for you.')

    # noise parameters
    parser.add_argument('-cov', '--covariance', nargs='+', action='append', type=float, help='Covariance matrix for '
                        'each state. Pass matrix for each state with a separate flag.')

    # phantom state linking
    parser.add_argument('-l', '--link', action="store_true", help='Link together the independent trajectories with a '
                        'phantom state in between.')
    parser.add_argument('-pl', '--phantom_length', default=100, type=int, help='Number of time steps for phantom state '
                        'linkage')

    return parser
class StateError(Exception):
    """Raised when an operation references a state that has not been defined."""

    def __init__(self, message):
        # Delegate message storage and formatting to the base Exception.
        super().__init__(message)
class GenARData:
def __init__(self, params=None, dim=3, transition_matrix=None, phis=None, nstates=None,
slip=0.25, order=1, cov=None, stdmax=1, mu=None):
""" Given a transition matrix, generate timeseries using Markov Chains
:param params: a dictionary containing parameters of an AR HMM process. This is how one should pass the
parameters generated by hdphmm.InfiniteHMM stored in InfiniteHMM.converged_params.
:param dim: number of dimensions of trajectory data
:param transition_matrix: a list of N N-length lists. Each list represents a row of the transition matrix. If \
None or False is passed, a transition matrix will be generated randomly
:param phis: a list of N order-length lists of autoregressive coefficients for each state. In order of phi_1, \
phi_2 etc. If None, these will be randomly generated
:param nstates: number of states
:param slip: determines ratio of diagonal elements to the rest. A higher 'high' will give you a smaller ratio \
of diagonals to the rest and vice versa. A high of zero will result in an identity matrix, meaning there will \
be no transitions from the initial state
:param order: autoregressive order. Only specified if type is 'AR'
:param cov: covariance matrix of multivariate Gaussian white noise for each state. If None, a random covariance
matrix will be generated
:param stdmax: maximum standard deviation of Gaussian white noise. This is only used if stds=None
:type params: dict
:type dim: int
:type transition_matrix: list of lists
:type nstates: int
:type slip: float
:type order: int
:type stds: list
:type stdmax: float
:type phis: list of lists
"""
self.T = None
self.dwells = []
self.hops = []
if params is not None:
self.T = params['T']
self.nstates = self.T.shape[1]
self.phis = params['A']
self.cov = params['sigma']
self.pi_init = params['pi_init']
self.dim = self.cov.shape[1]
self.order = self.phis.shape[1]
self.mu = params['mu']
#self.count_matrix = self._get_count_matrix(params['z'])
else:
self.dim = dim
self.order = order
if transition_matrix is not None:
self.T = np.array(transition_matrix)
self.nstates = self.T.shape[1] # define number of states based on provided transition matrix
else:
if not nstates:
raise StateError("If no transition matrix is provided, the number of states must be specified")
self.nstates = nstates
self.generate_transition_matrix(slip)
self.phis = np.zeros([self.nstates, order, dim, dim])
if phis is not None:
# only works for r = 1
for s in range(self.nstates):
try:
self.phis[s, 0, ...] = np.array(phis[s]).reshape(dim, dim)
except IndexError:
raise IndexError('You have not provided enough phi matrices for the number of requested states')
else:
# NOTE: for multidimensional case, off-diagonal terms in each phi coefficient matrix are set to zero.
# I'm not sure what the stabilty rules are for the multidimensional case
self.phis = np.zeros([1, order, dim, dim, self.nstates])
for s in range(self.nstates):
self.phis[0, s, ...] = generate_ar_parameters(order, dim)
self.cov = np.zeros([self.nstates, dim, dim])
if cov is None:
for s in range(self.nstates):
A = np.random.uniform(0, stdmax, size=(dim, dim))
self.cov[0, ..., s] = A @ A.T
else:
for s in range(self.nstates):
self.cov[s, ...] = np.array(cov[s]).reshape(dim, dim)
self.mu = np.zeros([self.nstates, self.dim])
if mu is not None:
for s in range(self.nstates):
self.mu[s, :] = mu[s]
self.state_labels = np.arange(self.nstates)
self.traj = None
self.state_sequence = None
def _get_count_matrix(self, z):
nclusters = len(np.unique(z))
# need A, sigma, transition matrix, pi_init
count_matrix = np.zeros([nclusters, nclusters])
found_states = np.unique(z)
ndx_dict = {found_states[i]: i for i in range(len(found_states))}
count_matrix = np.zeros([nclusters, nclusters])
for frame in range(1, nT - 1): # start at frame 1. May need to truncate more as equilibration
transitioned_from = [ndx_dict[i] for i in ihmm_final.z[:, frame - 1]]
transitioned_to = [ndx_dict[i] for i in ihmm_final.z[:, frame]]
for pair in zip(transitioned_from, transitioned_to):
count_matrix[pair[0], pair[1]] += 1
# for frame in range(1, z.shape[1]): # start at frame 1. May need to truncate more as equilibration
# transitioned_from = z[:, frame - 1]
# transitioned_to = z[:, frame]
# for pair in zip(transitioned_from, transitioned_to):
# count_matrix[pair[0], pair[1]] += 1
return count_matrix
def generate_transition_matrix(self, high):
""" generate a semi-random transition matrix
:param high: determines ratio of diagonal elements to the rest. A higher 'high' will give you a smaller ratio of
diagonals to the rest and vice versa. A high of zero will result in an identity matrix, meaning there will be no
transitions from the initial state
:type high: float
"""
T = np.eye(self.nstates) # start with identify matrix
T += np.random.uniform(0, high, size=(self.nstates, self.nstates)) # add random draws from uniform distribution
self.T = T / T.sum(axis=1, keepdims=1) # normalize so sum of rows is 1
def gen_trajectory(self, ndraws, ntraj, bound_dimensions=None, progress=True, state_no=None, resample_T=False,
alpha=1):
""" Generate time series with chosen underlying dynamics
:param ndraws: number of sequential points to generate
:param ntraj: number of independent trajectories to generate
:param unbound_dimensions: indices of dimensions whose mean should not be fixed.
:param progress: show progress bar
:param state_no: if not None, generate a trajectory for the given state index
:param resample_T: resample the rows of the transition matrix by drawing from a dirichlet process
:param alpha: multiply rows of transition matrix by this number
:type ndraws: int
:type ntraj: int
:type unbound_dimensions: NoneType, list or np.ndarray
:type progress: bool
:type state_no: int or NoneType
:type resample_T: bool
:type alpha: int or float
"""
return self._gen_ar_hmm(ndraws, ntraj, bound_dimensions=bound_dimensions, state_no=state_no, progress=progress,
resample_T=resample_T, alpha=alpha)
#return self._gen_ar_hmm_fixed_distance(10)
def _gen_ar_hmm_fixed_distance(self, length, bound_dimensions=None):
""" Generate a mean-zero autoregressive timeseries based on the transition matrix and autoregressive parameters.
The timeseries is defined as:
yt = \sum_{n=1}^{r} phi_n * y_{t-n} + \epsilon_t
where r is autoregressive order and \epsilon_t is Gaussian white noise with state-dependent variance
:param ndraws: number of points to generate for timeseries
:param phis: autoregressive coefficients for each state (n_phis x n_states)
:type ndraws: int
:type phis: np.ndarray
"""
state_labels = np.arange(self.nstates)
current_state = np.random.choice(state_labels, p=self.pi_init)
previous_state = current_state
mean = np.zeros(self.dim)
traj = [np.array([0, 0]) for i in range(self.order)] # r, z
zeroed = [np.array([0, 0]) for i in range(self.order)]
# traj = np.zeros([self.order + 1, self.dim])
# zeroed = np.zeros_like(traj)
tlen = self.order
n = 0
# while np.abs(traj[-1][1]) < length:
self.traj = np.zeros([4807, 100, 2])
for i in | |
:param async_req bool
:param str id: (required)
:param list[ApiParameter] parameters:
:param str run_name: name to identify the run on the Kubeflow Pipelines UI, defaults to component name
:return: ApiRunCodeResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'parameters', 'run_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method run_dataset" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `run_dataset`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'run_name' in params:
query_params.append(('run_name', params['run_name'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'parameters' in params:
body_params = params['parameters']
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/datasets/{id}/run', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApiRunCodeResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def set_featured_datasets(self, dataset_ids, **kwargs): # noqa: E501
"""set_featured_datasets # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_featured_datasets(dataset_ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] dataset_ids: Array of dataset IDs to be featured. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.set_featured_datasets_with_http_info(dataset_ids, **kwargs) # noqa: E501
else:
(data) = self.set_featured_datasets_with_http_info(dataset_ids, **kwargs) # noqa: E501
return data
    def set_featured_datasets_with_http_info(self, dataset_ids, **kwargs):  # noqa: E501
        """set_featured_datasets  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.set_featured_datasets_with_http_info(dataset_ids, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param list[str] dataset_ids: Array of dataset IDs to be featured. (required)
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """
        # Parameters accepted by this endpoint, plus the generic swagger-client
        # transport options appended below.
        all_params = ['dataset_ids']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Standard swagger-codegen pattern: snapshot locals() and reject any
        # keyword argument the endpoint does not recognize.
        # NOTE(review): relies on `six` being imported at module level -- not
        # visible in this chunk.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method set_featured_datasets" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'dataset_ids' is set
        if ('dataset_ids' not in params or
                params['dataset_ids'] is None):
            raise ValueError("Missing the required parameter `dataset_ids` when calling `set_featured_datasets`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The dataset id list travels as the JSON request body.
        body_params = None
        if 'dataset_ids' in params:
            body_params = params['dataset_ids']
        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/datasets/featured', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def upload_dataset(self, uploadfile, **kwargs): # noqa: E501
"""upload_dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_dataset(uploadfile, async_req=True)
>>> result = thread.get()
:param async_req bool
:param file uploadfile: The dataset YAML file to upload. Can be a GZip-compressed TAR file (.tgz, .tar.gz) or a YAML file (.yaml, .yml). Maximum size is 32MB. (required)
:param str name:
:return: ApiDataset
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.upload_dataset_with_http_info(uploadfile, **kwargs) # noqa: E501
else:
(data) = self.upload_dataset_with_http_info(uploadfile, **kwargs) # noqa: E501
return data
    def upload_dataset_with_http_info(self, uploadfile, **kwargs):  # noqa: E501
        """upload_dataset  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.upload_dataset_with_http_info(uploadfile, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param file uploadfile: The dataset YAML file to upload. Can be a GZip-compressed TAR file (.tgz, .tar.gz) or a YAML file (.yaml, .yml). Maximum size is 32MB. (required)
        :param str name:
        :return: ApiDataset
        If the method is called asynchronously,
        returns the request thread.
        """
        # Parameters accepted by this endpoint, plus the generic swagger-client
        # transport options appended below.
        all_params = ['uploadfile', 'name']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Standard swagger-codegen pattern: snapshot locals() and reject any
        # keyword argument the endpoint does not recognize.
        # NOTE(review): relies on `six` being imported at module level -- not
        # visible in this chunk.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method upload_dataset" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'uploadfile' is set
        if ('uploadfile' not in params or
                params['uploadfile'] is None):
            raise ValueError("Missing the required parameter `uploadfile` when calling `upload_dataset`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        # Optional display name is passed as a query parameter.
        query_params = []
        if 'name' in params:
            query_params.append(('name', params['name']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}
        # The file is sent as a multipart form part, not in the request body.
        if 'uploadfile' in params:
            local_var_files['uploadfile'] = params['uploadfile']  # noqa: E501

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['multipart/form-data'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/datasets/upload', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ApiDataset',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def upload_dataset_file(self, id, uploadfile, **kwargs): # noqa: E501
"""upload_dataset_file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_dataset_file(id, uploadfile, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: The id of the dataset. (required)
:param file uploadfile: The file to upload, overwriting existing. Can be a GZip-compressed TAR file (.tgz), a YAML file (.yaml), Python script (.py), or Markdown file (.md) (required)
:return: ApiDataset
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.upload_dataset_file_with_http_info(id, uploadfile, **kwargs) # noqa: E501
else:
(data) = self.upload_dataset_file_with_http_info(id, uploadfile, **kwargs) # noqa: E501
return data
    def upload_dataset_file_with_http_info(self, id, uploadfile, **kwargs):  # noqa: E501
        """upload_dataset_file  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.upload_dataset_file_with_http_info(id, uploadfile, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str id: The id of the dataset. (required)
        :param file uploadfile: The file to upload, overwriting existing. Can be a GZip-compressed TAR file (.tgz), a YAML file (.yaml), Python script (.py), or Markdown file (.md) (required)
        :return: ApiDataset
        If the method is called asynchronously,
        returns the request thread.
        """
        # Parameters accepted by this endpoint, plus the generic swagger-client
        # transport options appended below.
        all_params = ['id', 'uploadfile']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Standard swagger-codegen pattern: snapshot locals() and reject any
        # keyword argument the endpoint does not recognize.
        # NOTE(review): relies on `six` being imported at module level -- not
        # visible in this chunk.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method upload_dataset_file" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `upload_dataset_file`")  # noqa: E501
        # verify the required parameter 'uploadfile' is set
        if ('uploadfile' not in params or
                params['uploadfile'] is None):
            raise ValueError("Missing the required parameter `uploadfile` when calling `upload_dataset_file`")  # noqa: E501

        collection_formats = {}

        # The dataset id is interpolated into the URL path.
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}
        # The file is sent as a multipart form part, not in the request body.
        if 'uploadfile' in params:
            local_var_files['uploadfile'] = params['uploadfile']  # noqa: E501

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['multipart/form-data'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        return self.api_client.call_api(
            '/datasets/{id}/upload', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ApiDataset',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def upload_dataset_from_url(self, url, **kwargs): # noqa: E501
"""upload_dataset_from_url # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_dataset_from_url(url, async_req=True)
>>> result = thread.get()
:param async_req bool
:param | |
9 * m.b1400)
# Generated Pyomo model fragment (appears machine-generated, e.g. from GAMS).
# Linear proportionality constraints: each constraint pins x_i to a fixed
# multiple of its paired variable x_{i+140}, i.e. x_i - c * x_{i+140} == 0.
# The multiplier c is constant within each group annotated below.
# NOTE(review): the model `m`, its variables, and `Constraint` are declared
# earlier in the file -- not visible in this chunk.

# Group: c = 0.2 (x1-x20 paired with x141-x160)
m.e1 = Constraint(expr= m.x1 - 0.2 * m.x141 == 0)
m.e2 = Constraint(expr= m.x2 - 0.2 * m.x142 == 0)
m.e3 = Constraint(expr= m.x3 - 0.2 * m.x143 == 0)
m.e4 = Constraint(expr= m.x4 - 0.2 * m.x144 == 0)
m.e5 = Constraint(expr= m.x5 - 0.2 * m.x145 == 0)
m.e6 = Constraint(expr= m.x6 - 0.2 * m.x146 == 0)
m.e7 = Constraint(expr= m.x7 - 0.2 * m.x147 == 0)
m.e8 = Constraint(expr= m.x8 - 0.2 * m.x148 == 0)
m.e9 = Constraint(expr= m.x9 - 0.2 * m.x149 == 0)
m.e10 = Constraint(expr= m.x10 - 0.2 * m.x150 == 0)
m.e11 = Constraint(expr= m.x11 - 0.2 * m.x151 == 0)
m.e12 = Constraint(expr= m.x12 - 0.2 * m.x152 == 0)
m.e13 = Constraint(expr= m.x13 - 0.2 * m.x153 == 0)
m.e14 = Constraint(expr= m.x14 - 0.2 * m.x154 == 0)
m.e15 = Constraint(expr= m.x15 - 0.2 * m.x155 == 0)
m.e16 = Constraint(expr= m.x16 - 0.2 * m.x156 == 0)
m.e17 = Constraint(expr= m.x17 - 0.2 * m.x157 == 0)
m.e18 = Constraint(expr= m.x18 - 0.2 * m.x158 == 0)
m.e19 = Constraint(expr= m.x19 - 0.2 * m.x159 == 0)
m.e20 = Constraint(expr= m.x20 - 0.2 * m.x160 == 0)
# Group: c = 0.5 (x21-x28 paired with x161-x168)
m.e21 = Constraint(expr= m.x21 - 0.5 * m.x161 == 0)
m.e22 = Constraint(expr= m.x22 - 0.5 * m.x162 == 0)
m.e23 = Constraint(expr= m.x23 - 0.5 * m.x163 == 0)
m.e24 = Constraint(expr= m.x24 - 0.5 * m.x164 == 0)
m.e25 = Constraint(expr= m.x25 - 0.5 * m.x165 == 0)
m.e26 = Constraint(expr= m.x26 - 0.5 * m.x166 == 0)
m.e27 = Constraint(expr= m.x27 - 0.5 * m.x167 == 0)
m.e28 = Constraint(expr= m.x28 - 0.5 * m.x168 == 0)
# Group: c = 0.7 (x29-x36 paired with x169-x176)
m.e29 = Constraint(expr= m.x29 - 0.7 * m.x169 == 0)
m.e30 = Constraint(expr= m.x30 - 0.7 * m.x170 == 0)
m.e31 = Constraint(expr= m.x31 - 0.7 * m.x171 == 0)
m.e32 = Constraint(expr= m.x32 - 0.7 * m.x172 == 0)
m.e33 = Constraint(expr= m.x33 - 0.7 * m.x173 == 0)
m.e34 = Constraint(expr= m.x34 - 0.7 * m.x174 == 0)
m.e35 = Constraint(expr= m.x35 - 0.7 * m.x175 == 0)
m.e36 = Constraint(expr= m.x36 - 0.7 * m.x176 == 0)
# Group: c = 1.2 (x37-x44 paired with x177-x184)
m.e37 = Constraint(expr= m.x37 - 1.2 * m.x177 == 0)
m.e38 = Constraint(expr= m.x38 - 1.2 * m.x178 == 0)
m.e39 = Constraint(expr= m.x39 - 1.2 * m.x179 == 0)
m.e40 = Constraint(expr= m.x40 - 1.2 * m.x180 == 0)
m.e41 = Constraint(expr= m.x41 - 1.2 * m.x181 == 0)
m.e42 = Constraint(expr= m.x42 - 1.2 * m.x182 == 0)
m.e43 = Constraint(expr= m.x43 - 1.2 * m.x183 == 0)
m.e44 = Constraint(expr= m.x44 - 1.2 * m.x184 == 0)
# Group: c = 0.5 (x45-x48 paired with x185-x188)
m.e45 = Constraint(expr= m.x45 - 0.5 * m.x185 == 0)
m.e46 = Constraint(expr= m.x46 - 0.5 * m.x186 == 0)
m.e47 = Constraint(expr= m.x47 - 0.5 * m.x187 == 0)
m.e48 = Constraint(expr= m.x48 - 0.5 * m.x188 == 0)
# Group: c = 0.7 (x49-x52 paired with x189-x192)
m.e49 = Constraint(expr= m.x49 - 0.7 * m.x189 == 0)
m.e50 = Constraint(expr= m.x50 - 0.7 * m.x190 == 0)
m.e51 = Constraint(expr= m.x51 - 0.7 * m.x191 == 0)
m.e52 = Constraint(expr= m.x52 - 0.7 * m.x192 == 0)
# Group: c = 1.2 (x53-x68 paired with x193-x208)
m.e53 = Constraint(expr= m.x53 - 1.2 * m.x193 == 0)
m.e54 = Constraint(expr= m.x54 - 1.2 * m.x194 == 0)
m.e55 = Constraint(expr= m.x55 - 1.2 * m.x195 == 0)
m.e56 = Constraint(expr= m.x56 - 1.2 * m.x196 == 0)
m.e57 = Constraint(expr= m.x57 - 1.2 * m.x197 == 0)
m.e58 = Constraint(expr= m.x58 - 1.2 * m.x198 == 0)
m.e59 = Constraint(expr= m.x59 - 1.2 * m.x199 == 0)
m.e60 = Constraint(expr= m.x60 - 1.2 * m.x200 == 0)
m.e61 = Constraint(expr= m.x61 - 1.2 * m.x201 == 0)
m.e62 = Constraint(expr= m.x62 - 1.2 * m.x202 == 0)
m.e63 = Constraint(expr= m.x63 - 1.2 * m.x203 == 0)
m.e64 = Constraint(expr= m.x64 - 1.2 * m.x204 == 0)
m.e65 = Constraint(expr= m.x65 - 1.2 * m.x205 == 0)
m.e66 = Constraint(expr= m.x66 - 1.2 * m.x206 == 0)
m.e67 = Constraint(expr= m.x67 - 1.2 * m.x207 == 0)
m.e68 = Constraint(expr= m.x68 - 1.2 * m.x208 == 0)
# Group: c = 0.3 (x69-x72 paired with x209-x212)
m.e69 = Constraint(expr= m.x69 - 0.3 * m.x209 == 0)
m.e70 = Constraint(expr= m.x70 - 0.3 * m.x210 == 0)
m.e71 = Constraint(expr= m.x71 - 0.3 * m.x211 == 0)
m.e72 = Constraint(expr= m.x72 - 0.3 * m.x212 == 0)
# Group: c = 0.9 (x73-x76 paired with x213-x216)
m.e73 = Constraint(expr= m.x73 - 0.9 * m.x213 == 0)
m.e74 = Constraint(expr= m.x74 - 0.9 * m.x214 == 0)
m.e75 = Constraint(expr= m.x75 - 0.9 * m.x215 == 0)
m.e76 = Constraint(expr= m.x76 - 0.9 * m.x216 == 0)
# Group: c = 0.3 (x77-x80 paired with x217-x220)
m.e77 = Constraint(expr= m.x77 - 0.3 * m.x217 == 0)
m.e78 = Constraint(expr= m.x78 - 0.3 * m.x218 == 0)
m.e79 = Constraint(expr= m.x79 - 0.3 * m.x219 == 0)
m.e80 = Constraint(expr= m.x80 - 0.3 * m.x220 == 0)
# Group: c = 0.9 (x81-x84 paired with x221-x224)
m.e81 = Constraint(expr= m.x81 - 0.9 * m.x221 == 0)
m.e82 = Constraint(expr= m.x82 - 0.9 * m.x222 == 0)
m.e83 = Constraint(expr= m.x83 - 0.9 * m.x223 == 0)
m.e84 = Constraint(expr= m.x84 - 0.9 * m.x224 == 0)
# Group: c = 0.4 (x85-x96 paired with x225-x236)
m.e85 = Constraint(expr= m.x85 - 0.4 * m.x225 == 0)
m.e86 = Constraint(expr= m.x86 - 0.4 * m.x226 == 0)
m.e87 = Constraint(expr= m.x87 - 0.4 * m.x227 == 0)
m.e88 = Constraint(expr= m.x88 - 0.4 * m.x228 == 0)
m.e89 = Constraint(expr= m.x89 - 0.4 * m.x229 == 0)
m.e90 = Constraint(expr= m.x90 - 0.4 * m.x230 == 0)
m.e91 = Constraint(expr= m.x91 - 0.4 * m.x231 == 0)
m.e92 = Constraint(expr= m.x92 - 0.4 * m.x232 == 0)
m.e93 = Constraint(expr= m.x93 - 0.4 * m.x233 == 0)
m.e94 = Constraint(expr= m.x94 - 0.4 * m.x234 == 0)
m.e95 = Constraint(expr= m.x95 - 0.4 * m.x235 == 0)
m.e96 = Constraint(expr= m.x96 - 0.4 * m.x236 == 0)
# Group: c = 1.6 (x97-x104 paired with x237-x244)
m.e97 = Constraint(expr= m.x97 - 1.6 * m.x237 == 0)
m.e98 = Constraint(expr= m.x98 - 1.6 * m.x238 == 0)
m.e99 = Constraint(expr= m.x99 - 1.6 * m.x239 == 0)
m.e100 = Constraint(expr= m.x100 - 1.6 * m.x240 == 0)
m.e101 = Constraint(expr= m.x101 - 1.6 * m.x241 == 0)
m.e102 = Constraint(expr= m.x102 - 1.6 * m.x242 == 0)
m.e103 = Constraint(expr= m.x103 - 1.6 * m.x243 == 0)
m.e104 = Constraint(expr= m.x104 - 1.6 * m.x244 == 0)
# Group: c = 1.1 (x105-x112 paired with x245-x252)
m.e105 = Constraint(expr= m.x105 - 1.1 * m.x245 == 0)
m.e106 = Constraint(expr= m.x106 - 1.1 * m.x246 == 0)
m.e107 = Constraint(expr= m.x107 - 1.1 * m.x247 == 0)
m.e108 = Constraint(expr= m.x108 - 1.1 * m.x248 == 0)
m.e109 = Constraint(expr= m.x109 - 1.1 * m.x249 == 0)
m.e110 = Constraint(expr= m.x110 - 1.1 * m.x250 == 0)
m.e111 = Constraint(expr= m.x111 - 1.1 * m.x251 == 0)
m.e112 = Constraint(expr= m.x112 - 1.1 * m.x252 == 0)
# Group: c = 0.7 (x113-x124 paired with x253-x264)
m.e113 = Constraint(expr= m.x113 - 0.7 * m.x253 == 0)
m.e114 = Constraint(expr= m.x114 - 0.7 * m.x254 == 0)
m.e115 = Constraint(expr= m.x115 - 0.7 * m.x255 == 0)
m.e116 = Constraint(expr= m.x116 - 0.7 * m.x256 == 0)
m.e117 = Constraint(expr= m.x117 - 0.7 * m.x257 == 0)
m.e118 = Constraint(expr= m.x118 - 0.7 * m.x258 == 0)
m.e119 = Constraint(expr= m.x119 - 0.7 * m.x259 == 0)
m.e120 = Constraint(expr= m.x120 - 0.7 * m.x260 == 0)
m.e121 = Constraint(expr= m.x121 - 0.7 * m.x261 == 0)
m.e122 = Constraint(expr= m.x122 - 0.7 * m.x262 == 0)
m.e123 = Constraint(expr= m.x123 - 0.7 * m.x263 == 0)
m.e124 = Constraint(expr= m.x124 - 0.7 * m.x264 == 0)
# Group: c = 0.2 (x125-x128 paired with x265-x268)
m.e125 = Constraint(expr= m.x125 - 0.2 * m.x265 == 0)
m.e126 = Constraint(expr= m.x126 - 0.2 * m.x266 == 0)
m.e127 = Constraint(expr= m.x127 - 0.2 * m.x267 == 0)
m.e128 = Constraint(expr= m.x128 - 0.2 * m.x268 == 0)
# Group: c = 0.7 (x129-x132 paired with x269-x272)
m.e129 = Constraint(expr= m.x129 - 0.7 * m.x269 == 0)
m.e130 = Constraint(expr= m.x130 - 0.7 * m.x270 == 0)
m.e131 = Constraint(expr= m.x131 - 0.7 * m.x271 == 0)
m.e132 = Constraint(expr= m.x132 - 0.7 * m.x272 == 0)
# Group: c = 0.3 (x133-x136 paired with x273-x276)
m.e133 = Constraint(expr= m.x133 - 0.3 * m.x273 == 0)
m.e134 = Constraint(expr= m.x134 - 0.3 * m.x274 == 0)
m.e135 = Constraint(expr= m.x135 - 0.3 * m.x275 == 0)
m.e136 = Constraint(expr= m.x136 - 0.3 * m.x276 == 0)
# Group: c = 0.9 (x137-x140 paired with x277-x280)
m.e137 = Constraint(expr= m.x137 - 0.9 * m.x277 == 0)
m.e138 = Constraint(expr= m.x138 - 0.9 * m.x278 == 0)
m.e139 = Constraint(expr= m.x139 - 0.9 * m.x279 == 0)
m.e140 = Constraint(expr= m.x140 - 0.9 * m.x280 == 0)
# Simple lower bounds on selected variables
m.e141 = Constraint(expr= m.x101 >= 1.2)
m.e142 = Constraint(expr= m.x102 >= 1.15)
m.e143 = Constraint(expr= m.x103 >= 1.1)
m.e144 = Constraint(expr= | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# flake8: noqa: E501
class TcgaCodes(object):
DISEASE_STUDIES = {
# 'Study Abbreviation': 'Study Name',
'LAML': 'Acute Myeloid Leukemia',
'ACC': 'Adrenocortical carcinoma',
'BLCA': 'Bladder Urothelial Carcinoma',
'LGG': 'Brain Lower Grade Glioma',
'BRCA': 'Breast invasive carcinoma',
'CESC': 'Cervical squamous cell carcinoma and endocervical adenocarcinoma',
'CHOL': 'Cholangiocarcinoma',
'LCML': 'Chronic Myelogenous Leukemia',
'COAD': 'Colon adenocarcinoma',
'CNTL': 'Controls',
'ESCA': 'Esophageal carcinoma ',
'FPPP': 'FFPE Pilot Phase II',
'GBM': 'Glioblastoma multiforme',
'HNSC': 'Head and Neck squamous cell carcinoma',
'KICH': 'Kidney Chromophobe',
'KIRC': 'Kidney renal clear cell carcinoma',
'KIRP': 'Kidney renal papillary cell carcinoma',
'LIHC': 'Liver hepatocellular carcinoma',
'LUAD': 'Lung adenocarcinoma',
'LUSC': 'Lung squamous cell carcinoma',
'DLBC': 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma',
'MESO': 'Mesothelioma',
'MISC': 'Miscellaneous',
'OV': 'Ovarian serous cystadenocarcinoma',
'PAAD': 'Pancreatic adenocarcinoma',
'PCPG': 'Pheochromocytoma and Paraganglioma',
'PRAD': 'Prostate adenocarcinoma',
'READ': 'Rectum adenocarcinoma',
'SARC': 'Sarcoma',
'SKCM': 'Skin Cutaneous Melanoma',
'STAD': 'Stomach adenocarcinoma',
'TGCT': 'Testicular Germ Cell Tumors',
'THYM': 'Thymoma',
'THCA': 'Thyroid carcinoma',
'UCS': 'Uterine Carcinosarcoma',
'UCEC': 'Uterine Corpus Endometrial Carcinoma',
'UVM': 'Uveal Melanoma',
}
REPOSITORY_TYPES = {
'bcr', # 'Biospecimen Core Resource'
'cgcc',
'gsc',
}
DATA_PROVIDERS = {
'biotab', # Clinical metadata, skip
'intgen.org',
'nationwidechildrens.org',
'genome.wustl.edu',
'supplemental' # unknown, appears under 'tumor/ov/bcr/', skip
}
DATA_TYPES = {
'bio', # XML format clinical metadata, skip
'biotab', # CSV format clinical metadata, skip
'pathology_reports', # PDF format pathology reports, skip
'diagnostic_images', # SVS format images, use
'tissue_images', # SVS format images, use
'minbio' # unknown, appears under 'tumor/gbm/bcr/intgen.org/', skip
}
SLIDE_LOCATION = {
'TS': 'Top Slide',
'MS': 'Middle Slide',
'BS': 'Bottom Slide',
'DX': 'Top Slide',
}
TISSUE_SOURCE_SITE = {
# 'TSS Code': ('Source Site', 'Study Name', 'BCR'),
'01': ('International Genomics Consortium', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'02': ('MD Anderson Cancer Center', 'Glioblastoma multiforme', 'IGC'),
'04': ('Gynecologic Oncology Group', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'05': ('Indivumed', 'Lung adenocarcinoma', 'IGC'),
'06': ('Henry Ford Hospital', 'Glioblastoma multiforme', 'IGC'),
'07': ('TGen', 'Cell Line Control', 'IGC'),
'08': ('UCSF', 'Glioblastoma multiforme', 'IGC'),
'09': ('UCSF', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'10': ('MD Anderson Cancer Center', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'11': ('MD Anderson Cancer Center', 'Lung squamous cell carcinoma', 'IGC'),
'12': ('Duke', 'Glioblastoma multiforme', 'IGC'),
'13': ('Memorial Sloan Kettering', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'14': ('Emory University', 'Glioblastoma multiforme', 'IGC'),
'15': ('Mayo Clinic - Rochester', 'Glioblastoma multiforme', 'IGC'),
'16': ('Toronto Western Hospital', 'Glioblastoma multiforme', 'IGC'),
'17': ('Washington University', 'Lung adenocarcinoma', 'IGC'),
'18': ('Princess Margaret Hospital (Canada)', 'Lung squamous cell carcinoma', 'IGC'),
'19': ('Case Western', 'Glioblastoma multiforme', 'IGC'),
'1Z': ('Johns Hopkins', 'Thymoma', 'NCH'),
'20': ('Fox Chase Cancer Center', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'21': ('Fox Chase Cancer Center', 'Lung squamous cell carcinoma', 'IGC'),
'22': ('Mayo Clinic - Rochester', 'Lung squamous cell carcinoma', 'IGC'),
'23': ('<NAME>', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'24': ('Washington University', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'25': ('<NAME> - Rochester', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'26': ('University of Florida', 'Glioblastoma multiforme', 'IGC'),
'27': ('Milan - Italy, Fondazione IRCCS Instituto Neuroligico C. Besta', 'Glioblastoma multiforme', 'IGC'),
'28': ('<NAME>', 'Glioblastoma multiforme', 'IGC'),
'29': ('Duke', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'2A': ('Memorial Sloan Kettering Cancer Center', 'Prostate adenocarcinoma', 'NCH'),
'2E': ('University of Kansas Medical Center', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'2F': ('Erasmus MC', 'Bladder Urothelial Carcinoma', 'NCH'),
'2G': ('Erasmus MC', 'Testicular Germ Cell Tumors', 'NCH'),
'2H': ('Erasmus MC', 'Esophageal carcinoma ', 'NCH'),
'2J': ('<NAME>', 'Pancreatic adenocarcinoma', 'NCH'),
'2K': ('Greenville Health System', 'Kidney renal papillary cell carcinoma', 'NCH'),
'2L': ('Technical University of Munich', 'Pancreatic adenocarcinoma', 'NCH'),
'2M': ('Technical University of Munich', 'Esophageal carcinoma ', 'NCH'),
'2N': ('Technical University of Munich', 'Stomach adenocarcinoma', 'NCH'),
'2P': ('University of California San Diego', 'Pancreatic adenocarcinoma', 'NCH'),
'2V': ('University of California San Diego', 'Liver hepatocellular carcinoma', 'NCH'),
'2W': ('University of New Mexico', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'2X': ('ABS IUPUI', 'Testicular Germ Cell Tumors', 'NCH'),
'2Y': ('Moffitt Cancer Center', 'Liver hepatocellular carcinoma', 'NCH'),
'2Z': ('Moffitt Cancer Center', 'Kidney renal papillary cell carcinoma', 'NCH'),
'30': ('Harvard', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'31': ('Imperial College', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'32': ('St. Joseph\'s Hospital (AZ)', 'Glioblastoma multiforme', 'IGC'),
'33': ('Johns Hopkins', 'Lung squamous cell carcinoma', 'IGC'),
'34': ('University of Pittsburgh', 'Lung squamous cell carcinoma', 'IGC'),
'35': ('Cureline', 'Lung adenocarcinoma', 'IGC'),
'36': ('BC Cancer Agency', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'37': ('Cureline', 'Lung squamous cell carcinoma', 'IGC'),
'38': ('UNC', 'Lung adenocarcinoma', 'IGC'),
'39': ('MSKCC', 'Lung squamous cell carcinoma', 'IGC'),
'3A': ('Moffitt Cancer Center', 'Pancreatic adenocarcinoma', 'NCH'),
'3B': ('Moffitt Cancer Center', 'Sarcoma', 'NCH'),
'3C': ('Columbia University', 'Breast invasive carcinoma', 'NCH'),
'3E': ('Columbia University', 'Pancreatic adenocarcinoma', 'NCH'),
'3G': ('MD Anderson Cancer Center', 'Thymoma', 'NCH'),
'3H': ('MD Anderson Cancer Center', 'Mesothelioma', 'NCH'),
'3J': ('Carle Cancer Center', 'Breast invasive carcinoma', 'NCH'),
'3K': ('Boston Medical Center', 'Liver hepatocellular carcinoma', 'NCH'),
'3L': ('Albert Einstein Medical Center', 'Colon adenocarcinoma', 'NCH'),
'3M': ('University of Kansas Medical Center', 'Stomach adenocarcinoma', 'NCH'),
'3N': ('Greenville Health System', 'Skin Cutaneous Melanoma', 'NCH'),
'3P': ('Greenville Health System', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'3Q': ('Greenville Health Systems', 'Thymoma', 'NCH'),
'3R': ('University of New Mexico', 'Sarcoma', 'NCH'),
'3S': ('University of New Mexico', 'Thymoma', 'NCH'),
'3T': ('Emory University', 'Thymoma', 'NCH'),
'3U': ('University of Chicago', 'Mesothelioma', 'NCH'),
'3W': ('University of California San Diego', 'Sarcoma', 'NCH'),
'3X': ('Alberta Health Services', 'Cholangiocarcinoma', 'NCH'),
'3Z': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Kidney renal clear cell carcinoma', 'NCH'),
'41': ('Christiana Healthcare', 'Glioblastoma multiforme', 'IGC'),
'42': ('Christiana Healthcare', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'43': ('Christiana Healthcare', 'Lung squamous cell carcinoma', 'IGC'),
'44': ('Christiana Healthcare', 'Lung adenocarcinoma', 'IGC'),
'46': ('St. Joseph\'s Medical Center (MD)', 'Lung squamous cell carcinoma', 'IGC'),
'49': ('Johns Hopkins', 'Lung adenocarcinoma', 'IGC'),
'4A': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Kidney renal papillary cell carcinoma', 'NCH'),
'4B': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Lung adenocarcinoma', 'NCH'),
'4C': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Thyroid carcinoma', 'NCH'),
'4D': ('Molecular Response', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'4E': ('Molecular Response', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'4G': ('Sapienza University of Rome', 'Cholangiocarcinoma', 'NCH'),
'4H': ('Proteogenex, Inc.', 'Breast invasive carcinoma', 'NCH'),
'4J': ('Proteogenex, Inc.', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'4K': ('Proteogenex, Inc.', 'Testicular Germ Cell Tumors', 'NCH'),
'4L': ('Proteogenex, Inc.', 'Prostate adenocarcinoma', 'NCH'),
'4N': ('<NAME>kins Cancer Center - Our Lady of the Lake', 'Colon adenocarcinoma', 'NCH'),
'4P': ('Duke University', 'Head and Neck squamous cell carcinoma', 'NCH'),
'4Q': ('Duke University', 'Sarcoma', 'NCH'),
'4R': ('Duke University', 'Liver hepatocellular carcinoma', 'NCH'),
'4S': ('Duke University', 'Prostate adenocarcinoma', 'NCH'),
'4T': ('Duke University', 'Colon adenocarcinoma', 'NCH'),
'4V': ('Hospital Louis Pradel', 'Thymoma', 'NCH'),
'4W': ('University of Miami', 'Glioblastoma multiforme', 'NCH'),
'4X': ('Yale University', 'Thymoma', 'NCH'),
'4Y': ('Medical College of Wisconsin', 'Sarcoma', 'NCH'),
'4Z': ('Barretos Cancer Hospital', 'Bladder Urothelial Carcinoma', 'NCH'),
'50': ('University of Pittsburgh', 'Lung adenocarcinoma', 'IGC'),
'51': ('UNC', 'Lung squamous cell carcinoma', 'IGC'),
'52': ('University of Miami', 'Lung squamous cell carcinoma', 'IGC'),
'53': ('University of Miami', 'Lung adenocarcinoma', 'IGC'),
'55': ('International Genomics Consortium', 'Lung adenocarcinoma', 'IGC'),
'56': ('International Genomics Consortium', 'Lung squamous cell carcinoma', 'IGC'),
'57': ('International Genomics Consortium', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'58': ('Thoraxklinik at University Hospital Heidelberg', 'Lung squamous cell carcinoma', 'IGC'),
'59': ('Roswell Park', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'5A': ('Wake Forest University', 'Cholangiocarcinoma', 'NCH'),
'5B': ('Medical College of Wisconsin', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'5C': ('Cureline', 'Liver hepatocellular carcinoma', 'NCH'),
'5D': ('University of Miami', 'Sarcoma', 'NCH'),
'5F': ('Duke University', 'Thyroid carcinoma', 'NCH'),
'5G': ('Cleveland Clinic Foundation', 'Thymoma', 'NCH'),
'5H': ('Retina Consultants Houston', 'Uveal Melanoma', 'NCH'),
'5J': ('Cureline', 'Acute Myeloid Leukemia', 'NCH'),
'5K': ('St. Joseph\'s Hospital AZ', 'Thymoma', 'NCH'),
| |
# A 20x20 Game of Life grid with every cell dead (0).  Rows are distinct list
# objects so a cell can be toggled without aliasing other rows.
EMPTY_BOARD = [[0 for _ in range(20)] for _ in range(20)]

# Rendered form of EMPTY_BOARD: twenty blank 20-character rows, each
# terminated by a newline (dead cells render as spaces).
EMPTY_BOARD_STRING = (" " * 20 + "\n") * 20
# 20x20 grid with exactly one live cell, at row 2, column 4.
BOARD_WITH_ROW_2_COLUMN_4_ALIVE = [
    [1 if (row, col) == (2, 4) else 0 for col in range(20)]
    for row in range(20)
]

# Rendered form of the board above: live cells as '*', dead cells as spaces,
# one newline-terminated line per row.
BOARD_WITH_ROW_2_COLUMN_4_ALIVE_STRING = "".join(
    "".join("*" if cell else " " for cell in row) + "\n"
    for row in BOARD_WITH_ROW_2_COLUMN_4_ALIVE
)
BOARD_WITH_SQUARE_AT_ROW_2_COLUMN_4_ALIVE = [
# 0 1 2 3 4
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], # 0
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], # 1
[0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], # 2
[0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, | |
j, k, m)*y(m, n, p)
answer(i, j, k, n, p, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# thirdranktensor . fourthranktensor
#---------------------------------------------------------------------------
def testThirdRankTensorDotFourthRankTensor(self):
for dim in dims:
r3type = eval("ThirdRankTensor%id" % dim)
r4type = eval("FourthRankTensor%id" % dim)
r5type = eval("FifthRankTensor%id" % dim)
x = fillRandom(r3type)
y = fillRandom(r4type)
result = innerProduct(x, y)
answer = r5type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
for p in xrange(dim):
z = answer(i, j, m, n, p) + x(i, j, k)*y(k, m, n, p)
answer(i, j, m, n, p, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# thirdranktensor . thirdranktensor
#---------------------------------------------------------------------------
def testThirdRankTensorDotThirdRankTensor(self):
for dim in dims:
r3type = eval("ThirdRankTensor%id" % dim)
r4type = eval("FourthRankTensor%id" % dim)
x = fillRandom(r3type)
y = fillRandom(r3type)
result = innerProduct(x, y)
answer = r4type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
z = answer(i, j, m, n) + x(i, j, k)*y(k, m, n)
answer(i, j, m, n, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#===============================================================================
# Test class for outer product.
#===============================================================================
class TestOuterProduct(unittest.TestCase):
    """Exercise ``outerProduct`` for scalar/vector/tensor operand pairings.

    Each test builds the expected answer element-by-element and compares it
    against the library result to within ``tol``.

    Fixes applied: ``xrange`` -> ``range`` and deprecated ``failUnless`` ->
    ``assertTrue`` (Python 2/3 compatible, identical behavior), and the local
    variable shadowing builtin ``type`` is renamed to ``vtype``.
    """

    #---------------------------------------------------------------------------
    # scalar x value
    #---------------------------------------------------------------------------
    def testScalarOuterThing(self):
        for typestring in ("Vector%id", "Tensor%id", "SymTensor%id", "ThirdRankTensor%id"):
            for dim in dims:
                ttype = eval(typestring % dim)
                x = rangen.uniform(*ranrange)
                y = fillRandom(ttype)
                result = outerProduct(x, y)
                answer = ttype()
                for i in range(ttype.numElements):
                    answer[i] = x*y[i]
                self.assertTrue(isEqual(result, answer, tol=tol),
                                "Mismatch for %s: %s != %s" % (ttype.__name__, result, answer))
        return

    #---------------------------------------------------------------------------
    # value x scalar
    #---------------------------------------------------------------------------
    def testThingOuterScalar(self):
        for typestring in ("Vector%id", "Tensor%id", "SymTensor%id", "ThirdRankTensor%id"):
            for dim in dims:
                ttype = eval(typestring % dim)
                x = rangen.uniform(*ranrange)
                y = fillRandom(ttype)
                result = outerProduct(y, x)
                answer = ttype()
                for i in range(ttype.numElements):
                    answer[i] = x*y[i]
                self.assertTrue(isEqual(result, answer, tol=tol),
                                "Mismatch for %s: %s != %s" % (ttype.__name__, result, answer))
        return

    #---------------------------------------------------------------------------
    # vector x vector
    #---------------------------------------------------------------------------
    def testVectorOuterVector(self):
        for dim in dims:
            vtype = eval("Vector%id" % dim)  # renamed from 'type' (builtin shadowing)
            ttype = eval("Tensor%id" % dim)
            x = fillRandom(vtype)
            y = fillRandom(vtype)
            result = outerProduct(x, y)
            answer = ttype()
            for i in range(dim):
                for j in range(dim):
                    answer(i, j, x[i]*y[j])
            self.assertTrue(isEqual(result, answer, tol=tol),
                            "Mismatch: %s != %s" % (result, answer))
        return

    #---------------------------------------------------------------------------
    # tensor x vector
    #---------------------------------------------------------------------------
    def testTensorOuterVector(self):
        for typestring in ("Tensor%id", "SymTensor%id"):
            for dim in dims:
                vtype = eval("Vector%id" % dim)
                ttype = eval(typestring % dim)
                trttype = eval("ThirdRankTensor%id" % dim)
                x = fillRandom(ttype)
                y = fillRandom(vtype)
                result = outerProduct(x, y)
                answer = trttype()
                for i in range(dim):
                    for j in range(dim):
                        for k in range(dim):
                            answer(i, j, k, x(i,j)*y(k))
                self.assertTrue(isEqual(result, answer, tol=tol),
                                "Mismatch: %s != %s" % (result, answer))
        return

    #---------------------------------------------------------------------------
    # vector x tensor
    #---------------------------------------------------------------------------
    def testVectorOuterTensor(self):
        for typestring in ("Tensor%id", "SymTensor%id"):
            for dim in dims:
                vtype = eval("Vector%id" % dim)
                ttype = eval(typestring % dim)
                trttype = eval("ThirdRankTensor%id" % dim)
                x = fillRandom(vtype)
                y = fillRandom(ttype)
                result = outerProduct(x, y)
                answer = trttype()
                for i in range(dim):
                    for j in range(dim):
                        for k in range(dim):
                            answer(i, j, k, x(i)*y(j,k))
                self.assertTrue(isEqual(result, answer, tol=tol),
                                "Mismatch: %s != %s" % (result, answer))
        return
#===============================================================================
# Test class for double inner product.
#===============================================================================
class TestDoubleInnerProduct(unittest.TestCase):
#---------------------------------------------------------------------------
# tensor .. tensor
#---------------------------------------------------------------------------
def testTensorDoubleDotTensor(self):
for ttypestring1 in ("Tensor%id", "SymTensor%id"):
for ttypestring2 in ("Tensor%id", "SymTensor%id"):
for dim in dims:
t1type = eval(ttypestring1 % dim)
t2type = eval(ttypestring2 % dim)
x = fillRandom(t1type)
y = fillRandom(t2type)
result = innerDoubleProduct(x, y)
result2 = x.doubledot(y)
answer = 0.0
for i in xrange(dim):
for j in xrange(dim):
answer += x(i,j)*y(j,i)
self.failUnless(abs(result - answer) < 1.0e-10, "Mismatch: %s != %s" % (result, answer))
self.failUnless(abs(result2 - answer) < 1.0e-10, "Mismatch: %s != %s" % (result2, answer))
return
#---------------------------------------------------------------------------
# tensor .. thirdranktensor
#---------------------------------------------------------------------------
def testTensorDoubleDotThirdRankTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
vtype = eval("Vector%id" % dim)
r2type = eval(ttypestring % dim)
r3type = eval("ThirdRankTensor%id" % dim)
x = fillRandom(r2type)
y = fillRandom(r3type)
result = innerDoubleProduct(x, y)
answer = vtype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
answer[k] += x(i, j)*y(j, i, k)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# thirdranktensor .. tensor
#---------------------------------------------------------------------------
def testThirdRankTensorDoubleDotTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
vtype = eval("Vector%id" % dim)
r2type = eval(ttypestring % dim)
r3type = eval("ThirdRankTensor%id" % dim)
x = fillRandom(r3type)
y = fillRandom(r2type)
result = innerDoubleProduct(x, y)
answer = vtype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
answer[i] += x(i, j, k)*y(k, j)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# thirdranktensor .. thirdranktensor
#---------------------------------------------------------------------------
def testThirdRankTensorDoubleDotThirdRankTensor(self):
for dim in dims:
r2type = eval("Tensor%id" % dim)
r3type = eval("ThirdRankTensor%id" % dim)
x = fillRandom(r3type)
y = fillRandom(r3type)
result = innerDoubleProduct(x, y)
answer = r2type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
z = answer(i,m) + x(i,j,k)*y(k,j,m)
answer(i,m,z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# tensor .. fourthranktensor
#---------------------------------------------------------------------------
def testTensorDoubleDotFourthRankTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
ttype = eval("Tensor%id" % dim)
r2type = eval(ttypestring % dim)
r4type = eval("FourthRankTensor%id" % dim)
x = fillRandom(r2type)
y = fillRandom(r4type)
result = innerDoubleProduct(x, y)
answer = ttype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
z = answer(k, m) + x(i, j)*y(j, i, k, m)
answer(k, m, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# fourthranktensor .. tensor
#---------------------------------------------------------------------------
def testFourthRankTensorDoubleDotTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
ttype = eval("Tensor%id" % dim)
r2type = eval(ttypestring % dim)
r4type = eval("FourthRankTensor%id" % dim)
x = fillRandom(r4type)
y = fillRandom(r2type)
result = innerDoubleProduct(x, y)
answer = ttype()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
z = answer(i, j) + x(i, j, k, m)*y(m, k)
answer(i, j, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# thirdranktensor .. fourthranktensor
#---------------------------------------------------------------------------
def testThirdRankTensorDoubleDotFourthRankTensor(self):
for dim in dims:
r3type = eval("ThirdRankTensor%id" % dim)
r4type = eval("FourthRankTensor%id" % dim)
x = fillRandom(r3type)
y = fillRandom(r4type)
result = innerDoubleProduct(x, y)
answer = r3type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
z = answer(i, m, n) + x(i, j, k)*y(k, j, m, n)
answer(i, m, n, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# fourthranktensor .. thirdranktensor
#---------------------------------------------------------------------------
def testFourthRankTensorDoubleDotThirdRankTensor(self):
for dim in dims:
r3type = eval("ThirdRankTensor%id" % dim)
r4type = eval("FourthRankTensor%id" % dim)
x = fillRandom(r4type)
y = fillRandom(r3type)
result = innerDoubleProduct(x, y)
answer = r3type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
z = answer(i, j, n) + x(i, j, k, m)*y(m, k, n)
answer(i, j, n, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# fourthranktensor .. fourthranktensor
#---------------------------------------------------------------------------
def testFourthRankTensorDoubleDotFourthRankTensor(self):
for dim in dims:
r4type = eval("FourthRankTensor%id" % dim)
x = fillRandom(r4type)
y = fillRandom(r4type)
result = innerDoubleProduct(x, y)
answer = r4type()
for i in xrange(dim):
for j in xrange(dim):
for k in xrange(dim):
for m in xrange(dim):
for n in xrange(dim):
for p in xrange(dim):
z = answer(i, j, n, p) + x(i, j, k, m)*y(m, k, n, p)
answer(i, j, n, p, z)
self.failUnless(isEqual(result, answer, tol=tol), "Mismatch: %s != %s" % (result, answer))
return
#---------------------------------------------------------------------------
# tensor .. fifthranktensor
#---------------------------------------------------------------------------
def testTensorDoubleDotFifthRankTensor(self):
for ttypestring in ("Tensor%id", "SymTensor%id"):
for dim in dims:
r2type = eval(ttypestring % dim)
r3type = eval("ThirdRankTensor%id" % dim)
r5type = eval("FifthRankTensor%id" % dim)
| |
# Functions/waveform_Wphase.py (from the DetectorDesignSensitivities repository)
import numpy as np
def Get_Waveform(source,pct_of_peak=0.01):
    """Uses Mass Ratio (q <= 18), aligned spins (abs(a/m)~0.85 or when q=1 abs(a/m)<0.98),
    fitting coefficients for QNM type, and sampling rate
    Returns the frequency, the Phenom amplitude of the inspiral-merger-ringdown
    Uses methods found in <https://arxiv.org/abs/1508.07253> and <https://arxiv.org/abs/1508.07250>
    Parameters
    ----------
    source : object
        source object from StrainandNoise, contains all source parameters
        (reads: f_low, nfreqs, q, chi1, chi2, _fitcoeffs)
    pct_of_peak : float, optional
        the percentange of the strain at merger that dictates the maximum frequency the waveform is calculated at in geometrized units (G=c=1)
    Returns
    -------
    Mf : numpy array of floats
        the waveform frequencies in geometrized units (G=c=1)
    fullwaveform : numpy array of floats
        the waveform strain in geometrized units (G=c=1)
    fullphase : numpy array of floats
        the waveform phase at each Mf (third element of the returned list)
    Raises
    ------
    ValueError
        if source.f_low is at or above the computed ringdown cutoff frequency
    """
    f_low = source.f_low
    N = source.nfreqs
    q = source.q
    x1 = source.chi1
    x2 = source.chi2
    fitcoeffs = source._fitcoeffs
    #M = m1+m2 #Total Mass
    #q = m2/m1 #Mass Ratio: Paper tested up to 18
    #eta = m1*m2/M**2 reduced mass: Paper tested up to 0.05 (q=18)
    eta = q/(q+1)**2  # symmetric mass ratio written in terms of q
    x_PN = chi_PN(eta,x1,x2) #PN reduced spin parameter
    a_f = a_final(x1,x2,q,eta) #dimensionless spin
    ##################
    #Finds f_ringdown and f_damp from fit taken from <https://arxiv.org/abs/gr-qc/0512160>
    n = 0 #QNM indices
    l = 2
    m = 2
    numn = 3 #number of n's included in the table
    # Row index into the (l, m, n) quasi-normal-mode fit-coefficient table.
    index = (l-2)*(2*l+1)*numn + (l-m)*numn + n
    f_fit = fitcoeffs[index][3:6]
    q_fit = fitcoeffs[index][6:9]
    omega_RD = f_fit[0]+f_fit[1]*(1-a_f)**f_fit[2] #M omega_{lmn}
    tau = 2*(q_fit[0]+q_fit[1]*(1-a_f)**q_fit[2])/omega_RD #tau_{lmn}/M = 2 Q_{lmn}/(M omega_{lmn})
    ########################
    f_RD = omega_RD/2/np.pi  # ringdown frequency (geometrized units)
    f_damp = 1/tau/2/np.pi   # damping frequency (geometrized units)
    Gamma1 = Lambda(eta,x_PN,4)
    Gamma2 = Lambda(eta,x_PN,5)
    Gamma3 = Lambda(eta,x_PN,6)
    f_peak = Calc_f_peak(f_RD,f_damp,[Gamma1,Gamma2,Gamma3])
    # Amplitude region boundaries (f1/f2/f3) and phase region boundaries
    # (f1_phase/f2_phase); the 0.014 and 0.018 constants are the transition
    # frequencies used by the PhenomD papers cited above.
    f1 = 0.014
    f3 = f_peak
    f2 = (f1+f3)/2
    f1_phase = 0.018
    f2_phase = 0.5*f_RD
    cutoffFreq = Find_Cutoff_Freq(f_RD,f_damp,[Gamma1,Gamma2,Gamma3],pct_of_peak=pct_of_peak)
    #If lowest frequency is greater than cutoffFreq, then raise error.
    if f_low >= cutoffFreq:
        raise ValueError('Lower frequency bound (ie. f_low) must be lower than that of the merger ringdown.')
    # Log-spaced frequency grid from f_low up to the ringdown cutoff.
    Mf = np.logspace(np.log10(f_low),np.log10(cutoffFreq),N)
    #Mf_phase = np.logspace(log10(0.0035),log10(1.15*f_RD),N)
    #Mf_phase = np.logspace(log10(0.0035),log10(0.12),N)
    # Amplitude values and derivatives at the region boundaries, used to
    # solve for the intermediate-region polynomial coefficients.
    v1 = A_insp(f1,eta,x1,x2,x_PN)
    v2 = Lambda(eta,x_PN,3)
    v3 = A_MR(f3,f_RD,f_damp,[Gamma1,Gamma2,Gamma3])
    fund1 = DA_insp(f1,eta,x1,x2,x_PN)
    fund3 = DA_MR(f3,f_RD,f_damp,[Gamma1,Gamma2,Gamma3])
    #############################
    #Calculate Solutions to eqn 21 in intermediate region
    Del_solns = A_intermediate(f1,f2,f3,v1,v2,v3,fund1,fund3) # Solutions to eqn 21
    ##############################
    #Calculate all sections of waveform and Paste together
    indxf1 = np.argmin(np.abs(Mf-f1))
    indxfpeak = np.argmin(np.abs(Mf-f_peak))
    tmpinspiral = A_norm(Mf[0:indxf1+1],eta)*A_insp(Mf[0:indxf1+1],eta,x1,x2,x_PN)
    tmpintermediate = A_norm(Mf[indxf1+1:indxfpeak],eta)*A_int(Mf[indxf1+1:indxfpeak],Del_solns)
    tmpmergerringdown = A_norm(Mf[indxfpeak:],eta)*A_MR(Mf[indxfpeak:],f_RD,f_damp,[Gamma1,Gamma2,Gamma3])
    fullwaveform = np.hstack((tmpinspiral,tmpintermediate,tmpmergerringdown))
    ##############################
    #Calculate all section of waveform Phase
    indxf1_phase = np.argmin(np.abs(Mf-f1_phase))
    indxf2_phase = np.argmin(np.abs(Mf-f2_phase))
    tc=0.0  # coalescence time offset; fixed to zero here
    ##############################
    #Calculate Phase connections alpha0 and Beta0:
    dphi_ins = Dphi_ins(f1_phase,eta,x1,x2,x_PN,tc)
    phi_ins = Phi_ins(f1_phase,eta,x1,x2,x_PN,tc)
    # beta/alpha constants enforce continuity of phase and its derivative
    # across the inspiral->intermediate and intermediate->merger boundaries.
    beta1 = eta*dphi_ins - Dphi_int(f1_phase,eta,x_PN,0.0)
    beta0 = eta*phi_ins - Phi_int(f1_phase,eta,x_PN,beta1,0.0)
    alpha1 = Dphi_int(f2_phase,eta,x_PN,beta1) - Dphi_MR(f2_phase,eta,x_PN,f_RD,f_damp,0.0)
    alpha0 = Phi_int(f2_phase,eta,x_PN,beta1,beta0) - Phi_MR(f2_phase,eta,x_PN,f_RD,f_damp,alpha1,0.0)
    # NOTE(review): the three d*_phase arrays below are computed but never
    # used or returned — presumably left over from debugging; confirm before
    # removing.
    dinspiral_phase = Dphi_ins(Mf[:indxf1_phase+1],eta,x1,x2,x_PN,tc)
    dintermediate_phase = (1/eta)*Dphi_int(Mf[indxf1_phase+1:indxf2_phase],eta,x_PN,beta1)
    dmerger_ringdown_phase = (1/eta)*Dphi_MR(Mf[indxf2_phase:],eta,x_PN,f_RD,f_damp,alpha1)
    inspiral_phase = Phi_ins(Mf[0:indxf1_phase+1],eta,x1,x2,x_PN,tc)
    intermediate_phase = (1/eta)*Phi_int(Mf[indxf1_phase+1:indxf2_phase],eta,x_PN,beta1,beta0)
    merger_ringdown_phase = (1/eta)*Phi_MR(Mf[indxf2_phase:],eta,x_PN,f_RD,f_damp,alpha1,alpha0)
    ############################
    #Join subsections of phase and amplitude
    fullphase = np.hstack((inspiral_phase,intermediate_phase,merger_ringdown_phase))
    return [Mf,fullwaveform,fullphase]
def A_norm(freqs, eta):
    """Overall amplitude normalization: A_0 * Mf**(-7/6).

    Parameters
    ----------
    freqs : array
        The frequencies in Natural units (Mf, G=c=1) of the waveform
    eta : float
        The reduced mass ratio
    """
    # Leading-order (Newtonian) amplitude prefactor.
    amp0 = np.sqrt(2*eta/3/np.pi**(1/3))
    return amp0*freqs**-(7/6)
def A_insp(freqs, eta, x1, x2, X_PN):
    """Inspiral amplitude: PN expansion plus higher-order rho terms.

    Parameters
    ----------
    freqs : array
        The frequencies in Natural units (Mf, G=c=1) of the waveform
    eta : float
        The reduced mass ratio
    x1 : float
        The dimensionless spin parameter abs(a/m) for black hole m1.
    x2 : float
        The dimensionless spin parameter abs(a/m) for black hole m2.
    X_PN : float
        The PN reduced spin parameter
    """
    pn_sum = 0.0
    rho_sum = 0.0
    for order in range(7):
        # PN series in (pi*f)^(order/3)
        pn_sum = pn_sum + PN_coeffs(eta, x1, x2, order)*(np.pi*freqs)**(order/3)
        if 1 <= order <= 3:
            # Higher-order fit coefficients (rho terms)
            rho_sum = rho_sum + Lambda(eta, X_PN, order-1)*freqs**((6+order)/3)
    return pn_sum + rho_sum
def DA_insp(freqs,eta,x1,x2,X_PN):
    """Calculates the derivative (w.r.t. Mf) of the inspiral amplitude.

    Parameters
    ----------
    freqs : array
        Frequencies in natural units (Mf, G=c=1).
    eta : float
        The reduced mass ratio.
    x1 : float
        The dimensionless spin parameter abs(a/m) for black hole m1.
    x2 : float
        The dimensionless spin parameter abs(a/m) for black hole m2.
    X_PN : float
        The PN reduced spin parameter.
    """
    dpn = 0.0
    dhigher = 0.0
    for k in range(7):
        # d/df of PN_coeffs*(pi*f)^(k/3)  ==  pi^(k/3)*(k/3)*PN_coeffs*f^((k-3)/3)
        dpn = dpn + (np.pi ** (k / 3) * (k / 3) * PN_coeffs(eta, x1, x2, k)) * (freqs) ** ((k - 3) / 3)
        if 1 <= k <= 3:
            # d/df of Lambda*f^((6+k)/3)  ==  ((6+k)/3)*Lambda*f^((k+3)/3)
            dhigher = dhigher + (((6 + k) / 3) * Lambda(eta, X_PN, k - 1)) * freqs ** ((k + 3) / 3)
    return dpn + dhigher
def A_MR(freqs,f_RD,f_damp,Gammas):
    """Calculates the normalized merger-ringdown amplitude.

    A Lorentzian centered on the ringdown frequency, damped by an
    exponential in (f - f_RD).

    Parameters
    ----------
    freqs : array
        Frequencies in natural units (Mf, G=c=1).
    f_RD : float
        Frequency of the ringdown transition.
    f_damp : float
        Damping frequency.
    Gammas : array-like
        Fit coefficients shaping the Lorentzian.
    """
    df = freqs - f_RD
    width = Gammas[2] * f_damp
    lorentzian = (Gammas[0] * width) / (df ** 2 + width ** 2)
    return lorentzian * np.exp(-(Gammas[1] / width) * df)
def DA_MR(freqs,f_RD,f_damp,Gammas):
    """Calculates the derivative (w.r.t. Mf) of the merger-ringdown amplitude.

    Parameters
    ----------
    freqs : array
        Frequencies in natural units (Mf, G=c=1).
    f_RD : float
        Frequency of the ringdown transition.
    f_damp : float
        Damping frequency.
    Gammas : array-like
        Fit coefficients shaping the Lorentzian.
    """
    df = freqs - f_RD
    width = Gammas[2] * f_damp
    # Log-derivative of the Lorentzian-times-exponential form in A_MR.
    base = A_MR(freqs, f_RD, f_damp, Gammas)
    return -base * (2 * df / (df ** 2 + width ** 2) + Gammas[1] / width)
def A_intermediate(f1,f2,f3,v1,v2,v3,d1,d3):
    """Solve the 5x5 linear system for the intermediate-amplitude quartic.

    Returns coefficients delta0..delta4 of
    delta0 + delta1*f + delta2*f^2 + delta3*f^3 + delta4*f^4
    matching values v1, v2, v3 at f1, f2, f3 and slopes d1, d3 at f1, f3.
    """
    value_rows = [[1., f, f**2, f**3, f**4] for f in (f1, f2, f3)]
    slope_rows = [[0., 1., 2*f, 3*f**2, 4*f**3] for f in (f1, f3)]
    coeff_matrix = np.array(value_rows + slope_rows, dtype='float')
    rhs = np.array([v1, v2, v3, d1, d3], dtype='float')
    return np.linalg.solve(coeff_matrix, rhs)
def A_int(freqs,delt):
    """Calculates the intermediate amplitude (quartic polynomial).

    Parameters
    ----------
    freqs : array
        Frequencies in natural units (Mf, G=c=1).
    delt : array
        Coefficients delta0..delta4 matching the inspiral to the
        merger-ringdown portion of the waveform.
    """
    # Evaluate the quartic by Horner's scheme.
    result = delt[4]
    for coeff in (delt[3], delt[2], delt[1], delt[0]):
        result = result * freqs + coeff
    return result
###########################################################################
#Phase portion of waveform
###########################################################################
def Phi_ins(freqs,eta,x1,x2,x_PN,t_c,phi_c,sigma0):
    """Calculates the Inspiral Phase (TaylorF2 plus phenomenological terms).
    Parameters
    ----------
    freqs : array
        The frequencies in Natural units (Mf, G=c=1) of the waveform
    eta : float
        The reduced mass ratio
    x1 : float
        The dimensionless spin parameter abs(a/m) for black hole m1.
    x2 : float
        The dimensionless spin parameter abs(a/m) for black hole m2.
    x_PN : float
        The PN reduced spin parameter
    t_c : float
        Coalescence time; enters only through the 2*pi*t_c*Mf term.
    phi_c : float
        Phase at coalescence (constant offset, subtracted).
    sigma0 : float
        Constant term of the phenomenological sigma series; sigma1..sigma4
        are looked up from the Lambda() coefficient table.
    """
    # Phenomenological higher-order coefficients from the fit table.
    sigma1 = Lambda(eta,x_PN,7)
    sigma2 = Lambda(eta,x_PN,8)
    sigma3 = Lambda(eta,x_PN,9)
    sigma4 = Lambda(eta,x_PN,10)
    TF2_expansion = 0.0
    TF2_const = 3/(128*eta)
    piMf = np.pi*freqs
    for i in range(5):
        # First five terms of the summation; the remaining terms carry
        # log(pi*Mf) factors and are added separately below.
        TF2_expansion = TF2_expansion + PN_coeffs_phase(eta,x1,x2,i)*(piMf)**((i-5)/3)
    # NOTE(review): np.log10 is used here, but the TaylorF2 phase is
    # conventionally written with the natural logarithm -- confirm against
    # the reference implementation before trusting absolute phase values.
    TF2_expansion = (TF2_expansion + (1+np.log10(piMf))*PN_coeffs_phase(eta,x1,x2,5) +
                     (PN_coeffs_phase(eta,x1,x2,6) - (6848/63)*np.log10(64*piMf))*(piMf)**(1/3) +
                     PN_coeffs_phase(eta,x1,x2,7)*(piMf)**(2/3))
    phi_TF2 = 2*t_c*piMf - phi_c - np.pi/4 + TF2_const*TF2_expansion
    return (phi_TF2 + (1/eta)*(sigma0 + sigma1*freqs + (3/4*sigma2)*freqs**(4/3) + (3/5*sigma3)*freqs**(5/3) + (.5*sigma4)*freqs**2))
def Dphi_ins(freqs,eta,x1,x2,x_PN,t_c):
    """Calculates the Inspiral Phase Derivative (w.r.t. Mf).
    Parameters
    ----------
    freqs : array
        The frequencies in Natural units (Mf, G=c=1) of the waveform
    eta : float
        The reduced mass ratio
    x1 : float
        The dimensionless spin parameter abs(a/m) for black hole m1.
    x2 : float
        The dimensionless spin parameter abs(a/m) for black hole m2.
    x_PN : float
        The PN reduced spin parameter
    t_c : float
        Coalescence time; contributes the constant 2*pi*t_c term.
    """
    # Phenomenological higher-order coefficients from the fit table.
    sigma1 = Lambda(eta,x_PN,7)
    sigma2 = Lambda(eta,x_PN,8)
    sigma3 = Lambda(eta,x_PN,9)
    sigma4 = Lambda(eta,x_PN,10)
    TF2_const_deriv = 3/(128*eta)
    TF2_expans_deriv = 0.0
    for i in range(5):
        # Term-by-term derivative of the power-law part of the TF2 series.
        TF2_expans_deriv = TF2_expans_deriv + (PN_coeffs_phase(eta,x1,x2,i)*np.pi**((i-5)/3)*((i-5)/3))*freqs**((i-8)/3)
    # NOTE(review): the three explicit terms below use coefficient indices
    # 4, 5 and 6, while the corresponding log-bearing terms in Phi_ins use
    # indices 5, 6 and 7 (and the loop above already covers i=4).  This
    # looks like an off-by-one in the coefficient indices -- verify against
    # the analytic derivative of Phi_ins before relying on this function.
    # Also see the log10-vs-natural-log note on Phi_ins.
    Dphi_TF2 = (2*np.pi*t_c + TF2_const_deriv*(TF2_expans_deriv +
                PN_coeffs_phase(eta,x1,x2,4)/freqs +
                (PN_coeffs_phase(eta,x1,x2,5)-(6848/63)*(3+np.log10((64*np.pi)*freqs)))*(np.pi**(1/3)/3)*freqs**(-2/3) +
                ((2/3)*np.pi**(2/3)*PN_coeffs_phase(eta,x1,x2,6))*freqs**(-1/3)))
    return(Dphi_TF2 + (1/eta)*(sigma1 + sigma2*freqs**(1/3) + sigma3*freqs**(2/3) + sigma4*freqs))
def Phi_MR(freqs,eta,x_PN,f_RD,f_damp,alpha1,alpha0):
    """Calculates the normalized merger-ringdown phase (eqn 14).

    Parameters
    ----------
    freqs : array
        Frequencies in natural units (Mf, G=c=1).
    eta : float
        The reduced mass ratio.
    x_PN : float
        The PN reduced spin parameter.
    f_RD : float
        Frequency of the ringdown transition.
    f_damp : float
        Damping frequency.
    alpha1 : float
        Linear-in-frequency matching coefficient (fixed by continuity of
        the phase derivative at the intermediate boundary; see caller).
    alpha0 : float
        Constant matching coefficient (fixed by continuity of the phase).
    """
    # Remaining alpha coefficients come from the fit table.
    alpha2, alpha3, alpha4, alpha5 = (
        Lambda(eta, x_PN, idx) for idx in (15, 16, 17, 18))
    arctan_arg = (freqs - alpha5*f_RD)/f_damp
    return (alpha0 + alpha1*freqs - alpha2/freqs
            + (4./3.*alpha3)*freqs**(.75)
            + alpha4*np.arctan(arctan_arg))
def Dphi_MR(freqs,eta,x_PN,f_RD,f_damp,alpha1):
"""Calculates Derivative of the Merger-Ringdown Phase Derivative
Parameters
----------
freqs : array
The frequencies in Natural units (Mf, G=c=1) of the waveform
eta : float
The | |
# -*- coding: utf-8 -*-
"""
featherpmm.py: Extends feather file format with a paired file
that contains extra metadata. If the feather file is /path/to/foo.feather,
the metadata file is /path/to/foo.pmm.
"""
from __future__ import division
import os
import datetime
import numpy as np
import sys
try:
import pyarrow.feather as feather
except ImportError:
feather = None
from pmmif import pmm
# Pandas dtypes used to detect columns that feather cannot round-trip
# (all-null object/bool columns) and the float dtype used as a stand-in.
PANDAS_STRING_DTYPE = np.dtype('O')
PANDAS_BOOL_DTYPE = np.dtype('bool')
PANDAS_FLOAT_DTYPE = np.dtype('float64')
# Marker appended to column names that stand in for all-null columns.
NULL_STRING = '∅'
NULL_SUFFIX = '_' + NULL_STRING
UTF8 = 'UTF-8'
# Python 2/3 compatibility shim: choose the native bytes/text types once.
isPython2 = sys.version_info.major < 3
if isPython2:
    bytes_type = str # type: type
    unicode_type = unicode # type: type
else:
    bytes_type = bytes
    unicode_type = str
class Dataset(object):
    """
    Container for a Pandas dataframe and a metadata object from PMM.
    Metadata fulfils two functions:
        1. It allows association of 'intended' types of columns, in cases
           for which Pandas forces promotion. For example:
             - Integer columns that contain nulls are promoted to float
               in Pandas, but can be marked as 'integer' in the metadata.
             - Boolean columns that contain nulls are promoted to Object
               in Pandas, but can be marked as 'boolean' in the metadata.
        2. It allows additional annotations to be associated with the
           dataframe, and with individual columns of the dataframe.
             - Tags (with optional values)
             - Descriptions
    A dataset object has two attributes:
        - df is an ordinary Pandas dataframe
        - md is a PMM Metadata object
    """
    def __init__(self, df, md=None, name=None):
        """
        Create Dataset object.
        - df is a Pandas dataframe.
        - md is a PMM metadata object, and is optional.
        - name is the name to be associated with the dataset, for the
          case where no metadata is provided.
        If no metadata is provided, the metadata is inferred from the
        dataframe.
        """
        self.df = df
        self.md = md or _create_pmm_metadata(df, name)

    def add_field(self, name, col, pmmtype=None):
        """
        Add a new field to the dataset.
        Adds the field to the Pandas dataframe, and declares its type
        in the PMM metadata.
        The type, if provided, must be one of: 'boolean', 'integer',
        'real', 'string', 'datestamp'. If the type is not specified, the
        type is inferred from the dataframe.
        The advantages of using add_field, as opposed to just creating it
        directly in the dataframe, are:
        - the option of specifying an intended type, even if Pandas has
          promoted the type in the dataframe.
        - creating a corresponding entry in the metadata, to facilitate
          tagging, descriptions, etc.
        """
        self.df[name] = col
        self.declare_field(name, pmmtype)

    def declare_field(self, name, pmmtype=None):
        """
        Declare the type of a field in the dataset, which must already
        exist in the Pandas dataframe.
        This is intended for use when a dataframe has been created without
        any metadata (for example, from a CSV file), and then more detailed
        type information needs to be declared for existing fields.
        The type, if provided, must be one of the types described for
        add_field, above.
        """
        # An all-null (or empty) column carries no dtype information, so
        # force the dtype Pandas would have used had values been present.
        if self.df[name].isnull().all():  # all null or no records
            if pmmtype == 'string':
                self.df[name] = self.df[name].astype(np.dtype('O'))
            elif pmmtype == 'datestamp':
                self.df[name] = (self.df[name]
                                 .astype(np.dtype('datetime64[ns]')))
        fieldMetadata = _create_pmm_field(self.df[name], pmmtype=pmmtype)
        self.md.add_field(name, fieldMetadata)

    def tag_field(self, colname, tagname, value=None):
        """
        Add a tag to a field. The tag can optionally have a value, but
        by default does not.
        The field must already exist in the metadata.
        If a value is provided, it must be of one of the following Python
        types:
        - None
        - bool
        - int
        - float
        - str
        - datetime.datetime
        or it can be a (potentially nested) list or dictionary over these
        types with string keys.
        """
        self.md[colname].tags[tagname] = value

    def tag_dataset(self, tagname, value=None):
        """
        Add a tag to the dataset. The tag can optionally have a value, but
        by default does not. If a value is provided, it must have one of
        the types described in tag_field above.
        """
        self.md.tags[tagname] = value

    def update_metadata(self):
        """
        Update the metadata to bring it into line with the dataset.
        After calling this method, all of the fields that exist in the
        dataset will now exist in the metadata too, and the metadata will
        not contain any fields that do not appear in the dataframe.
        It will infer types in the metadata for any fields that do not
        already have metadata, but will not alter the types of existing
        fields in the metadata.
        """
        _reset_fields_from_dataframe(self)

    def merge_metadata(self, other, fields=None):
        """
        After merging another dataset in (via pd.merge), we don't have any of
        the metadata associated with the fields that have come in from that
        other dataset. This function takes the 'other metadata' and brings it
        into the metadata for the main dataset. The new metadata wins if the
        field exists in both and:
        1. the fields parameter is left empty, or
        2. the fields parameter is a list and the field is in the list
        """
        incoming = [fx.name for fx in other.md.fields]
        for fieldname in incoming:
            if fields is None or fieldname in fields:
                # Drop our copy of the field's metadata so the other
                # dataset's version wins when it is merged in below.
                existing = [fx.name for fx in self.md.fields]
                if fieldname in existing:
                    del self.md.fields[existing.index(fieldname)]
        _add_metadata_from_other_dataset(self.md, other.md)
        _reset_fields_from_dataframe(self)

    def append(self, other):
        """
        Append another dataset to an existing one.
        The second dataframe is appended to the first one, and the metadata
        for any fields that only exist in the second one is added to the
        first one.
        """
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
        # pd.concat is the documented replacement.  Imported locally because
        # this module otherwise has no direct pandas dependency.
        import pandas as pd
        self.df = pd.concat([self.df, other.df], ignore_index=True)
        _add_metadata_from_other_dataset(self.md, other.md)
        _reset_fields_from_dataframe(self)
def read_dataframe(featherpath):
    """
    Read a feather file together with its paired .pmm metadata file (if
    present), returning a Dataset rather than a bare dataframe.
    The Dataset holds the Pandas dataframe in its df attribute and the
    metadata in its md attribute.  When no .pmm file exists alongside the
    feather file, metadata is inferred from the dataframe.
    """
    if feather is None:
        raise Exception('Feather-format is not available')
    frame = feather.read_feather(featherpath)
    pmmpath, datasetname = _split_feather_path(featherpath)
    md = (pmm.load(pmmpath) if os.path.exists(pmmpath)
          else _create_pmm_metadata(frame, datasetname))
    # Undo the float-NaN stand-ins written for all-null string/bool columns.
    restored = _recover_problematical_all_null_columns(Dataset(frame, md))
    return Dataset(restored, md)
def write_dataframe(dataset, featherpath):
    """
    Write a Dataset to a feather file plus a paired .pmm metadata file.
    Similar to feather.write_dataframe except that it expects a Dataset
    object (dataframe in .df, metadata in .md) rather than a dataframe.
    Metadata is created from the dataframe if the Dataset has none.
    """
    if feather is None:
        raise Exception('Feather-format is not available')
    pmmpath, datasetname = _split_feather_path(featherpath)
    if dataset.md is None:
        dataset.md = _create_pmm_metadata(dataset.df, datasetname)
    _reset_fields_from_dataframe(dataset)
    # Replace all-null string/bool columns, which feather cannot store.
    sanitized = _sanitize_problematical_all_null_columns(dataset)
    try:
        # feather doesn't always write the file correctly if it already exists
        if os.path.exists(featherpath):
            os.remove(featherpath)
        feather.write_feather(sanitized, featherpath)
        dataset.md.save(pmmpath)
    except:
        # feather leaves partial files behind on failure; remove both halves
        # so we never leave a mismatched .feather/.pmm pair, then re-raise.
        for path in (featherpath, pmmpath):
            if os.path.exists(path):
                os.remove(path)
        raise
#
# The rest of the functions below are internal to this module, and should not
# be called from outside.
#
def _sanitize_problematical_all_null_columns(ds):
    """
    Feather doesn't like all-null string columns or all-null boolean columns,
    so this function transforms them before saving.
    They are transformed into float64 fields with NaN at every value,
    and the pandas column name gets '_∅t' appended, where t is a
    type indicator --- b for boolean, s for string or u for unknown.
    Returns a new dataframe; the Dataset passed in is not modified.
    """
    origdf, md = ds.df, ds.md
    df = origdf[list(origdf)]  # shallow copy so the caller's frame is untouched
    nTransformed = 0
    fieldnames = list(df.columns)
    nRecords = len(df.index)
    # Column labels may be bytes (py2) or unicode; build a suffix that matches.
    dfIsUnicode = fieldnames and type(fieldnames[0]) == unicode_type
    fn = _unicode_definite if dfIsUnicode else _utf8_definite
    null_suffix = fn(NULL_SUFFIX)
    for i, f in enumerate(fieldnames):
        # Only object/bool columns that are entirely null are problematic.
        if (df[f].dtype not in (PANDAS_STRING_DTYPE, PANDAS_BOOL_DTYPE)
            or df[f].notnull().sum() > 0): # includes bools with nulls
            continue
        # A non-empty bool-dtype column cannot actually hold nulls; skip it.
        if df[f].dtype == PANDAS_BOOL_DTYPE and nRecords > 0:
            continue
        typeChar = ('b' if md[f].type == 'boolean'
                    else 's' if md[f].type == 'string'
                    else 'u')
        altname = f + null_suffix + typeChar
        if altname in fieldnames:
            continue  # stand-in column already exists; nothing to do
        # Replace the unwritable column with an all-NaN float64 stand-in.
        df[altname] = np.array([np.nan] * nRecords,
                               dtype=PANDAS_FLOAT_DTYPE)
        nTransformed += 1
        fieldnames[i] = altname
    if nTransformed > 0:
        df = df[fieldnames]
    return df
def _recover_problematical_all_null_columns(ds):
"""
Feather doesn't like all-null string columns or all-null boolean columns,
so they are sanitized before saving; this untransforms them.
"""
df, md = ds.df, ds.md
nTransformed = 0
fieldnames = list(df.columns)
nRecords = len(df.index)
dfIsUnicode = fieldnames and type(fieldnames[0]) == unicode_type
fn = _unicode_definite if dfIsUnicode else _utf8_definite
null_suffix = fn(NULL_SUFFIX)
for i, f | |
# Repository: PrivateStorageio/SecureAccessTokenAuthorizer
# Copyright 2022 PrivateStorage.io, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
"""
A system for replicating local SQLite3 database state to remote storage.
Theory of Operation
===================
A function to wrap a ``sqlite3.Connection`` in a new type is provided. This
new type provides facilities for accomplishing two goals:
* It (can someday) present an expanded connection interface which includes
  the ability to switch the database into "replicated" mode.  This is an
  application-facing interface meant to be used when the application is ready
  to discharge its responsibilities in the replication process.
* It (can someday) expose the usual cursor interface wrapped around the usual
cursor behavior combined with extra logic to record statements which change
the underlying database (DDL and DML statements). This recorded data then
feeds into the above replication process once it is enabled.
An application's responsibilities in the replication process are to arrange
for remote storage of "snapshots" and "event streams". See the
replication/recovery design document for details of these concepts.
Once replication has been enabled, the application (can someday be) informed
whenever the event stream changes (respecting database transactionality) and
data can be shipped to remote storage as desired.
It is essential to good replication performance that once replication is
enabled all database-modifying actions are captured in the event stream. This
is the reason for providing a ``sqlite3.Connection``-like object for use by
application code rather than a separate side-car interface: it minimizes the
opportunities for database changes which are overlooked by this replication
system.
"""
__all__ = [
"ReplicationAlreadySetup",
"fail_setup_replication",
"setup_tahoe_lafs_replication",
"with_replication",
"statements_to_snapshot",
"connection_to_statements",
"snapshot",
]
import os
import re
from enum import Enum
from io import BytesIO
from sqlite3 import Connection as _SQLite3Connection
from sqlite3 import Cursor as _SQLite3Cursor
from typing import (
IO,
Any,
Awaitable,
Callable,
ClassVar,
Generator,
Iterable,
Iterator,
Optional,
Protocol,
Sequence,
)
import cbor2
from attrs import Factory, define, field, frozen
from compose import compose
from eliot import log_call
from twisted.application.service import IService, Service
from twisted.internet.defer import CancelledError, Deferred, DeferredQueue, succeed
from twisted.logger import Logger
from twisted.python.filepath import FilePath
from twisted.python.lockfile import FilesystemLock
from ._types import CapStr
from .config import REPLICA_RWCAP_BASENAME, Config
from .sql import Connection, Cursor, SQLRuntimeType, SQLType, statement_mutates
from .tahoe import DataProvider, DirectoryEntry, ITahoeClient, attenuate_writecap
# Signature of a function which can store an object under a given name,
# setting remote ZKAPAuthorizer state.
Uploader = Callable[[str, DataProvider], Awaitable[None]]
# Signature of a function which can remove entries from ZKAPAuthorizer state,
# given a predicate selecting the names to prune.
Pruner = Callable[[Callable[[str], bool]], Awaitable[None]]
# Signatures of functions which can list all entries in ZKAPAuthorizer state.
Lister = Callable[[], Awaitable[list[str]]]
EntryLister = Callable[[], Awaitable[dict[str, DirectoryEntry]]]
class SnapshotPolicy(Protocol):
    """
    Encode policy rules about when to take and upload a new snapshot.
    """
    def should_snapshot(self, snapshot_size: int, replica_sizes: list[int]) -> bool:
        """
        Given the size of a new snapshot and the sizes of the existing
        replica objects (snapshot and event streams), is now a good time to
        take a new snapshot?
        """
# Well-known name used for the snapshot object in the replica.
SNAPSHOT_NAME = "snapshot"
@frozen
class Replica:
    """
    Handle to one specific remote replica: knows how to upload objects to
    it, prune objects from it, and list what it currently holds.
    """

    upload: Uploader
    prune: Pruner
    entry_lister: EntryLister

    async def list(self) -> list[str]:
        """Return the names of all entries currently in the replica."""
        entries = await self.entry_lister()
        return list(entries)
class ReplicationJob(Enum):
    """
    The kinds of jobs that the replication queue knows about.
    :ivar startup: The job that is run once when the replication service
        starts and which is responsible for inspecting local and remote state
        to determine if any actions are immediately necessary (even before any
        further local changes are made).
    :ivar event_stream: The job to upload a new event stream object.
    :ivar snapshot: The job to upload a new snapshot object and prune
        now-obsolete event stream objects.
    :ivar consider_snapshot: The job to inspect replica event stream and
        snapshot state and potentially schedule a new snapshot which will
        allow pruning of existing event streams.
    """
    startup = 1
    event_stream = 2
    snapshot = 3
    consider_snapshot = 4
@frozen
class Change:
    """
    Represent an item in a replication event stream.
    :ivar sequence: The sequence number of this event.
    :ivar statement: The SQL statement associated with this event.
    :ivar important: Whether this change was "important" or not.
    :ivar arguments: Any arguments for the SQL statement (stored as a
        tuple; the converter normalizes whatever sequence is passed in).
    """
    sequence: int
    statement: str
    arguments: Sequence[SQLType] = field(converter=tuple)
    important: bool
    @arguments.validator
    def _validate_arguments(self, attribute, value) -> None:
        """
        Require that the value has as elements only values that are legal
        SQL values.
        :note: attrs validators run after attrs converters, so ``value``
            here is already the converted tuple.
        """
        if all(isinstance(o, SQLRuntimeType) for o in value):
            return None
        raise ValueError("sequence contains values incompatible with SQL")
@frozen
class EventStream:
    """
    A series of database operations represented as `Change` instances.
    :ivar changes: The changes, in the order they were recorded.
    :ivar version: An identifier for the schema of the serialized form of this
        event stream. This will appear inside the serialized form. A change
        to the schema will be accompanied with an increment to this value.
    """
    changes: Sequence[Change] = field(converter=tuple)
    version: ClassVar[int] = 1
    def highest_sequence(self) -> Optional[int]:
        """
        :returns: the highest sequence number in this EventStream (or
            None if there are no events)
        """
        if not self.changes:
            return None
        return max(change.sequence for change in self.changes)
    def to_bytes(self) -> IO[bytes]:
        """
        :returns: a producer of bytes representing this EventStream,
            CBOR-encoded with the schema version embedded alongside the
            events.
        """
        return BytesIO(
            cbor2.dumps(
                {
                    "version": self.version,
                    "events": tuple(
                        (
                            event.sequence,
                            event.statement,
                            event.arguments,
                            event.important,
                        )
                        for event in self.changes
                    ),
                }
            )
        )
    @classmethod
    def from_bytes(cls, stream: IO[bytes]) -> EventStream:
        """
        :returns EventStream: an instance of EventStream from the given
            bytes (which should have been produced by a prior call to
            ``to_bytes``)
        :raises ValueError: if the serialized version does not match
            this class's ``version``.
        """
        data = cbor2.load(stream)
        serial_version = data.get("version", None)
        if serial_version != cls.version:
            raise ValueError(
                f"Unknown serialized event stream version {serial_version}"
            )
        return cls(
            changes=[
                # List comprehension has incompatible type List[Change]; expected List[_T_co]
                # https://github.com/python-attrs/attrs/issues/519
                Change(*args)  # type: ignore
                for args in data["events"]
            ]
        )
class AlreadySettingUp(Exception):
    """
    Raised when replication setup is attempted while another setup attempt
    is already in progress (i.e. already holds the setup lock).
    """
class ReplicationAlreadySetup(Exception):
    """
    Raised when replication setup is attempted but replication has already
    been configured.
    """
async def fail_setup_replication():
    """
    A stand-in replication setup function that unconditionally raises.
    """
    raise Exception("Test not set up for replication")
async def setup_tahoe_lafs_replication(client: ITahoeClient) -> str:
    """
    Configure the ZKAPAuthorizer plugin that lives in the Tahoe-LAFS node with
    the given configuration to replicate its state onto Tahoe-LAFS storage
    servers using that Tahoe-LAFS node.
    :returns: the read-only capability of the newly created replica directory.
    :raises AlreadySettingUp: if another setup attempt holds the lock.
    :raises ReplicationAlreadySetup: if replica configuration already exists.
    """
    # Find the configuration path for this node's replica.
    config_path = client.get_private_path(REPLICA_RWCAP_BASENAME)
    # Take an advisory lock on the configuration path to avoid concurrency
    # shenanigans.
    config_lock = FilesystemLock(config_path.asTextMode().path + ".lock")
    if not config_lock.lock():
        raise AlreadySettingUp()
    try:
        # Check to see if there is already configuration.
        if config_path.exists():
            raise ReplicationAlreadySetup()
        # Create a directory to serve as the replica.
        rw_cap = await client.make_directory()
        # Store the resulting write-cap in the node's private directory.
        config_path.setContent(rw_cap.encode("ascii"))
    finally:
        # On success and failure, release the lock since we're done with the
        # file for now.
        config_lock.unlock()
    # Attenuate it to a read-cap
    rocap = attenuate_writecap(rw_cap)
    # Return the read-cap
    return rocap
def is_replication_setup(config: Config) -> bool:
    """
    :return: ``True`` if and only if replication has previously been setup for
        the Tahoe-LAFS node associated with the given configuration.
    """
    # Replication is set up exactly when the replica rwcap file exists.
    rwcap_path = FilePath(config.get_private_path(REPLICA_RWCAP_BASENAME))
    return rwcap_path.exists()
def get_replica_rwcap(config: Config) -> CapStr:
    """
    :return: a mutable directory capability for our replica.
    :raises: Exception if replication is not setup (the rwcap file is
        missing).
    """
    rwcap_path = FilePath(config.get_private_path(REPLICA_RWCAP_BASENAME))
    cap_bytes = rwcap_path.getContent()
    return cap_bytes.decode("ascii")
@define
class _Important:
    """
    A context manager that sets the ``_important`` flag on a
    _ReplicationCapableCursor for the duration of a ``with`` block and
    clears it on exit.
    """

    _replication_cursor: _ReplicationCapableCursor

    def __enter__(self) -> None:
        self._replication_cursor._important = True

    def __exit__(self, *exc_info) -> None:
        # Always clear the flag, even if the body raised; returning None
        # propagates any exception.
        self._replication_cursor._important = False
        return None
def with_replication(
connection: _SQLite3Connection, enable_replication: bool
) -> _ReplicationCapableConnection:
"""
Wrap the given connection in a layer which is capable of entering a
"replication mode". In replication mode, the wrapper stores all changes
made through the connection so that they are available to be replicated by
another component. In normal mode, changes are not stored.
:param connection: The SQLite3 connection to wrap.
:param enable_replication: If ``True`` then the wrapper is placed in
"replication mode" initially. Otherwise it | |
# src/FinFET.py
#BSD 3-Clause License
#
#Copyright (c) 2019, The Regents of the University of Minnesota
#
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#* Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author = <NAME>
"""
import math
import json
import argparse
import numpy as np
from thermalModel import thermalModel
#direction convention
#length is along x Dimesion 0
#width is along y Dimension 1
#height is along z Dimension 2
class FinFET:
    def __init__(self, TECH, MOS, n_gate, n_fin, f_model_param, f_tool_config):
        """Build a FinFET thermal model.

        TECH : str -- process technology; must be 'SOI' or 'Bulk'
            (asserted in initialize()).
        MOS : str -- device flavor, passed through to diffusion creation.
        n_gate : int -- number of gate fingers.
        n_fin : int -- number of fins.
        f_model_param : str -- path to the model-parameter JSON file.
        f_tool_config : str -- path to the tool-configuration JSON file.
        """
        self.TECH = TECH
        self.MOS = MOS
        self.n_gate = n_gate
        self.n_fin = n_fin
        # Load/quantize parameters and build the thermalModel container,
        # then construct the geometry.
        self.initialize(f_tool_config, f_model_param)
        self.create_model()
def quant(self, a, reso):
return math.ceil(a / reso) * reso
def load_json(self, json_file):
with open(json_file) as f:
json_data = json.load(f)
return json_data
    def initialize(self, file_tool_config, file_model_param):
        """Load the tool/model JSON files, quantize every dimension onto the
        mesh resolution, compute overall model extents, and construct the
        underlying thermalModel.

        file_tool_config : path to the tool-configuration JSON file.
        file_model_param : path to the model-parameter JSON file.
        """
        tool_config = self.load_json(file_tool_config)
        model_param = self.load_json(file_model_param)
        # properties
        l_chnl = model_param["dimensions"]["l_chnl"]
        t_gate = model_param["dimensions"]["t_gate"]
        # design dimensions (trailing numbers are example defaults)
        t_substrate = model_param["dimensions"][
            "t_substrate"]  # thickness of the modelled portion of the substrate
        t_box = model_param["dimensions"][
            "t_box"]  # 25 # thickness of box layer
        t_chnl = model_param["dimensions"][
            "t_chnl"]  # 6 # thickness of channel, source and drain diffusions
        t_gox = model_param["dimensions"][
            "t_gox"]  # 1 # thickness of gate oxide
        t_diff_ext = model_param["dimensions"][
            "t_diff_ext"]  # 20 # height of the diffusion extension above the diffusion
        t_cont = model_param["dimensions"][
            "t_cont"]  # 10 # thickness of contact bar
        # across all fins
        e_gate = model_param["dimensions"][
            "e_gate"]  # 10 # extension of gate out of diffusion
        l_gate_space = model_param["dimensions"][
            "l_gate_space"]  # 35 # length of source and drain diffusion
        l_diff_ext = model_param["dimensions"][
            "l_diff_ext"]  # 25 # length of the source and drain diffusion extension
        l_cont = model_param["dimensions"]["l_cont"]  # 10 # length of contact
        w_cont = model_param["dimensions"]["w_cont"]  # 10 # width of contact
        self.res = tool_config["resolution"]
        l_sp_diff_ext = tool_config[
            "l_sp_diff_ext"]  # 5 # spacing between gate and either edge
        sp_edge = tool_config["sp_edge"]  # 5 # spacing to the edges
        t_sp_edge = tool_config["t_sp_edge"]  # 20 # spacing to the edges
        l_g2sd_junc = model_param["dimensions"]["l_g2sd_junc"]
        l_sd_junc = model_param["dimensions"]["l_sd_junc"]
        w_fin = model_param["dimensions"]["w_fin"]
        w_fin_space = model_param["dimensions"]["w_fin_space"]
        self.t_sub2gnd = tool_config[
            "t_sub2gnd"]  # 475 # thickness of substrate to ground not represented in node
        self.t_cnt2gnd = tool_config[
            "t_cnt2gnd"]  # 1000 # distance from contact to ground
        # Mesh resolution per axis: x = length, y = width, z = height.
        self.resx = self.res[0]
        self.resy = self.res[1]
        self.resz = self.res[2]
        resx = self.res[0]
        resy = self.res[1]
        resz = self.res[2]
        # Quantize every physical dimension onto the mesh so geometry
        # boundaries land on grid lines.
        self.w_fin = self.quant(w_fin, resy)
        self.w_fin_space = self.quant(w_fin_space, resy)
        self.l_chnl = self.quant(l_chnl, resx)
        self.t_gate = self.quant(t_gate, resz)
        self.t_box = self.quant(t_box, resz)
        self.t_chnl = self.quant(t_chnl, resz)
        self.t_gox = self.quant(t_gox, resz)
        self.l_gox = self.quant(t_gox, resx)
        self.w_gox = self.quant(t_gox, resy)
        self.t_cont = self.quant(t_cont, resz)
        self.l_sp_edge = self.quant(sp_edge, resx)
        self.w_sp_edge = self.quant(sp_edge, resy)
        self.t_sp_edge = self.quant(t_sp_edge, resz)
        self.e_gate = self.quant(e_gate, resy)
        self.l_gate_space = self.quant(l_gate_space, resx)
        self.t_substrate = self.quant(t_substrate, resz)
        self.l_diff_ext = self.quant(l_diff_ext, resx)
        self.t_diff_ext = self.quant(t_diff_ext, resz)
        self.l_sp_diff_ext = self.quant(l_sp_diff_ext, resx)
        self.l_cont = self.quant(l_cont, resx)
        self.w_cont = self.quant(w_cont, resy)
        self.l_g2sd_junc = self.quant(l_g2sd_junc, resx)
        self.l_sd_junc = self.quant(l_sd_junc, resx)
        # t_sub2gnd and t_cnt2gnd do not need quantization as they are not
        # used in a mask.
        assert self.TECH == 'SOI' or self.TECH == 'Bulk', "Undefined TECH type"
        # Overall model extents assembled from the quantized pieces.
        self.length = 2*self.l_sp_edge + 2*(self.l_sd_junc + self.l_g2sd_junc) +\
                      (self.n_gate - 1)*(self.l_chnl+self.l_gate_space) + self.l_chnl
        self.width = 2*(self.w_sp_edge+ self.e_gate) +\
                     (self.n_fin -1)*(2*self.w_gox + self.w_fin + self.w_fin_space) +\
                     2*self.w_gox + self.w_fin
        self.height = self.t_substrate + self.t_box + self.t_chnl + self.t_cont +\
                      self.t_sp_edge + max(self.t_gox + self.t_gate, self.t_diff_ext)
        print("INFO: Model Dimensions LWH: %4.3f %4.3f %4.3f" %
              (self.length, self.width, self.height))
        print("INFO: Resolution : %4.3e %4.3e %4.3e" %
              (self.resx, self.resy, self.resz))
        self.device = thermalModel(length=self.length,
                                   width=self.width,
                                   height=self.height,
                                   resolution=self.res,
                                   n_fin=self.n_fin)
        self.device.set_device_parameters(channel_length=self.l_chnl,
                                          gate_thickness=self.t_gate,
                                          substrate2ground=self.t_sub2gnd,
                                          contact2ground=self.t_cnt2gnd,
                                          gate_oxide_thickness=self.t_gox)
        self.device.set_conductivity_table(file_model_param)
        print("INFO: Initialization complete")
    def create_substrate(self):
        """Build the substrate and BOX layer of the stack.

        Returns the z coordinate of the top of the BOX layer, i.e. the
        origin for the fin/channel layer above.
        """
        self.device.create_substrate(thickness=self.t_substrate)
        or_z = self.t_substrate
        if self.TECH == 'SOI':
            # SOI: a single uniform buried-oxide slab over the substrate.
            origin = (0, 0, or_z)
            size = (self.length, self.width, self.t_box)
            self.device.create_t_box(origin, size)
        elif self.TECH == 'Bulk':
            # Bulk: diffusion (fin) strips alternate with oxide strips along
            # y within the BOX-layer thickness; start with the edge strip.
            origin = (0, 0, or_z)
            sz_y = self.w_sp_edge + self.e_gate + self.w_gox
            size = (self.length, sz_y, self.t_box)
            self.device.create_t_box(origin, size)
            for f in range(self.n_fin):
                # create the fin (diffusion strip) for fin index f
                or_y = self.w_sp_edge + self.e_gate + self.w_gox +\
                       f*(2*self.w_gox + self.w_fin + self.w_fin_space)
                sz_y = self.w_fin
                origin = (0, or_y, or_z)
                size = (self.length, sz_y, self.t_box)
                self.device.create_diffusion(origin, size, self.MOS, finFET=1)
                # create the oxide strip after the fin; the last strip runs
                # to the model edge
                or_y = or_y + sz_y
                origin = (0, or_y, or_z)
                if f == self.n_fin - 1:
                    sz_y = self.width - or_y
                else:
                    sz_y = 2 * self.w_gox + self.w_fin_space
                size = (self.length, sz_y, self.t_box)
                self.device.create_t_box(origin, size)
        or_z = or_z + self.t_box
        return or_z
    def create_fins(self, or_x_in, or_z_in):
        """Build the fin region: per-fin source diffusions, then for every
        gate finger a surround gate / gate oxide / channel / drain stack
        for each fin.

        Args:
            or_x_in: x origin where the fin region starts.
            or_z_in: z origin (top of the layer below).

        Returns:
            (end_x, end_z): coordinates just past the built region.
        """
        or_x = or_x_in
        or_z = or_z_in
        sz_z = self.t_chnl
        # Source-side diffusion stub (gate-to-S/D junction) of every fin.
        for f in range(self.n_fin):
            sz_x = self.l_g2sd_junc
            # y position follows the fin pitch: fin + two oxide walls + space.
            or_y = self.w_sp_edge + self.e_gate + self.w_gox +\
                   f*(2*self.w_gox + self.w_fin + self.w_fin_space)
            sz_y = self.w_fin
            origin = (or_x, or_y, or_z)
            size = (sz_x, sz_y, sz_z)
            self.device.create_diffusion(origin, size, self.MOS, finFET=1)
        # One gate finger per iteration; within it, sweep across all fins
        # in y.  NOTE: or_y is accumulated across the inner loop, so the
        # statement order below is significant.
        for n in range(self.n_gate):
            sz_x = self.l_chnl
            or_x = or_x_in + self.l_g2sd_junc + n * (self.l_chnl +
                                                     self.l_gate_space)
            or_x_gate = or_x
            or_y = self.w_sp_edge
            # Gate edge strip before the first fin (extends t_gox above
            # the channel so the gate wraps over the fin).
            origin = (or_x, or_y, or_z)
            sz_y = self.e_gate
            size = (sz_x, sz_y, sz_z + self.t_gox)
            cond = self.device.cond['gate']
            self.device.create_box(origin, size, cond)
            for f in range(self.n_fin):
                # Gate-oxide wall on the near side of the fin.
                or_y = or_y + sz_y
                origin = (or_x, or_y, or_z)
                sz_y = self.w_gox
                size = (sz_x, sz_y, sz_z)
                cond = self.device.cond['SiO2']
                self.device.create_box(origin, size, cond)
                # Channel segment of this fin under the gate finger.
                or_x = or_x_gate
                or_y = or_y + sz_y
                sz_y = self.w_fin
                origin = (or_x, or_y, or_z)
                self.device.create_channel(origin=origin,
                                           channel_width=sz_y,
                                           channel_depth=sz_z,
                                           d_type=self.MOS)
                # Drain-side diffusion: junction length after the last
                # gate finger, otherwise the gate-to-gate spacing.
                or_x = or_x_gate + self.l_chnl
                if n == self.n_gate - 1:
                    sz_x = self.l_g2sd_junc
                else:
                    sz_x = self.l_gate_space
                origin = (or_x, or_y, or_z)
                size = (sz_x, sz_y, sz_z)
                self.device.create_diffusion(origin, size, self.MOS, finFET=1)
                # Gate-oxide wall on the far side of the fin.
                or_x = or_x_gate
                or_y = or_y + sz_y
                origin = (or_x, or_y, or_z)
                sz_x = self.l_chnl
                sz_y = self.w_gox
                size = (sz_x, sz_y, sz_z)
                cond = self.device.cond['SiO2']
                self.device.create_box(origin, size, cond)
                # Gate material filling the gap to the next fin (edge
                # strip width after the last fin).
                or_x = or_x_gate
                or_y = or_y + sz_y
                origin = (or_x, or_y, or_z)
                sz_x = self.l_chnl
                if f == self.n_fin - 1:
                    sz_y = self.e_gate
                else:
                    sz_y = self.w_fin_space
                size = (sz_x, sz_y, sz_z + self.t_gox)
                cond = self.device.cond['gate']
                self.device.create_box(origin, size, cond)
        # End of region: past the last gate finger plus drain junction.
        end_x = or_x_gate + self.l_chnl + self.l_g2sd_junc
        end_z = or_z + sz_z
        return end_x, end_z
def create_SD_junction(self, or_x, or_z):
#or_x input
sz_z = self.t_chnl
or_y = self.w_sp_edge + self.e_gate
sz_y = (self.n_fin -1)*(2*self.w_gox + self.w_fin + self.w_fin_space)+\
2*self.w_gox + self.w_fin
sz_x = self.l_sd_junc
origin = (or_x, or_y, or_z)
size = (sz_x, sz_y, sz_z)
self.device.create_diffusion(origin, size, self.MOS)
return or_x + sz_x, or_z + sz_z
def create_gate_oxide(self, or_x, or_z):
#gate oxide
or_x_in = or_x
or_y = self.w_sp_edge + self.e_gate
sz_y = self.w_fin + 2 * self.w_gox
for n in range(self.n_gate):
or_x = or_x_in +self. l_sd_junc + self.l_g2sd_junc +\
n*(self.l_chnl+self.l_gate_space)
origin = (or_x, or_y, or_z)
self.device.create_gate_oxide(origin=origin, channel_width=sz_y)
end_x = or_x + self.l_chnl
end_z = or_z + self.t_gox
return | |
"""
Script to ease construction of CSDGM2-style metadata for a GeMS-style geodatabase.
To use,
Run ValidateDatabase to make sure that the database is complete and there are
no missing DMU, Glossary, or DataSources entries
In ArcCatalog, go to Customize>Options>Metadata and set Metadata Style to
"FGDC CSDGM Metadata". OK and exit.
In ArcCatalog, use the ArcGIS metadata editor to complete the record for the
GeologicMap feature dataset. Save. NOTE THAT whatever errors or omissions you create
in this master metadata record will be faithfully propagated to metadata
records for all parts of the geodatabase!
Run script GeMS_MetadataCSDGM2_Arc10.1.py. This script will:
Export the GeologicMap metadata record in CSDGM2 format
Polish this metadata slightly for use as a master record
For the geodatabase as a whole and for each entity (table, feature dataset,
feature class) in the geodatabase:
Copies the master record.
		Adds supplemental information (ArcGIS reports this in Resource:Details)
		about the GeMS standard and contents of the geodatabase.
Adds a description of the entity taken from the GeMS documentation.
Adds entity-attribute information taken from the GeMS documentation
and the DMU, Glossary, and DataSources tables of the geodatabase.
Writes this XML to a file in the directory that contains the geodatabase.
Imports this XML into the geodatabase as metadata for the appropriate entity.
Look at file <geodatabasename>-metadataLog.txt to see what parts of which metadata
records need to be completed by hand. This will occur wherever you extend the
database schema beyond the schema outlined in the GeMS documentation.
***Note that this script provides for a file that automates description of your
extensions to the GeMS schema so that you need not edit metadata by hand--see
file my_GeMSDefinitions.py in the GeMS Scripts directory.***
Inspect metadata records in ArcCatalog (the Description tab) to see that they are
complete.
Open saved XML files in browser to see that they are appropriate. Scan for duplicate
entries.
You want ISO metadata? Change your Metadata Style and fix records using the
ArcCatalog metadata editor. Export as ISO of your flavor, insofar as ArcCatalog allows.
Let us know how this works.
Usage: prompt>GeMS_MetadataCSDGM2_Arc10.1.py <geodatabase>
<NAME> and <NAME>, US Geological Survey
<EMAIL>, <EMAIL>
"""
# 17 March 2017 Changed NCGMP09 to GeMS, etc.
# 18 April 2017 Added utility functions, local definition-extension file
# 12 August 2017 Modified to recognize GeoMaterial, GeoMaterialConfidence, and GeoMaterialDict.
# Added number of rows in each table to gdb description in SupplementalInfo
#Metadata conversion (ImportMetadata_conversion) is not supported in Pro as of 180926, but is on the roadmap.
import arcpy, sys, os.path, copy, imp, glob
from GeMS_Definition import enumeratedValueDomainFieldList, rangeDomainDict, unrepresentableDomainDict, attribDict, entityDict, GeoMatConfDict
from GeMS_utilityFunctions import *
from xml.dom.minidom import *
versionString = 'GeMS_MetadataCSDGM2_Arc10.py, version of 10 December 2017'
# ArcGIS-to-FGDC metadata translator shipped with ArcGIS Desktop; used when
# exporting the master metadata record.
translator = arcpy.GetInstallInfo("desktop")["InstallDir"]+'Metadata/Translator/ARCGIS2FGDC.xml'
debug = False
# Short schema name and its full citation; used as Attribute_Definition_Source
# and in the boilerplate description strings below.
ncgmp = 'GeMS'
ncgmpFullRef = '"GeMS (Geologic Map Schema)--a standard format for digital publication of geologic maps, version 2.0", available at http://ngmdb.usgs.gov/Info/standards/GeMS/'
eaoverviewCitation = 'Detailed descriptions of entities, attributes, and attribute values are given in metadata for constituent elements of this composite dataset. See also '+ncgmpFullRef+'.'
# Boilerplate sentences assembled into the Supplemental Information section:
# 0a for the geodatabase as a whole, 0b for its constituent elements.
gdbDesc0a = ' is a composite geodataset that conforms to '+ncgmpFullRef+'. '
gdbDesc0b = ' is part of a composite geodataset that conforms to '+ncgmpFullRef+'. '
gdbDesc2 = 'Metadata records associated with each element within the geodataset contain more detailed descriptions of their purposes, constituent entities, and attributes. '
gdbDesc3 = ('Two shapefile versions of the dataset are also available. The OPEN shapefile version consists '+
            'of shapefiles, DBF files, and delimited text files and retains all information in the native '+
            'geodatabase, but some programming will likely be necessary to assemble these components into '+
            'usable formats. The SIMPLE shapefile version consists only of shapefiles and is easily used, but '+
            'lacks some information present in the native geodatabase.')
def __appendOrReplace(rootNode, newNode, nodeTag):
    """Insert newNode under rootNode: replace the first existing descendant
    with tag nodeTag, or append newNode if no such element exists."""
    existing = rootNode.getElementsByTagName(nodeTag)
    if existing:
        rootNode.replaceChild(newNode, existing[0])
    else:
        rootNode.appendChild(newNode)
def __fieldNameList(fc):
    """Return the field names of feature class fc, excluding geometry and
    bookkeeping fields."""
    skip = ('OBJECTID', 'SHAPE', 'Shape', 'Shape_Length', 'Shape_Area')
    return [fld.name for fld in arcpy.ListFields(fc) if fld.name not in skip]
def __findInlineRef(sourceID):
    """Return the Source string for sourceID from the DataSources table,
    or '' if no matching row is found."""
    query = '"DataSources_ID" = \'' + sourceID + '\''
    row = next(arcpy.SearchCursor(dataSources, query))
    if row is None:
        return ""
    # The Source field holds the inline citation text.
    return row.Source
def __newElement(dom, tag, text):
    """Create and return a <tag> element containing a single text node."""
    node = dom.createElement(tag)
    node.appendChild(dom.createTextNode(text))
    return node
def __updateAttrDef(fld, dom):
    """Attach Attribute_Definition (attrdef) and Attribute_Definition_Source
    (attrdefs) nodes, taken from the GeMS attribute dictionary, to every
    attribute in dom whose label (attrlabl) matches fld.

    Returns the modified dom.
    """
    ##element tag names are
    ##  attr         = Attribute
    ##  attrlabl     = Attribute_Label
    ##  attrdef      = Attribute_Definition
    ##  attrdefs     = Attribute_Definition_Source
    # Any *_ID field uses the generic identifier definition.
    defKey = '_ID' if fld.find('_ID') > -1 else fld
    for labelNode in dom.getElementsByTagName('attrlabl'):
        if labelNode.firstChild.data == fld:
            attr = labelNode.parentNode
            attrdef = __newElement(dom, 'attrdef', attribDict[defKey])
            __appendOrReplace(attr, attrdef, 'attrdef')
            attrdefs = __newElement(dom, 'attrdefs', ncgmp)
            __appendOrReplace(attr, attrdefs, 'attrdefs')
    return dom
def __updateEdom(fld, defs, dom):
    """Attach an Attribute_Domain_Values (attrdomv) node listing the
    enumerated domain defs -- a mapping of value -> [definition,
    definition_source] -- to every attribute in dom whose label matches
    fld.  Returns the modified dom.
    """
    ##element tag names are
    ##  attr      = Attribute
    ##  attrdomv  = Attribute_Domain_Values
    ##  edom      = Enumerated_Domain
    ##  edomv     = Enumerated_Domain_Value
    ##  edomvd    = Enumerated_Domain_Definition
    ##  edomvds   = Enumerated_Domain_Value_Definition_Source
    for labelNode in dom.getElementsByTagName('attrlabl'):
        if labelNode.firstChild.data != fld:
            continue
        attr = labelNode.parentNode
        attrdomv = dom.createElement('attrdomv')
        for value, meaning in defs.items():
            edom = dom.createElement('edom')
            edom.appendChild(__newElement(dom, 'edomv', value))
            edom.appendChild(__newElement(dom, 'edomvd', meaning[0]))
            # Definition source is optional; omit the node when empty.
            if len(meaning[1]) > 0:
                edom.appendChild(__newElement(dom, 'edomvds', meaning[1]))
            attrdomv.appendChild(edom)
        __appendOrReplace(attr, attrdomv, 'attrdomv')
    return dom
def __updateEntityAttributes(fc, fldList, dom, logFile):
"""For each attribute (field) in fldList,
adds attribute definition and definition source,
classifies as range domain, unrepresentable-value domain or enumerated-value domain, and
for range domains, adds rangemin, rangemax, and units;
for unrepresentable value domains, adds unrepresentable value statement;
for enumerated value domains:
1) Finds all controlled-vocabulary fields in the table sent to it
2) Builds a set of unique terms in each field, ie, the domain
3) Matches each domain value to an entry in the glossary
4) Builds a dictionary of term:(definition, source) items
5) Takes the dictionary items and put them into the metadata
document as Attribute_Domain_Values
Field MapUnit in table DescriptionOfMapUnits is treated as a special case.
"""
cantfindTerm = []
cantfindValue = []
for fld in fldList:
addMsgAndPrint( ' Field: '+ fld)
# if is _ID field or if field definition is available, update definition
if fld.find('_ID') > -1 or fld in attribDict:
dom = __updateAttrDef(fld,dom)
else:
cantfindTerm.append(fld)
#if this is an _ID field
if fld.find('_ID') > -1:
dom = __updateUdom(fld,dom,unrepresentableDomainDict['_ID'])
#if this is another unrepresentable-domain field
if fld in unrepresentableDomainDict:
dom = __updateUdom(fld,dom,unrepresentableDomainDict[fld])
#if this is a defined range-domain field
elif fld in rangeDomainDict:
dom = __updateRdom(fld,dom)
#if this is MapUnit in DMU
elif fld == 'MapUnit' and fc == 'DescriptionOfMapUnits':
dom = __updateUdom(fld,dom,unrepresentableDomainDict['default'])
#if this is a defined Enumerated Value Domain field
elif fld in enumeratedValueDomainFieldList:
valList = []
#create a search cursor on the field
rows = arcpy.SearchCursor(fc,'','', fld)
row = next(rows)
#collect all values/terms in that field
while row:
if not row.getValue(fld) is None:
valList.append(row.getValue(fld))
row = next(rows)
#uniquify the list by converting it to a set object
valList = set(valList)
#create an empty dictionary object to hold the matches between the unique terms
#and their definitions (grabbed from the glossary)
defs = {}
#for each unique term, try to create a search cursor of just one record where the term
#matchs a Term field value from the glossary
if fld == 'MapUnit' and fc != 'DescriptionOfMapUnits':
for t in valList:
query = '"MapUnit" = \'' + t + '\''
rows = arcpy.SearchCursor(DMU, query)
row = next(rows)
#if the searchcursor contains a row
if row:
#create an entry in the dictionary of term:[definition, source] key:value pairs
#this is how we will enumerate through the enumerated_domain section
defs[t] = []
if row.FullName != None:
defs[t].append(row.FullName.encode('utf_8'))
defs[t].append('this report, table DescriptionOfMapUnits')
else:
addMsgAndPrint('MapUnit = '+t+', FullName not defined')
defs[t].append(row.Name.encode('utf_8'))
defs[t].append('this report, table DescriptionOfMapUnits')
else:
if not t in ('',' '): cantfindValue.append([fld,t])
elif fld == 'GeoMaterialConfidence' and fc == 'DescriptionOfMapUnits':
if debug:
addMsgAndPrint('DMU / GeoMaterialsConfidence')
defs = GeoMatConfDict
elif fld == 'GeoMaterial' and fc == 'DescriptionOfMapUnits':
if debug:
addMsgAndPrint('DMU / GeoMaterials!')
for t in valList:
query = '"GeoMaterial" = \'' + t + '\''
if debug:
addMsgAndPrint('query='+query)
rows | |
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: metric_type (key)
Metric type
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: source2 (key)
Source of path 2
**type**\: union of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: destination2 (key)
Destination of path 2
**type**\: union of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: disjoint_level (key)
Disjointness level
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: disjoint_strict (key)
Strict disjointness required
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: shortest_path (key)
Whether path 1 or 2 should be shortest
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: headends_swapped
Headends swapped
**type**\: :py:class:`PceHeadendSwap <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceHeadendSwap>`
.. attribute:: cspf_result
CSPF Result
**type**\: :py:class:`PceCspfRc <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceCspfRc>`
.. attribute:: output_path
Output PCE paths
**type**\: list of :py:class:`OutputPath <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.Cspf.CspfPaths.CspfPath.OutputPath>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
                def __init__(self):
                    """Initialize YANG binding metadata for one cspf-path list entry."""
                    super(Pce.Cspf.CspfPaths.CspfPath, self).__init__()

                    self.yang_name = "cspf-path"
                    self.yang_parent_name = "cspf-paths"
                    self.is_top_level_class = False
                    self.has_list_ancestor = False
                    # All nine leafs below together form the YANG list key.
                    self.ylist_key_names = ['af','source1','destination1','metric_type','source2','destination2','disjoint_level','disjoint_strict','shortest_path']
                    self._child_container_classes = OrderedDict([])
                    self._child_list_classes = OrderedDict([("output-path", ("output_path", Pce.Cspf.CspfPaths.CspfPath.OutputPath))])
                    # Map of python attribute name -> (YANG type, YANG leaf name).
                    self._leafs = OrderedDict([
                        ('af', YLeaf(YType.int32, 'af')),
                        ('source1', YLeaf(YType.str, 'source1')),
                        ('destination1', YLeaf(YType.str, 'destination1')),
                        ('metric_type', YLeaf(YType.int32, 'metric-type')),
                        ('source2', YLeaf(YType.str, 'source2')),
                        ('destination2', YLeaf(YType.str, 'destination2')),
                        ('disjoint_level', YLeaf(YType.int32, 'disjoint-level')),
                        ('disjoint_strict', YLeaf(YType.int32, 'disjoint-strict')),
                        ('shortest_path', YLeaf(YType.int32, 'shortest-path')),
                        ('headends_swapped', YLeaf(YType.enumeration, 'headends-swapped')),
                        ('cspf_result', YLeaf(YType.enumeration, 'cspf-result')),
                    ])
                    self.af = None
                    self.source1 = None
                    self.destination1 = None
                    self.metric_type = None
                    self.source2 = None
                    self.destination2 = None
                    self.disjoint_level = None
                    self.disjoint_strict = None
                    self.shortest_path = None
                    self.headends_swapped = None
                    self.cspf_result = None

                    self.output_path = YList(self)
                    # The segment path embeds every key value as a list-entry predicate.
                    self._segment_path = lambda: "cspf-path" + "[af='" + str(self.af) + "']" + "[source1='" + str(self.source1) + "']" + "[destination1='" + str(self.destination1) + "']" + "[metric-type='" + str(self.metric_type) + "']" + "[source2='" + str(self.source2) + "']" + "[destination2='" + str(self.destination2) + "']" + "[disjoint-level='" + str(self.disjoint_level) + "']" + "[disjoint-strict='" + str(self.disjoint_strict) + "']" + "[shortest-path='" + str(self.shortest_path) + "']"
                    self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce/cspf/cspf-paths/%s" % self._segment_path()
                def __setattr__(self, name, value):
                    # Route attribute writes through ydk so leaf changes are tracked/validated.
                    self._perform_setattr(Pce.Cspf.CspfPaths.CspfPath, ['af', 'source1', 'destination1', 'metric_type', 'source2', 'destination2', 'disjoint_level', 'disjoint_strict', 'shortest_path', 'headends_swapped', 'cspf_result'], name, value)
                class OutputPath(Entity):
                    """
                    Output PCE paths

                    .. attribute:: source
                        Source of path
                        **type**\: :py:class:`Source <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.Cspf.CspfPaths.CspfPath.OutputPath.Source>`
                    .. attribute:: destination
                        Destination of path
                        **type**\: :py:class:`Destination <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.Cspf.CspfPaths.CspfPath.OutputPath.Destination>`
                    .. attribute:: cost
                        Cost
                        **type**\: int
                        **range:** 0..18446744073709551615
                    .. attribute:: hops
                        Hop addresses
                        **type**\: list of :py:class:`Hops <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.Cspf.CspfPaths.CspfPath.OutputPath.Hops>`
                    """

                    _prefix = 'infra-xtc-oper'
                    _revision = '2017-08-24'

                    def __init__(self):
                        """Initialize YANG binding metadata for one output-path entry."""
                        super(Pce.Cspf.CspfPaths.CspfPath.OutputPath, self).__init__()

                        self.yang_name = "output-path"
                        self.yang_parent_name = "cspf-path"
                        self.is_top_level_class = False
                        self.has_list_ancestor = True
                        self.ylist_key_names = []
                        self._child_container_classes = OrderedDict([("source", ("source", Pce.Cspf.CspfPaths.CspfPath.OutputPath.Source)), ("destination", ("destination", Pce.Cspf.CspfPaths.CspfPath.OutputPath.Destination))])
                        self._child_list_classes = OrderedDict([("hops", ("hops", Pce.Cspf.CspfPaths.CspfPath.OutputPath.Hops))])
                        self._leafs = OrderedDict([
                            ('cost', YLeaf(YType.uint64, 'cost')),
                        ])
                        self.cost = None

                        # Child containers must be constructed, parented, and
                        # registered in the name maps before use.
                        self.source = Pce.Cspf.CspfPaths.CspfPath.OutputPath.Source()
                        self.source.parent = self
                        self._children_name_map["source"] = "source"
                        self._children_yang_names.add("source")

                        self.destination = Pce.Cspf.CspfPaths.CspfPath.OutputPath.Destination()
                        self.destination.parent = self
                        self._children_name_map["destination"] = "destination"
                        self._children_yang_names.add("destination")

                        self.hops = YList(self)
                        self._segment_path = lambda: "output-path"

                    def __setattr__(self, name, value):
                        # Route attribute writes through ydk so leaf changes are tracked.
                        self._perform_setattr(Pce.Cspf.CspfPaths.CspfPath.OutputPath, ['cost'], name, value)


                    class Source(Entity):
                        """
                        Source of path

                        .. attribute:: af_name
                            AFName
                            **type**\: :py:class:`PceAfId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceAfId>`
                        .. attribute:: ipv4
                            IPv4 address type
                            **type**\: str
                            **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
                        .. attribute:: ipv6
                            IPv6 address type
                            **type**\: str
                            **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
                        """

                        _prefix = 'infra-xtc-oper'
                        _revision = '2017-08-24'

                        def __init__(self):
                            super(Pce.Cspf.CspfPaths.CspfPath.OutputPath.Source, self).__init__()

                            self.yang_name = "source"
                            self.yang_parent_name = "output-path"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            self._child_container_classes = OrderedDict([])
                            self._child_list_classes = OrderedDict([])
                            self._leafs = OrderedDict([
                                ('af_name', YLeaf(YType.enumeration, 'af-name')),
                                ('ipv4', YLeaf(YType.str, 'ipv4')),
                                ('ipv6', YLeaf(YType.str, 'ipv6')),
                            ])
                            self.af_name = None
                            self.ipv4 = None
                            self.ipv6 = None
                            self._segment_path = lambda: "source"

                        def __setattr__(self, name, value):
                            self._perform_setattr(Pce.Cspf.CspfPaths.CspfPath.OutputPath.Source, ['af_name', 'ipv4', 'ipv6'], name, value)


                    class Destination(Entity):
                        """
                        Destination of path

                        .. attribute:: af_name
                            AFName
                            **type**\: :py:class:`PceAfId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceAfId>`
                        .. attribute:: ipv4
                            IPv4 address type
                            **type**\: str
                            **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
                        .. attribute:: ipv6
                            IPv6 address type
                            **type**\: str
                            **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
                        """

                        _prefix = 'infra-xtc-oper'
                        _revision = '2017-08-24'

                        def __init__(self):
                            super(Pce.Cspf.CspfPaths.CspfPath.OutputPath.Destination, self).__init__()

                            self.yang_name = "destination"
                            self.yang_parent_name = "output-path"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            self._child_container_classes = OrderedDict([])
                            self._child_list_classes = OrderedDict([])
                            self._leafs = OrderedDict([
                                ('af_name', YLeaf(YType.enumeration, 'af-name')),
                                ('ipv4', YLeaf(YType.str, 'ipv4')),
                                ('ipv6', YLeaf(YType.str, 'ipv6')),
                            ])
                            self.af_name = None
                            self.ipv4 = None
                            self.ipv6 = None
                            self._segment_path = lambda: "destination"

                        def __setattr__(self, name, value):
                            self._perform_setattr(Pce.Cspf.CspfPaths.CspfPath.OutputPath.Destination, ['af_name', 'ipv4', 'ipv6'], name, value)


                    class Hops(Entity):
                        """
                        Hop addresses

                        .. attribute:: address_family
                            Address Family
                            **type**\: int
                            **range:** 0..255
                        .. attribute:: ipv4_prefix
                            IPv4 prefix
                            **type**\: str
                            **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
                        .. attribute:: ipv6_prefix
                            IPv6 prefix
                            **type**\: str
                            **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
                        """

                        _prefix = 'infra-xtc-oper'
                        _revision = '2017-08-24'

                        def __init__(self):
                            super(Pce.Cspf.CspfPaths.CspfPath.OutputPath.Hops, self).__init__()

                            self.yang_name = "hops"
                            self.yang_parent_name = "output-path"
                            self.is_top_level_class = False
                            self.has_list_ancestor = True
                            self.ylist_key_names = []
                            self._child_container_classes = OrderedDict([])
                            self._child_list_classes = OrderedDict([])
                            self._leafs = OrderedDict([
                                ('address_family', YLeaf(YType.uint8, 'address-family')),
                                ('ipv4_prefix', YLeaf(YType.str, 'ipv4-prefix')),
                                ('ipv6_prefix', YLeaf(YType.str, 'ipv6-prefix')),
                            ])
                            self.address_family = None
                            self.ipv4_prefix = None
                            self.ipv6_prefix = None
                            self._segment_path = lambda: "hops"

                        def __setattr__(self, name, value):
                            self._perform_setattr(Pce.Cspf.CspfPaths.CspfPath.OutputPath.Hops, ['address_family', 'ipv4_prefix', 'ipv6_prefix'], name, value)
class TopologySummary(Entity):
"""
Node summary database in XTC
.. attribute:: stats_topology_update
Statistics on topology update
**type**\: :py:class:`StatsTopologyUpdate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Pce.TopologySummary.StatsTopologyUpdate>`
.. attribute:: nodes
Number of PCE nodes
**type**\: int
**range:** 0..4294967295
.. attribute:: lookup_nodes
Number of lookup nodes
**type**\: int
**range:** 0..4294967295
.. attribute:: prefixes
Number of prefixes
**type**\: int
**range:** 0..4294967295
.. attribute:: prefix_sids
Number of total prefix SIDs
**type**\: int
**range:** 0..4294967295
.. attribute:: regular_prefix_sids
        Number of regular prefix SIDs
**type**\: int
**range:** 0..4294967295
.. attribute:: strict_prefix_sids
Number of strict prefix SIDs
**type**\: int
**range:** 0..4294967295
.. attribute:: links
Number of links
**type**\: int
**range:** 0..4294967295
.. attribute:: epe_links
Number of EPE links
**type**\: int
**range:** 0..4294967295
.. attribute:: adjacency_sids
Number of total adjacency SIDs
**type**\: int
**range:** 0..4294967295
.. attribute:: epesids
Number of total EPE SIDs
**type**\: int
**range:** 0..4294967295
.. attribute:: protected_adjacency_sids
Number of protected adjacency SIDs
**type**\: int
**range:** 0..4294967295
.. attribute:: un_protected_adjacency_sids
Number of unprotected adjacency SIDs
**type**\: int
**range:** 0..4294967295
.. attribute:: topology_consistent
True if topology is consistent
**type**\: bool
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
        def __init__(self):
            """Initialize YANG binding metadata for the topology-summary container."""
            super(Pce.TopologySummary, self).__init__()

            self.yang_name = "topology-summary"
            self.yang_parent_name = "pce"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = []
            self._child_container_classes = OrderedDict([("stats-topology-update", ("stats_topology_update", Pce.TopologySummary.StatsTopologyUpdate))])
            self._child_list_classes = OrderedDict([])
            # Map of python attribute name -> (YANG type, YANG leaf name).
            self._leafs = OrderedDict([
                ('nodes', YLeaf(YType.uint32, 'nodes')),
                ('lookup_nodes', YLeaf(YType.uint32, 'lookup-nodes')),
                ('prefixes', YLeaf(YType.uint32, 'prefixes')),
                ('prefix_sids', YLeaf(YType.uint32, 'prefix-sids')),
                ('regular_prefix_sids', YLeaf(YType.uint32, 'regular-prefix-sids')),
                ('strict_prefix_sids', YLeaf(YType.uint32, 'strict-prefix-sids')),
                ('links', YLeaf(YType.uint32, 'links')),
                ('epe_links', YLeaf(YType.uint32, 'epe-links')),
                ('adjacency_sids', YLeaf(YType.uint32, 'adjacency-sids')),
                ('epesids', YLeaf(YType.uint32, 'epesids')),
                ('protected_adjacency_sids', YLeaf(YType.uint32, 'protected-adjacency-sids')),
                ('un_protected_adjacency_sids', YLeaf(YType.uint32, 'un-protected-adjacency-sids')),
                ('topology_consistent', YLeaf(YType.boolean, 'topology-consistent')),
            ])
            self.nodes = None
            self.lookup_nodes = None
            self.prefixes = None
            self.prefix_sids = None
            self.regular_prefix_sids = None
            self.strict_prefix_sids = None
            self.links = None
            self.epe_links = None
            self.adjacency_sids = None
            self.epesids = None
            self.protected_adjacency_sids = None
            self.un_protected_adjacency_sids = None
            self.topology_consistent = None

            # Child container must be constructed, parented, and registered
            # in the name maps before use.
            self.stats_topology_update = Pce.TopologySummary.StatsTopologyUpdate()
            self.stats_topology_update.parent = self
            self._children_name_map["stats_topology_update"] = "stats-topology-update"
            self._children_yang_names.add("stats-topology-update")
            self._segment_path = lambda: "topology-summary"
            self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce/%s" % self._segment_path()
        def __setattr__(self, name, value):
            # Route attribute writes through ydk so leaf changes are tracked/validated.
            self._perform_setattr(Pce.TopologySummary, ['nodes', 'lookup_nodes', 'prefixes', 'prefix_sids', 'regular_prefix_sids', 'strict_prefix_sids', 'links', 'epe_links', 'adjacency_sids', 'epesids', 'protected_adjacency_sids', 'un_protected_adjacency_sids', 'topology_consistent'], name, value)
class StatsTopologyUpdate(Entity):
"""
Statistics on topology update
.. attribute:: num_nodes_added
Number of nodes added
**type**\: int
**range:** 0..4294967295
.. attribute:: num_nodes_deleted
Number of nodes deleted
**type**\: int
**range:** 0..4294967295
.. attribute:: num_links_added
Number of links added
**type**\: int
**range:** 0..4294967295
.. attribute:: num_links_deleted
Number of links deleted
**type**\: int
**range:** 0..4294967295
.. attribute:: num_prefixes_added
Number of prefixes added
**type**\: int
**range:** 0..4294967295
.. attribute:: num_prefixes_deleted
Number of prefixes deleted
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def | |
K.cast(K.less(inputs, 0), 'float32') * (K.exp(inputs - 1) * K.maximum(K.cast_to_floatx(0.0), K.minimum(K.cast_to_floatx(1.0), (inputs + 1.0)/2.0)))
def get_config(self):
base_config = super(HardElish, self).get_config()
return dict(list(base_config.items())
    def compute_output_shape(self, input_shape):
        # Element-wise activation: output shape equals input shape.
        return input_shape
class BentID(Layer):
    '''
    Bent's Identity Activation Function.

    .. math::
        bentId(x) = x + \\frac{\\sqrt{x^{2}+1}-1}{2}

    Plot:
    .. figure:: _static/bent_id.png
        :align: center

    Shape:
        - Input: Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.
        - Output: Same shape as the input.

    Examples:
        >>> X_input = Input(input_shape)
        >>> X = BentID()(X_input)
    '''

    def __init__(self, **kwargs):
        super(BentID, self).__init__(**kwargs)
        # Element-wise activation: masks pass through unchanged.
        self.supports_masking = True

    def call(self, inputs):
        # bentId(x) = x + (sqrt(x^2 + 1) - 1) / 2
        return inputs + ((K.sqrt(K.pow(inputs,2)+1)-1)/2)

    def get_config(self):
        # No hyperparameters of its own; serialize only the base config.
        # Fix: the original return was missing its closing parenthesis
        # (SyntaxError).
        base_config = super(BentID, self).get_config()
        return dict(list(base_config.items()))

    def compute_output_shape(self, input_shape):
        # Element-wise activation: output shape equals input shape.
        return input_shape
class WeightedTanh(Layer):
    '''
    Weighted TanH Activation Function.

    Computes ``tanh(x * weight)`` element-wise, where ``weight`` is a
    fixed (non-trainable) scaling hyperparameter.

    Plot:
    .. figure:: _static/weighted_tanh.png
        :align: center

    Shape:
        - Input: Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.
        - Output: Same shape as the input.

    Arguments:
        - weight: hyperparameter (default=1.0)

    Examples:
        >>> X_input = Input(input_shape)
        >>> X = WeightedTanh(weight=1.0)(X_input)
    '''

    def __init__(self, weight=1.0, **kwargs):
        super(WeightedTanh, self).__init__(**kwargs)
        self.supports_masking = True
        self.weight = K.cast_to_floatx(weight)

    def call(self, inputs):
        scaled = inputs * self.weight
        return K.tanh(scaled)

    def get_config(self):
        # Merge the base config with this layer's hyperparameter.
        merged = dict(super(WeightedTanh, self).get_config())
        merged.update({'weight': float(self.weight)})
        return merged

    def compute_output_shape(self, input_shape):
        return input_shape
class SineReLU(Layer):
    '''
    Sine ReLU Activation Function.

    .. math::
        SineReLU(x, \\epsilon) = \\left\\{\\begin{matrix} x , x > 0 \\\\ \\epsilon * (sin(x)-cos(x)), x \\leq 0 \\end{matrix}\\right.

    Plot:
    .. figure:: _static/sine_relu.png
        :align: center

    Shape:
        - Input: Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.
        - Output: Same shape as the input.

    References:
        - See related Medium article:
        https://medium.com/@wilder.rodrigues/sinerelu-an-alternative-to-the-relu-activation-function-e46a6199997d

    Arguments:
        - epsilon: hyperparameter (default=0.01)

    Examples:
        >>> X_input = Input(input_shape)
        >>> X = SineReLU(epsilon=0.01)(X_input)
    '''

    def __init__(self, epsilon=0.01, **kwargs):
        super(SineReLU, self).__init__(**kwargs)
        self.supports_masking = True
        self.epsilon = K.cast_to_floatx(epsilon)

    def call(self, inputs):
        # Identity on the non-negative side; damped sin - cos on the
        # negative side, selected via 0/1 masks.
        pos_mask = K.cast(K.greater_equal(inputs, 0), 'float32')
        neg_mask = K.cast(K.less(inputs, 0), 'float32')
        return pos_mask * inputs + neg_mask * self.epsilon * (K.sin(inputs) - K.cos(inputs))

    def get_config(self):
        # Merge the base config with this layer's hyperparameter.
        merged = dict(super(SineReLU, self).get_config())
        merged.update({'epsilon': float(self.epsilon)})
        return merged

    def compute_output_shape(self, input_shape):
        return input_shape
class ISRLU(Layer):
    """Inverse Square Root Linear Unit (ISRLU) activation.

    .. math::
        ISRLU(x)=\\left\\{\\begin{matrix} x, x\\geq 0 \\\\ x * (\\frac{1}{\\sqrt{1 + \\alpha*x^2}}), x <0 \\end{matrix}\\right.

    Shape:
        - Input: arbitrary; use ``input_shape`` when this is the first
          layer of a model.
        - Output: same shape as the input.

    Arguments:
        - alpha: controls the saturation value for negative inputs (default=1.0)

    References:
        - ISRLU paper: https://arxiv.org/pdf/1710.09967.pdf

    Examples:
        >>> X_input = Input(input_shape)
        >>> X = ISRLU(alpha=1.0)(X_input)
    """

    def __init__(self, alpha=1.0, **kwargs):
        super(ISRLU, self).__init__(**kwargs)
        self.supports_masking = True
        self.alpha = K.cast_to_floatx(alpha)

    def call(self, inputs):
        # Negative inputs go through an ISRU layer (defined elsewhere in this
        # module); non-negative inputs pass through unchanged.
        pos_mask = K.cast(K.greater_equal(inputs, 0), 'float32')
        neg_mask = K.cast(K.less(inputs, 0), 'float32')
        isru_branch = ISRU(alpha=self.alpha)(inputs)
        return pos_mask * inputs + neg_mask * isru_branch

    def get_config(self):
        base_config = super(ISRLU, self).get_config()
        return {**base_config, 'alpha': float(self.alpha)}

    def compute_output_shape(self, input_shape):
        # Elementwise op: shape is unchanged.
        return input_shape
class SoftClipping(Layer):
    """Soft Clipping activation.

    .. math::
        SC(x) = 1 / \\alpha * log(\\frac{1 + e^{\\alpha * x}}{1 + e^{\\alpha * (x-1)}})

    Shape:
        - Input: arbitrary; use ``input_shape`` when this is the first
          layer of a model.
        - Output: same shape as the input.

    Arguments:
        - alpha: controls how close to linear the central region is and how
          sharply it turns toward the asymptotic values

    References:
        - Soft Clipping paper: https://arxiv.org/pdf/1810.11509.pdf

    Examples:
        >>> X_input = Input(input_shape)
        >>> X = SoftClipping(alpha=0.5)(X_input)
    """

    def __init__(self, alpha=0.5, **kwargs):
        super(SoftClipping, self).__init__(**kwargs)
        self.supports_masking = True
        self.alpha = K.cast_to_floatx(alpha)

    def call(self, inputs):
        numerator = 1 + K.exp(self.alpha * inputs)
        denominator = 1 + K.exp(self.alpha * (inputs - 1))
        return (1 / self.alpha) * K.log(numerator / denominator)

    def get_config(self):
        base_config = super(SoftClipping, self).get_config()
        return {**base_config, 'alpha': float(self.alpha)}

    def compute_output_shape(self, input_shape):
        # Elementwise op: shape is unchanged.
        return input_shape
class Aria2(Layer):
    """Aria-2 activation.

    .. math::
        Aria2(x, \\alpha, \\beta) = (1+e^{-\\beta*x})^{-\\alpha}

    Shape:
        - Input: arbitrary; use ``input_shape`` when this is the first
          layer of a model.
        - Output: same shape as the input.

    Arguments:
        - alpha: reduces curvature in the 3rd quadrant and increases it in the
          1st while lowering the activation value (default=1.0)
        - beta: the exponential growth rate (default=0.5)

    References:
        - Aria paper: https://arxiv.org/abs/1805.08878

    Examples:
        >>> X_input = Input(input_shape)
        >>> X = Aria2(alpha=1.0, beta=0.5)(X_input)
    """

    def __init__(self, alpha=1.0, beta=0.5, **kwargs):
        super(Aria2, self).__init__(**kwargs)
        self.supports_masking = True
        self.alpha = K.cast_to_floatx(alpha)
        self.beta = K.cast_to_floatx(beta)

    def call(self, inputs):
        # Generalized-sigmoid form: (1 + exp(-beta * x)) ** (-alpha).
        base = 1 + K.exp(-self.beta * inputs)
        return K.pow(base, -self.alpha)

    def get_config(self):
        base_config = super(Aria2, self).get_config()
        return {**base_config, 'alpha': float(self.alpha), 'beta': float(self.beta)}

    def compute_output_shape(self, input_shape):
        # Elementwise op: shape is unchanged.
        return input_shape
class Celu(Layer):
    """Continuously differentiable ELU (CELU) activation.

    .. math::
        CELU(x, \\alpha) = max(0,x) + min(0,\\alpha * (exp(x/ \\alpha)-1))

    Shape:
        - Input: arbitrary; use ``input_shape`` when this is the first
          layer of a model.
        - Output: same shape as the input.

    Arguments:
        - alpha: the α value of the CELU formulation (default=1.0)

    References:
        - CELU paper: https://arxiv.org/abs/1704.07483

    Examples:
        >>> X_input = Input(input_shape)
        >>> X = Celu(alpha=1.0)(X_input)
    """

    def __init__(self, alpha=1.0, **kwargs):
        super(Celu, self).__init__(**kwargs)
        self.supports_masking = True
        self.alpha = K.cast_to_floatx(alpha)

    def call(self, inputs):
        # Identity for non-negative inputs, scaled exponential for negative ones.
        pos_mask = K.cast(K.greater_equal(inputs, 0), 'float32')
        neg_mask = K.cast(K.less(inputs, 0), 'float32')
        neg_branch = self.alpha * (K.exp(inputs / self.alpha) - 1)
        return pos_mask * inputs + neg_mask * neg_branch

    def get_config(self):
        base_config = super(Celu, self).get_config()
        return {**base_config, 'alpha': float(self.alpha)}

    def compute_output_shape(self, input_shape):
        # Elementwise op: shape is unchanged.
        return input_shape
class ReLU6(Layer):
    """ReLU capped at 6.

    .. math::
        RELU6(x) = min(max(0,x),6)

    Shape:
        - Input: arbitrary; use ``input_shape`` when this is the first
          layer of a model.
        - Output: same shape as the input.

    References:
        - RELU6 paper: http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf

    Examples:
        >>> X_input = Input(input_shape)
        >>> X = ReLU6()(X_input)
    """

    def __init__(self, **kwargs):
        super(ReLU6, self).__init__(**kwargs)
        self.supports_masking = True

    def call(self, inputs):
        # Clamp at 6 above, standard ReLU below.
        return K.cast(K.greater_equal(inputs, 6), 'float32') * 6 + K.cast(K.less(inputs, 6), 'float32') * K.relu(inputs)

    def get_config(self):
        """Return the layer configuration for serialization.

        Bug fix: the original return statement was missing a closing
        parenthesis, which made this method a syntax error.
        """
        base_config = super(ReLU6, self).get_config()
        return dict(list(base_config.items()))

    def compute_output_shape(self, input_shape):
        # Elementwise op: shape is unchanged.
        return input_shape
class HardTanh(Layer):
    """Hard hyperbolic tangent activation (tanh clamped to [-1, 1]).

    .. math::
        Hard-TanH(x) = \\left\\{\\begin{matrix} 1, x > 1 \\\\ x , -1 \\leq x \\leq 1 \\\\ -1, x <- 1 \\end{matrix}\\right.

    Shape:
        - Input: arbitrary; use ``input_shape`` when this is the first
          layer of a model.
        - Output: same shape as the input.

    Examples:
        >>> X_input = Input(input_shape)
        >>> X = HardTanh()(X_input)
    """

    def __init__(self, **kwargs):
        super(HardTanh, self).__init__(**kwargs)
        self.supports_masking = True

    def call(self, inputs):
        # +1 where x > 1, x where -1 <= x <= 1, -1 where x < -1.
        return (K.cast(K.greater(inputs, 1), 'float32')
                + inputs * K.cast(K.less_equal(inputs, 1), 'float32') * K.cast(K.greater_equal(inputs, -1), 'float32')
                - K.cast(K.less(inputs, -1), 'float32'))

    def get_config(self):
        """Return the layer configuration for serialization.

        Bug fix: the original return statement was missing a closing
        parenthesis, which made this method a syntax error.
        """
        base_config = super(HardTanh, self).get_config()
        return dict(list(base_config.items()))

    def compute_output_shape(self, input_shape):
        # Elementwise op: shape is unchanged.
        return input_shape
class LogSigmoid(Layer):
    """Log-sigmoid activation.

    .. math::
        Log-Sigmoid(x) = log (\\frac{1}{1+e^{-x}})

    Shape:
        - Input: arbitrary; use ``input_shape`` when this is the first
          layer of a model.
        - Output: same shape as the input.

    Examples:
        >>> X_input = Input(input_shape)
        >>> X = LogSigmoid()(X_input)
    """

    def __init__(self, **kwargs):
        super(LogSigmoid, self).__init__(**kwargs)
        self.supports_masking = True

    def call(self, inputs):
        return K.log(K.sigmoid(inputs))

    def get_config(self):
        """Return the layer configuration for serialization.

        Bug fix: the original return statement was missing a closing
        parenthesis, which made this method a syntax error.
        """
        base_config = super(LogSigmoid, self).get_config()
        return dict(list(base_config.items()))

    def compute_output_shape(self, input_shape):
        # Elementwise op: shape is unchanged.
        return input_shape
class TanhShrink(Layer):
'''
TanH-Shrink Activation Function.
.. math::
TanH-Shrink(x) = x - tanh(x)
Shape:
- Input: Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, | |
expected result (raised from the pexpect module).
"""
reporter.step('Getting device information...')
self.__access_priv_exec_mode(child, eol, enable_password)
try:
# Get the name of the default drive. Depending on the device, it may be bootflash,
# flash, slot (for linear memory cards), or disk (for CompactFlash disks)
child.sendline('dir' + eol)
child.expect_exact('dir')
index = child.expect_exact(['More', self.device_prompts[1], ])
dir_list = str(child.before)
if index == 0:
# No need to get the whole directory listing, so break out
child.sendcontrol('c')
child.expect_exact(self.device_prompts[1])
default_file_system = dir_list.split(
'Directory of ')[1].split(':')[0].strip()
if not default_file_system.startswith(('bootflash', 'flash', 'slot', 'disk',)):
raise RuntimeError('Cannot get the device\'s working drive.')
# If the drive is not formatted, a warning will appear, followed by another prompt.
# Wait for it to pass, and get to the correct prompt
index = child.expect_exact(
['before an image can be booted from this device', pexpect.TIMEOUT, ], timeout=5)
if index == 0:
child.expect_exact(self.device_prompts[1])
except (RuntimeError, IndexError) as ex:
# RuntimeError = explicit, while IndexError = implicit if split index is out of range
reporter.warn(ex.message)
default_file_system = None
try:
# Get the IOS version
child.sendline('show version | include [IOSios] [Ss]oftware' + eol)
# child.expect_exact('show version | include')
child.expect_exact(self.device_prompts[1])
software_ver = str(child.before).split(
'show version | include [IOSios] [Ss]oftware\r')[1].split('\r')[0].strip()
if not re.compile(r'[IOSios] [Ss]oftware').search(software_ver):
raise RuntimeError('Cannot get the device\'s software version.')
except (RuntimeError, IndexError) as ex:
reporter.warn(ex.message)
software_ver = None
try:
# Get the name of the device
child.sendline('show inventory | include DESCR:' + eol)
# child.expect_exact('show inventory | include DESCR:')
child.expect_exact(self.device_prompts[1])
device_name = str(child.before).split(
'show inventory | include DESCR:\r')[1].split('\r')[0].strip()
if not re.compile(r'DESCR:').search(device_name):
raise RuntimeError('Cannot get the device\'s name.')
except (RuntimeError, IndexError) as ex:
reporter.warn(ex.message)
device_name = None
try:
# Get the serial number of the device
child.sendline('show version | include [Pp]rocessor [Bb]oard [IDid]' + eol)
# child.expect_exact('show version | include')
child.expect_exact(self.device_prompts[1])
serial_num = str(child.before).split(
'show version | include [Pp]rocessor [Bb]oard [IDid]\r')[1].split('\r')[0].strip()
if not re.compile(r'[Pp]rocessor [Bb]oard [IDid]').search(serial_num):
raise RuntimeError('Cannot get the device\'s serial number.')
except (RuntimeError, IndexError) as ex:
reporter.warn(ex.message)
serial_num = None
# Get rid of ANSI escape sequences
ansi_seq = re.compile('(?:\\x1b\[)([\w;]+)(H)')
default_file_system = ansi_seq.sub('', str(default_file_system)).strip()
software_ver = ansi_seq.sub('', str(software_ver)).strip()
device_name = ansi_seq.sub('', str(device_name)).strip()
serial_num = ansi_seq.sub('', str(serial_num)).strip()
reporter.success()
return default_file_system, software_ver, device_name, serial_num
def format_file_system(self, child, reporter, eol, device_file_system):
    """Format a file system (i.e., memory) on a network device.
    :param pexpect.spawn child: Connection in a child application object.
    :param labs.cisco.Reporter reporter: A reference to the popup GUI window that reports
        the status and progress of the script.
    :param str eol: EOL sequence (LF or CRLF) used by the connection.
    :param str device_file_system: File system to format.
    :return: None
    :rtype: None
    :raise ValueError: If an argument is invalid.
    :raise pexpect.ExceptionPexpect: If the result of a send command does not match the
        expected result (raised from the pexpect module).
    """
    # Validate inputs
    if not device_file_system.startswith(('bootflash', 'flash', 'slot', 'disk',)):
        reporter.error()
        raise ValueError('Invalid Cisco file system name.')
    reporter.step('Formatting device memory...')
    self.__access_priv_exec_mode(child, eol)
    # Format the memory. Look for the final characters of the following strings:
    # 'Format operation may take a while. Continue? [confirm]'
    # 'Format operation will destroy all data in 'flash:'. Continue? [confirm]'
    # '66875392 bytes available (0 bytes used)'
    child.sendline('format {0}:'.format(device_file_system) + eol)
    index = 1
    # Keep answering confirmation prompts until none appears within the
    # 5-second window (index 0 == pexpect.TIMEOUT means no more prompts).
    while index != 0:
        index = child.expect_exact(
            [pexpect.TIMEOUT, 'Continue? [confirm]', 'Enter volume ID', ], timeout=5)
        if index != 0:
            # A bare EOL accepts the prompt's default.
            child.sendline(eol)
    # Formatting can be slow; allow up to two minutes for completion.
    child.expect_exact('Format of {0} complete'.format(device_file_system), timeout=120)
    # Verify the file system is now empty before reporting success.
    child.sendline('show {0}'.format(device_file_system) + eol)
    child.expect_exact('(0 bytes used)')
    child.expect_exact(self.device_prompts[1])
    reporter.success()
def set_switch_priority(self, child, reporter, eol,
                        switch_number=1,
                        switch_priority=1,
                        enable_password=None,
                        commit=True):
    """Set the switch priority in the stack.
    :param pexpect.spawn child: Connection in a child application object.
    :param labs.cisco.Reporter reporter: A reference to the popup GUI window that reports
        the status and progress of the script.
    :param str eol: EOL sequence (LF or CRLF) used by the connection.
    :param switch_number: Switch reference number in the stack.
    :param switch_priority: Switch priority in the stack; maximum is 15. The switch with the
        largest number (e.g., 15) becomes the master switch for the stack.
    :param str enable_password: Password to enable Privileged EXEC Mode from User EXEC Mode.
    :param bool commit: True to save changes to startup-config.
    :return: None
    :rtype: None
    :raise ValueError: If an argument is invalid.
    :raise pexpect.ExceptionPexpect: If the result of a send command does not match the
        expected result (raised from the pexpect module).
    """
    reporter.step('Setting switch priority...')
    # Bug fix: enable_password was previously passed as a corrupted
    # placeholder token (a syntax error); forward the real argument.
    self.__access_priv_exec_mode(child, eol, enable_password=enable_password)
    # Validate inputs
    if not 1 <= switch_number <= 9:
        raise ValueError('Invalid switch stack member number.')
    validate_switch_priority(switch_priority)
    child.sendline('configure terminal' + eol)
    child.expect_exact(self.device_prompts[2])
    # NOTE(review): unlike the other commands in this class, this sendline
    # does not append eol; confirm whether that is intentional.
    child.sendline('switch {0} priority {1}'.format(switch_number, switch_priority))
    # Answer any confirmation prompts until the success message appears.
    index = 0
    while index == 0:
        index = child.expect_exact(
            ['Do you want to continue', 'New Priority has been set successfully', ])
        if index == 0:
            child.sendline(eol)
    child.sendline('end' + eol)
    child.expect_exact(self.device_prompts[1])
    # Save changes if True
    if commit:
        self.save_running_configuration(child, eol, enable_password=enable_password)
    reporter.success()
def set_switch_ip_addr(self, child, reporter, eol,
                       vlan_name,
                       vlan_port,
                       new_ip_address,
                       new_netmask,
                       enable_password=None,
                       commit=True):
    """Set a switch's IP address.
    :param pexpect.spawn child: Connection in a child application object.
    :param labs.cisco.Reporter reporter: A reference to the popup GUI window that reports
        the status and progress of the script.
    :param str eol: EOL sequence (LF or CRLF) used by the connection.
    :param str vlan_name: Virtual Local Area Network (VLAN) interface to configure.
    :param str vlan_port: Ethernet interface port name to configure and connect to VLAN.
    :param str new_ip_address: New IPv4 address for the device.
    :param str new_netmask: New netmask for the device.
    :param str enable_password: Password to enable Privileged EXEC Mode from User EXEC Mode.
    :param bool commit: True to save changes to startup-config.
    :return: None
    :rtype: None
    :raise pexpect.ExceptionPexpect: If the result of a send command does not match the
        expected result (raised from the pexpect module).
    """
    reporter.step('Setting the switch\'s IP address...')
    self.__access_priv_exec_mode(child, eol, enable_password=enable_password)
    # Validate inputs
    # FYI, vlan_port, while not validated, should start with F(ast), G(iga), etc.
    validate_ip_address(new_ip_address)
    validate_subnet_mask(new_netmask)
    child.sendline('configure terminal' + eol)
    child.expect_exact(self.device_prompts[2])
    # Configure Ethernet port
    child.sendline('interface {0}'.format(vlan_port) + eol)
    child.expect_exact(self.device_prompts[3])
    # Configure the VLAN membership mode
    child.sendline('switchport mode access' + eol)
    child.expect_exact(self.device_prompts[3])
    # Assign the port to the VLAN
    child.sendline('switchport access {0}'.format(vlan_name) + eol)
    child.expect_exact(self.device_prompts[3])
    # Set to forwarding state immediately, bypassing the listening and learning states
    # Used to prevent L2 switching loops when connecting to the remote host
    child.sendline('spanning-tree portfast' + eol)
    child.expect_exact(self.device_prompts[3])
    child.sendline('no shutdown' + eol)
    child.expect_exact(self.device_prompts[3])
    # Configure VLAN
    child.sendline('interface {0}'.format(vlan_name) + eol)
    child.expect_exact(self.device_prompts[3])
    child.sendline('ip address {0} {1}'.format(new_ip_address, new_netmask) + eol)
    child.expect_exact(self.device_prompts[3])
    child.sendline('no shutdown' + eol)
    child.expect_exact(self.device_prompts[3])
    child.sendline('end' + eol)
    child.expect_exact(self.device_prompts[1])
    # Save changes if True
    # Bug fix: enable_password was previously passed as a corrupted
    # placeholder token (a syntax error); forward the real argument.
    if commit:
        self.save_running_configuration(child, eol, enable_password=enable_password)
    reporter.success()
def set_router_ip_addr(self, child, reporter, eol,
                       ethernet_port,
                       new_ip_address,
                       new_netmask,
                       enable_password=None,
                       commit=True):
    """Set a router's IP address.
    :param pexpect.spawn child: Connection in a child application object.
    :param labs.cisco.Reporter reporter: A reference to the popup GUI window that reports
        the status and progress of the script.
    :param str eol: EOL sequence (LF or CRLF) used by the connection.
    :param str ethernet_port: Ethernet interface port name to configure.
    :param str new_ip_address: New IPv4 address for the device.
    :param str new_netmask: New netmask for the device.
    :param str enable_password: Password to enable Privileged EXEC Mode from User EXEC Mode.
    :param bool commit: True to save changes to startup-config.
    :return: None
    :rtype: None
    """
    # Validate inputs
    # ethernet_port, while not validated, should start with F(ast), G(iga), etc.
    validate_ip_address(new_ip_address)
    validate_subnet_mask(new_netmask)
    reporter.step('Setting the router\'s IP address...')
    self.__access_priv_exec_mode(child, eol)
    child.sendline('configure terminal' + eol)
    child.expect_exact(self.device_prompts[2])
    # Configure Ethernet port
    child.sendline('interface {0}'.format(ethernet_port) + eol)
    child.expect_exact(self.device_prompts[3])
    child.sendline('ip address {0} {1}'.format(new_ip_address, new_netmask) + eol)
    child.expect_exact(self.device_prompts[3])
    child.sendline('no shutdown' + eol)
    child.expect_exact(self.device_prompts[3])
    child.sendline('end' + eol)
    child.expect_exact(self.device_prompts[1])
    # Save changes if True
    # Bug fix: enable_password was previously passed as a corrupted
    # placeholder token (a syntax error); forward the real argument.
    if commit:
        self.save_running_configuration(child, eol, enable_password=enable_password)
    reporter.success()
def ping_from_device(self, child, reporter, eol,
destination_ip_addr,
count=4,
enable_password=None):
"""Check connectivity with another device.
:param pexpect.spawn child: Connection in a child application object.
:param labs.cisco.Reporter reporter: A reference to the popup GUI window that reports
the status and progress of the script.
:param str eol: EOL sequence (LF or CRLF) used by the connection.
:param str destination_ip_addr: IPv4 address of the other device.
:param int count: Number of | |
<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# In[1]:
"""
Module containng custom Keras models and layers required for FlowNet architecture.
"""
try:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
except Exception as e:
raise Exception("Error occured while importing dependency packages. More details:\n",e)
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, FlowNet"
__credits__ = ["<NAME>"]
__license__ = ""
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class ForwardModel(tf.keras.Model):
    """
    Forward neural network (FNN) for PDE solving, built from custom Keras
    layers. Subclass of tf.keras.Model.

    In training mode, ``call`` returns both the network output (the candidate
    PDE solution u) and a PDE-residual Lambda layer P(u, del u, x, t); the
    training loss drives the residual to zero.
    """

    def __init__(self, space_dim=1, time_dep=False, output_dim=1,
                 n_hid_lay=3, n_hid_nrn=20, act_func="tanh", rhs_func=None):
        """
        :param space_dim: (int) dimension of the space Omega where the PDE is defined.
        :param time_dep: (bool) True if the problem is time dependent.
        :param output_dim: (int) dimension of the range of the solution to the PDE.
        :param n_hid_lay: (int) number of hidden layers in the neural network.
        :param n_hid_nrn: (int) number of neurons in each hidden layer.
        :param act_func: (str) activation function for the hidden layers; a member
            of keras.activations, e.g. one of {"tanh", "sigmoid", "elu", "relu",
            "exponential"}.
        :param rhs_func: optional callable f for the right-hand side of the PDE
            P(u, del u) = f(x, t); when None, f is identically zero.
        """
        super(ForwardModel, self).__init__()
        # Defining class attributes
        self.space_dim = space_dim
        self.time_dep = time_dep
        self.output_dim = output_dim
        self.n_hid_lay = n_hid_lay
        self.n_hid_nrn = n_hid_nrn
        # Bug fix: act_func was never stored, so get_config() raised
        # AttributeError when it referenced self.act_func.
        self.act_func = act_func
        # Block of hidden layers
        self.hidden_block = [keras.layers.Dense(self.n_hid_nrn, activation=act_func,
                                                name="dense_" + str(i + 1))
                             for i in range(n_hid_lay)]
        # Final output layer
        self.final_layer = keras.layers.Dense(self.output_dim,
                                              name="final_layer")
        # Defining the rhs of PDE: P(u, delu) = f(x, t)
        if rhs_func is not None:
            self.rhs_function = rhs_func
        else:
            self.rhs_function = lambda x: 0

    def findGrad(self, func, input_space):
        """
        Find the gradient with respect to the domain Omega of the PDE.

        (tensor, tensor) -> Keras.Lambda layer

        :param func: tf tensor (usually of size data_size x dim of the previous
            layer's output); typically the network output (solution u).
        :param input_space: list of input tensors representing the space
            dimensions, with respect to which the partial derivatives are taken.
        :return: Keras Lambda layer whose output is a list of tensors, one
            partial derivative per element of input_space.
            See tf.keras.layers.Lambda and tf.gradients for details.
        """
        try:
            return keras.layers.Lambda(lambda z: [tf.gradients(z[0], x_i,
                                                               unconnected_gradients='zero')
                                                  for x_i in z[1]])([func, input_space])
        except Exception as e:
            # Bug fix: the format string has two placeholders but only one
            # argument was supplied, which raised IndexError at raise time;
            # also the message mistakenly referred to the time derivative.
            raise Exception("Error occured in find gradient lambda layer of type {} as follows: \n{}".format(type(e), e))

    def findTimeDer(self, func, input_time):
        """
        Find the time derivative of the network output.

        (tensor, tensor) -> Keras.Lambda layer

        :param func: tf tensor; typically the network output (solution u).
        :param input_time: input tensor corresponding to the time dimension.
            Used only if the problem is time dependent.
        :return: Keras Lambda layer producing an m x 1 tensor with the time
            derivative of func. See tf.keras.layers.Lambda and tf.gradients.
        """
        assert (self.time_dep), "Tried taking time derrivative even though the problem is not time dependent."
        try:
            return keras.layers.Lambda(lambda z: tf.gradients(z[0], z[1],
                                                              unconnected_gradients='zero')[0])([func, input_time])
        except Exception as e:
            # Bug fix: format-argument count and swapped message (see findGrad).
            raise Exception("Error occured in finding the time derrivative lambda layer of type {} as follows: \n{}".format(type(e), e))

    def findLaplace(self, first_der, input_space):
        """
        Build a Lambda layer computing the Laplacian of the PDE solution.

        (tensor, tensor) -> Keras.Lambda layer

        :param first_der: list of first-derivative tensors (output of findGrad).
        :param input_space: list of spatial input tensors.
        :return: sum of the diagonal second derivatives (the Laplacian).
            See tf.keras.layers.Lambda and tf.gradients for details.
        """
        try:
            # List of the diagonal entries of the Hessian. tf.gradients returns
            # a list of tensors, hence the trailing [0].
            del_sq_layer = keras.layers.Lambda(lambda z: [tf.gradients(z[0][i], z[1][i],
                                                                       unconnected_gradients='zero')[0]
                                                          for i in range(len(z[1]))])([first_der, input_space])
            return sum(del_sq_layer)
        except Exception as e:
            # Bug fix: format-argument count (see findGrad).
            raise Exception("Error occured in find laplacian lambda layer of type {} as follows: \n{}".format(type(e), e))

    # Final layer representing the lhs P(x, t) of the PDE P(x, t) = 0
    def findPdeLayer(self, laplacian, input_arg, time_der=0):
        """
        Build a Lambda layer for the PDE residual P(u, del u, x, t).

        (tensor, tensor, tensor) -> Keras.Lambda layer

        :param laplacian: Laplacian tensor with respect to the space dimensions.
        :param input_arg: list of all inputs (space then time; the last element
            corresponds to the temporal dimension).
        :param time_der: time-derivative tensor, or 0 for stationary problems.
        :return: Keras Lambda layer computing time_der - laplacian - f(x, t).
        """
        try:
            return keras.layers.Lambda(lambda z: z[0] - z[1] - self.rhs_function(input_arg))([time_der, laplacian, input_arg])
        except Exception as e:
            # Bug fix: format-argument count (see findGrad).
            raise Exception("Error occured in finding pde lambda layer of type {} as follows: \n{}".format(type(e), e))

    def get_config(self):
        # Basic config from the parent class, extended with the
        # hyperparameters needed to rebuild this model.
        base_config = super().get_config()
        return {**base_config, "space_dim": self.space_dim,
                "time_dep": self.time_dep, "output_dim": self.output_dim,
                "n_hid_lay": self.n_hid_lay, "n_hid_nrn": self.n_hid_nrn,
                "act_func": self.act_func}

    def from_config(self, config, custom_objects):
        # Bug fix: the reconstructed model was previously discarded (the
        # method returned None); return it instead.
        # NOTE(review): Keras defines from_config as a classmethod; confirm
        # whether this override should be one as well.
        return super().from_config(config)

    def call(self, inputs, training=False):
        """
        Forward pass used for training, prediction and evaluation.

        :param inputs: list of input tensors. If self.time_dep is True, the
            last element is the temporal dimension:
            [space_dim1, ..., space_dimn, time_dim]. Prefer feeding data from
            the data-processing method in the flowDataProcess module.
        :param training: True during training (handled automatically by Keras).
            When True, the PDE-residual layer is returned alongside the output.
        :return: output_layer, or (output_layer, pde_layer) when training.
        """
        if self.time_dep:
            try:
                assert (len(inputs) > 1)
                input_space = inputs[:-1]
                input_time = inputs[-1]
            except Exception as e:
                raise Exception("Error occured while separating spacial and temporal data from inputs, make sure that spacio-temporal data is being used to for training and x=[space_dim1,..,space_dimn,time_dim]. More details on error below:\n", type(e), e)
        else:
            input_space = inputs
        # Concatenate all input data (space and time dimensions) so it can be
        # passed to the hidden layers.
        hidden_output = keras.layers.concatenate(inputs)
        # Hidden layers
        for layer_id in range(self.n_hid_lay):
            hidden_output = self.hidden_block[layer_id](hidden_output)
        # Output layer: typically the solution function u.
        output_layer = self.final_layer(hidden_output)
        if training:
            # PDE-specific layers
            grad_layer = self.findGrad(output_layer, input_space)
            laplace_layer = self.findLaplace(grad_layer, input_space)
            if self.time_dep:
                time_der_layer = self.findTimeDer(output_layer, input_time)
            else:
                time_der_layer = 0
            pde_layer = self.findPdeLayer(laplace_layer, inputs, time_der_layer)
            return output_layer, pde_layer
        elif not training:  # only output the function value when not training
            return output_layer
# In[3]:
class Poission(ForwardModel):
"""
Doc string goes here
"""
def __init__(self, space_dim=1, perm_tensor=None, output_dim=1,
n_hid_lay=3, n_hid_nrn=20, act_func = "tanh", rhs_func = None):
"""
talk about super initialization
"""
super().__init__(space_dim=space_dim, time_dep=False, output_dim=output_dim,
n_hid_lay=n_hid_lay, n_hid_nrn=n_hid_nrn, act_func = act_func, rhs_func = rhs_func)
self._perm_tensor = perm_tensor if perm_tensor else tf.eye(space_dim)
#final layer representing the lhs P(x) of PDE P(x)=0
def findPdeLayer(self, laplacian, input_arg):
"""
(tensor, tensor, tensor) -> Keras.Lambda layer
Returns lambda layer to find the actual pde P(u,delu,x,t) such that P(u,delu,x,t)=0.
arguments:
----------
laplacian (tf tensor): laplacian with respect to space dim .
input_arg: list of inputs corresponding to both space and time dimension. Last elemetn of
the list corresponds to the temporal | |
next_case_line_number: int = 0
for line in code_except_decorator:
# print(f'{line_number} : {line}')
if "case " in line:
next_case_line_number = line_number
next_case = line.strip()
if "done()" in line:
match_obligations.add(line_number, next_case_line_number, next_case)
line_number += 1
# print(match_obligations)
def new_transition(self, event):
if not hasattr(self, '__data_object__') or self.__data_object__ is None:
self.__data_object__ = match_obligations
result = transition_function(self, event)
if isinstance(result, int):
# print(f'++> {self.__data_object__}')
self.__data_object__.remove(result)
# print(f'--> {self.__data_object__}')
if self.__data_object__.empty():
return ok # we are done
else:
return self # we are not done yet
else:
return result # something else (ok, error, or another state)
return new_transition
def initial(state_class: State) -> State:
    """
    Class decorator marking a state class as an initial state.

    It sets the attribute ``is_initial`` on the decorated class (the mere
    presence of the attribute is what matters, not its value), so that

        @initial
        class Init(State):
            ...

    is equivalent to defining the class and then executing
    ``Init.is_initial = True``.

    :param state_class: the state class to decorate.
    :return: the same class, now flagged as initial.
    """
    setattr(state_class, 'is_initial', True)
    return state_class
def mk_state_vector(arg: State | List[State]) -> List[State]:
    """
    Normalize a state or a list of states into a list of states.

    A single state ``s`` becomes the singleton vector ``[s]``; a list is
    returned unchanged.

    :param arg: a state or a list of states.
    :return: the corresponding list of states.
    """
    return arg if isinstance(arg, list) else [arg]
class Monitor:
"""
Any user defined monitor class must extend this class. It defines a monitor.
"""
def __init__(self):
    """
    Initialize the monitor and discover its initial states via introspection.

    monitors:
        A monitor can have sub-monitors, stored in this variable.
    is_top_monitor:
        Is True iff. this monitor is the topmost monitor in a monitor
        hierarchy, that is: not a sub-monitor of another monitor.
        Used for printing purposes in that only the topmost monitor prints
        out certain debugging information.
    states:
        The state vector of the monitor: the set of all active states.
    states_indexed:
        Indexed states, used for slicing.
    errors:
        Detected errors during monitoring.
    event_count:
        Counts the events as they come in.
    option_show_state_event:
        When True, state and event will be printed on transition errors.
    option_print_summary:
        When True, a summary of the analysis is printed for the top monitor.
    """
    self.monitors: List[Monitor] = []
    self.is_top_monitor: bool = True
    self.states: Set[State] = set([])
    self.states_indexed: Dict[object, Set[State]] = {}
    self.errors: List[str] = []
    self.event_count: int = 0
    self.option_show_state_event: bool = True
    self.option_print_summary: bool = True
    # Create always state if outermost transitions exist: transition methods
    # defined directly on the monitor are wrapped in a synthetic AlwaysState
    # subclass that is made an initial state.
    outer_transitions = inspect.getmembers(self, predicate=is_transition_method)
    if len(outer_transitions) > 0:
        always = type("Always", (AlwaysState,), {})
        setattr(always, "is_initial", True)
        # NOTE(review): only the first outer transition method is installed
        # on the synthetic state; confirm whether several are possible.
        (name, method) = outer_transitions[0]
        setattr(always, name, method.__func__)
        setattr(self, "Always", always)
    # Locate all state classes (subclassing State):
    state_classes = inspect.getmembers(self, predicate=is_state_class)
    if len(state_classes) > 0:
        # Add initial states: every class flagged with is_initial is
        # instantiated and added to the state vector.
        initial_state_found = False
        for (state_name, state_class) in state_classes:
            if hasattr(state_class, 'is_initial'):
                self.add_state_to_state_vector(self.states, state_class())
                initial_state_found = True
        # Fall back to the first discovered state class when no class was
        # explicitly marked as initial.
        if not initial_state_found:
            (name, the_first_class) = state_classes[0]
            self.add_state_to_state_vector(self.states, the_first_class())
    # Debug initial states
    # print(f'Initial states of {self.get_monitor_name()}:')
    # for state in self.states:
    #     print(state)
    # print()
def set_event_count(self, initial_value: int):
"""
Sets the initial value of `event_count` to a different value than 0.
This is used for example when processing CSV files, where there is a header
row, which should be counted as an 'event' so that `event_count` will
correspond to row number in the CSV file.
:param initial_value: the initial value of `event_count`.
"""
self.event_count = initial_value
def get_monitor_name(self) -> str:
return self.__class__.__name__
def key(self, event) -> Optional[object]:
"""
Returns indexing key of event. Returns None by default but can be
overwritten by user.
:param event: event to extract index from.
:return: the index of the event.
"""
return None
def monitor_this(self, *monitors: "Monitor"):
"""
Records one or more monitors as sub-monitors of this monitor.
Each event submitted to this monitor is also submitted to the
sub-monitors. Likewise when end() is called on this monitor,
end() is also called on the sub-monitors.
:param monitors: the monitors to record as sub-monitors.
"""
for monitor in monitors:
monitor.is_top_monitor = False
self.monitors.append(monitor)
def is_relevant(self, event: Event) -> bool:
"""
Returns True if the event should be monitored. By default all submitted events
are monitored. This method is meant to be overridden by the user.
:param event: the incoming event.
:return: True if the event should be monitored.
"""
return True
def eval(self, event: Event):
"""
This method is used to submit events to the monitor.
The monitor evaluates the event against the states in the state vector.
The eval method is called recursively on sub-monitors, so it is only
necessary to call it on the topmost monitor.
:param event: the submitted event.
"""
global __monitor__
__monitor__ = self
self.event_count += 1
if DEBUG_PROGRESS and self.is_top_monitor and self.event_count % DEBUG_PROGRESS == 0:
debug(f'---------------------> {self.event_count}')
if DEBUG and self.is_top_monitor:
debug_frame("=", f'Event {self.event_count} {event}')
for monitor in self.monitors:
monitor.eval(event)
if DEBUG:
debug_frame("#", f'Monitor {self.get_monitor_name()}')
if self.is_relevant(event):
index = self.key(event)
if index is None:
new_states = self.eval_states(event, self.states)
if new_states is not None:
self.states = new_states
for (idx, states) in self.states_indexed.items():
new_states = self.eval_states(event, states)
if new_states is not None:
self.states_indexed[idx] = new_states
else:
if index in self.states_indexed:
states = self.states_indexed[index]
else:
states = self.states
new_states = self.eval_states(event, states)
if new_states is not None:
self.states_indexed[index] = new_states
if DEBUG:
debug(f'\n{self}')
def eval_states(self, event: Event, states: Set[State]) -> Optional[Set[State]]:
"""
Evaluates an event on each state in a set of states.
:param event: the event to evaluate.
:param states: the set of states to evaluate it on.
:return: the resulting set of states. None is returned if no transitions fired.
"""
transition_triggered = False
states_to_remove = set([])
states_to_add = set([])
new_states = set([])
for source_state in states:
resulting_states = source_state.eval(event) # returns None or a list of states
if DEBUG:
debug(f'{source_state} results in {mk_string("[",", ","]", resulting_states)}')
if resulting_states is not None:
transition_triggered = True
states_to_remove.add(source_state)
for target_state in resulting_states:
if target_state == ok:
pass
elif isinstance(target_state, ErrorState):
self.report_transition_error(source_state, event, target_state.message)
elif isinstance(target_state, InfoState):
self.report_transition_information(source_state, event, target_state.message)
else:
self.add_state_to_state_vector(states_to_add, target_state)
if transition_triggered:
new_states = states
new_states = new_states - states_to_remove
new_states = new_states.union(states_to_add)
return new_states
else:
return None
def end(self):
"""
Terminates monitoring for the monitor. This includes looking for hot states
of type HotState, which should not occur, and then printing out a summary
of the verification. The end() method is called recursively on sub-monitors,
so it only needs to be called on the top-most monitor.
"""
if self.is_top_monitor:
print()
print('Terminating monitoring!')
print()
for monitor in self.monitors:
monitor.end()
print_frame("+", f'Terminating monitor {self.get_monitor_name()}')
for state in self.get_all_states():
if isinstance(state, HotState) or isinstance(state, HotNextState):
self.report_end_error(f'terminates in hot state {state}')
if self.is_top_monitor and self.option_print_summary:
self.print_summary()
def verify(self, trace: List[Event]):
'''
Verifies a trace, which is a list of events.
It calls eval on each event and calls end() at the
end of the trace.
:param trace: the trace.
'''
for event in trace:
self.eval(event)
self.end()
def __str__(self) -> str:
monitor_name = self.__class__.__name__
suffix = " states:"
bar_length = len(monitor_name) + len(suffix)
result = f'{"-" * bar_length}\n'
result += monitor_name + suffix + "\n"
for state in self.states:
result += f'{state}\n'
for (index, states) in self.states_indexed.items():
if states:
result += f'index {index}:\n'
for state in states:
result += f' {state}\n'
result += f'{"-" * bar_length}\n'
return result
def add_state_to_state_vector(self, states: Set[State], state: State):
"""
Adds a state to a state vector. Also sets the monitor field of the state
to self (the monitor the state is part of).
:param states: the state vector to add state to.
:param state: the state to become initial states.
"""
state.set_monitor_to(self)
states.add(state)
def report_transition_error(self, state: State, event: Event, msg: str):
"""
Reports an error caused by taking a transition that results in an ErrorState.
:param state: the state in which the transition is taken.
:param event: the event that causes the transition to be taken.
:param msg: the error message provided by user.
"""
message = | |
is visible. When the last pending initializer is removed,
and no failing result is set, the initializers struct will be
set to nil and the object is considered as initialized and visible
to all clients.
type: list
contains:
name:
description:
- name of the process that is responsible for initializing this
object.
type: str
result:
description:
- If result is set with the Failure field, the object will be persisted
to storage and then deleted, ensuring that other clients can observe
the deletion.
type: complex
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to
the latest internal value, and may reject unrecognized values.
type: str
code:
description:
- Suggested HTTP return code for this status, 0 if not set.
type: int
details:
description:
- Extended data associated with the reason. Each reason may
define its own extended details. This field is optional and
the data returned is not guaranteed to conform to any schema
except that defined by the reason type.
type: complex
contains:
causes:
description:
- The Causes array includes more details associated with
the StatusReason failure. Not all StatusReasons may provide
detailed causes.
type: list
contains:
field:
description:
- 'The field of the resource that has caused this error,
as named by its JSON serialization. May include dot
and postfix notation for nested attributes. Arrays
are zero-indexed. Fields may appear more than once
in an array of causes due to fields having multiple
errors. Optional. Examples: "name" - the field "name"
on the current resource "items[0].name" - the field
"name" on the first array entry in "items"'
type: str
message:
description:
- A human-readable description of the cause of the error.
This field may be presented as-is to a reader.
type: str
reason:
description:
- A machine-readable description of the cause of the
error. If this value is empty there is no information
available.
type: str
group:
description:
- The group attribute of the resource associated with the
status StatusReason.
type: str
kind:
description:
- The kind attribute of the resource associated with the
status StatusReason. On some operations may differ from
the requested resource Kind.
type: str
name:
description:
- The name attribute of the resource associated with the
status StatusReason (when there is a single name which
can be described).
type: str
retry_after_seconds:
description:
- If specified, the time in seconds before the operation
should be retried.
type: int
uid:
description:
- UID of the resource. (when there is a single resource
which can be described).
type: str
kind:
description:
- Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint
the client submits requests to. Cannot be updated. In CamelCase.
type: str
message:
description:
- A human-readable description of the status of this operation.
type: str
metadata:
description:
- Standard list metadata.
type: complex
contains:
resource_version:
description:
- String that identifies the server's internal version of
this object that can be used by clients to determine when
objects have changed. Value must be treated as opaque
by clients and passed unmodified back to the server. Populated
by the system. Read-only.
type: str
self_link:
description:
- SelfLink is a URL representing this object. Populated
by the system. Read-only.
type: str
reason:
description:
- A machine-readable description of why this operation is in
the "Failure" status. If this value is empty there is no information
available. A Reason clarifies an HTTP status code but does
not override it.
type: str
status:
description:
- 'Status of the operation. One of: "Success" or "Failure".'
type: str
labels:
description:
- Map of string keys and values that can be used to organize and categorize
(scope and select) objects. May match selectors of replication controllers
and services.
type: complex
contains: str, str
name:
description:
- Name must be unique within a namespace. Is required when creating
resources, although some resources may allow a client to request the
generation of an appropriate name automatically. Name is primarily
intended for creation idempotence and configuration definition. Cannot
be updated.
type: str
namespace:
description:
- Namespace defines the space within each name must be unique. An empty
namespace is equivalent to the "default" namespace, but "default"
is the canonical representation. Not all objects are required to be
scoped to a namespace - the value of this field for those objects
will be empty. Must be a DNS_LABEL. Cannot be updated.
type: str
owner_references:
description:
- List of objects depended by this object. If ALL objects in the list
have been deleted, this object will be garbage collected. If this
object is managed by a controller, then an entry in this list will
point to this controller, with the controller field set to true. There
cannot be more than one managing controller.
type: list
contains:
api_version:
description:
- API version of the referent.
type: str
block_owner_deletion:
description:
- If true, AND if the owner has the "foregroundDeletion" finalizer,
then the owner cannot be deleted from the key-value store until
this reference is removed. Defaults to false. To set this field,
a user needs "delete" permission of the owner, otherwise 422 (Unprocessable
Entity) will be returned.
type: bool
controller:
description:
- If true, this reference points to the managing controller.
type: bool
kind:
description:
- Kind of the referent.
type: str
name:
description:
- Name of the referent.
type: str
uid:
description:
- UID of the referent.
type: str
resource_version:
description:
- An opaque value that represents the internal version of this object
that can be used by clients to determine when objects have changed.
May be used for optimistic concurrency, change detection, and the
watch operation on a resource or set of resources. Clients must treat
              these values as opaque and passed unmodified back to the server. They
              may only be valid for a particular resource or set of resources. Populated
              by the system. Read-only. Value must be treated as opaque by clients
              and passed unmodified back to the server.
type: str
self_link:
description:
- SelfLink is a URL representing this object. Populated by the system.
Read-only.
type: str
uid:
description:
- UID is the unique in time and space value for this object. It is typically
generated by the server on successful creation of a resource and is
not allowed to change on PUT operations. Populated by the system.
Read-only.
type: str
spec:
description:
- spec holds all the input necessary to produce a new build, and the conditions
when to trigger them.
type: complex
contains:
completion_deadline_seconds:
description:
- completionDeadlineSeconds is an optional duration in seconds, counted
from the time when a build pod gets scheduled in the system, that
the build may be active on a node before the system actively tries
to terminate the build; value must be positive integer
type: int
failed_builds_history_limit:
description:
- failedBuildsHistoryLimit is the number of old failed builds to retain.
If not specified, all failed builds are retained.
type: int
node_selector:
description:
- nodeSelector is a selector which must be true for the build pod to
fit on a node If nil, it can be overridden by default build nodeselector
values for the cluster. If set to an empty map or a map with any values,
default build nodeselector values are ignored.
type: complex
contains: str, str
output:
description:
- output describes the Docker image the Strategy should produce.
type: complex
contains:
image_labels:
description:
- imageLabels define a list of labels that are applied to the resulting
image. If there are multiple labels with the same name then the
last one in the list is used.
type: list
contains:
name:
description:
- name defines the name of the label. It must have non-zero
length.
type: str
value:
description:
- value defines the | |
# Repository: misakadam97/cell_mrcnn
from os import listdir, mkdir, path
from os.path import join, isdir, basename, split
from glob import glob
import numpy as np
import skimage.draw
from skimage.io import imread
from cell_mrcnn.utils import correct_central_brightness, subtract_bg, convert_to_bit8, \
get_cell_mrcnn_path_from_config_file, get_image_description, \
preproc_pipeline, load_image
import read_roi
import gc
import pandas as pd
from datetime import datetime
from shutil import copyfile
# data directory
# data_dir = get_cell_mrcnn_path_from_config_file()
# dataset_dir = join(data_dir, 'annotated_datasets/')
# Import Mask RCNN
from cell_mrcnn import utils
from PIL import Image
def calculate_percentiles(im_paths):
    """
    Computes high-end intensity percentiles for a set of images.

    Each image is brightness-corrected and background-subtracted before the
    percentiles are taken. Summary rows (min/max/mean/median/var/std over the
    per-image rows) are appended at the bottom of the returned frame.

    :param im_paths: iterable of image file paths.
    :return: DataFrame with one row per image plus summary rows; columns are
        the probed percentiles.
    """
    # takes ~2mins
    # on 2048x2048 images the below percentiles leave about _ pixels out:
    # 99: 42k
    # 99.9: 4.2k
    # 99.99: 420
    # 99.999: 42
    # 99.9999: 4
    # 100: 0 , this is equal to max
    percentiles = [99, 99.9, 99.99, 99.999, 99.9999, 100]
    perc_df = pd.DataFrame(columns=percentiles,
                           index=pd.RangeIndex(0, len(im_paths)))
    for i, im_path in enumerate(im_paths):
        im = imread(im_path)
        # Bug fix: np.float was removed in NumPy 1.24; the builtin float is
        # the equivalent (float64) dtype.
        im = correct_central_brightness(im.astype(float))
        im = subtract_bg(im)
        perc_df.loc[i, :] = [np.percentile(im, perc) for perc in percentiles]
    # Each summary row is computed over the rows above it, deliberately
    # excluding the summary rows appended before it.
    perc_df.loc["min", :] = perc_df.min(axis=0)
    perc_df.loc["max", :] = perc_df.loc[perc_df.index[0:-1], :].max(axis=0)
    perc_df.loc["mean", :] = perc_df.loc[perc_df.index[0:-2], :].mean(axis=0)
    perc_df.loc["median", :] = perc_df.loc[perc_df.index[0:-3], :].median(
        axis=0)
    perc_df.loc["var", :] = perc_df.loc[perc_df.index[0:-4], :].var(axis=0)
    perc_df.loc["std", :] = perc_df.loc[perc_df.index[0:-5], :].std(axis=0)
    return perc_df
def preprocess(channel_paths, output_folder, cutoffs):
    """
    Preprocesses raw channel images and saves them as 8-bit PNGs.

    :param output_folder: preprocessed images will be saved here (created if
        missing)
    :param channel_paths: nested list of image paths. Can contain 1 or 2
        lists. If it contains 2; 1st should be Venus, this is gonna be the red
        channel; 2nd should be Cerulean, this will be the blue channel;
        and they should be sorted!
    :param cutoffs: cutoff for 8 bit conversion (order same as in channel
        paths)
    :return: None; images are written to output_folder.
    """
    if not isdir(output_folder):
        mkdir(output_folder)
    # If only Venus channel
    if len(channel_paths) == 1:
        venus_paths = channel_paths[0]
        for i, impath in enumerate(venus_paths):
            im = imread(impath)
            im_c = correct_central_brightness(im.astype(np.float16))
            im_bg = subtract_bg(im_c)
            im8 = convert_to_bit8(im_bg, cutoffs[0])
            # Bug fix: the original used impath.split()[1] (a whitespace
            # split), which does not yield the file name. Derive the name
            # from the path exactly like the two-channel branch does.
            fname = split(impath)[1].split('.')[0]
            Image.fromarray(im8).save(join(output_folder, fname + '.png'))
    # If Venus and Cerulean
    elif len(channel_paths) == 2:
        red_paths, blue_paths = channel_paths[0], channel_paths[1]
        red_desc = [get_image_description(path)[1:] for path in red_paths]
        blue_desc = [get_image_description(path)[1:] for path in blue_paths]
        assert red_desc == blue_desc, 'images not sorted'
        for i, (red_path, blue_path) in enumerate(zip(red_paths, blue_paths)):
            print('\rCreating composite images: {}/{}' \
                  .format(i + 1, len(blue_paths)), end='...')
            try:
                red, blue = imread(red_path), imread(blue_path)
                comp = preproc_pipeline(red, blue)
                fname = split(red_path)[1].split('.')[0]
                Image.fromarray(comp).save(join(output_folder, fname + '.png'))
            except Exception:
                # Narrowed from a bare 'except' so KeyboardInterrupt and
                # SystemExit still propagate; processing is best-effort
                # per image.
                print(f'image {i} processing failed')
def transfer_w3_channel_images(data_dir):
    """
    Sorts non-thumbnail 'w3' channel TIFFs into cit/cellmembrane PNG folders.

    :param data_dir: root folder; scanned recursively for '*.tif'. Converted
        8-bit PNGs are written to '<data_dir>/w3/cit' (paths containing
        'cit') and '<data_dir>/w3/cellmembrane' (everything else).
    """
    cit_dir = join(data_dir, 'w3/cit')
    cellmembrane_dir = join(data_dir, 'w3/cellmembrane')
    if not isdir(cit_dir):
        # Bug fix: the original called mkdir(cit_dit) — a NameError typo.
        mkdir(cit_dir)
    if not isdir(cellmembrane_dir):
        mkdir(cellmembrane_dir)
    # get the path to all tif images
    im_paths = glob(join(data_dir, '**/*.tif'), recursive=True)
    # select the non-thumbnail w3 images
    for i, im_path in enumerate(im_paths):
        print('\rSeparating cit. and cellmembrane w3 channel images: ', i + 1,
              '/',
              len(im_paths), end='')
        if 'thumb' in im_path:
            continue
        if im_path.split('_')[-1][:2] != 'w3':
            continue
        im = imread(im_path)
        im = convert_to_bit8(im)
        im = Image.fromarray(im)
        if 'cit' in im_path:
            im.save(join(cit_dir, im_path.split('/')[-1].split('.')[0]
                         + '.png'))
        else:
            im.save(join(cellmembrane_dir, im_path.split('/')[-1].split('.')[0]
                         + '.png'))
def cell_groups_to_bg(image, roi_set):
    """
    Replaces pixels inside 'cell_group' ROIs with random background noise.

    Noise is drawn uniformly from [mean - std, mean + std) of the whole
    image, so clumped cell groups no longer stand out from the background.

    :param image: intensity image (2D array).
    :param roi_set: dict of ImageJ ROIs; only keys containing 'cell_group'
        are used (each must provide 'x' and 'y' polygon vertex lists).
    :return: a copy of `image` with the cell-group regions overwritten.
    """
    cell_group_rois = [roi_set[key] for key in roi_set.keys() if 'cell_group'
                       in key]
    im = np.copy(image)
    for cg_roi in cell_group_rois:
        rr, cc = skimage.draw.polygon(cg_roi['y'], cg_roi['x'])
        # Bug fix: the original routed the noise through a uint8 mask, which
        # silently truncated noise values above 255 (and could mismatch in
        # size when the polygon rasterization repeats pixels). Write the
        # noise straight into the image copy, sized by the pixel count.
        rand_bg = np.random.randint(im.mean() - im.std(),
                                    im.mean() + im.std(),
                                    size=rr.shape[0])
        im[rr, cc] = rand_bg
    return im
def read_roi_or_roiset(impath):
    """
    Loads the ImageJ ROI(s) belonging to an image.

    Looks next to the image for either a '<stem>.zip' archive (several ROIs)
    or a single '<stem>.roi' file and parses whichever exists.

    :param impath: path to the image (any extension; only the stem is used).
    :return: dict of ROIs, or None (with a warning printed) when neither
        companion file exists.
    """
    stem = impath.split('.')[0]
    zip_path = join(stem + '.zip')
    roi_path = join(stem + '.roi')
    # ROIs packed in a zip: the image contains multiple regions.
    if path.isfile(zip_path):
        return read_roi.read_roi_zip(zip_path)
    # Single .roi file: the image contains exactly one region.
    if path.isfile(roi_path):
        return read_roi.read_roi_file(roi_path)
    print("rois couldn't be found for:", stem)
def calc_avg_pixel_value(image_paths, output_file=None):
    """
    Computes the dataset-wide mean pixel value per channel.

    :param image_paths: list of image paths; all images are assumed to share
        the channel count of the first one.
    :param output_file: optional text file the per-channel means are written
        to (one line per channel).
    :return: list of per-channel mean values, channel order preserved.
    """
    image = load_image(image_paths[0])
    channel_n = image.shape[2]
    channel_means = {i: [] for i in range(channel_n)}
    for image_path in image_paths:
        image = load_image(image_path)
        for i in range(channel_n):
            channel_means[i].append(image[:, :, i].mean())
    # Mean of per-image means for each channel.
    means = [np.array(channel_means[i]).mean() for i in range(channel_n)]
    if output_file:
        with open(output_file, 'w') as f:
            for i, mean in enumerate(means):
                # Bug fix: the original omitted the newline, running all
                # channel lines together (and ignored the loop variable).
                f.write('Channel {} mean: {}\n'.format(i + 1, mean))
    return means
def copy_annotated(input_folder, output_folder):
    """
    Collects annotated samples into a timestamped folder.

    Finds every ROI archive ('.zip') or single ROI ('.roi') in
    `input_folder`, pairs each with its same-named '.png' image, and copies
    both into a freshly created subfolder of `output_folder` named after the
    current datetime.

    :param input_folder: folder holding images and their ROI files.
    :param output_folder: parent folder for the timestamped copy.
    :return: path of the newly created timestamped folder.
    """
    # ROI files first, then the image that shares each ROI's stem.
    roi_paths = glob(join(input_folder, '*.zip')) + \
        glob(join(input_folder, '*.roi'))
    image_paths = [rp.split('.')[0] + '.png' for rp in roi_paths]
    stamp = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    target = join(output_folder, stamp)
    if not isdir(target):
        mkdir(target)
    for roi_path, image_path in zip(roi_paths, image_paths):
        copyfile(roi_path, join(target, basename(roi_path)))
        copyfile(image_path, join(target, basename(image_path)))
    return target
class CellTransformData(utils.Dataset):
    """Mask R-CNN dataset that loads cell images with ImageJ ROI
    annotations, converts the ROI polygons to per-instance boolean masks,
    and splits the samples into train/val folders on disk."""

    def load_cell(self, dataset_dir):
        """Load the cell dataset resize the images and rois to a
        uniform dimension.
        dataset_dir: Root directory of the dataset.
        """
        # Add classes. We have only one class to add.
        self.add_class("hulab", 1, "cell")
        image_names = [file_name for file_name in listdir(dataset_dir) if
                       'png' in file_name]
        # todo: revisit this naming convention, come up w/ a uniform system
        # so the id assigned in the utils.Dataset class follows the same
        # order as the numbers in the image names
        # image_names = [int(name.split('.png')[0]) for name in image_names]
        # image_names.sort()
        # image_names = [str(n) + '.png' for n in image_names]
        # Add images
        for image_name in image_names:
            id = image_name.split('.')[0]
            # Get the x, y coordinates of points of the polygons that make up
            # the outline of each object instance.
            # RoI format (as returned by read_roi):
            # roi = {
            #     region1 : {
            #        'type' = 'freehand'
            #        'x' = [...]
            #        'y' = [...]
            #        'n' = 412
            #        'width' = 0  # always 0, not informative
            #        'name' = the region name (same as the key)
            #        'position' = 0  # always 0, not informative
            #        }
            #     ...more regions
            # }
            rois = read_roi_or_roiset(join(dataset_dir, id))
            im_path = join(dataset_dir, image_name)
            # load_mask() needs the image size to convert polygons to masks.
            # Unfortunately, RoI doesn't include it, so we must read
            # the image. This is only manageable since the dataset is tiny.
            h, w = imread(im_path).shape[:2]
            self.add_image(
                "hulab",
                image_id=id,  # use file name as a unique image id
                path=im_path,
                width=w, height=h,
                polygons=rois)

    def polygon_to_mask(self, image_id):
        """Generate instance masks for an image.
        Returns:
        masks: A bool array of shape [height, width, instance count] with
            one mask per instance.
        class_ids: a 1D array of class IDs of the instance masks.
        """
        # If not a confonc dataset image, delegate to parent class.
        image_info = self.image_info[image_id]
        if image_info["source"] != "hulab":
            return super(self.__class__, self).load_mask(image_id)
        # Convert polygons to a bitmap mask of shape
        # [height, width, instance_count]
        info = self.image_info[image_id]
        # /proc/sys/vm/overcommit_memory has to be "1" for larger arrays
        # Bug fix: np.bool was removed in NumPy 1.24; the builtin bool is
        # the equivalent dtype.
        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                        dtype=bool)
        for i, (key, vals) in enumerate(info["polygons"].items()):
            # Get indexes of pixels inside the polygon and set them to 1
            rr, cc = skimage.draw.polygon(vals['y'], vals['x'])
            mask[rr, cc, i] = True
            del (rr, cc)
        gc.collect()
        # Return mask, and array of class IDs of each instance. Since we have
        # one class ID only, we return an array of 1s
        return mask, np.ones([mask.shape[-1]], dtype=np.int32)

    def split_to_trainval(self, dataset_dir):
        """Writes each sample (image plus one PNG per instance mask) into a
        'train/<id>/' or 'val/<id>/' subfolder (70/30 split with a fixed
        seed) and returns the dataset-wide per-channel mean pixel value."""
        # set up the output directory's subfolders
        [mkdir(join(dataset_dir, folder)) for folder in ['train', 'val'] if
         folder not in listdir(dataset_dir)]
        # Fixed seed keeps the train/val assignment reproducible.
        np.random.seed(seed=54646)
        train_set = np.random.choice(self.image_ids,
                                     int(len(self.image_ids) * 0.7),
                                     replace=False)
        means = []
        for image_id_ in self.image_ids:
            info = self.image_info[image_id_]
            im = self.load_image(image_id_)
            means.append(im.mean(axis=(0, 1)))
            mask = self.polygon_to_mask(image_id_)[0]
            id_ = info['id']
            if image_id_ in train_set:
                output_dir = join(dataset_dir, 'train', str(id_))
            else:
                output_dir = join(dataset_dir, 'val', str(id_))
            if not isdir(output_dir):
                mkdir(output_dir)
            # Save each instance mask as a 1-bit PNG.
            for m in range(mask.shape[2]):
                mask_ = Image.fromarray((mask[:, :, m] * 255).astype(np.uint8),
                                        mode='L')
                mask_ = mask_.convert(mode='1')
                mask_.save(join(output_dir, str(id_) + '_mask_' + str(m) \
                           + '.png'))
            copyfile(info['path'], join(output_dir, str(id_) + '.png'))
        means = np.array(means).mean(axis=0)
        print('Average pixel value(s) is(/are): {}'.format(means))
        return means
if __name__ == '__main__':
# load the dataset
dataset_path = join(dataset_dir, '2020_11_22_02_55_03')
ds = CellTransformData()
ds.load_cell(dataset_path)
ds.prepare()
ds.split_to_trainval(dataset_path)
show_example = False
if show_example:
# test
image_id = 12
image = ds.load_image(image_id)
mask, class_ids = ds.polygon_to_mask(image_id)
original_shape = image.shape
# Resize
image, | |
# gh_stars: 1-10
from __future__ import print_function, division, absolute_import
import numpy as np
import scipy
from scipy.misc import imsave, imread, imresize
from sklearn.feature_extraction.image import reconstruct_from_patches_2d, extract_patches_2d
from scipy.ndimage.filters import gaussian_filter
from skimage.util.shape import view_as_windows
from keras import backend as K
import os
import time
#import cv2
'''
_image_scale_multiplier is a special variable which is used to alter image size.
The default image size is 32x32. If a true upscaling model is used, then the input image size is 16x16,
which does not offer adequate training samples.
'''
# Global scale factor applied to all patch/image dimensions below.
_image_scale_multiplier = 1
# Side length of the (square) resized source images, and the stride of the
# HR patch sampler.
img_size = 256 * _image_scale_multiplier
stride = 16 * _image_scale_multiplier
# The strided sub-sampling must tile the image an integral number of times.
assert (img_size ** 2) % (stride ** 2) == 0, "Number of images generated from strided subsample of the image needs to be \n" \
                                             "a positive integer. Change stride such that : \n" \
                                             "(img_size ** 2) / (stride ** 2) is a positive integer."
# Source folders for training and validation images.
input_path = r"input_images/"
validation_path = r"val_images/"
validation_set5_path = validation_path + "set5/"
validation_set14_path = validation_path + "set14/"
# Destination folders for the generated patch pairs (created on import).
base_dataset_dir = os.path.expanduser("~") + "/Image Super Resolution Dataset/"
output_path = base_dataset_dir + "train_images/train/"
validation_output_path = base_dataset_dir + r"train_images/validation/"
if not os.path.exists(output_path):
    os.makedirs(output_path)
def transform_images(directory, output_directory, scaling_factor=2, max_nb_images=-1, true_upscale=False):
    """
    Cuts every image in `directory` into HR/LR training patch pairs.

    Each image is resized to img_size x img_size, sharpened, and sampled
    with the module-level stride into HR patches. For every HR patch a
    Gaussian-blurred, bicubic-downscaled LR counterpart is produced (and
    re-upscaled to HR size unless `true_upscale` is set). HR patches are
    saved under '<output_directory>/y/', LR patches under
    '<output_directory>/X/'.

    :param directory: source image folder.
    :param output_directory: destination root; 'X/' and 'y/' are created.
    :param scaling_factor: super-resolution scale between LR and HR patches.
    :param max_nb_images: stop after this many images (-1 means all).
    :param true_upscale: if True, LR patches are kept at the small size.
    """
    index = 1
    if not os.path.exists(output_directory + "X/"):
        os.makedirs(output_directory + "X/")
    if not os.path.exists(output_directory + "y/"):
        os.makedirs(output_directory + "y/")
    # For each image in input_images directory
    nb_images = len([name for name in os.listdir(directory)])
    if max_nb_images != -1:
        # Bug fix: the bound must be validated when a bound was actually
        # given (the original asserted in the unbounded branch, where the
        # check was trivially true).
        assert max_nb_images <= nb_images, "Max number of images must be less than number of images in path"
        print("Transforming %d images." % max_nb_images)
    else:
        print("Transforming %d images." % (nb_images))
    if nb_images == 0:
        print("Extract the training images or images from imageset_91.zip (found in the releases of the project) "
              "into a directory with the name 'input_images'")
        print("Extract the validation images or images from set5_validation.zip (found in the releases of the project) "
              "into a directory with the name 'val_images'")
        exit()
    for file in os.listdir(directory):
        img = imread(directory + file, mode='RGB')
        # Resize to 256 x 256
        img = imresize(img, (img_size, img_size))
        img = scipy.misc.imfilter(img, ftype='sharpen')
        # Create patches
        hr_patch_size = (16 * scaling_factor * _image_scale_multiplier)
        nb_hr_images = (img_size ** 2) // (stride ** 2)
        hr_samples = np.empty((nb_hr_images, hr_patch_size, hr_patch_size, 3))
        image_subsample_iterator = subimage_generator(img, stride, hr_patch_size, nb_hr_images)
        stride_range = np.sqrt(nb_hr_images).astype(int)
        i = 0
        for j in range(stride_range):
            for k in range(stride_range):
                hr_samples[i, :, :, :] = next(image_subsample_iterator)
                i += 1
        lr_patch_size = 16 * _image_scale_multiplier
        t1 = time.time()
        # Create nb_hr_images 'X' and 'Y' sub-images of size hr_patch_size for each patch
        for i in range(nb_hr_images):
            ip = hr_samples[i]
            # Save ground truth image X
            imsave(output_directory + "/y/" + "%d_%d.png" % (index, i + 1), ip)
            # Apply Gaussian Blur to Y
            op = gaussian_filter(ip, sigma=0.5)
            # Subsample by scaling factor to Y
            op = imresize(op, (lr_patch_size, lr_patch_size), interp='bicubic')
            if not true_upscale:
                # Upscale by scaling factor to Y
                op = imresize(op, (hr_patch_size, hr_patch_size), interp='bicubic')
            # Save Y
            imsave(output_directory + "/X/" + "%d_%d.png" % (index, i+1), op)
        print("Finished image %d in time %0.2f seconds. (%s)" % (index, time.time() - t1, file))
        index += 1
        # Bug fix: '>=' stopped one image early, because index is 1-based
        # and already incremented past the image just processed.
        if max_nb_images > 0 and index > max_nb_images:
            print("Transformed maximum number of images. ")
            break
    print("Images transformed. Saved at directory : %s" % (output_directory))
def image_count():
    """Number of generated LR training patches currently on disk."""
    return len(os.listdir(output_path + "X/"))
def val_image_count():
    """Number of generated LR validation patches currently on disk."""
    return len(os.listdir(validation_output_path + "X/"))
def subimage_generator(img, stride, patch_size, nb_hr_images):
    """
    Yields patch_size x patch_size windows of `img`, scanning an
    img_size x img_size grid row-major with the given stride. The scan is
    wrapped in an outer repeat of nb_hr_images so the generator never runs
    dry for a consumer that draws exactly nb_hr_images patches via next().
    """
    for _ in range(nb_hr_images):
        for row in range(0, img_size - patch_size, stride):
            for col in range(0, img_size - patch_size, stride):
                yield img[row:row + patch_size, col:col + patch_size, :]
def subimage_patch(img, stride, patch_size, nb_hr_images):
    """
    Yields patch_size x patch_size windows of `img` on a strided grid,
    skipping windows that would fall outside the image.

    NOTE(review): `x` iterates over heightini but is compared against
    widthini (and used as the column index), while `y` does the reverse —
    for non-square images the bounds look swapped. Behaviour is only
    guaranteed for square images; confirm intent before relying on this
    with rectangular input.
    """
    heightini, widthini = img.shape[:2]
    for y in range(0, widthini , stride):
        for x in range(0, heightini , stride):
            # Skip partially out-of-bounds windows.
            if (x + patch_size)<widthini and (y + patch_size) <heightini:
                subimage = img[y : y + patch_size, x : x + patch_size, :]
                yield subimage
def make_patches(x, scale, patch_size, upscale=True, verbose=1):
    """
    Extracts every patch_size x patch_size window of image `x`
    (shape: rows x cols x channels) on a dense, stride-1 grid.

    `scale`, `upscale` and `verbose` are accepted for interface
    compatibility with the sibling make_patches* helpers but are unused
    here (the image is passed through at its original resolution).
    """
    rows, cols = x.shape[:2]
    img_height = cols * scale
    img_width = rows * scale
    return extract_patches_2d(x, (patch_size, patch_size))
def make_patchesOrig(x, scale, patch_size, upscale=False, verbose=1):
    '''Extract dense patch_size x patch_size patches from image `x`
    (shape: rows x cols x channels), optionally upscaling by `scale` first.

    NOTE(review): extract_patches_2dv2 is not among this module's visible
    imports — presumably defined elsewhere in the project, or possibly a
    typo for sklearn's extract_patches_2d; confirm before use.'''
    height, width = x.shape[:2]
    if upscale: x = imresize(x, (height * scale, width * scale))
    patches = extract_patches_2dv2(x, (patch_size, patch_size))
    return patches
def make_patchesStep(x, scale, patch_size, upscale=False,extraction_step=24, verbose=1):
    '''Extract patch_size x patch_size patches from image `x`
    (shape: rows x cols x channels) with a configurable extraction stride,
    optionally upscaling by `scale` first.

    NOTE(review): extract_patches_Step is not among this module's visible
    imports — presumably defined elsewhere in the project; confirm.'''
    height, width = x.shape[:2]
    if upscale: x = imresize(x, (height * scale, width * scale))
    patches = extract_patches_Step(x, (patch_size, patch_size),extraction_step)
    return patches
def combine_patches(in_patches, out_shape, scale):
    """
    Reassembles overlapping patches into a single image of `out_shape`,
    averaging overlapping regions. `scale` is accepted for interface
    compatibility but unused.
    """
    print("wpatch")
    return reconstruct_from_patches_2d(in_patches, out_shape)
from itertools import product
def reconstruct_from_patches_2dloc(patches, image_size):
    """Reconstruct the image from all of its patches.

    Patches are assumed to overlap and the image is constructed by filling in
    the patches from left to right, top to bottom, averaging the overlapping
    regions.

    Parameters
    ----------
    patches : array, shape = (n_patches, patch_height, patch_width) or
        (n_patches, patch_height, patch_width, n_channels)
        The complete set of patches. If the patches contain colour information,
        channels are indexed along the last dimension: RGB patches would
        have `n_channels=3`.
    image_size : tuple of ints (image_height, image_width) or
        (image_height, image_width, n_channels)
        the size of the image that will be reconstructed

    Returns
    -------
    image : array, shape = image_size
        the reconstructed image
    """
    # Bug fix: the original kept an unreachable second implementation (an
    # ogrid-based overlap-count map) *after* the first ``return img``; that
    # dead code has been removed. Behavior of the reachable path is unchanged.
    i_h, i_w = image_size[:2]
    p_h, p_w = patches.shape[1:3]
    img = np.zeros(image_size)
    # Number of patch origins along each axis (one per fully-contained patch).
    n_h = i_h - p_h + 1
    n_w = i_w - p_w + 1
    # Accumulate every patch at its origin (row-major patch order).
    for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
        img[i:i + p_h, j:j + p_w] += p
    for i in range(i_h):
        for j in range(i_w):
            # Divide by the number of patches overlapping pixel (i, j).
            img[i, j] /= float(min(i + 1, p_h, i_h - i) *
                               min(j + 1, p_w, i_w - j))
    return img
def subimage_build_patch_global(img, stride, patch_size, nb_hr_images):
    """Collect ``patch_size`` x ``patch_size`` sub-images of ``img`` on a ``stride`` grid.

    :param img: image array with 3 channels.
    :param stride: step in pixels between consecutive grid positions.
    :param patch_size: side length of the square patches.
    :param nb_hr_images: unused; kept for interface compatibility.
    :return: float array of shape (n_patches, patch_size, patch_size, 3).
    """
    heightini, widthini = img.shape[:2]
    print("///////////------")
    print(img.shape)
    # NOTE(review): the outer loop runs ``y`` over the image *width* while
    # ``y`` indexes rows in the slice (and vice versa for ``x``); the bounds
    # check filters invalid positions, but non-square images may skip valid
    # patches. subimage_combine_patches_global uses the identical traversal,
    # so the two stay index-aligned — confirm before changing either.
    # Improvement: one traversal replaces the original count-then-fill
    # duplicate double loop; iteration order and filtering are unchanged.
    coords = [
        (y, x)
        for y in range(0, widthini, stride)
        for x in range(0, heightini, stride)
        if (x + patch_size) < widthini and (y + patch_size) < heightini
    ]
    subimages = np.empty((len(coords), patch_size, patch_size, 3))
    for j, (y, x) in enumerate(coords):
        subimages[j, :, :, :] = img[y:y + patch_size, x:x + patch_size, :]
    print(len(coords))
    return subimages
def subimage_combine_patches_global(imgtrue , patches, stride, patch_size, scale):
    """Paste ``patches`` back onto a bicubically upscaled copy of ``imgtrue``.

    Patches overwrite (not blend with) the upscaled pixels at the same grid
    positions used by ``subimage_build_patch_global``; the two functions must
    keep identical traversal order so patch index ``j`` lines up.

    :param imgtrue: source image before upscaling.
    :param patches: array of patches, shape (n, patch_size, patch_size, 3).
    :param stride: step in pixels between consecutive grid positions.
    :param patch_size: side length of the square patches.
    :param scale: upscale factor applied to ``imgtrue`` before pasting.
    :return: the upscaled image with patches pasted in.
    """
    heighttrue, widthtrue = imgtrue.shape[:2]
    img = imresize(imgtrue, (heighttrue*scale, widthtrue*scale), interp='bicubic')
    heightini, widthini = img.shape[:2]
    print("///////////------")
    print(img.shape)
    print(patches.shape)
    j=0
    # NOTE(review): the outer loop runs ``y`` over the image *width* while
    # ``y`` indexes rows in the slice below (and vice versa for ``x``). The
    # bounds check filters out-of-range positions, but for non-square images
    # some grid positions are skipped — the same quirk exists in
    # subimage_build_patch_global; confirm intended before changing.
    for y in range(0, widthini , stride):
    #for y in range(0, heightini - patch_size, stride):
        for x in range(0, heightini , stride):
            if (x + patch_size)<widthini and (y + patch_size) <heightini:
                #subimages[j, :, :, :] = img[y : y + patch_size, x : x + patch_size, :]
                img[y : y + patch_size, x : x + patch_size, :]=patches[j, :, :, :]
                j += 1
    print(j)
    return img
def image_generator(directory, scale_factor=2, target_shape=None, channels=3, small_train_images=False, shuffle=True,
batch_size=32, seed=None):
if not target_shape:
if small_train_images:
if K.image_dim_ordering() == "th":
image_shape = (channels, 16 * _image_scale_multiplier, 16 * _image_scale_multiplier)
y_image_shape = (channels, 16 * scale_factor * | |
import logging
import pickle
from typing import Dict, List, Optional, Set, Union
import click
import hail as hl
from gnomad.resources.grch38.gnomad import (
COHORTS_WITH_POP_STORED_AS_SUBPOP,
HGDP_POPS,
TGP_POPS,
TGP_POP_NAMES,
POPS,
SEXES,
SUBSETS,
)
from gnomad.sample_qc.ancestry import POP_NAMES
from gnomad.utils.filtering import remove_fields_from_constant
from gnomad.utils.vcf import (
add_as_info_dict,
adjust_vcf_incompatible_types,
ALLELE_TYPE_FIELDS,
AS_FIELDS,
AS_VQSR_FIELDS,
create_label_groups,
ENTRIES,
FAF_POPS,
FORMAT_DICT,
HISTS,
INFO_DICT,
IN_SILICO_ANNOTATIONS_INFO_DICT,
make_info_dict,
make_vcf_filter_dict,
REGION_FLAG_FIELDS,
RF_FIELDS,
SITE_FIELDS,
VQSR_FIELDS,
)
from gnomad.variant_qc.pipeline import INBREEDING_COEFF_HARD_CUTOFF
from gnomad.utils.annotations import region_flag_expr
from joint_calling import utils, _version, resources
from joint_calling.utils import get_validation_callback
logger = logging.getLogger(__file__)
logger.setLevel('INFO')
# --- Module-level tailoring of the gnomad.utils.vcf field constants to this
# --- callset. Each step removes fields that do not exist here or adds ones
# --- that only this release carries.
# Add new site fields
# NEW_SITE_FIELDS = [
#     'monoallelic',
#     'transmitted_singleton',
# ]
# SITE_FIELDS.extend(NEW_SITE_FIELDS)
# Remove original alleles for containing non-releasable alleles
MISSING_ALLELE_TYPE_FIELDS = ['original_alleles', 'has_star']
ALLELE_TYPE_FIELDS = remove_fields_from_constant(
    ALLELE_TYPE_FIELDS, MISSING_ALLELE_TYPE_FIELDS
)
# Remove SB (not included in VCF) and SOR (doesn't exist in v3.1) from site fields
MISSING_SITES_FIELDS = ['SOR', 'SB']
SITE_FIELDS = remove_fields_from_constant(SITE_FIELDS, MISSING_SITES_FIELDS)
# Remove AS_VarDP from AS fields
MISSING_AS_FIELDS = ['AS_VarDP']
AS_FIELDS = remove_fields_from_constant(AS_FIELDS, MISSING_AS_FIELDS)
# Make subset list (used in properly filling out VCF header descriptions and naming VCF info fields)
SUBSET_LIST_FOR_VCF = SUBSETS.copy()
# '' stands for the full (un-subsetted) release: INFO fields get no prefix.
SUBSET_LIST_FOR_VCF.append('')
# Remove cohorts that have subpop frequencies stored as pop frequencies
# Inclusion of these subsets significantly increases the size of storage in the VCFs because of the many subpops
SUBSET_LIST_FOR_VCF = remove_fields_from_constant(
    SUBSET_LIST_FOR_VCF, COHORTS_WITH_POP_STORED_AS_SUBPOP
)
# Remove decoy from region field flag
MISSING_REGION_FIELDS = ['decoy']
REGION_FLAG_FIELDS = remove_fields_from_constant(
    REGION_FLAG_FIELDS, MISSING_REGION_FIELDS
)
# All missing fields to remove from vcf info dict
MISSING_INFO_FIELDS = (
    MISSING_ALLELE_TYPE_FIELDS
    + MISSING_AS_FIELDS
    + MISSING_REGION_FIELDS
    + MISSING_SITES_FIELDS
    + RF_FIELDS
)
# Rebuild POPS / FAF_POPS as {pop_code: descriptive_name} dicts for headers.
POPS = {pop: POP_NAMES[pop] for pop in POPS}
FAF_POPS = {pop: POP_NAMES[pop] for pop in FAF_POPS}
# Get HGDP + TGP(KG) subset pop names
HGDP_TGP_KEEP_POPS = TGP_POPS + HGDP_POPS
HGDP_TGP_POPS = {}
for pop in HGDP_TGP_KEEP_POPS:
    if pop in TGP_POP_NAMES:
        HGDP_TGP_POPS[pop] = TGP_POP_NAMES[pop]
    else:
        # HGDP pops have no curated display name; fall back to Capitalized code.
        HGDP_TGP_POPS[pop] = pop.capitalize()
# Used for HGDP + TGP subset MT VCF output only
FORMAT_DICT.update(
    {
        'RGQ': {
            'Number': '1',
            'Type': 'Integer',
            'Description': 'Unconditional reference genotype confidence, encoded as a phred quality -10*log10 p(genotype call is wrong)',
        }
    }
)
@click.command()
@click.version_option(_version.__version__)
@click.option(
    '--mt',
    'mt_path',
    required=True,
    callback=get_validation_callback(ext='mt', must_exist=True),
    help='path to the raw sparse Matrix Table generated by combine_gvcfs.py',
)
@click.option(
    '--out-ht',
    'out_ht_path',
    required=True,
    callback=get_validation_callback(ext='ht'),
    help='path to write Hail Table',
)
# NOTE(review): this option has no help text, unlike its siblings.
@click.option(
    '--out-vcf-header-txt',
    'out_vcf_header_txt_path',
    required=True,
    callback=get_validation_callback(ext='txt'),
)
@click.option(
    '--public-subset',
    'is_public_subset',
    is_flag=True,
    help='create a subset',
)
@click.option(
    '--test',
    'is_test',
    is_flag=True,
    required=True,
    help='Create release files using only 2 partitions on chr20, chrX, '
    'and chrY for testing purposes'
)
@click.option(
    '--local-tmp-dir',
    'local_tmp_dir',
    help='local directory for temporary files and Hail logs (must be local).',
)
def main(
    mt_path: str,
    out_ht_path: str,
    out_vcf_header_txt_path: str,
    is_test: bool,
    is_public_subset: bool,
    local_tmp_dir: str,
):
    """Prepare the VCF-ready release HT and its pickled VCF header dict."""
    utils.init_hail(__file__, local_tmp_dir)
    mt = hl.read_matrix_table(mt_path)
    ht = mt.rows()
    if is_test:
        # Restrict to a small slice of chr20/chrX/chrY for fast test runs.
        ht = filter_to_test(ht)
    # Setup of parameters and Table/MatrixTable
    parameter_dict = _build_parameter_dict(ht, is_public_subset)
    vcf_ht = _prepare_vcf_ht(ht, is_subset=is_public_subset)
    if is_public_subset:
        logger.info(
            'Loading subset MT and annotating with the prepared VCF HT for VCF export...'
        )
        entries_ht = mt.select_rows().select_entries(*ENTRIES)
        # NOTE(review): `ht` is a row Table (mt.rows()) and Hail Tables have
        # no `annotate_rows` method — this line likely should call
        # `entries_ht.annotate_rows(...)`; confirm against the hail API.
        vcf_ht = ht.annotate_rows(**vcf_ht[entries_ht.row_key])
    logger.info('Cleaning up the VCF HT for final export...')
    vcf_ht = _cleanup_ht_for_vcf_export(vcf_ht)
    vcf_ht = vcf_ht.checkpoint(out_ht_path, overwrite=True)
    vcf_ht.describe()
    _prepare_vcf_header_dict(
        ht=ht,
        vcf_ht=vcf_ht,
        vcf_header_txt_path=out_vcf_header_txt_path,
        parameter_dict=parameter_dict,
        is_public_subset=is_public_subset,
    )
def _prepare_vcf_header_dict(
    ht,
    vcf_ht,
    vcf_header_txt_path,
    parameter_dict,
    is_public_subset,
):
    """Assemble the VCF header dict and pickle it to ``vcf_header_txt_path``.

    :param ht: release HT carrying the ``inbreeding_coeff_cutoff`` global.
    :param vcf_ht: VCF-ready HT the header is derived from.
    :param vcf_header_txt_path: destination path for the pickled header dict.
    :param parameter_dict: dict with 'subsets', 'pops', 'filtering_model_field'.
    :param is_public_subset: when False the FORMAT section is dropped
        (sites-only export carries no genotype fields).
    """
    logger.info('Making histogram bin edges...')
    params = parameter_dict
    header_dict = prepare_vcf_header_dict(
        vcf_ht,
        subset_list=params['subsets'],
        pops=params['pops'],
        filtering_model_field=params['filtering_model_field'],
        inbreeding_coeff_cutoff=ht.inbreeding_coeff_cutoff,
    )
    if not is_public_subset:
        # Sites-only release: no genotype entries, so no FORMAT header lines.
        del header_dict['format']
    logger.info('Saving header dict to pickle...')
    with hl.hadoop_open(vcf_header_txt_path, 'wb') as pickle_out:
        pickle.dump(header_dict, pickle_out, protocol=pickle.HIGHEST_PROTOCOL)
def populate_subset_info_dict(
    subset: str,
    description_text: str,
    pops: Dict[str, str] = POPS,
    faf_pops: Dict[str, str] = FAF_POPS,
    sexes: List[str] = SEXES,
    label_delimiter: str = '_',
) -> Dict[str, Dict[str, str]]:
    """
    Call `make_info_dict` to populate INFO dictionary with specific sexes, population names, and filtering allele
    frequency (faf) pops for the requested subset.
    :param subset: Sample subset in dataset.
    :param description_text: Text describing the sample subset that should be added to the INFO description.
    :param pops: Dict of sample global population names for gnomAD genomes. Default is POPS.
    :param faf_pops: Dict with faf pop names (keys) and descriptions (values). Default is FAF_POPS.
    :param sexes: gnomAD sample sexes used in VCF export. Default is SEXES.
    :param label_delimiter: String to use as delimiter when making group label combinations. Default is '_'.
    :return: Dictionary containing Subset specific INFO header fields.
    """
    vcf_info_dict = {}
    # Idiom fix + hoist: this was `True if 'gnomad' in subset else False`,
    # duplicated in both loops. Only gnomAD subsets put the subset name
    # before the metric name.
    prefix_before_metric = 'gnomad' in subset
    # Filtering-allele-frequency (faf) fields per pop/sex label group.
    faf_label_groups = create_label_groups(pops=faf_pops, sexes=sexes)
    for label_group in faf_label_groups:
        vcf_info_dict.update(
            make_info_dict(
                prefix=subset,
                prefix_before_metric=prefix_before_metric,
                pop_names=faf_pops,
                label_groups=label_group,
                label_delimiter=label_delimiter,
                faf=True,
                description_text=description_text,
            )
        )
    # AC/AN/AF/nhomalt fields per pop/sex label group.
    label_groups = create_label_groups(pops=pops, sexes=sexes)
    for label_group in label_groups:
        vcf_info_dict.update(
            make_info_dict(
                prefix=subset,
                prefix_before_metric=prefix_before_metric,
                pop_names=pops,
                label_groups=label_group,
                label_delimiter=label_delimiter,
                description_text=description_text,
            )
        )
    # Add popmax to info dict
    vcf_info_dict.update(
        make_info_dict(
            prefix=subset,
            label_delimiter=label_delimiter,
            pop_names=pops,
            popmax=True,
            description_text=description_text,
        )
    )
    return vcf_info_dict
def populate_info_dict(
    info_dict: Dict[str, Dict[str, str]] = INFO_DICT,
    subset_list: List[str] = SUBSETS,
    subset_pops: Dict[str, str] = POPS,
    gnomad_pops: Dict[str, str] = POPS,
    faf_pops: Dict[str, str] = FAF_POPS,
    sexes: List[str] = SEXES,
    in_silico_dict: Dict[str, Dict[str, str]] = IN_SILICO_ANNOTATIONS_INFO_DICT,
    label_delimiter: str = '_',
    bin_edges: Dict[str, str] = None,
    age_hist_data: str = None,
) -> Dict[str, Dict[str, str]]:
    """
    Build the full INFO dictionary used during VCF export.
    Creates INFO fields for: age histograms (bin freq, n_smaller, n_larger for
    het and hom-var carriers); popmax AC/AN/AF/nhomalt and popmax population;
    AC/AN/AF/nhomalt per population/sex combination (adj and raw); filtering
    allele frequency (faf) annotations; and variant histograms (hist_bin_freq
    per histogram, hist_n_larger for DP histograms).
    :param info_dict: INFO dict to be populated.
    :param subset_list: List of sample subsets in dataset. Default is SUBSETS.
    :param subset_pops: Dict of sample global population names used for every subset
        except 'gnomad', which uses `gnomad_pops`. Default is POPS.
    :param gnomad_pops: Dict of sample global population names for gnomAD genomes. Default is POPS.
    :param faf_pops: Dict with faf pop names (keys) and descriptions (values). Default is FAF_POPS.
    :param sexes: gnomAD sample sexes used in VCF export. Default is SEXES.
    :param in_silico_dict: Dictionary of in silico predictor score descriptions.
    :param label_delimiter: String to use as delimiter when making group label combinations.
    :param bin_edges: Dictionary of variant annotation histograms and their associated bin edges.
    :param age_hist_data: Pipe-delimited string of age histograms, from `get_age_distributions`.
    :return: Updated INFO dictionary for VCF export.
    """
    result = info_dict.copy()
    # Drop fields that do not exist in this callset.
    for missing_field in MISSING_INFO_FIELDS:
        result.pop(missing_field, None)
    # Derive AS_* entries (including AS VQSR fields) from the base dict.
    result.update(
        add_as_info_dict(info_dict=info_dict, as_fields=AS_FIELDS + AS_VQSR_FIELDS)
    )
    for subset in subset_list:
        if subset == 'gnomad':
            subset_description = ' in gnomAD'
            subset_pop_names = gnomad_pops
        else:
            subset_description = '' if subset == '' else f' in {subset} subset'
            subset_pop_names = subset_pops
        result.update(
            populate_subset_info_dict(
                subset=subset,
                description_text=subset_description,
                pops=subset_pop_names,
                faf_pops=faf_pops,
                sexes=sexes,
                label_delimiter=label_delimiter,
            )
        )
    if age_hist_data:
        age_hist_data = '|'.join(str(x) for x in age_hist_data)
    # Popmax + age-histogram fields for the full release (empty prefix).
    result.update(
        make_info_dict(
            prefix='',
            label_delimiter=label_delimiter,
            bin_edges=bin_edges,
            popmax=True,
            age_hist_data=age_hist_data,
        )
    )
    # In silico predictor descriptions go in verbatim.
    result.update(in_silico_dict)
    return result
def make_info_expr(
t: Union[hl.MatrixTable, hl.Table], hist_prefix: str = '',
) -> Dict[str, hl.expr.Expression]:
"""
Make Hail expression for variant annotations to be included in VCF INFO field.
:param t: Table/MatrixTable containing variant annotations to be reformatted for VCF export.
:param hist_prefix: Prefix to use for histograms.
:return: Dictionary containing Hail expressions for relevant INFO annotations.
:rtype: Dict[str, hl.expr.Expression]
"""
vcf_info_dict = {}
# Add site-level annotations to vcf_info_dict
for field in SITE_FIELDS:
vcf_info_dict[field] = t['release_ht_info'][f'{field}']
# Add AS annotations to info dict
for field in AS_FIELDS:
vcf_info_dict[field] = t['release_ht_info'][f'{field}']
for field in VQSR_FIELDS:
vcf_info_dict[field] = t['vqsr'][f'{field}']
# Add region_flag and allele_info fields to info dict
for field in ALLELE_TYPE_FIELDS:
vcf_info_dict[field] = t['allele_info'][f'{field}']
for field in REGION_FLAG_FIELDS:
vcf_info_dict[field] = t['region_flag'][f'{field}']
# Add underscore to hist_prefix if it isn't empty
if hist_prefix != '':
hist_prefix += '_'
# Histograms to export are:
# gq_hist_alt, gq_hist_all, dp_hist_alt, dp_hist_all, ab_hist_alt
# We previously dropped:
# _n_smaller for all hists
# _bin_edges for all hists
# _n_larger for all hists EXCEPT DP hists
for hist in HISTS:
hist_type = f'{hist_prefix}qual_hists'
hist_dict = {
f'{hist}_bin_freq': hl.delimit(t[hist_type][hist].bin_freq, delimiter='|'),
}
vcf_info_dict.update(hist_dict)
if 'dp' in hist:
vcf_info_dict.update({f'{hist}_n_larger': t[hist_type][hist].n_larger},)
# Add in silico annotations to info dict
vcf_info_dict['cadd_raw_score'] = t['cadd']['raw_score']
vcf_info_dict['cadd_phred'] | |
# voltha/coordinator.py (from voltha_doc)
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Consul-based coordinator services """
from consul import ConsulException
from consul.twisted import Consul
from requests import ConnectionError
from structlog import get_logger
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
from twisted.internet.error import DNSLookupError
from zope.interface import implementer
from leader import Leader
from common.utils.asleep import asleep
from common.utils.message_queue import MessageQueue
from voltha.registry import IComponent
from worker import Worker
from simplejson import dumps, loads
from common.utils.deferred_utils import DeferredWithTimeout, TimeOutError
log = get_logger()
class StaleMembershipEntryException(Exception):
    """Raised when a consul membership record belongs to a stale session."""
    pass
@implementer(IComponent)
class Coordinator(object):
"""
An app shall instantiate only one Coordinator (singleton).
A single instance of this object shall take care of all external
with consul, and via consul, all coordination activities with its
clustered peers. Roles include:
- registering an ephemeral membership entry (k/v record) in consul
- participating in a symmetric leader election, and potentially assuming
the leader's role. What leadership entails is not a concern for the
coordination, it simply instantiates (and shuts down) a leader class
when it gains (or looses) leadership.
"""
CONNECT_RETRY_INTERVAL_SEC = 1
RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]
# Public methods:
def __init__(self,
internal_host_address,
external_host_address,
instance_id,
rest_port,
config,
consul='localhost:8500',
container_name_regex='^.*\.([0-9]+)\..*$'):
log.info('initializing-coordinator')
self.config = config['coordinator']
self.worker_config = config['worker']
self.leader_config = config['leader']
self.membership_watch_relatch_delay = config.get(
'membership_watch_relatch_delay', 0.1)
self.tracking_loop_delay = self.config.get(
'tracking_loop_delay', 1)
self.session_renewal_timeout = self.config.get(
'session_renewal_timeout', 5)
self.session_renewal_loop_delay = self.config.get(
'session_renewal_loop_delay', 3)
self.membership_maintenance_loop_delay = self.config.get(
'membership_maintenance_loop_delay', 5)
self.session_time_to_live = self.config.get(
'session_time_to_live', 10)
self.prefix = self.config.get('voltha_kv_prefix', 'service/voltha')
self.leader_prefix = '/'.join((self.prefix, self.config.get(
self.config['leader_key'], 'leader')))
self.membership_prefix = '/'.join((self.prefix, self.config.get(
self.config['membership_key'], 'members'), ''))
self.assignment_prefix = '/'.join((self.prefix, self.config.get(
self.config['assignment_key'], 'assignments'), ''))
self.workload_prefix = '/'.join((self.prefix, self.config.get(
self.config['workload_key'], 'work'), ''))
self.core_store_prefix = '/'.join((self.prefix, self.config.get(
self.config['core_store_key'], 'data/core')))
self.core_store_assignment_key = self.core_store_prefix + \
'/assignment'
self.core_storage_suffix = 'core_store'
self.retries = 0
self.instance_id = instance_id
self.internal_host_address = internal_host_address
self.external_host_address = external_host_address
self.rest_port = rest_port
self.membership_record_key = self.membership_prefix + self.instance_id
self.session_id = None
self.i_am_leader = False
self.leader_id = None # will be the instance id of the current leader
self.shutting_down = False
self.leader = None
self.membership_callback = None
self.worker = Worker(self.instance_id, self)
self.host = consul.split(':')[0].strip()
self.port = int(consul.split(':')[1].strip())
# TODO need to handle reconnect events properly
self.consul = Consul(host=self.host, port=self.port)
self.container_name_regex = container_name_regex
self.wait_for_leader_deferreds = []
self.peers_mapping_queue = MessageQueue()
    def start(self):
        """Schedule async initialization on the reactor and return self.

        start() itself never blocks; session/membership setup happens in
        _async_init on the next reactor turn.
        """
        log.debug('starting')
        reactor.callLater(0, self._async_init)
        log.info('started')
        return self
    @inlineCallbacks
    def stop(self):
        """Gracefully shut down: destroy the consul session (which releases
        the leader lock), stop the worker and, if we held it, the leader role.
        """
        log.debug('stopping')
        self.shutting_down = True
        yield self._delete_session()  # this will delete the leader lock too
        yield self.worker.stop()
        if self.leader is not None:
            yield self.leader.stop()
            self.leader = None
        log.info('stopped')
def wait_for_a_leader(self):
"""
Async wait till a leader is detected/elected. The deferred will be
called with the leader's instance_id
:return: Deferred.
"""
d = Deferred()
if self.leader_id is not None:
d.callback(self.leader_id)
return d
else:
self.wait_for_leader_deferreds.append(d)
return d
# Wait for a core data id to be assigned to this voltha instance
    @inlineCallbacks
    def get_core_store_id_and_prefix(self):
        """Async wait for the worker to be assigned a core store id; fires
        with (core_store_id, core_store_prefix)."""
        core_store_id = yield self.worker.get_core_store_id()
        returnValue((core_store_id, self.core_store_prefix))
    def recv_peers_map(self):
        # Deferred read of the next peers-map update published by the leader.
        return self.peers_mapping_queue.get()
    def publish_peers_map_change(self, msg):
        # Push a peers-map update to any consumer waiting in recv_peers_map().
        self.peers_mapping_queue.put(msg)
# Proxy methods for consul with retry support
    def kv_get(self, *args, **kw):
        # Consul KV read with automatic retry/backoff (see _retry).
        return self._retry('GET', *args, **kw)
    def kv_put(self, *args, **kw):
        # Consul KV write with automatic retry/backoff (see _retry).
        return self._retry('PUT', *args, **kw)
    def kv_delete(self, *args, **kw):
        # Consul KV delete with automatic retry/backoff (see _retry).
        return self._retry('DELETE', *args, **kw)
# Methods exposing key membership information
    @inlineCallbacks
    def get_members(self):
        """Return list of all members"""
        _, members = yield self.kv_get(self.membership_prefix, recurse=True)
        # Strip the membership key prefix so only the instance ids remain.
        returnValue([member['Key'][len(self.membership_prefix):]
                     for member in members])
# Private (internal) methods:
    @inlineCallbacks
    def _async_init(self):
        """Deferred startup sequence: consul session, membership record,
        leader election tracking, then the worker."""
        yield self._create_session()
        yield self._create_membership_record()
        yield self._start_leader_tracking()
        yield self.worker.start()
def _backoff(self, msg):
wait_time = self.RETRY_BACKOFF[min(self.retries,
len(self.RETRY_BACKOFF) - 1)]
self.retries += 1
log.info(msg, retry_in=wait_time)
return asleep(wait_time)
def _clear_backoff(self):
if self.retries:
log.info('reconnected-to-consul', after_retries=self.retries)
self.retries = 0
    @inlineCallbacks
    def _create_session(self):
        """Create a consul session (with retry) and start its renewal loop."""
        @inlineCallbacks
        def _create_session():
            consul = yield self.get_consul()
            # create consul session; 'release' behavior frees held locks when
            # the session is invalidated instead of deleting the keys
            self.session_id = yield consul.session.create(
                behavior='release', ttl=self.session_time_to_live,
                lock_delay=1)
            log.info('created-consul-session', session_id=self.session_id)
            self._start_session_tracking()
        # NOTE(review): _retry is called with a callable here but with
        # 'GET'/'PUT'/'DELETE' strings elsewhere — _retry presumably
        # dispatches on its first argument; confirm.
        yield self._retry(_create_session)
    @inlineCallbacks
    def _delete_session(self):
        """Best-effort destroy of the current consul session (releases any
        locks held under it, including the leader lock)."""
        try:
            yield self.consul.session.destroy(self.session_id)
        except Exception as e:
            # Deletion is best-effort (used during shutdown); just log.
            log.exception('failed-to-delete-session',
                          session_id=self.session_id)
    @inlineCallbacks
    def _create_membership_record(self):
        """Create our membership k/v record, then start the loop that keeps
        it bound to a live session."""
        yield self._do_create_membership_record_with_retries()
        reactor.callLater(0, self._maintain_membership_record)
@inlineCallbacks
def _maintain_membership_record(self):
try:
while 1:
valid_membership = yield self._assert_membership_record_valid()
if not valid_membership:
log.info('recreating-membership-before',
session=self.session_id)
yield self._do_create_membership_record_with_retries()
log.info('recreating-membership-after',
session=self.session_id)
else:
log.debug('valid-membership', session=self.session_id)
# Async sleep before checking the membership record again
yield asleep(self.membership_maintenance_loop_delay)
except Exception, e:
log.exception('unexpected-error-leader-trackin', e=e)
finally:
# except in shutdown, the loop must continue (after a short delay)
if not self.shutting_down:
reactor.callLater(self.membership_watch_relatch_delay,
self._maintain_membership_record)
def _create_membership_record_data(self):
member_record = dict()
member_record['status'] = 'alive'
member_record['host_address'] = self.external_host_address
return member_record
    @inlineCallbacks
    def _assert_membership_record_valid(self):
        """Check (via a 5s blocking consul read) that our membership record
        exists and is still bound to our current session id.

        Fires with True when valid; False on timeout, mismatch, or any error.
        """
        try:
            log.info('membership-record-before')
            is_timeout, (_, record) = yield \
                self.consul_get_with_timeout(
                    key=self.membership_record_key,
                    index=0,
                    timeout=5)
            if is_timeout:
                returnValue(False)
            log.info('membership-record-after', record=record)
            # A missing record, or one acquired by a different session,
            # means our membership claim lapsed and must be recreated.
            if record is None or \
                    'Session' not in record or \
                    record['Session'] != self.session_id:
                log.info('membership-record-change-detected',
                         old_session=self.session_id,
                         record=record)
                returnValue(False)
            else:
                returnValue(True)
        except Exception as e:
            # Treat any consul error as "invalid" so the caller recreates it.
            log.exception('membership-validation-exception', e=e)
            returnValue(False)
    @inlineCallbacks
    def _do_create_membership_record_with_retries(self):
        """Keep trying (with backoff) to PUT our membership record, acquiring
        the key under our session so consul auto-releases it if the session
        dies."""
        while 1:
            log.info('recreating-membership', session=self.session_id)
            result = yield self._retry(
                'PUT',
                self.membership_record_key,
                dumps(self._create_membership_record_data()),
                acquire=self.session_id)
            if result:
                log.info('new-membership-record-created',
                         session=self.session_id)
                break
            else:
                # acquire failed: the key is still held by a (stale) session;
                # back off and try again
                log.warn('cannot-create-membership-record')
                yield self._backoff('stale-membership-record')
    def _start_session_tracking(self):
        # Kick off the session renewal loop on the next reactor turn.
        reactor.callLater(0, self._session_tracking_loop)
    @inlineCallbacks
    def _session_tracking_loop(self):
        """Renew the consul session each iteration; if renewal times out,
        tear the session down and create a fresh one (the membership
        maintenance loop then re-acquires the membership record)."""
        @inlineCallbacks
        def _redo_session():
            log.info('_redo_session-before')
            yield self._delete_session()
            # Create a new consul connection and a new session under it.
            try:
                self.consul = Consul(host=self.host, port=self.port)
                self.session_id = yield self.consul.session.create(
                    behavior='release',
                    ttl=self.session_time_to_live,
                    lock_delay=1)
                log.info('new-consul-session', session=self.session_id)
            except Exception as e:
                log.exception('could-not-create-a-consul-session', e=e)
        @inlineCallbacks
        def _renew_session(m_callback):
            # Renew against the consul ref captured at call time so a
            # concurrent _redo_session can be detected below.
            try:
                log.debug('_renew_session-before')
                consul_ref = self.consul
                result = yield consul_ref.session.renew(
                    session_id=self.session_id)
                log.info('just-renewed-session', result=result)
                if not m_callback.called:
                    # Triggering callback will cancel the timeout timer
                    log.info('trigger-callback-to-cancel-timout-timer')
                    m_callback.callback(result)
                else:
                    # Timeout event has already been called. Just ignore
                    # this event
                    log.info('renew-called-after-timout',
                             new_consul_ref=self.consul,
                             old_consul_ref=consul_ref)
            # Legacy Python 2 'except X, e' syntax — this is a py2-only file.
            except Exception, e:
                # Let the invoking method receive a timeout
                log.exception('could-not-renew-session', e=e)
        try:
            while 1:
                log.debug('session-tracking-start')
                rcvd = DeferredWithTimeout(
                    timeout=self.session_renewal_timeout)
                _renew_session(rcvd)
                try:
                    _ = yield rcvd
                except TimeOutError as e:
                    log.info('session-renew-timeout', e=e)
                    # Redo the session
                    yield _redo_session()
                except Exception as e:
                    log.exception('session-renew-exception', e=e)
                else:
                    log.debug('successfully-renewed-session')
                # Async sleep before the next session tracking
                yield asleep(self.session_renewal_loop_delay)
        except Exception as e:
            log.exception('renew-exception', e=e)
        finally:
            # Always reschedule so tracking survives unexpected errors.
            reactor.callLater(self.session_renewal_loop_delay,
                              self._session_tracking_loop)
    def _start_leader_tracking(self):
        # Kick off the leader election/tracking loop on the next reactor turn.
        reactor.callLater(0, self._leadership_tracking_loop)
@inlineCallbacks
def _leadership_tracking_loop(self):
try:
# Attempt to acquire leadership lock. True indicates success;
# False indicates there is already a leader. It's instance id
# is then the value under the leader key service/voltha/leader.
# attempt acquire leader lock
log.info('leadership-attempt-before')
result = yield self._retry('PUT',
self.leader_prefix,
self.instance_id,
acquire=self.session_id)
log.info('leadership-attempt-after')
# read it back before being too happy; seeing our session id is a
# proof and now we have the change id that we can use to reliably
# track any changes. In an unlikely scenario where the leadership
# key gets wiped out administratively since the previous line,
# the returned record can be None. Handle it.
(index, record) = yield self._retry('GET',
self.leader_prefix)
log.info('leader-prefix',
i_am_leader=result, index=index, record=record)
if record is not None:
if result is True:
if record['Session'] == self.session_id:
yield self._assert_leadership()
else:
pass # confusion; need to retry leadership
else:
leader_id = record['Value']
yield self._assert_nonleadership(leader_id)
# if record was none, we shall try leadership again
last = record
while last is not None:
# this shall return only when update is made to leader key
# or expires after 5 seconds wait
is_timeout, (tmp_index, updated) = yield \
self.consul_get_with_timeout(
key=self.leader_prefix,
index=index,
timeout=5)
# Timeout means either there is a lost connectivity to
# consul or there are no change to that key. Do nothing.
if is_timeout:
continue
# After timeout event the index returned from
# consul_get_with_timeout is None. If we are here it's not a
# timeout, therefore the index is a valid one.
index=tmp_index
if updated is None or updated != last:
log.info('leader-key-change',
index=index, updated=updated, last=last)
# leadership has changed or vacated (or forcefully
# removed), apply now
# If I was previoulsy the leader then assert a non
# leadership role before going for election
if self.i_am_leader:
log.info('leaving-leaderdhip',
leader=self.instance_id)
yield self._assert_nonleadership(self.instance_id)
break
last = updated
except Exception, e:
log.exception('unexpected-error-leader-trackin', e=e)
finally:
# except in shutdown, the loop must continue (after a | |
# -*- coding: utf-8 -*-
#
# Copyright © Simphony Project Contributors
# Licensed under the terms of the MIT License
# (see simphony/__init__.py for details)
import pytest
import os
from simphony.plugins.siepic.parser import load_spi
#==============================================================================
# Test the parser
#==============================================================================
# Expected parse tree produced by load_spi() for
# spice/EBeam_sequoiap_A_v2/EBeam_sequoiap_A_v2_main.spi
# (compared verbatim in test_EBeam_sequoiap_A_v2 below).
EBeam_sequoiap_A_v2_result = {
    'circuits': [  # top-level circuit declarations in the netlist
        {
            'name': 'EBeam_sequoiap_A_v2',
            'ports': [
                'ebeam_gc_te1550$1_laser',
                'ebeam_gc_te1550$1_detector1'
            ],
            'subcircuits': 'EBeam_sequoiap_A_v2',
            'params': [
                {
                    'name': 'sch_x',
                    'value': -1.0
                },
                {
                    'name': 'sch_y',
                    'value': -1.0
                }
            ]
        }
    ],
    'subcircuits': [  # subcircuit definitions with their component instances
        {
            'name': 'EBeam_sequoiap_A_v2',
            'ports': ['ebeam_gc_te1550$1_laser', 'ebeam_gc_te1550$1_detector1'],
            'components': [
                {
                    'name': 'ebeam_y_1550_67',
                    'model': 'ebeam_y_1550',
                    'ports': ['N$80', 'N$81', 'N$82'],
                    'params': {
                        'library': 'Design kits/ebeam',
                        'lay_x': 0.00010077000000000001,
                        'lay_y': 0.00013824,
                        'sch_x': 8.339586207,
                        'sch_y': 11.440551724
                    }
                },
                {
                    'name': 'ebeam_gc_te1550_68',
                    'model': 'ebeam_gc_te1550',
                    'ports': ['ebeam_gc_te1550$1_laser', 'N$80'],
                    'params': {
                        'library': 'Design kits/ebeam',
                        'lay_x': 7.687e-05,
                        'lay_y': 0.00013824,
                        'sch_x': 6.361655172,
                        'sch_y': 11.440551724
                    }
                },
                {
                    'name': 'ebeam_gc_te1550_69',
                    'model': 'ebeam_gc_te1550',
                    'ports': ['ebeam_gc_te1550$1_detector1', 'N$83'],
                    'params': {
                        'library': 'Design kits/ebeam',
                        'lay_x': 7.687e-05,
                        'lay_y': 1.1240000000000002e-05,
                        'sch_x': 6.361655172,
                        'sch_y': 0.930206897
                    }
                },
                {
                    'name': 'ebeam_y_1550_70',
                    'model': 'ebeam_y_1550',
                    'ports': ['N$83', 'N$85', 'N$84'],
                    'params': {
                        'library': 'Design kits/ebeam',
                        'lay_x': 0.00010077000000000001,
                        'lay_y': 1.1240000000000002e-05,
                        'sch_x': 8.339586207,
                        'sch_y': 0.930206897
                    }
                },
                {
                    'name': 'ebeam_wg_integral_1550_72',
                    'model': 'ebeam_wg_integral_1550',
                    'ports': ['N$81', 'N$84'],
                    'params': {
                        'library': 'Design kits/ebeam',
                        'wg_length': 0.000189995,
                        'wg_width': 5e-07,
                        'points': '[[108.17,140.99],[138.469,140.99],[138.469,8.49],[108.17,8.49]]',
                        'radius': 5.0,
                        'lay_x': 0.000123694,
                        'lay_y': 7.474e-05,
                        'sch_x': 10.236744828,
                        'sch_y': 6.18537931
                    }
                },
                {
                    'name': 'ebeam_wg_integral_1550_83',
                    'model': 'ebeam_wg_integral_1550',
                    'ports': ['N$82', 'N$85'],
                    'params': {
                        'library': 'Design kits/ebeam',
                        'wg_length': 0.000149995,
                        'wg_width': 5e-07,
                        'points': '[[104.92,389.16],[120.719,389.16],[120.719,267.66],[104.92,267.66]]',
                        'radius': 5.0,
                        'lay_x': 0.000116444,
                        'lay_y': 7.474e-05,
                        'sch_x': 9.636744828,
                        'sch_y': 6.18537931
                    }
                }
            ],
            'params': {
                'MC_uniformity_width': 0.0,
                'MC_uniformity_thickness': 0.0,
                'MC_resolution_x': 100.0,
                'MC_resolution_y': 100.0,
                'MC_grid': 1e-05,
                'MC_non_uniform': 99.0
            }
        }
    ],
    'analyses': [  # simulation/analysis directives from the netlist
        {
            'definition': {
                'input_unit': 'wavelength',
                'input_parameter': 'start_and_stop'
            },
            'params': {
                'minimum_loss': 80.0,
                'analysis_type': 'scattering_data',
                'multithreading': 'user_defined',
                'number_of_threads': 1.0,
                'orthogonal_identifier': 1.0,
                'start': 1.5e-06,
                'stop': 1.6e-06,
                'number_of_points': 3000.0,
                'input': ['EBeam_sequoiap_A_v2,ebeam_gc_te1550$1_detector1'],
                'output': 'EBeam_sequoiap_A_v2,ebeam_gc_te1550$1_laser'
            }
        }
    ]
}
# Expected parse tree produced by load_spi() for spice/MZI4/MZI4_main.spi
# (compared verbatim in test_MZI4 below).
MZI4_result = {
    'circuits': [  # top-level circuit declarations in the netlist
        {
            'name': 'MZI4',
            'ports': ['ebeam_gc_te1550_detector2', 'ebeam_gc_te1550_laser1'],
            'subcircuits': 'MZI4',
            'params': [{'name': 'sch_x', 'value': -1.0}, {'name': 'sch_y', 'value': -1.0}]
        }
    ],
    'subcircuits': [  # subcircuit definitions with their component instances
        {
            'name': 'MZI4',
            'ports': ['ebeam_gc_te1550_detector2', 'ebeam_gc_te1550_laser1'],
            'components': [
                {
                    'name': 'ebeam_y_1550_0',
                    'model': 'ebeam_y_1550',
                    'ports': ['N$0', 'N$2', 'N$1'],
                    'params': {
                        'library': 'Design kits/ebeam',
                        'lay_x': 7.4e-06,
                        'lay_y': 0.000127,
                        'sch_x': 0.478534829,
                        'sch_y': 8.212692343
                    }
                },
                {
                    'name': 'ebeam_gc_te1550_1',
                    'model': 'ebeam_gc_te1550',
                    'ports': ['ebeam_gc_te1550_detector2', 'N$0'],
                    'params': {
                        'library': 'Design kits/ebeam',
                        'lay_x': -1.6500000000000005e-05,
                        'lay_y': 0.000127,
                        'sch_x': -1.067003336,
                        'sch_y': 8.212692343
                    }
                },
                {
                    'name': 'ebeam_gc_te1550_2',
                    'model': 'ebeam_gc_te1550',
                    'ports': ['ebeam_gc_te1550_laser1', 'N$3'],
                    'params': {
                        'library': 'Design kits/ebeam',
                        'lay_x': -1.6500000000000005e-05,
                        'lay_y': 0.000254,
                        'sch_x': -1.067003336,
                        'sch_y': 16.425384686
                    }
                },
                {
                    'name': 'ebeam_y_1550_3',
                    'model': 'ebeam_y_1550',
                    'ports': ['N$6', 'N$5', 'N$4'],
                    'params': {
                        'library': 'Design kits/ebeam',
                        'lay_x': 8.993e-05,
                        'lay_y': 0.000127,
                        'sch_x': 5.815491515,
                        'sch_y': 8.212692343,
                        'sch_f': 'true'
                    }
                },
                {
                    'name': 'ebeam_wg_integral_1550_4',
                    'model': 'ebeam_wg_integral_1550',
                    'ports': ['N$1', 'N$4'],
                    'params': {
                        'library': 'Design kits/ebeam',
                        'wg_length': 6.773e-05,
                        'wg_width': 5e-07,
                        'points': '[[14.8,124.25],[82.53,124.25]]',
                        'radius': 5.0,
                        'lay_x': 4.866500000000001e-05,
                        'lay_y': 0.00012425,
                        'sch_x': 3.147013172,
                        'sch_y': 8.034858453
                    }
                },
                {
                    'name': 'ebeam_wg_integral_1550_5',
                    'model': 'ebeam_wg_integral_1550',
                    'ports': ['N$2', 'N$5'],
                    'params': {
                        'library': 'Design kits/ebeam',
                        'wg_length': 0.000297394,
                        'wg_width': 5e-07,
                        'points': '[[14.8,129.75],[28.64,129.75],[28.64,247.68],[75.36,247.68],[75.36,129.75],[82.53,129.75]]',
                        'radius': 5.0,
                        'lay_x': 4.866500000000001e-05,
                        'lay_y': 0.000188715,
                        'sch_x': 3.147013172,
                        'sch_y': 12.203608153
                    }
                },
                {
                    'name': 'ebeam_wg_integral_1550_6',
                    'model': 'ebeam_wg_integral_1550',
                    'ports': ['N$6', 'N$3'],
                    'params': {
                        'library': 'Design kits/ebeam',
                        'wg_length': 0.000256152,
                        'wg_width': 5e-07,
                        'points': '[[97.33,127.0],[114.79,127.0],[114.79,254.0],[0.0,254.0]]',
                        'radius': 5.0,
                        'lay_x': 5.777e-05,
                        'lay_y': 0.0001905,
                        'sch_x': 3.735805013,
                        'sch_y': 12.319038514
                    }
                }
            ],
            'params': {
                'MC_uniformity_width': 0.0,
                'MC_uniformity_thickness': 0.0,
                'MC_resolution_x': 100.0,
                'MC_resolution_y': 100.0,
                'MC_grid': 1e-05,
                'MC_non_uniform': 99.0
            }
        }
    ],
    'analyses': [  # simulation/analysis directives from the netlist
        {
            'definition': {
                'input_unit': 'wavelength',
                'input_parameter': 'start_and_stop'
            },
            'params': {
                'minimum_loss': 80.0,
                'analysis_type': 'scattering_data',
                'multithreading': 'user_defined',
                'number_of_threads': 1.0,
                'orthogonal_identifier': 1.0,
                'start': 1.5e-06,
                'stop': 1.6e-06,
                'number_of_points': 2000.0,
                'input': ['MZI4,ebeam_gc_te1550_detector2'],
                'output': 'MZI4,ebeam_gc_te1550_laser1'
            }
        }
    ]
}
# Expected parse tree produced by load_spi() for the "top" netlist
# (compared verbatim in the corresponding test below).
top_result = {
    'circuits': [  # top-level circuit declarations in the netlist
        {
            'name': 'top',
            'ports': ['ebeam_gc_te1550_laser1', 'ebeam_gc_te1550_detector2', 'ebeam_gc_te1550_detector4', 'ebeam_gc_te1550_detector3'],
            'subcircuits': 'top',
            'params': [
                {'name': 'sch_x', 'value': -1.0},
                {'name': 'sch_y', 'value': -1.0}
            ]
        }
    ],
    'subcircuits': [  # subcircuit definitions with their component instances
        {
            'name': 'top',
            'ports': ['ebeam_gc_te1550_laser1', 'ebeam_gc_te1550_detector2', 'ebeam_gc_te1550_detector4', 'ebeam_gc_te1550_detector3'],
            'components': [
                {
                    'name': 'ebeam_dc_te1550_0',
                    'model': 'ebeam_dc_te1550',
                    'ports': ['N$0', 'N$1', 'N$3', 'N$2'],
                    'params': {'library': 'Design kits/ebeam', 'wg_width': 5e-07, 'gap': 2e-07, 'radius': 5e-06, 'Lc': 1.5e-05, 'lay_x': 2.36e-06, 'lay_y': 1.2e-07, 'sch_x': 0.082235221, 'sch_y': 0.004181452}
                },
                {
                    'name': 'ebeam_gc_te1550_1',
                    'model': 'ebeam_gc_te1550',
                    'ports': ['ebeam_gc_te1550_laser1', 'N$4'],
                    'params': {'library': 'Design kits/ebeam', 'lay_x': -0.00013533, 'lay_y': 1.475e-05, 'sch_x': -4.715632378, 'sch_y': 0.513970129}
                },
                {
                    'name': 'ebeam_gc_te1550_2',
                    'model': 'ebeam_gc_te1550',
                    'ports': ['ebeam_gc_te1550_detector2', 'N$5'],
                    'params': {'library': 'Design kits/ebeam', 'lay_x': -0.00012984, 'lay_y': -7.662e-05, 'sch_x': -4.524330954, 'sch_y': -2.669857037}
                },
                {
                    'name': 'ebeam_gc_te1550_3',
                    'model': 'ebeam_gc_te1550',
                    'ports': ['ebeam_gc_te1550_detector4', 'N$6'],
                    'params': {'library': 'Design kits/ebeam', 'lay_x': 9.456e-05, 'lay_y': -8.471e-05, 'sch_x': 3.294984096, 'sch_y': -2.951756586, 'sch_r': 180.0}
                },
                {
                    'name': 'ebeam_gc_te1550_4',
                    'model': 'ebeam_gc_te1550',
                    'ports': ['ebeam_gc_te1550_detector3', 'N$7'],
                    'params': {'library': 'Design kits/ebeam', 'lay_x': 0.00013005, 'lay_y': 3.253e-05, 'sch_x': 4.531648495, 'sch_y': 1.133521919, 'sch_r': 180.0}
                },
                {
                    'name': 'ebeam_wg_integral_1550_5',
                    'model': 'ebeam_wg_integral_1550',
                    'ports': ['N$0', 'N$5'],
                    'params': {'library': 'Design kits/ebeam', 'wg_length': 0.000173487, 'wg_width': 5e-07, 'points': '[[-11.14,-2.23],[-40.45,-2.23],[-40.45,-76.62],[-113.34,-76.62]]', 'radius': 5.0, 'lay_x': -6.224e-05, 'lay_y': -3.9425e-05, 'sch_x': -2.168779718, 'sch_y': -1.373781176}
                },
                {
                    'name': 'ebeam_wg_integral_1550_6',
                    'model': 'ebeam_wg_integral_1550',
                    'ports': ['N$4', 'N$1'],
                    'params': {'library': 'Design kits/ebeam', 'wg_length': 0.000116867, 'wg_width': 5e-07, 'points': '[[-118.83,14.75],[-26.47,14.75],[-26.47,2.47],[-11.14,2.47]]', 'radius': 5.0, 'lay_x': -6.4985e-05, 'lay_y': 8.61e-06, 'sch_x': -2.26443043, 'sch_y': 0.300019174}
                },
                {
                    'name': 'ebeam_wg_integral_1550_7',
                    'model': 'ebeam_wg_integral_1550',
                    'ports': ['N$8', 'N$2'],
                    'params': {'library': 'Design kits/ebeam', 'wg_length': 7.4217e-05, 'wg_width': 5e-07, 'points': '[[65.87,29.78],[36.16,29.78],[36.16,2.47],[15.86,2.47]]', 'radius': 5.0, 'lay_x': 4.0865e-05, 'lay_y': 1.6125e-05, 'sch_x': 1.423958598, 'sch_y': 0.561882599}
                },
                {
                    'name': 'ebeam_wg_integral_1550_8',
                    'model': 'ebeam_wg_integral_1550',
                    'ports': ['N$3', 'N$6'],
                    'params': {'library': 'Design kits/ebeam', 'wg_length': 0.000141577, 'wg_width': 5e-07, 'points': '[[15.86,-2.23],[35.04,-2.23],[35.04,-84.71],[78.06,-84.71]]', 'radius': 5.0, 'lay_x': 4.696e-05, 'lay_y': -4.347000000000001e-05, 'sch_x': 1.636341509, 'sch_y': -1.51473095}
                },
                {
                    'name': 'ebeam_y_1550_9',
                    'model': 'ebeam_y_1550',
                    'ports': ['N$8', 'N$10', 'N$9'],
                    'params': {'library': 'Design kits/ebeam', 'lay_x': 7.327e-05, 'lay_y': 2.978e-05, 'sch_x': 2.553124838, 'sch_y': 1.037696979}
                },
                {
                    'name': 'ebeam_terminator_te1550_10',
                    'model': 'ebeam_terminator_te1550',
                    'ports': ['N$11'],
                    'params': {'library': 'Design kits/ebeam', 'lay_x': 9.14e-05, 'lay_y': 2.7e-07, 'sch_x': 3.184872529, 'sch_y': 0.009408267, 'sch_r': 270.0}
                },
                {
                    'name': 'ebeam_wg_integral_1550_11',
                    'model': 'ebeam_wg_integral_1550',
                    'ports': ['N$9', 'N$11'],
                    'params': {'library': 'Design kits/ebeam', 'wg_length': 3.0488e-05, 'wg_width': 5e-07, 'points': '[[80.67,27.03],[91.4,27.03],[91.4,5.72]]', 'radius': 5.0, 'lay_x': 8.641e-05, 'lay_y': 1.675e-05, 'sch_x': 3.010993821, 'sch_y': 0.5836609940000002}
                },
                {
                    'name': 'ebeam_wg_integral_1550_12',
                    'model': 'ebeam_wg_integral_1550',
                    'ports': ['N$10', 'N$7'],
                    'params': {
                        'library': 'Design kits/ebeam',
                        'wg_length': 3.288e-05,
                        'wg_width': 5e-07,
                        'points': '[[80.67,32.53],[113.55,32.53]]',
                        'radius': 5.0,
                        'lay_x': 9.711e-05,
                        'lay_y': 3.253e-05,
                        'sch_x': 3.383839949,
                        'sch_y': 1.133521919
                    }
                }
            ],
            'params': {
                'MC_uniformity_width': 0.0,
                'MC_uniformity_thickness': 0.0,
                'MC_resolution_x': 100.0,
                'MC_resolution_y': 100.0,
                'MC_grid': 1e-05,
                'MC_non_uniform': 99.0
            }
        }
    ],
    'analyses': [  # simulation/analysis directives from the netlist
        {
            'definition': {
                'input_unit': 'wavelength',
                'input_parameter': 'start_and_stop'
            },
            'params': {
                'minimum_loss': 80.0,
                'analysis_type': 'scattering_data',
                'multithreading': 'user_defined',
                'number_of_threads': 1.0,
                'orthogonal_identifier': 1.0,
                'start': 1.5e-06,
                'stop': 1.6e-06,
                'number_of_points': 2000.0,
                'input': ['top,ebeam_gc_te1550_detector2', 'top,ebeam_gc_te1550_detector3', 'top,ebeam_gc_te1550_detector4'],
                'output': 'top,ebeam_gc_te1550_laser1'
            }
        }
    ]
}
def test_EBeam_sequoiap_A_v2():
    """Parsing the EBeam_sequoiap_A_v2 netlist yields the expected tree."""
    spi_file = os.path.join(os.path.dirname(__file__), 'spice',
                            'EBeam_sequoiap_A_v2', 'EBeam_sequoiap_A_v2_main.spi')
    parsed = load_spi(spi_file)
    assert parsed == EBeam_sequoiap_A_v2_result
def test_MZI4():
    """Parsing the MZI4 netlist yields the expected tree."""
    spi_file = os.path.join(os.path.dirname(__file__), 'spice',
                            'MZI4', 'MZI4_main.spi')
    parsed = load_spi(spi_file)
    assert parsed == MZI4_result
def test_top():
filename = | |
"resource_field_ref")
    @property
    @pulumi.getter(name="secretKeyRef")  # camelCase name used on the wire
    def secret_key_ref(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromSecretKeyRef']:
        """
        Selects a key of a secret in the pod's namespace.
        Returns None when the field was not set.
        """
        return pulumi.get(self, "secret_key_ref")
    def _translate_property(self, prop):
        # Translate a camelCase wire property name to its snake_case Python
        # name via the generated lookup table; unknown names pass through
        # unchanged (`get(...) or prop`).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): this class follows the Pulumi codegen output-type pattern
# (@pulumi.output_type + camelCase getters); manual edits here are likely
# to be overwritten if the SDK is regenerated.
@pulumi.output_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromConfigMapKeyRef(dict):
    """
    Selects a key of a ConfigMap.
    """
    def __init__(__self__, *,
                 key: str,
                 name: Optional[str] = None,
                 optional: Optional[bool] = None):
        """
        Selects a key of a ConfigMap.
        :param str key: The key to select.
        :param str name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
        :param bool optional: Specify whether the ConfigMap or its key must be defined
        """
        pulumi.set(__self__, "key", key)
        # Optional fields are recorded only when explicitly supplied.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if optional is not None:
            pulumi.set(__self__, "optional", optional)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key to select.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def optional(self) -> Optional[bool]:
        """
        Specify whether the ConfigMap or its key must be defined
        """
        return pulumi.get(self, "optional")
    def _translate_property(self, prop):
        # camelCase -> snake_case lookup; falls back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromFieldRef(dict):
    """
    Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
    """
    def __init__(__self__, *,
                 field_path: str,
                 api_version: Optional[str] = None):
        """
        Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
        :param str field_path: Path of the field to select in the specified API version.
        :param str api_version: Version of the schema the FieldPath is written in terms of, defaults to "v1".
        """
        pulumi.set(__self__, "field_path", field_path)
        # api_version is stored only when explicitly supplied.
        if api_version is not None:
            pulumi.set(__self__, "api_version", api_version)
    @property
    @pulumi.getter(name="fieldPath")
    def field_path(self) -> str:
        """
        Path of the field to select in the specified API version.
        """
        return pulumi.get(self, "field_path")
    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[str]:
        """
        Version of the schema the FieldPath is written in terms of, defaults to "v1".
        """
        return pulumi.get(self, "api_version")
    def _translate_property(self, prop):
        # camelCase -> snake_case lookup; falls back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRef(dict):
    """
    Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
    """
    def __init__(__self__, *,
                 resource: str,
                 container_name: Optional[str] = None,
                 divisor: Optional['outputs.SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefDivisor'] = None):
        """
        Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
        :param str resource: Required: resource to select
        :param str container_name: Container name: required for volumes, optional for env vars
        :param 'SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefDivisorArgs' divisor: Specifies the output format of the exposed resources, defaults to "1"
        """
        pulumi.set(__self__, "resource", resource)
        # Optional fields are recorded only when explicitly supplied.
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if divisor is not None:
            pulumi.set(__self__, "divisor", divisor)
    @property
    @pulumi.getter
    def resource(self) -> str:
        """
        Required: resource to select
        """
        return pulumi.get(self, "resource")
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[str]:
        """
        Container name: required for volumes, optional for env vars
        """
        return pulumi.get(self, "container_name")
    @property
    @pulumi.getter
    def divisor(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefDivisor']:
        """
        Specifies the output format of the exposed resources, defaults to "1"
        """
        return pulumi.get(self, "divisor")
    def _translate_property(self, prop):
        # camelCase -> snake_case lookup; falls back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefDivisor(dict):
    # No declared properties: instances behave as plain dicts with only the
    # generated property-name translation below.
    def __init__(__self__):
        pass
    def _translate_property(self, prop):
        # camelCase -> snake_case lookup; falls back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromSecretKeyRef(dict):
    """
    Selects a key of a secret in the pod's namespace
    """
    def __init__(__self__, *,
                 key: str,
                 name: Optional[str] = None,
                 optional: Optional[bool] = None):
        """
        Selects a key of a secret in the pod's namespace
        :param str key: The key of the secret to select from. Must be a valid secret key.
        :param str name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
        :param bool optional: Specify whether the Secret or its key must be defined
        """
        pulumi.set(__self__, "key", key)
        # Optional fields are recorded only when explicitly supplied.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if optional is not None:
            pulumi.set(__self__, "optional", optional)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key of the secret to select from. Must be a valid secret key.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def optional(self) -> Optional[bool]:
        """
        Specify whether the Secret or its key must be defined
        """
        return pulumi.get(self, "optional")
    def _translate_property(self, prop):
        # camelCase -> snake_case lookup; falls back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecResources(dict):
    """
    ResourceRequirements describes the compute resource requirements.
    """
    def __init__(__self__, *,
                 limits: Optional[Mapping[str, 'outputs.SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesLimits']] = None,
                 requests: Optional[Mapping[str, 'outputs.SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesRequests']] = None):
        """
        ResourceRequirements describes the compute resource requirements.
        :param Mapping[str, 'SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesLimitsArgs'] limits: Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        :param Mapping[str, 'SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesRequestsArgs'] requests: Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        """
        # Both fields are optional; only supplied values are stored.
        if limits is not None:
            pulumi.set(__self__, "limits", limits)
        if requests is not None:
            pulumi.set(__self__, "requests", requests)
    @property
    @pulumi.getter
    def limits(self) -> Optional[Mapping[str, 'outputs.SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesLimits']]:
        """
        Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        """
        return pulumi.get(self, "limits")
    @property
    @pulumi.getter
    def requests(self) -> Optional[Mapping[str, 'outputs.SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesRequests']]:
        """
        Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
        """
        return pulumi.get(self, "requests")
    def _translate_property(self, prop):
        # camelCase -> snake_case lookup; falls back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesLimits(dict):
    # No declared properties: instances behave as plain dicts with only the
    # generated property-name translation below.
    def __init__(__self__):
        pass
    def _translate_property(self, prop):
        # camelCase -> snake_case lookup; falls back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesRequests(dict):
    # No declared properties: instances behave as plain dicts with only the
    # generated property-name translation below.
    def __init__(__self__):
        pass
    def _translate_property(self, prop):
        # camelCase -> snake_case lookup; falls back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentStatus(dict):
    """
    SeldonDeploymentStatus defines the observed state of SeldonDeployment
    """
    def __init__(__self__, *,
                 address: Optional['outputs.SeldonDeploymentStatusAddress'] = None,
                 deployment_status: Optional[Mapping[str, 'outputs.SeldonDeploymentStatusDeploymentStatus']] = None,
                 description: Optional[str] = None,
                 replicas: Optional[int] = None,
                 service_status: Optional[Mapping[str, 'outputs.SeldonDeploymentStatusServiceStatus']] = None,
                 state: Optional[str] = None):
        """
        SeldonDeploymentStatus defines the observed state of SeldonDeployment
        :param 'SeldonDeploymentStatusAddressArgs' address: Addressable placeholder until duckv1 issue is fixed: https://github.com/kubernetes-sigs/controller-tools/issues/391
        """
        # Every field is optional; only supplied values are stored.
        if address is not None:
            pulumi.set(__self__, "address", address)
        if deployment_status is not None:
            pulumi.set(__self__, "deployment_status", deployment_status)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if replicas is not None:
            pulumi.set(__self__, "replicas", replicas)
        if service_status is not None:
            pulumi.set(__self__, "service_status", service_status)
        if state is not None:
            pulumi.set(__self__, "state", state)
    @property
    @pulumi.getter
    def address(self) -> Optional['outputs.SeldonDeploymentStatusAddress']:
        """
        Addressable placeholder until duckv1 issue is fixed: https://github.com/kubernetes-sigs/controller-tools/issues/391
        """
        return pulumi.get(self, "address")
    @property
    @pulumi.getter(name="deploymentStatus")
    def deployment_status(self) -> Optional[Mapping[str, 'outputs.SeldonDeploymentStatusDeploymentStatus']]:
        # Mapping of per-name deployment status records ("deploymentStatus" on the wire).
        return pulumi.get(self, "deployment_status")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def replicas(self) -> Optional[int]:
        return pulumi.get(self, "replicas")
    @property
    @pulumi.getter(name="serviceStatus")
    def service_status(self) -> Optional[Mapping[str, 'outputs.SeldonDeploymentStatusServiceStatus']]:
        # Mapping of per-name service status records ("serviceStatus" on the wire).
        return pulumi.get(self, "service_status")
    @property
    @pulumi.getter
    def state(self) -> Optional[str]:
        return pulumi.get(self, "state")
    def _translate_property(self, prop):
        # camelCase -> snake_case lookup; falls back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentStatusAddress(dict):
    """
    Addressable placeholder until duckv1 issue is fixed: https://github.com/kubernetes-sigs/controller-tools/issues/391
    """
    def __init__(__self__, *,
                 url: Optional[str] = None):
        """
        Addressable placeholder until duckv1 issue is fixed: https://github.com/kubernetes-sigs/controller-tools/issues/391
        """
        # url is stored only when explicitly supplied.
        if url is not None:
            pulumi.set(__self__, "url", url)
    @property
    @pulumi.getter
    def url(self) -> Optional[str]:
        return pulumi.get(self, "url")
    def _translate_property(self, prop):
        # camelCase -> snake_case lookup; falls back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentStatusDeploymentStatus(dict):
    """
    Deployment status fields (all optional).
    """
    def __init__(__self__, *,
                 available_replicas: Optional[int] = None,
                 description: Optional[str] = None,
                 explainer_for: Optional[str] = None,
                 name: Optional[str] = None,
                 replicas: Optional[int] = None,
                 status: Optional[str] = None):
        # Every field is optional; only supplied values are stored.
        if available_replicas is not None:
            pulumi.set(__self__, "available_replicas", available_replicas)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if explainer_for is not None:
            pulumi.set(__self__, "explainer_for", explainer_for)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if replicas is not None:
            pulumi.set(__self__, "replicas", replicas)
        if status is not None:
            pulumi.set(__self__, "status", status)
    @property
    @pulumi.getter(name="availableReplicas")
    def available_replicas(self) -> Optional[int]:
        return pulumi.get(self, "available_replicas")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="explainerFor")
    def explainer_for(self) -> Optional[str]:
        return pulumi.get(self, "explainer_for")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def replicas(self) -> Optional[int]:
        return pulumi.get(self, "replicas")
    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        return pulumi.get(self, "status")
    def _translate_property(self, prop):
        # camelCase -> snake_case lookup; falls back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentStatusServiceStatus(dict):
def __init__(__self__, *,
explainer_for: Optional[str] = None,
grpc_endpoint: Optional[str] = None,
http_endpoint: Optional[str] = None,
| |
# importando a biblioteca PySimpleGUI para a interface
import PySimpleGUI as sg
# Accepted spellings for the yes/no answers a player may type.
# NOTE(review): these lists are re-bound further down in the script (with
# "Sim" added), so the initial values here appear to be superseded.
yes = ["S", "s", "sim"]
no = ["N", "n", "nao", "não"]
# Item flags (sword / flower); 0 presumably means "not picked up" —
# TODO confirm where these are read later in the script.
espada = 0
flor = 0
# Primeira janela
def janela_inicial():
    """Build and return the welcome window shown when the game starts."""
    sg.theme('Reddit')
    rows = [
        [sg.Text('Seja bem vindo!')],
        [sg.Text('')],
        [sg.Text('Você está a entrar em uma aventura.')],
        [sg.Text('')],
        [sg.Text('Clique em ok para continuar')],
        [sg.Button('Sair'), sg.Button('Ok')],
    ]
    window = sg.Window("Vamos ??", layout=rows, finalize=True, size=(300, 180))
    return window
# segunda janela
def janela_comeco():
    """Build and return the opening-scene window offering the first three choices."""
    sg.theme('Reddit')
    intro = sg.Text("Você é um plebeu e, em um dia, decide sair com seus amigos.\nVocês vão na taverna “O Bode Dourado”, na cidade de Odin.\nEsta cidade é um movimentado entreposto comercial a\nmeio caminho de todas as principais rotas mercantis\nentre os reinos do leste")
    scene = sg.Text(
        "Depois de uma noite de bebedeira com amigos,\nvocê acorda na manhã seguinte\nem uma floresta densa e úmida. "
        "\nCabeça girando e lutando contra a vontade de vomitar,\nvocê se levanta e se maravilha com o seu novo ambiente desconhecido. "
        "\nA paz desaparece rapidamente quando você ouve um som grotesco \nemitido atrás de você.\nUm Orc babando está correndo em sua direção. Você irá:")
    rows = [
        [intro],
        [sg.Text('...')],
        [scene],
        [sg.Text("Pegue uma pedra próxima e jogue-a no Orc "), sg.Button('A')],
        [sg.Text("Deite-se e espere ser atacado. "), sg.Button('B')],
        [sg.Text("Correr! "), sg.Button('C')],
    ]
    return sg.Window("A história começa", layout=rows, finalize=True, size=(450, 400))
# escolha jogar pedra
def janela_choicePedra():
    """Window with the follow-up choices after throwing the first rock."""
    sg.theme("Reddit")
    prompt = sg.Text("\nO Orc fica atordoado, mas recupera o controle.\nEle começa a correr em sua direção novamente. Você vai: ")
    choices = [
        ('Esconder-se atrás da rocha ', 'A'),
        ('Jogar outra pedra', 'B'),
        ('Correr para uma caverna próxima', 'C'),
    ]
    rows = [[prompt]] + [[sg.Text(label), sg.Button(key)] for label, key in choices]
    return sg.Window("Uma pedra ??", layout=rows, finalize=True)
# escolha deitar e esperar ser atacado
def janela_choiceDeitar():
    """Death window shown when the player lies down and waits to be attacked."""
    sg.theme("Reddit")
    restart_row = [sg.Button('Recomeçar??'), sg.Button('Sair..')]
    rows = [
        [sg.Text('Essa foi rápida...\nVocê morreu')],
        [sg.Text('')],
        restart_row,
    ]
    return sg.Window("Rápido como o vento", layout=rows, finalize=True, size=(300, 100))
# escolha correr
def janela_choiceCorrer():
    """Window with the follow-up choices after the player tries to outrun the Orc.

    Fixes to the on-screen text: the implicit string concatenation previously
    produced "mas a a velocidade" (duplicated article), and "a um cidade" had
    a gender-agreement error ("uma cidade").
    """
    sg.theme("Reddit")
    layout = [
        [sg.Text("\nVocê corre o mais rápido possível,\nmas "
                 "a velocidade do Orc é muito grande. Você irá: ")],
        [sg.Text('Esconder-se atrás da rocha '), sg.Button('A')],
        [sg.Text('Fazer uma armadilha, e então lutar'), sg.Button('B')],
        [sg.Text('Corra em direção a uma cidade'), sg.Button('C')]
    ]
    return sg.Window("The Flash ?", layout=layout, finalize=True)
# escolha de jogar a pedra e acabar morrendo
def janela_choicePedraF():
    """Death window: throwing a second rock fails."""
    sg.theme("Reddit")
    message = sg.Text("\nVocê decide jogar outra pedra. "
                      " A primeira pedra não deu muito dano.\nA pedra ricocheteou na cabeça e você erra. \nVocê morreu! ")
    rows = [
        [message],
        [sg.Text('')],
        [sg.Button('Recomeçar??'), sg.Button('Sair..')],
    ]
    return sg.Window("Não foi dessa vez", layout=rows, finalize=True)
# escolha da caverna
def janela_choiceCaverna():
    """Cave window: asks (free-text input, key 'resposta') whether the player
    takes the shimmering sword.

    Fixes to the on-screen text: "você nota você nota" (duplicated phrase from
    the implicit string concatenation), "báu" -> "baú", "pega-lá" -> "pegá-la".
    """
    sg.theme("Reddit")
    layout = [
        [sg.Text("\nVocê estava hesitante, já que a caverna estava escura e sinistra. "
                 " \nAntes de entrar totalmente, você nota um baú parcialmente enterrado no chão. "
                 " \nAo abrir, você encontra uma espada cintilante. Deseja pegá-la? Sim / Não? ")],
        [sg.Text('')],
        [sg.Input(key='resposta')],
        [sg.Button('Ok')]
    ]
    return sg.Window("A caverna pegar ou largar ", layout=layout, finalize=True)
# escolha depois de pegar a espada
def janela_choiceEspada():
    """Window with the choices offered after the cave/sword scene."""
    sg.theme("Reddit")
    prompt = sg.Text(
        "\n O Orc continua a te perseguir, você está em apuros. O que fará em seguida? ")
    options = [
        ('Se esconder em silêncio', 'A'),
        ('Lutar', 'B'),
        ('Correr', 'C'),
    ]
    rows = [[prompt]] + [[sg.Text(label), sg.Button(key)] for label, key in options]
    return sg.Window("E agora ??", layout=rows, finalize=True)
# escolha de lutar após pegar a espada
def janela_choiceEspada1():
    """Victory window: the player fights the Orc with the sword and survives."""
    sg.theme("Reddit")
    ending = sg.Text("\nVocê ficou esperando. A espada cintilante atraiu "
                     "o Orc,\nque pensou que você não era páreo.\nEnquanto ele caminhava "
                     "cada vez mais perto,\nseu coração bate mais rápido. Como o Orc "
                     "estendeu a mão para agarrar a espada,\nvocê perfurou a lâmina em "
                     "seu peito. \n \n Você sobreviveu! ")
    rows = [[ending], [sg.Button('Recomeçar??'), sg.Button('Sair..')]]
    return sg.Window("Grande Final", layout=rows, finalize=True)
# escolha de lutar sem ter pego a espada
def janela_choiceEspada0():
    """Death window: fighting without having taken the sword."""
    sg.theme("Reddit")
    ending = sg.Text(
        "\n Você deveria ter pegado aquela espada.\nVocê está indefeso. \n Você morreu! ")
    rows = [[ending], [sg.Button('Recomeçar??'), sg.Button('Sair..')]]
    return sg.Window("Por uma espada..", layout=rows, finalize=True)
# escolha de ficar no escuro
def janela_choiceEscuro():
    """Death window: hiding in the dark from an Orc that can see in the dark."""
    sg.theme("Reddit")
    ending = sg.Text("\nMesmo? Você vai se esconder no escuro?\nEu penso que "
                     "Orcs podem ver muito bem no escuro, certo?\nNão tenho certeza, mas "
                     "Vou com SIM, então ... \n Você morreu! ")
    rows = [[ending], [sg.Button('Recomeçar??'), sg.Button('Sair..')]]
    return sg.Window("Não muito inteligente ", layout=rows, finalize=True)
# escolha de se esconder atrás da rocha
def janela_choiceRocha():
    """Death window: hiding behind the rock fails."""
    sg.theme("Reddit")
    linhas = [[sg.Text('Você foi facilmente visto')]]
    linhas.append([sg.Text('Você morreu!')])
    linhas.append([sg.Button('Recomeçar??'), sg.Button('Sair..')])
    return sg.Window("Não foi dessa vez", layout=linhas, finalize=True, size=(270, 100))
# escolha de fazer armadilha
def janela_choiceArmadilha():
    """Death window: building a trap and fighting fails.

    Fix: "pareo" -> "páreo" (missing accent), matching the spelling already
    used in janela_choiceEspada1's text.
    """
    sg.theme("Reddit")
    layout = [
        [sg.Text('Você não é páreo para o orc')],
        [sg.Text('Você morreu')],
        [sg.Button('Recomeçar??'), sg.Button('Sair..')]
    ]
    return sg.Window("Não foi dessa vez", layout=layout, finalize=True, size=(270, 100))
# escolha de ir para cidade
def janela_choiceCidade():
    """City window: asks (free-text input, key 'resposta') whether the player
    picks up the purple flower.

    Fix to the on-screen text: "delapitado" -> "dilapidado" (misspelling).
    """
    sg.theme("Reddit")
    layout = [
        [sg.Text(
            "Você tenta acalmar sua respiração pesada enquanto se esconde "
            "\natrás de um edifício dilapidado, esperando o Orc chegar "
            "\ncorrendo na esquina. Você nota uma flor roxa "
            "\nperto do seu pé. Você a pega? Sim / Não?")],
        [sg.Input(key='resposta')],
        [sg.Button('Ok')]
    ]
    return sg.Window("A cidade", layout=layout, finalize=True)
# escolha de pegar a flor
def janela_choiceFlor1():
    """Victory window: picking up the flower pacifies the love-seeking Orc."""
    sg.theme("Reddit")
    ending = sg.Text("\nVocê rapidamente pega a flor roxa, de alguma forma "
                     "\nesperando que isso pare o Orc. Pare! \nO Orc estava apenas procurando "
                     "por amor. "
                     "\nIsso foi bem estranho... Mas você sobreviveu! ")
    rows = [[ending], [sg.Button('Recomeçar??'), sg.Button('Sair..')]]
    return sg.Window("You win!!", layout=rows, finalize=True)
# Choice: leave the flower (losing ending).
def janela_choiceFlor0():
    """Death screen shown when the player refuses to pick the flower."""
    sg.theme("Reddit")
    story_row = [sg.Text("\nTalvez você devesse ter pego a flor... "
                         "\nVocê morre esmagado pelo Orc. ")]
    actions_row = [sg.Button('Recomeçar??'), sg.Button('Sair..')]
    return sg.Window("You lose..", layout=[story_row, actions_row], finalize=True)
# Window handles for every screen of the game; only the start window exists
# initially, the rest are created lazily by the event loop below.
# (Fixed: the original assigned `janela4 = None` twice.)
janela1, janela2 = janela_inicial(), None
janela3 = None
janela4 = None
janela5 = None
janela6 = None
janelaX = None
janelaC = None
janelaAA = None
# Accepted spellings for yes/no answers typed by the player.
yes = ["S", "s", "sim", "Sim"]
no = ["N", "n", "nao", "não"]
# Main event loop: reads events from all open windows and routes the story.
while True:
# definindo para as variaveis serem lidas em todas as janelas / verificando condições e escolhas
window, event, values = sg.read_all_windows()
# Escolha página inicial
if window == janela1 and event == sg.WIN_CLOSED:
break
if window == janela1 and event == 'Sair':
break
if window == janela1 and event == 'Ok':
janela2 = janela_comeco()
janela1.hide()
# inicio da história / segunda janela
if window == janela2 and event == sg.WIN_CLOSED:
break
if window == janela2 and event == 'A':
janelaAA = janela_choicePedra()
janela2.hide()
if window == janela2 and event == 'B':
janela3 = janela_choiceDeitar()
janela2.hide()
if window == janela2 and event == 'C':
janela3 = janela_choiceCorrer()
janela2.hide()
# janela choice A
if window == janelaAA and event == sg.WIN_CLOSED:
break
if window == janelaAA and event == 'A':
janela4 = janela_choiceRocha()
janelaAA.hide()
if window == janelaAA and event == 'B':
janela4 = janela_choicePedraF()
janelaAA.hide()
if window == janelaAA and event == 'C':
janela4 = janela_choiceCaverna()
janelaAA.hide()
# janela 4
if window == janela3 and event == sg.WIN_CLOSED:
break
if window == janela3 and event == 'A':
janela5 = janela_choiceRocha()
janela3.hide()
if window == janela3 and event == 'B':
janela5 = janela_choiceArmadilha()
janela3.hide()
if window == janela3 and event == 'C':
janelaC = janela_choiceCidade()
janela3.hide()
#morte choice rocha c -> a
if window == janela5 and event == sg.WIN_CLOSED:
break
if window == janela5 and event == ('Recomeçar??'):
janela1 = janela_inicial()
janela3.hide()
if window == janela5 and event == ('Sair..'):
break
# morte choice rocha
if window == janela4 and event == sg.WIN_CLOSED:
break
if window == janela4 and event == ('Recomeçar??'):
janela1 = janela_inicial()
janela4.hide()
if window == janela4 and event == ('Sair..'):
break
# choice cidade
if window == janelaC and event == sg.WIN_CLOSED:
break
if event == 'Ok':
if window == janelaC and values['resposta'] == yes[0]:
janela6 = janela_choiceFlor1()
janelaC.hide()
flor = 1 # adiciona a flor a seu inventario
if window == janelaC and values['resposta'] == yes[1]:
janela6 = janela_choiceFlor1()
janelaC.hide()
flor = 1
if window == janelaC and values['resposta'] == yes[2]:
janela6 = janela_choiceFlor1()
janelaC.hide()
flor = 1
if window == janelaC and values['resposta'] == yes[3]:
janela6 = janela_choiceFlor1()
janelaC.hide()
flor = 1
if window == janelaC and values['resposta'] == no[0]:
flor = 0
janela6 = janela_choiceFlor0()
janelaC.hide()
if window == janelaC and | |
+ m.x94 == 0)
# Machine-generated Pyomo model fragment — do not edit coefficients by hand.
# c49-c76: tie each auxiliary variable x53..x101 to one of the base variables
# x2..x26 via a fixed factor of 40 (i.e. x_aux = 40 * x_base).
# NOTE(review): each base variable appears twice (two auxiliaries) —
# presumably one per side of a paired bound; confirm against the generator.
m.c49 = Constraint(expr= - 40*m.x24 + m.x96 == 0)
m.c50 = Constraint(expr= - 40*m.x25 + m.x98 == 0)
m.c51 = Constraint(expr= - 40*m.x26 + m.x100 == 0)
m.c52 = Constraint(expr= - 40*m.x2 + m.x53 == 0)
m.c53 = Constraint(expr= - 40*m.x3 + m.x55 == 0)
m.c54 = Constraint(expr= - 40*m.x4 + m.x57 == 0)
m.c55 = Constraint(expr= - 40*m.x5 + m.x59 == 0)
m.c56 = Constraint(expr= - 40*m.x6 + m.x61 == 0)
m.c57 = Constraint(expr= - 40*m.x7 + m.x63 == 0)
m.c58 = Constraint(expr= - 40*m.x8 + m.x65 == 0)
m.c59 = Constraint(expr= - 40*m.x9 + m.x67 == 0)
m.c60 = Constraint(expr= - 40*m.x10 + m.x69 == 0)
m.c61 = Constraint(expr= - 40*m.x11 + m.x71 == 0)
m.c62 = Constraint(expr= - 40*m.x12 + m.x73 == 0)
m.c63 = Constraint(expr= - 40*m.x13 + m.x75 == 0)
m.c64 = Constraint(expr= - 40*m.x14 + m.x77 == 0)
m.c65 = Constraint(expr= - 40*m.x15 + m.x79 == 0)
m.c66 = Constraint(expr= - 40*m.x16 + m.x81 == 0)
m.c67 = Constraint(expr= - 40*m.x17 + m.x83 == 0)
m.c68 = Constraint(expr= - 40*m.x18 + m.x85 == 0)
m.c69 = Constraint(expr= - 40*m.x19 + m.x87 == 0)
m.c70 = Constraint(expr= - 40*m.x20 + m.x89 == 0)
m.c71 = Constraint(expr= - 40*m.x21 + m.x91 == 0)
m.c72 = Constraint(expr= - 40*m.x22 + m.x93 == 0)
m.c73 = Constraint(expr= - 40*m.x23 + m.x95 == 0)
m.c74 = Constraint(expr= - 40*m.x24 + m.x97 == 0)
m.c75 = Constraint(expr= - 40*m.x25 + m.x99 == 0)
m.c76 = Constraint(expr= - 40*m.x26 + m.x101 == 0)
# c77-c101: bilinear definitions. Each x102..x126 equals a base variable
# (x27..x51) multiplied by a fixed linear combination of x127..x144.
# The repeated coefficients (0.5746..., 0.2873..., 0.7662..., ...) look like
# components of fixed unit direction vectors — TODO confirm with the generator.
m.c77 = Constraint(expr=m.x27*(m.x130 - m.x127) - m.x102 == 0)
m.c78 = Constraint(expr=m.x28*(-0.574695771132691*m.x127 - 0.287347885566345*m.x128 + 0.766261028176921*m.x129 +
                              0.574695771132691*m.x136 + 0.287347885566345*m.x137 - 0.766261028176921*m.x138) - m.x103 == 0)
m.c79 = Constraint(expr=m.x29*(0.574695771132691*m.x130 - 0.287347885566345*m.x131 + 0.766261028176921*m.x132 -
                              0.574695771132691*m.x133 + 0.287347885566345*m.x134 - 0.766261028176921*m.x135) - m.x104 == 0)
m.c80 = Constraint(expr=m.x30*(0.287347885566345*m.x128 - 0.574695771132691*m.x127 + 0.766261028176921*m.x129 +
                              0.574695771132691*m.x139 - 0.287347885566345*m.x140 - 0.766261028176921*m.x141) - m.x105 == 0)
m.c81 = Constraint(expr=m.x31*(0.574695771132691*m.x130 + 0.287347885566345*m.x131 + 0.766261028176921*m.x132 -
                              0.574695771132691*m.x142 - 0.287347885566345*m.x143 - 0.766261028176921*m.x144) - m.x106 == 0)
m.c82 = Constraint(expr=m.x32*(0.936329177569045*m.x132 - 0.351123441588392*m.x131 + 0.351123441588392*m.x137 -
                              0.936329177569045*m.x138) - m.x107 == 0)
m.c83 = Constraint(expr=m.x33*(0.351123441588392*m.x131 + 0.936329177569045*m.x132 - 0.351123441588392*m.x140 -
                              0.936329177569045*m.x141) - m.x108 == 0)
m.c84 = Constraint(expr=m.x34*(0.936329177569045*m.x129 - 0.351123441588392*m.x128 + 0.351123441588392*m.x134 -
                              0.936329177569045*m.x135) - m.x109 == 0)
m.c85 = Constraint(expr=m.x35*(0.351123441588392*m.x128 + 0.936329177569045*m.x129 - 0.351123441588392*m.x143 -
                              0.936329177569045*m.x144) - m.x110 == 0)
m.c86 = Constraint(expr=m.x36*(m.x134 - m.x143) - m.x111 == 0)
m.c87 = Constraint(expr=m.x37*(m.x137 - m.x140) - m.x112 == 0)
m.c88 = Constraint(expr=m.x38*(m.x136 - m.x133) - m.x113 == 0)
m.c89 = Constraint(expr=m.x39*(m.x139 - m.x142) - m.x114 == 0)
m.c90 = Constraint(expr=m.x40*(0.345032779671177*m.x133 + 0.75907211527659*m.x134 + 0.552052447473883*m.x135) - m.x115
                        == 0)
m.c91 = Constraint(expr=m.x41*(0.345032779671177*m.x142 - 0.75907211527659*m.x143 + 0.552052447473883*m.x144) - m.x116
                        == 0)
m.c92 = Constraint(expr=m.x42*(0.75907211527659*m.x137 - 0.345032779671177*m.x136 + 0.552052447473883*m.x138) - m.x117
                        == 0)
m.c93 = Constraint(expr=m.x43*(-0.345032779671177*m.x139 - 0.75907211527659*m.x140 + 0.552052447473883*m.x141) - m.x118
                        == 0)
m.c94 = Constraint(expr=m.x44*(0.75907211527659*m.x136 - 0.345032779671177*m.x137 + 0.552052447473883*m.x138) - m.x119
                        == 0)
m.c95 = Constraint(expr=m.x45*(-0.75907211527659*m.x133 - 0.345032779671177*m.x134 + 0.552052447473883*m.x135) - m.x120
                        == 0)
m.c96 = Constraint(expr=m.x46*(0.75907211527659*m.x139 + 0.345032779671177*m.x140 + 0.552052447473883*m.x141) - m.x121
                        == 0)
m.c97 = Constraint(expr=m.x47*(0.345032779671177*m.x143 - 0.75907211527659*m.x142 + 0.552052447473883*m.x144) - m.x122
                        == 0)
m.c98 = Constraint(expr=m.x48*(0.468292905790847*m.x142 + 0.468292905790847*m.x143 + 0.749268649265355*m.x144) - m.x123
                        == 0)
m.c99 = Constraint(expr=m.x49*(0.468292905790847*m.x133 - 0.468292905790847*m.x134 + 0.749268649265355*m.x135) - m.x124
                        == 0)
m.c100 = Constraint(expr=m.x50*(-0.468292905790847*m.x136 - 0.468292905790847*m.x137 + 0.749268649265355*m.x138)
                         - m.x125 == 0)
m.c101 = Constraint(expr=m.x51*(0.468292905790847*m.x140 - 0.468292905790847*m.x139 + 0.749268649265355*m.x141) - m.x126
                         == 0)
# c102-c119: linear balance equations over the products x102..x126, grouped in
# triples (one equation per coordinate, using the same fixed coefficients as
# c77-c101). Right-hand sides 1, 10, -10, 0.5, 0.6 are external loads/targets —
# TODO confirm their physical meaning against the model generator.
m.c102 = Constraint(expr= - m.x102 - 0.574695771132691*m.x103 - 0.574695771132691*m.x105 == 1)
m.c103 = Constraint(expr= - 0.287347885566345*m.x103 + 0.287347885566345*m.x105 - 0.351123441588392*m.x109
                          + 0.351123441588392*m.x110 == 10)
m.c104 = Constraint(expr= 0.766261028176921*m.x103 + 0.766261028176921*m.x105 + 0.936329177569045*m.x109
                          + 0.936329177569045*m.x110 == -10)
m.c105 = Constraint(expr= m.x102 + 0.574695771132691*m.x104 + 0.574695771132691*m.x106 == 0)
m.c106 = Constraint(expr= - 0.287347885566345*m.x104 + 0.287347885566345*m.x106 - 0.351123441588392*m.x107
                          + 0.351123441588392*m.x108 == 10)
m.c107 = Constraint(expr= 0.766261028176921*m.x104 + 0.766261028176921*m.x106 + 0.936329177569045*m.x107
                          + 0.936329177569045*m.x108 == -10)
m.c108 = Constraint(expr= - 0.574695771132691*m.x104 - m.x113 + 0.345032779671177*m.x115 - 0.75907211527659*m.x120
                          + 0.468292905790847*m.x124 == 0.5)
m.c109 = Constraint(expr= 0.287347885566345*m.x104 + 0.351123441588392*m.x109 + m.x111 + 0.75907211527659*m.x115
                          - 0.345032779671177*m.x120 - 0.468292905790847*m.x124 == 0)
m.c110 = Constraint(expr= - 0.766261028176921*m.x104 - 0.936329177569045*m.x109 + 0.552052447473883*m.x115
                          + 0.552052447473883*m.x120 + 0.749268649265355*m.x124 == 0)
m.c111 = Constraint(expr= 0.574695771132691*m.x103 + m.x113 - 0.345032779671177*m.x117 + 0.75907211527659*m.x119
                          - 0.468292905790847*m.x125 == 0)
m.c112 = Constraint(expr= 0.287347885566345*m.x103 + 0.351123441588392*m.x107 + m.x112 + 0.75907211527659*m.x117
                          - 0.345032779671177*m.x119 - 0.468292905790847*m.x125 == 0)
m.c113 = Constraint(expr= - 0.766261028176921*m.x103 - 0.936329177569045*m.x107 + 0.552052447473883*m.x117
                          + 0.552052447473883*m.x119 + 0.749268649265355*m.x125 == 0)
m.c114 = Constraint(expr= 0.574695771132691*m.x105 + m.x114 - 0.345032779671177*m.x118 + 0.75907211527659*m.x121
                          - 0.468292905790847*m.x126 == 0)
m.c115 = Constraint(expr= - 0.287347885566345*m.x105 - 0.351123441588392*m.x108 - m.x112 - 0.75907211527659*m.x118
                          + 0.345032779671177*m.x121 + 0.468292905790847*m.x126 == 0)
m.c116 = Constraint(expr= - 0.766261028176921*m.x105 - 0.936329177569045*m.x108 + 0.552052447473883*m.x118
                          + 0.552052447473883*m.x121 + 0.749268649265355*m.x126 == 0)
m.c117 = Constraint(expr= - 0.574695771132691*m.x106 - m.x114 + 0.345032779671177*m.x116 - 0.75907211527659*m.x122
                          + 0.468292905790847*m.x123 == 0.6)
m.c118 = Constraint(expr= - 0.287347885566345*m.x106 - 0.351123441588392*m.x110 - m.x111 - 0.75907211527659*m.x116
                          + 0.345032779671177*m.x122 + 0.468292905790847*m.x123 == 0)
m.c119 = Constraint(expr= - 0.766261028176921*m.x106 - 0.936329177569045*m.x110 + 0.552052447473883*m.x116
                          + 0.552052447473883*m.x122 + 0.749268649265355*m.x123 == 0)
# c120-c169: paired one-sided bounds. For each product x102..x126 there are two
# inequalities, x10k <= x(even aux) and -x10k <= x(odd aux), i.e. the auxiliary
# pair (x52/x53, x54/x55, ...) bounds the positive and negative part of the
# product separately (a standard absolute-value style linearization —
# NOTE(review): confirm against the generator).
m.c120 = Constraint(expr= - m.x52 + m.x102 <= 0)
m.c121 = Constraint(expr= - m.x53 - m.x102 <= 0)
m.c122 = Constraint(expr= - m.x54 + m.x103 <= 0)
m.c123 = Constraint(expr= - m.x55 - m.x103 <= 0)
m.c124 = Constraint(expr= - m.x56 + m.x104 <= 0)
m.c125 = Constraint(expr= - m.x57 - m.x104 <= 0)
m.c126 = Constraint(expr= - m.x58 + m.x105 <= 0)
m.c127 = Constraint(expr= - m.x59 - m.x105 <= 0)
m.c128 = Constraint(expr= - m.x60 + m.x106 <= 0)
m.c129 = Constraint(expr= - m.x61 - m.x106 <= 0)
m.c130 = Constraint(expr= - m.x62 + m.x107 <= 0)
m.c131 = Constraint(expr= - m.x63 - m.x107 <= 0)
m.c132 = Constraint(expr= - m.x64 + m.x108 <= 0)
m.c133 = Constraint(expr= - m.x65 - m.x108 <= 0)
m.c134 = Constraint(expr= - m.x66 + m.x109 <= 0)
m.c135 = Constraint(expr= - m.x67 - m.x109 <= 0)
m.c136 = Constraint(expr= - m.x68 + m.x110 <= 0)
m.c137 = Constraint(expr= - m.x69 - m.x110 <= 0)
m.c138 = Constraint(expr= - m.x70 + m.x111 <= 0)
m.c139 = Constraint(expr= - m.x71 - m.x111 <= 0)
m.c140 = Constraint(expr= - m.x72 + m.x112 <= 0)
m.c141 = Constraint(expr= - m.x73 - m.x112 <= 0)
m.c142 = Constraint(expr= - m.x74 + m.x113 <= 0)
m.c143 = Constraint(expr= - m.x75 - m.x113 <= 0)
m.c144 = Constraint(expr= - m.x76 + m.x114 <= 0)
m.c145 = Constraint(expr= - m.x77 - m.x114 <= 0)
m.c146 = Constraint(expr= - m.x78 + m.x115 <= 0)
m.c147 = Constraint(expr= - m.x79 - m.x115 <= 0)
m.c148 = Constraint(expr= - m.x80 + m.x116 <= 0)
m.c149 = Constraint(expr= - m.x81 - m.x116 <= 0)
m.c150 = Constraint(expr= - m.x82 + m.x117 <= 0)
m.c151 = Constraint(expr= - m.x83 - m.x117 <= 0)
m.c152 = Constraint(expr= - m.x84 + m.x118 <= 0)
m.c153 = Constraint(expr= - m.x85 - m.x118 <= 0)
m.c154 = Constraint(expr= - m.x86 + m.x119 <= 0)
m.c155 = Constraint(expr= - m.x87 - m.x119 <= 0)
m.c156 = Constraint(expr= - m.x88 + m.x120 <= 0)
m.c157 = Constraint(expr= - m.x89 - m.x120 <= 0)
m.c158 = Constraint(expr= - m.x90 + m.x121 <= 0)
m.c159 = Constraint(expr= - m.x91 - m.x121 <= 0)
m.c160 = Constraint(expr= - m.x92 + m.x122 <= 0)
m.c161 = Constraint(expr= - m.x93 - m.x122 <= 0)
m.c162 = Constraint(expr= - m.x94 + m.x123 <= 0)
m.c163 = Constraint(expr= - m.x95 - m.x123 <= 0)
m.c164 = Constraint(expr= - m.x96 + m.x124 <= 0)
m.c165 = Constraint(expr= - m.x97 - m.x124 <= 0)
m.c166 = Constraint(expr= - m.x98 + m.x125 <= 0)
m.c167 = Constraint(expr= - m.x99 - m.x125 <= 0)
m.c168 = Constraint(expr= - m.x100 + m.x126 <= 0)
m.c169 = Constraint(expr= - m.x101 - m.x126 <= 0)
# c170-c171: discretization of x2 (resp. x3) — the variable must equal one of
# the listed discrete values 0.1..3.4, selected by the binary variables
# b145..b174 (resp. b175..b204). Presumably paired with a cardinality/SOS1
# constraint on the binaries elsewhere in the model — not visible here.
m.c170 = Constraint(expr= - m.x2 + 0.1*m.b145 + 0.2*m.b146 + 0.3*m.b147 + 0.4*m.b148 + 0.5*m.b149 + 0.6*m.b150
                          + 0.7*m.b151 + 0.8*m.b152 + 0.9*m.b153 + m.b154 + 1.1*m.b155 + 1.2*m.b156 + 1.3*m.b157
                          + 1.4*m.b158 + 1.5*m.b159 + 1.6*m.b160 + 1.7*m.b161 + 1.8*m.b162 + 1.9*m.b163 + 2*m.b164
                          + 2.1*m.b165 + 2.2*m.b166 + 2.3*m.b167 + 2.4*m.b168 + 2.5*m.b169 + 2.6*m.b170 + 2.8*m.b171
                          + 3*m.b172 + 3.2*m.b173 + 3.4*m.b174 == 0)
m.c171 = Constraint(expr= - m.x3 + 0.1*m.b175 + 0.2*m.b176 + 0.3*m.b177 + 0.4*m.b178 + 0.5*m.b179 + 0.6*m.b180
                          + 0.7*m.b181 + 0.8*m.b182 + 0.9*m.b183 + m.b184 + 1.1*m.b185 + 1.2*m.b186 + 1.3*m.b187
                          + 1.4*m.b188 + 1.5*m.b189 + 1.6*m.b190 + 1.7*m.b191 + 1.8*m.b192 + 1.9*m.b193 + 2*m.b194
                          + 2.1*m.b195 + 2.2*m.b196 + 2.3*m.b197 + 2.4*m.b198 + 2.5*m.b199 + 2.6*m.b200 + 2.8*m.b201
                          + 3*m.b202 + 3.2*m.b203 + 3.4*m.b204 == 0)
m.c172 = Constraint(expr= - | |
# scripts/addons/uvpackmaster2/panel_base.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import multiprocessing
from .prefs import get_prefs
from .operator import *
from .operator_islands import *
from .operator_box import *
from .operator_uv import *
from .utils import *
from .presets import *
from .labels import UvpLabels
from .register import UVP2_OT_SelectUvpEngine
import bpy
class UVP2_OT_SetRotStep(bpy.types.Operator):
    """Operator that writes a chosen rotation step into the scene-level UVP2 properties."""

    bl_idname = 'uvpackmaster2.set_rot_step'
    bl_label = 'Set Rotation Step'
    bl_description = "Set Rotation Step to one of the suggested values"

    rot_step : IntProperty(
        name='Rotation Step',
        description='',
        default=90)

    def execute(self, context):
        # Store the chosen step directly on the scene property group.
        context.scene.uvp2_props.rot_step = self.rot_step
        return {'FINISHED'}
class UVP2_MT_SetRotStep(bpy.types.Menu):
    """Menu of suggested rotation-step values; each entry invokes UVP2_OT_SetRotStep."""

    bl_idname = "UVP2_MT_SetRotStep"
    bl_label = "Set Rotation Step"

    STEPS = [1, 2, 3, 5, 6, 9, 10, 15, 18, 30, 45, 90, 180]

    def draw(self, context):
        menu_layout = self.layout
        for suggested_step in self.STEPS:
            step_op = menu_layout.operator(UVP2_OT_SetRotStep.bl_idname, text=str(suggested_step))
            step_op.rot_step = suggested_step
class UVP2_UL_DeviceList(bpy.types.UIList):
    """List widget for the packing devices; flags devices the engine build does not support."""

    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        supported = item.supported
        label_text = str(item.name) if supported else str(item.name) + ' ' + UvpLabels.FEATURE_NOT_SUPPORTED_MSG
        icon_name = 'NONE' if supported else UvpLabels.FEATURE_NOT_SUPPORTED_ICON
        layout.row().label(text=label_text, icon=icon_name)
class UVP2_PT_Generic(bpy.types.Panel):
    """Common base for all UVPackmaster2 panels.

    Caches addon preferences and scene properties in draw(), then delegates the
    actual layout to the subclass's draw_specific(). Also provides helpers for
    drawing properties that may be unsupported by the current engine build.
    """

    def draw(self, context):
        # Cache for use by draw_specific() and the handle_prop* helpers.
        self.prefs = get_prefs()
        self.scene_props = context.scene.uvp2_props
        self.draw_specific(context)

    def handle_prop(self, prop_name, supported, not_supported_msg, ui_elem):
        """Draw scene property *prop_name* into *ui_elem*; when unsupported,
        disable the element and show *not_supported_msg* next to it."""
        if supported:
            ui_elem.prop(self.scene_props, prop_name)
        else:
            ui_elem.enabled = False
            split = ui_elem.split(factor=0.4)
            col_s = split.column()
            col_s.prop(self.scene_props, prop_name)
            col_s = split.column()
            col_s.label(text=not_supported_msg)

    def handle_prop_enum(self, prop_name, prop_label, supported, not_supported_msg, ui_elem):
        """Draw an enum property with its label above it; when unsupported,
        show *not_supported_msg* next to the label and disable the element."""
        prop_label_colon = prop_label + ':'
        if supported:
            ui_elem.label(text=prop_label_colon)
        else:
            split = ui_elem.split(factor=0.4)
            col_s = split.column()
            col_s.label(text=prop_label_colon)
            col_s = split.column()
            col_s.label(text=not_supported_msg)
        ui_elem.prop(self.scene_props, prop_name, text='')
        # Disable after drawing so the greyed-out value is still visible.
        ui_elem.enabled = supported

    def messages_in_boxes(self, ui_elem, messages):
        """Render each message in its own box, wrapped at 60 characters per line."""
        for msg in messages:
            box = ui_elem.box()
            msg_split = split_by_chars(msg, 60)
            if msg_split:  # idiomatic truthiness instead of len(...) > 0
                for msg_part in msg_split:
                    box.label(text=msg_part)
class UVP2_PT_MainBase(UVP2_PT_Generic):
    """Main UVPackmaster2 panel: engine status, core operators, warnings and presets."""

    bl_idname = 'UVP2_PT_MainBase'
    bl_label = 'UVPackmaster2'
    bl_context = ''

    def draw_specific(self, context):
        layout = self.layout
        # Operator button labels get a "(DEMO)" suffix when the demo engine build is active.
        demo_suffix = " (DEMO)" if self.prefs.FEATURE_demo else ''

        # Engine status line, with a setup-help button if the engine is not initialized.
        row = layout.row()
        row.label(text=self.prefs.label_message)
        if not self.prefs.uvp_initialized:
            row.operator(UVP2_OT_UvpSetupHelp.bl_idname, icon='HELP', text='')

        # Engine path: shown read-only (disabled sub-row) with a browse button next to it.
        row = layout.row()
        row2 = row.row()
        row2.enabled = False
        row2.prop(self.prefs, 'uvp_path')
        # Icon name differs between Blender 2.8+ and older releases.
        select_icon = 'FILEBROWSER' if is_blender28() else 'FILE_FOLDER'
        row.operator(UVP2_OT_SelectUvpEngine.bl_idname, icon=select_icon, text='')

        col = layout.column(align=True)
        col.separator()

        # Debug-only options (visible only when the addon runs in debug mode).
        if in_debug_mode():
            box = col.box()
            col2 = box.column(align=True)
            col2.label(text="Debug options:")
            row = col2.row(align=True)
            row.prop(self.prefs, "write_to_file")
            row = col2.row(align=True)
            row.prop(self.prefs, "wait_for_debugger")
            row = col2.row(align=True)
            row.prop(self.prefs, "seed")
            row = col2.row(align=True)
            row.prop(self.prefs, "test_param")
            col.separator()

        col.label(text="Engine operations:")
        # Overlap check: greyed out (with a message) if the engine build lacks the feature.
        row = col.row(align=True)
        row.enabled = self.prefs.FEATURE_overlap_check
        row.operator(UVP2_OT_OverlapCheckOperator.bl_idname)
        if not self.prefs.FEATURE_overlap_check:
            row.label(text=UvpLabels.FEATURE_NOT_SUPPORTED_MSG)

        col.operator(UVP2_OT_MeasureAreaOperator.bl_idname)

        # Validate operator
        row = col.row(align=True)
        row.enabled = self.prefs.FEATURE_validation
        row.operator(UVP2_OT_ValidateOperator.bl_idname, text=UVP2_OT_ValidateOperator.bl_label + demo_suffix)
        if not self.prefs.FEATURE_validation:
            row.label(text=UvpLabels.FEATURE_NOT_SUPPORTED_MSG)

        # The main Pack button, scaled up to stand out.
        row = col.row(align=True)
        row.scale_y = 1.75
        row.operator(UVP2_OT_PackOperator.bl_idname, text=UVP2_OT_PackOperator.bl_label + demo_suffix)

        # Status of the last engine operation ('------' placeholder when empty).
        col.label(text="Last operation status:")
        box = col.box()
        box.label(text=self.prefs['op_status'] if self.prefs['op_status'] != '' else '------')
        col.separator()

        # Warnings collected by the last operation, each in its own box.
        if len(self.prefs['op_warnings']) > 0:
            row = col.row()
            row.label(text="WARNINGS:", icon=UvpLabels.FEATURE_NOT_SUPPORTED_ICON)
            self.messages_in_boxes(col, self.prefs['op_warnings'])
            col.separator()

        col.separator()
        col.label(text="Other operations:")
        row = col.row(align=True)
        row.operator(UVP2_OT_SplitOverlappingIslands.bl_idname)
        row = col.row(align=True)
        row.operator(UVP2_OT_UndoIslandSplit.bl_idname)
        row = col.row(align=True)
        row.operator(UVP2_OT_AdjustScaleToUnselected.bl_idname)
        if in_debug_mode():
            row = col.row(align=True)
            row.operator(UVP2_OT_DebugIslands.bl_idname)

        # Save/load/reset of option presets.
        col.separator()
        col.label(text='Option presets:')
        row = col.row(align=True)
        row.operator(UVP2_OT_SavePreset.bl_idname)
        row.operator(UVP2_OT_LoadPreset.bl_idname)
        row = col.row(align=True)
        row.operator(UVP2_OT_ResetToDefaults.bl_idname)
class UVP2_PT_PackingDeviceBase(UVP2_PT_Generic):
    """Panel listing the available packing devices and the multi-device option."""

    bl_label = 'Packing Devices'
    bl_context = ''
    bl_options = {'DEFAULT_CLOSED'}

    def draw_specific(self, context):
        main_col = self.layout.column(align=True)
        main_col.template_list("UVP2_UL_DeviceList", "", self.prefs, "dev_array",
                               self.prefs, "sel_dev_idx")

        # Informational warning, greyed out so it reads as a note.
        warning_box = main_col.box()
        warning_box.label(text=UvpLabels.PACKING_DEVICE_WARNING)
        warning_box.enabled = False

        # Multi device
        multi_box = main_col.box()
        multi_box.enabled = self.prefs.FEATURE_multi_device_pack
        multi_row = multi_box.row()
        self.handle_prop("multi_device_pack", self.prefs.FEATURE_multi_device_pack, UvpLabels.FEATURE_NOT_SUPPORTED_MSG, multi_row)
class UVP2_PT_BasicOptionsBase(UVP2_PT_Generic):
    """Panel with the core packing options: threads, margin, precision, rotation, pre-validation."""

    bl_label = 'Basic Options'
    bl_context = ''

    def draw_specific(self, context):
        layout = self.layout
        col = layout.column(align=True)
        col.prop(self.prefs, "thread_count")
        col.prop(self.scene_props, "margin")
        col.prop(self.scene_props, "precision")

        # Rotation Resolution
        box = col.box()
        box.enabled = self.prefs.FEATURE_island_rotation
        row = box.row()
        # TODO: missing feature check
        row.prop(self.scene_props, "rot_enable")
        row = box.row()
        # Pre-rotation toggle is meaningful only when rotations are enabled.
        row.enabled = self.scene_props.rot_enable
        row.prop(self.scene_props, "prerot_disable")

        # Rotation step value plus the 'Set' menu of suggested values.
        row = col.row(align=True)
        row.enabled = self.scene_props.rot_enable
        split = row.split(factor=0.8, align=True)
        col_s = split.row(align=True)
        col_s.prop(self.scene_props, "rot_step")
        col_s = split.row(align=True)
        col_s.menu(UVP2_MT_SetRotStep.bl_idname, text='Set')

        # Pre validate
        # NOTE(review): pre_validate_name is computed (with a demo suffix) but never
        # used below — looks like it was meant to be passed to handle_prop; confirm.
        pre_validate_name = UvpLabels.PRE_VALIDATE_NAME
        if self.prefs.FEATURE_demo:
            pre_validate_name += ' (DEMO)'

        box = col.box()
        box.enabled = self.prefs.FEATURE_validation
        row = box.row()
        self.handle_prop("pre_validate", self.prefs.FEATURE_validation, UvpLabels.FEATURE_NOT_SUPPORTED_MSG, row)
class UVP2_PT_IslandRotStepBase(UVP2_PT_Generic):
    """Panel for per-island rotation-step parameters (set/reset/show)."""

    bl_label = 'Island Rotation Step'
    bl_context = ''
    bl_options = {'DEFAULT_CLOSED'}

    def draw_specific(self, context):
        layout = self.layout

        # Panel is active only when the engine supports per-island rotation
        # steps AND rotations are enabled in the basic options.
        panel_enabled = True
        if not self.prefs.FEATURE_island_rotation_step:
            layout.label(text=UvpLabels.FEATURE_NOT_SUPPORTED_MSG)
            panel_enabled = False
        elif not self.scene_props.rot_enable:
            layout.label(text='Island rotations must be enabled to activate this panel', icon='ERROR')
            panel_enabled = False

        col = layout.column(align=True)
        col.enabled = panel_enabled
        box = col.box()
        row = box.row()
        row.prop(self.scene_props, "island_rot_step_enable")
        row.operator(UVP2_OT_IslandRotStepHelp.bl_idname, icon='HELP', text='')

        # Parameter controls, active only when the per-island step is enabled.
        col2 = col.column(align=True)
        col2.enabled = self.scene_props.island_rot_step_enable
        row = col2.row(align=True)
        row.prop(self.scene_props, "island_rot_step")
        row = col2.row(align=True)
        row.operator(UVP2_OT_SetRotStepIslandParam.bl_idname)
        col2.separator()
        row = col2.row(align=True)
        row.operator(UVP2_OT_ResetRotStepIslandParam.bl_idname)
        row = col2.row(align=True)
        row.operator(UVP2_OT_ShowRotStepIslandParam.bl_idname)
class UVP2_PT_ManualGroupingBase(UVP2_PT_Generic):
    """Panel for assigning islands to manual packing groups and selecting by group."""

    bl_label = 'Manual Grouping'
    bl_context = ''
    bl_options = {'DEFAULT_CLOSED'}

    def draw_specific(self, context):
        layout = self.layout

        # The whole panel is inactive when the engine build lacks grouping.
        panel_enabled = True
        if not self.prefs.FEATURE_grouping:
            layout.label(text=UvpLabels.FEATURE_NOT_SUPPORTED_MSG)
            panel_enabled = False

        col = layout.column(align=True)
        col.enabled = panel_enabled

        container = col
        row = container.row(align=True)
        row.prop(self.scene_props, "manual_group_num")
        row.operator(UVP2_OT_ManualGroupingHelp.bl_idname, icon='HELP', text='')
        row = container.row(align=True)
        row.operator(UVP2_OT_SetManualGroupIslandParam.bl_idname)

        # Select/deselect islands belonging to the chosen group (same operator,
        # differentiated by its 'select' flag).
        container.separator()
        container.label(text="Select islands assigned to a group:")
        row = container.row(align=True)
        op = row.operator(UVP2_OT_SelectManualGroupIslandParam.bl_idname, text="Select")
        op.select = True
        op = row.operator(UVP2_OT_SelectManualGroupIslandParam.bl_idname, text="Deselect")
        op.select = False

        # container.separator()
        # row = container.row(align=True)
        # row.operator(UVP2_OT_ResetManualGroupIslandParam.bl_idname)
        row = container.row(align=True)
        row.operator(UVP2_OT_ShowManualGroupIslandParam.bl_idname)
class UVP2_PT_LockGroupsBase(UVP2_PT_Generic):
    """Panel for lock groups: islands in the same lock group keep their relative placement."""

    bl_label = 'Lock Groups'
    bl_context = ''
    bl_options = {'DEFAULT_CLOSED'}

    def draw_specific(self, context):
        layout = self.layout

        # Inactive when the engine build lacks the lock-overlapping feature.
        panel_enabled = True
        if not self.prefs.FEATURE_lock_overlapping:
            layout.label(text=UvpLabels.FEATURE_NOT_SUPPORTED_MSG)
            panel_enabled = False

        col = layout.column(align=True)
        col.enabled = panel_enabled

        box = col.box()
        row = box.row()
        row.prop(self.scene_props, "lock_groups_enable")

        # Controls below are active only when lock groups are enabled.
        container = col.column(align=True)
        container.enabled = self.scene_props.lock_groups_enable
        row = container.row(align=True)
        row.prop(self.scene_props, "lock_group_num")
        row = container.row(align=True)
        row.operator(UVP2_OT_SetLockGroupIslandParam.bl_idname)
        row = container.row(align=True)
        row.operator(UVP2_OT_SetFreeLockGroupIslandParam.bl_idname)

        # Select/deselect islands of the chosen lock group ('select' flag
        # distinguishes the two buttons).
        container.separator()
        container.label(text="Select islands assigned to a lock group:")
        row = container.row(align=True)
        op = row.operator(UVP2_OT_SelectLockGroupIslandParam.bl_idname, text="Select")
        op.select = True
        op = row.operator(UVP2_OT_SelectLockGroupIslandParam.bl_idname, text="Deselect")
        op.select = False

        # row = container.row(align=True)
        # row.operator(UVP2_OT_SelectNonDefaultLockGroupIslandParam.bl_idname)
        row = container.row(align=True)
        row.operator(UVP2_OT_ResetLockGroupIslandParam.bl_idname)
        row = container.row(align=True)
        row.operator(UVP2_OT_ShowLockGroupIslandParam.bl_idname)
class UVP2_PT_HeuristicBase(UVP2_PT_Generic):
    """Heuristic-search options panel (search/wait times, advanced heuristic)."""

    bl_label = 'Heuristic'
    bl_context = ''
    bl_options = {'DEFAULT_CLOSED'}

    def draw_specific(self, context):
        layout = self.layout
        # Local name fixes the original 'heurstic' typo; purely internal.
        heuristic_supported, unsupported_msg = self.prefs.heuristic_supported(self.scene_props)

        main_col = layout.column(align=True)
        main_col.enabled = heuristic_supported

        # Heuristic search toggle with its help button.
        toggle_box = main_col.box()
        toggle_box.enabled = self.prefs.FEATURE_heuristic_search
        toggle_row = toggle_box.row()
        self.handle_prop("heuristic_enable", heuristic_supported, unsupported_msg, toggle_row)
        toggle_row.operator(UVP2_OT_HeuristicSearchHelp.bl_idname, icon='HELP', text='')

        # Timing options, active only while the heuristic search is enabled.
        options_col = main_col.column(align=True)
        options_col.enabled = self.prefs.heuristic_enabled(self.scene_props)
        options_col.row(align=True).prop(self.scene_props, "heuristic_search_time")
        options_col.row(align=True).prop(self.scene_props, "heuristic_max_wait_time")

        # Advanced Heuristic
        adv_box = options_col.box()
        adv_box.enabled = self.prefs.advanced_heuristic_available(self.scene_props)
        adv_row = adv_box.row()
        self.handle_prop("advanced_heuristic", self.prefs.FEATURE_advanced_heuristic, UvpLabels.FEATURE_NOT_SUPPORTED_MSG, adv_row)
class UVP2_PT_NonSquarePackingBase(UVP2_PT_Generic):
    """Panel for packing into non-square (texture aspect-ratio aware) targets."""

    bl_label = 'Non-Square Packing'
    bl_context = ''
    bl_options = {'DEFAULT_CLOSED'}

    def draw_specific(self, context):
        main_col = self.layout.column(align=True)

        # Texture-ratio toggle, greyed out when the engine build lacks support.
        ratio_row = main_col.box().row()
        self.handle_prop("tex_ratio", self.prefs.pack_ratio_supported(), UvpLabels.FEATURE_NOT_SUPPORTED_MSG, ratio_row)

        main_col.separator()
        adjust_row = main_col.row(align=True)
        adjust_row.operator(UVP2_OT_AdjustIslandsToTexture.bl_idname)
        adjust_row.operator(UVP2_OT_NonSquarePackingHelp.bl_idname, icon='HELP', text='')
        undo_row = main_col.row(align=True)
        undo_row.operator(UVP2_OT_UndoIslandsAdjustemntToTexture.bl_idname)
class UVP2_PT_AdvancedOptionsBase(UVP2_PT_Generic):
bl_label = 'Advanced Options'
bl_context = ''
bl_options = {'DEFAULT_CLOSED'}
def draw_specific(self, context):
layout = self.layout
demo_suffix = " (DEMO)" if self.prefs.FEATURE_demo else ''
col = layout.column(align=True)
# Grouped pack
box = col.box()
col2 = box.column(align=True)
col2.label(text=UvpLabels.PACK_MODE_NAME + ':')
row = col2.row(align=True)
row.prop(self.scene_props, "pack_mode", text='')
if self.prefs.tiles_enabled(self.scene_props):
row.operator(UVP2_OT_UdimSupportHelp.bl_idname, icon='HELP', text='')
box = col2.box()
box.prop(self.scene_props, "use_blender_tile_grid")
tile_col = col2.column(align=True)
tile_col.enabled = not self.scene_props.use_blender_tile_grid
if self.prefs.pack_to_tiles(self.scene_props):
tile_col.prop(self.scene_props, "tile_count")
tile_col.prop(self.scene_props, "tiles_in_row")
if self.prefs.pack_groups_together(self.scene_props):
row = col2.row(align=True)
row.prop(self.scene_props, "group_compactness")
if self.prefs.grouping_enabled(self.scene_props):
box = col.box()
col3 = box.column()
col3.label(text=UvpLabels.GROUP_METHOD_NAME + ':')
row = col3.row(align=True)
row.prop(self.scene_props, "group_method", text='')
if self.scene_props.group_method == UvGroupingMethod.MANUAL.code:
row.operator(UVP2_OT_ManualGroupingHelp.bl_idname, icon='HELP', text='')
# col2.separator()
# Pack to others
box = col.box()
pto_supported, pto_not_supported_msg = self.prefs.pack_to_others_supported(self.scene_props)
row = box.row()
self.handle_prop("pack_to_others", pto_supported, pto_not_supported_msg, row)
# Fixed Scale
box = col.box()
fs_supported, fs_not_supported_msg = self.prefs.fixed_scale_supported(self.scene_props)
row = box.row()
self.handle_prop("fixed_scale", fs_supported, fs_not_supported_msg, row)
# box = col.box()
col2 = box.column()
col2.enabled = self.prefs.fixed_scale_enabled(self.scene_props)
col2.label(text=UvpLabels.FIXED_SCALE_STRATEGY_NAME + ':')
row = col2.row(align=True)
row.prop(self.scene_props, "fixed_scale_strategy", text='')
# Normalize islands
box = col.box()
norm_supported, norm_not_supported_msg = self.prefs.normalize_islands_supported(self.scene_props)
row = box.row()
self.handle_prop("normalize_islands", norm_supported, norm_not_supported_msg, | |
from collections import namedtuple
from itertools import islice
import numpy as np
import pandas as pd
from dataclasses import dataclass
@dataclass
class BinningInfo(object):
    """Describes a 1D binning: the spanned extents, bin width, bin count and bin indices."""
    variable_extents: tuple  # (min, max) range covered by the bins — TODO confirm ordering with callers
    step: float  # width of a single bin
    num_bins: int  # total number of bins
    bin_indicies: np.ndarray  # bin index array; NOTE(review): keeps the original 'indicies' spelling — renaming would break callers
def build_spanning_grid_matrix(x_values, y_values, debug_print=False):
    """Build a 2D grid of (x, y) entries spanning x_values along axis 0 and y_values along axis 1.

    For example, used to build a grid of position points from xbin and ybin centers.

    Args:
        x_values: 1D sequence of length num_cols.
        y_values: 1D sequence of length num_rows.
        debug_print: if True, print the intermediate shapes.

    Returns:
        all_entries_matrix: ndarray of shape (num_cols, num_rows, 2) with
            all_entries_matrix[i, j] == (x_values[i], y_values[j]).
        flat_all_entries_matrix: list of (x, y) 2-tuples in row-major (i-major) order.
        original_data_shape: the (num_cols, num_rows) tuple.

    Usage:
        all_positions_matrix, flat_all_positions_matrix, original_data_shape = build_spanning_grid_matrix(active_one_step_decoder.xbin_centers, active_one_step_decoder.ybin_centers)
    """
    num_rows = len(y_values)
    num_cols = len(x_values)
    original_data_shape = (num_cols, num_rows)  # original_position_data_shape: (64, 29)
    if debug_print:
        print(f'original_position_data_shape: {original_data_shape}')
    # meshgrid with 'ij' indexing puts x along axis 0 and y along axis 1 —
    # replaces the previous manual expand_dims/repeat/transpose construction.
    x_grid, y_grid = np.meshgrid(x_values, y_values, indexing='ij')
    all_entries_matrix = np.stack((x_grid, y_grid), axis=-1)  # (num_cols, num_rows, 2)
    # Flatten in C order so the tuples are i-major, matching the matrix layout.
    flat_all_entries_matrix = [tuple(entry) for entry in all_entries_matrix.reshape(-1, 2)]
    if debug_print:
        print(f'np.shape(all_positions_matrix): {np.shape(all_entries_matrix)}')
        print(f'flat_all_positions_matrix[0]: {flat_all_entries_matrix[0]}\nall_positions_matrix[0,0,:]: {all_entries_matrix[0,0,:]}')
    return all_entries_matrix, flat_all_entries_matrix, original_data_shape
def safe_get(list, index, fallback_value):
    """Like dict's .get(key, fallback), but for list-like objects.

    Args:
        list: a list-like object (parameter name kept for backward compatibility,
            even though it shadows the builtin).
        index: an index into the sequence; negative indices behave as usual.
        fallback_value: value returned when the index is out of range.

    Returns:
        The element at ``index``, or ``fallback_value`` when indexing raises IndexError.
    """
    try:
        value = list[index]
    except IndexError:
        value = fallback_value
    return value
def safe_pandas_get_group(dataframe_group, key):
    """Return dataframe_group.get_group(key), or an empty (zero-row) frame with the
    same columns when the key is not present in the group."""
    if key not in dataframe_group.groups:
        # Drop every row from the underlying frame: yields an empty frame that
        # preserves the original columns/dtypes.
        source_df = dataframe_group.obj
        return source_df.drop(source_df.index)
    return dataframe_group.get_group(key)
# class MatrixFlattenTransformer(object):
# """ Supposed to allow easy transformation of data from a flattened representation to the original.
# Usage:
# trans = MatrixFlattenTransformer(original_data_shape)
# test_all_positions_matrix = trans.unflatten(flat_all_positions_matrix)
# print(f'np.shape(test_all_positions_matrix): {np.shape(test_all_positions_matrix)}')
# """
# """ TODO: does not yet work. for MatrixFlattenTransformer."""
# def __init__(self, original_data_shape):
# super(MatrixFlattenTransformer, self).__init__()
# self.original_data_shape = original_data_shape
# def flatten(self, data):
# data_shape = np.shape(data)
# original_flat_shape = np.prod(self.original_data_shape)
# # assert np.shape(data) == self.original_data_shape, f"data passed in to flatten (with shape {np.shape(data)}) is not equal to the original data shape: {self.original_data_shape}"
# assert data_shape == original_flat_shape, f"data passed in to flatten (with shape {data_shape}) is not equal to the original shape's number of items (shape: {self.original_data_shape}, original_flat_shape: {original_flat_shape}"
# return np.reshape(data, (-1, 1))
# def unflatten(self, flat_data):
# flat_data_shape = np.shape(flat_data)
# original_data_shape_ndim = len(self.original_data_shape)
# # assert (flat_data_shape[:original_data_shape_ndim] == self.original_data_shape), f"data passed in to unflatten (with shape {flat_data_shape}) must match the original data shape ({self.original_data_shape}), at least up to the number of dimensions in the original"
# additional_dimensions = flat_data_shape[original_data_shape_ndim:]
# return np.reshape(flat_data, (self.original_data_shape[0], self.original_data_shape[1], *additional_dimensions))
def build_spanning_bins(variable_values, max_bin_size:float, debug_print=False):
    """ DEPRECATED! out_digitized_variable_bins include both endpoints (bin edges)

    Args:
        variable_values ([type]): values whose (nanmin, nanmax) extents define the binning range
        max_bin_size (float): upper bound on the size of each bin
        debug_print (bool, optional): print binning details. Defaults to False.

    Returns:
        out_digitized_variable_bins [type]: the bin edges (includes both endpoints)
        out_binning_info [BinningInfo]: contains info about how the binning was conducted

    NOTE: this function raises DeprecationWarning unconditionally on entry;
    everything below the raise is unreachable legacy code kept for reference.
    """
    raise DeprecationWarning
    # ---- unreachable legacy implementation below (kept for reference) ----
    # compute extents:
    curr_variable_extents = (np.nanmin(variable_values), np.nanmax(variable_values))
    num_subdivisions = int(np.ceil((curr_variable_extents[1] - curr_variable_extents[0])/max_bin_size)) # get the next integer size above float_bin_size
    actual_subdivision_step_size = (curr_variable_extents[1] - curr_variable_extents[0]) / float(num_subdivisions) # the actual exact size of the bin
    if debug_print:
        print(f'for max_bin_size: {max_bin_size} -> num_subdivisions: {num_subdivisions}, actual_subdivision_step_size: {actual_subdivision_step_size}')
    # out_bin_indicies = np.arange(num_subdivisions)
    out_binning_info = BinningInfo(curr_variable_extents, actual_subdivision_step_size, num_subdivisions, np.arange(num_subdivisions))
    out_digitized_variable_bins = np.linspace(curr_variable_extents[0], curr_variable_extents[1], num_subdivisions, dtype=float)  # .astype(float)
    assert out_digitized_variable_bins[-1] == out_binning_info.variable_extents[1], "out_digitized_variable_bins[-1] should be the maximum variable extent!"
    assert out_digitized_variable_bins[0] == out_binning_info.variable_extents[0], "out_digitized_variable_bins[0] should be the minimum variable extent!"
    # All of the above are the bin_edges
    return out_digitized_variable_bins, out_binning_info
def compute_spanning_bins(variable_values, num_bins:int=None, bin_size:float=None):
    """Compute 1D bin edges spanning the (nanmin, nanmax) extent of variable_values.

    Exactly one of num_bins / bin_size must be given:
      - num_bins: produce exactly num_bins edges via np.linspace.
      - bin_size: produce fixed-size bins via np.arange (due to the arange
        endpoint caveat noted inline, the last edge may overshoot the maximum).

    Args:
        variable_values ([type]): values whose extents define the binning range
        num_bins (int, optional): fixed number of bin edges. Defaults to None.
        bin_size (float, optional): fixed size of each bin. Defaults to None.

    Raises:
        ValueError: unreachable in practice — the asserts above enforce the XOR.

    Returns:
        (xbin, BinningInfo): the bin edges and a BinningInfo describing them.
        (BinningInfo is defined elsewhere in this module.)

    Usage:
        ## Binning with Fixed Bin Sizes:
        xbin, bin_info = compute_spanning_bins(pos_df.x.to_numpy(), bin_size=active_config.computation_config.grid_bin[0]) # bin_size mode
        print(bin_info)
        ## Binning with a Fixed Number of Bins:
        xbin, bin_info = compute_spanning_bins(pos_df.x.to_numpy(), num_bins=num_bins) # num_bins mode
        print(bin_info)
    """
    assert (num_bins is None) or (bin_size is None), 'You cannot constrain both num_bins AND bin_size. Specify only one or the other.'
    assert (num_bins is not None) or (bin_size is not None), 'You must specify either the num_bins XOR the bin_size.'
    curr_variable_extents = (np.nanmin(variable_values), np.nanmax(variable_values))
    if num_bins is not None:
        ## Binning with Fixed Number of Bins:
        mode = 'num_bins'
        xnum_bins = num_bins
        xbin, xstep = np.linspace(curr_variable_extents[0], curr_variable_extents[1], num=num_bins, retstep=True) # binning of x position
    elif bin_size is not None:
        ## Binning with Fixed Bin Sizes:
        mode = 'bin_size'
        xstep = bin_size
        xbin = np.arange(curr_variable_extents[0], (curr_variable_extents[1] + xstep), xstep, ) # binning of x position
        # the interval does not include this value, except in some cases where step is not an integer and floating point round-off affects the length of out.
        xnum_bins = len(xbin)
    else:
        raise ValueError
    return xbin, BinningInfo(curr_variable_extents, xstep, xnum_bins, np.arange(xnum_bins))
def compute_position_grid_size(*any_1d_series, num_bins:tuple):
    """Compute the bin step size required to achieve num_bins for each 1D series independently.

    Usage:
        out_grid_bin_size, out_bins, out_bins_infos = compute_position_grid_size(curr_kdiba_pipeline.sess.position.x, curr_kdiba_pipeline.sess.position.y, num_bins=(64, 64))
        active_grid_bin = tuple(out_grid_bin_size)
        print(f'active_grid_bin: {active_grid_bin}') # (3.776841861770752, 1.043326930905373)

    Returns:
        (step_sizes, bins, bin_infos): a float array of per-dimension step sizes,
        the list of bin-edge arrays, and the list of BinningInfo records.
    """
    assert (len(any_1d_series)) == len(num_bins), f'(len(other_1d_series)) must be the same length as the num_bins tuple! But (len(other_1d_series)): {(len(any_1d_series))} and len(num_bins): {len(num_bins)}!'
    out_bins = []
    out_bins_info = []
    step_sizes = []
    # Bin each series with its requested bin count and collect the resulting step sizes.
    for series, required_num_bins in zip(any_1d_series, num_bins):
        bins, bin_info = compute_spanning_bins(series, num_bins=required_num_bins)
        out_bins.append(bins)
        out_bins_info.append(bin_info)
        step_sizes.append(bin_info.step)
    return np.array(step_sizes), out_bins, out_bins_info
def get_bin_centers(bin_edges):
    """Return the midpoints of consecutive 1D bin edges; output has one fewer element than bin_edges."""
    edges = np.asarray(bin_edges)
    half_widths = np.diff(edges) / 2.0
    return edges[:-1] + half_widths
def get_bin_edges(bin_centers):
    """ TODO: CHECK
    For a series of 1D bin centers given by bin_centers, returns the edges of the bins.
    Reciprocal of get_bin_centers(bin_edges)

    NOTE(review): the half-width is taken from the first two centers only, so this
    assumes a fixed bin width — irregular spacing will produce incorrect edges.
    The shape of the return value is whatever interleave_elements produces for
    the per-bin start/end arrays; confirm it matches callers' expectations.
    """
    half_bin_width = float((bin_centers[1] - bin_centers[0])) / 2.0 # TODO: assumes fixed bin width
    bin_starts = bin_centers - half_bin_width
    bin_ends = bin_centers + half_bin_width
    return interleave_elements(bin_starts, bin_ends)
def build_pairwise_indicies(target_indicies, debug_print=False):
    """Return consecutive (current, next) index pairs from a flat sequence of indices.

    Example:
        target_indicies = np.arange(5)  # [0, 1, 2, 3, 4]
        build_pairwise_indicies(target_indicies) -> [(0, 1), (1, 2), (2, 3), (3, 4)]

    Args:
        target_indicies: a sliceable sequence of indices.
        debug_print (bool, optional): print the intermediate slices. Defaults to False.

    Returns:
        list of 2-tuples pairing each element with its successor (wrapped in a
        list so the result can be iterated more than once).
    """
    start_pairs = target_indicies[0:-1]  # every element except the last
    end_pairs = target_indicies[1:]  # every element except the first
    if debug_print:
        print(f'target_indicies: {target_indicies}\nstart_pairs: {start_pairs}\nend_pairs: {end_pairs}')
    return list(zip(start_pairs, end_pairs))
def interleave_elements(start_points, end_points):
    """ Given two equal sized arrays, produces an output array of double that size that contains elements of start_points interleaved with elements of end_points
    Example:
        a_starts = ['A','B','C','D']
        a_ends = ['a','b','c','d']
        a_interleaved = interleave_elements(a_starts, a_ends)
        >> a_interleaved: ['A','a','B','b','C','c','D','d']

    For 2D inputs of shape (N, M) the rows are interleaved, giving shape (2N, M).

    Fixes vs. the previous implementation:
      - the output buffer now uses the common dtype of the inputs instead of a
        float np.zeros buffer, so non-numeric inputs (e.g. the string example
        above) no longer crash;
      - 1D inputs are genuinely element-interleaved as documented, instead of
        being promoted to rows by np.atleast_2d and returned stacked as (2, N).
    """
    assert np.shape(start_points) == np.shape(end_points), f"start_points and end_points must be the same shape. np.shape(start_points): {np.shape(start_points)}, np.shape(end_points): {np.shape(end_points)}"
    start_points = np.asarray(start_points)
    end_points = np.asarray(end_points)
    out_dtype = np.result_type(start_points, end_points)
    if start_points.ndim == 1:
        # True element-wise interleave for 1D inputs, as documented above.
        all_points = np.empty((start_points.size * 2,), dtype=out_dtype)
        all_points[0::2] = start_points  # fill the even elements
        all_points[1::2] = end_points  # fill the odd elements
        return all_points
    # 2D (and higher): interleave along the first axis (rows).
    all_points_shape = (np.shape(start_points)[0] * 2, *np.shape(start_points)[1:])  # double the length of start_points
    all_points = np.empty(all_points_shape, dtype=out_dtype)
    all_points[0::2] = start_points  # fill the even rows
    all_points[1::2] = end_points  # fill the odd rows
    assert np.shape(all_points)[0] == (np.shape(start_points)[0] * 2), f"newly created all_points is not of corrrect size! np.shape(all_points): {np.shape(all_points)}"
    return all_points
def get_dict_subset(a_dict, included_keys=None, require_all_keys=False):
"""Gets a subset of a dictionary from a list of keys (included_keys)
Args:
a_dict ([type]): [description]
included_keys ([type], optional): [description]. Defaults to None.
require_all_keys: Bool, if True, requires all keys in included_keys to be in the dictionary (a_dict)
Returns:
[type]: [description]
| |
import random
import pygame
import constants
import utils
from graphics_environment import Environment, Triggers
import os, sys
import time
from graphics_fauna import Player, Npcs
from dialogs import DialogFight, DialogText, DialogPlayerInventory, \
DialogInput, DialogPlayerInfo, DialogText, DialogGoodbye, \
DialogUseItemInInventory, DialogShowQuests
from NEW_inventory import Conversation
# -----------------------------------------------------------
# class Game
# -----------------------------------------------------------
class Game:
def __init__(self):
    """Set up the Game shell: read the saved character name, create empty
    world/actor slots, initialize pygame, and prepare the main-loop state.
    The actual zone/map/NPC/player data is loaded later by read_data()."""
    user_info = utils.get_user_data()
    self.player_name = user_info["character_name"]
    self.zone_name = ""
    self.map_name = ""
    # ----
    # World/actor objects are constructed in read_data(); placeholders for now.
    self.environment = None
    self.npcs = None
    self.player = None
    self.screen = None
    self.init_pygame()
    # ----
    self.all_sprites = pygame.sprite.Group()
    self.keep_looping = True
    self.current_monster = None
    # -------------------------------------
    # self.quest_histories = None
def read_data(self):
    """Load the current zone/map, environment, NPCs and player from the user's
    save data, and run the resurrection prompt when the loaded player is dead."""
    user_data = utils.get_user_data()
    self.zone_name = user_data["zone_name"]
    self.map_name = user_data["map_name"]
    # NOTE(review): _exception01/_change01 are defined elsewhere in this class;
    # presumably a special-case zone/map fixup — confirm their purpose.
    if self._exception01() == True: self._change01()
    pygame.display.set_caption("Enter {} | ({})".format(constants.TITLE, self.map_name))
    # print(user_data)
    # ----
    self.environment = Environment(self.zone_name, self.map_name)
    self.environment.read_data()
    self.npcs = Npcs(self.zone_name, self.map_name)
    self.npcs.read_data()
    self.player = Player(self.player_name, self.zone_name, self.map_name)
    self.player.read_data()
    # ----
    if self.player.is_dead() == True:
        # Ask the user whether to resurrect ("y"/"n"; empty string means ESC).
        myresult = self.player.resurrect_player()
        # ----
        if myresult == "y":
            # Reset the character files to the profession's originals and reload.
            utils.copy_original_player_files(self.player.profession, self.player.name)
            self.player.read_data()
            self.init_pygame()
        elif myresult == "n":
            self.keep_looping = False
        elif len(myresult) == 0:
            # esc was pressed; I'm going to take it that the player wishes to quit.
            self.keep_looping = False
        else:
            raise ValueError("Error!")
def init_pygame(self):
    """(Re)initialize pygame: caption, screen surface, clock and font.
    Called again after every modal dialog, since dialogs set up their own display."""
    pygame.init()
    self.BG_COLOR = constants.BG_COLOR
    self.clock = pygame.time.Clock()
    pygame.display.set_caption("Enter {}".format(constants.TITLE))
    self.screen = pygame.display.set_mode((constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT))
    self.font = pygame.font.Font(None, 40)
    # self.font = pygame.font.SysFont(constants.FONT_NAME, constants.FONT_SIZE)
def player_died(self):
    """Show the game-over dialog, swap in the player's 'dead' sprite and stop the main loop."""
    s = "You're dead! Game over."
    mydialog = DialogText(s)
    mydialog.main()
    self.player.image = self.player.image_dead
    # The dialog took over the display; restore our window before exiting the loop.
    self.init_pygame()
    self.keep_looping = False
def there_is_a_monster_on_this_tile(self, x, y):
this_monster = self.npcs.get_npc_if_monster(x, y)
if this_monster is None: return False
return True
# def there_is_an_angel_on_this_tile(self, x, y):
# raise NotImplemented
# this_angel = self.npcs.get_npc_if_angel(x, y)
# if this_angel is None: return False
# return True
# def this_npc_is_a_questgiver(self, x, y):
# current_npc = self.npcs.get_npc(self.player.x, self.player.y)
# if current_npc is None:
# s = "Error! There should be an NPC here, but there isn't."
# raise ValueError(s)
# if current_npc.is_a_questgiver(self.zone_name, self.map_name) == True:
# return True
# return False
def there_is_an_npc_on_this_tile(self, x, y):
this_npc = self.npcs.get_npc(x, y)
if this_npc is None: return False
return True
def there_is_an_action_on_this_tile(self, x, y):
print("Testing to see whether there is an action on tile x,y: ({},{})".format(x, y))
this_action = self.environment.actions.get_action(x, y)
if this_action is None: return False
return True
def there_is_a_persistent_object_on_this_tile(self, x, y):
if len(self.environment.persistents) == 0: return False
this_persistent = self.environment.persistents.get_persistent_object(x, y)
if this_persistent is None: return False
return True
def debugging_info(self):
    """Print the position/zone/map state as held by this Game object, by the
    on-disk user file, and by the Player object — useful for spotting desyncs
    between the three sources of truth."""
    # ---- Debugging (after) (top)----
    mylist = []
    mylist.append("character_name (from player): {}".format(self.player.name))
    mylist.append("x,y: ({},{})".format(self.player.x, self.player.y))
    mylist.append("zone_name: {}".format(self.zone_name))
    mylist.append("map_name: {}".format(self.map_name))
    mylist.append("------------")
    mylist.append("From file:")
    mydict = utils.get_user_data()
    mylist.append("character_name: {}".format(mydict["character_name"]))
    mylist.append("zone_name: {}".format(mydict["zone_name"]))
    mylist.append("map_name: {}".format(mydict["map_name"]))
    mylist.append("------------")
    mylist.append("From Player:")
    mylist.append("x,y: ({},{})".format(self.player.x, self.player.y))
    mylist.append("zone_name: {}".format(self.player.zone_name))
    mylist.append("map_name: {}".format(self.player.map_name))
    print("******************** Debugging (begin) ********************")
    print("-----------------------------------------------------------")
    print(mylist)
    print("---------------------------------------------------------")
    print("******************** Debugging (end) ********************")
    # ---- Debugging (after) (bottom) ----
def npc_encounter(self):
    """Run an interaction with the NPC standing on the player's tile.

    Applies the gold/inventory results of the interaction back onto the player,
    then dispatches on the interaction's outcome string. Returns False when the
    user backed out of the encounter; otherwise returns None.

    Raises:
        ValueError: when no NPC is on the tile, when the interaction result is
            half-populated, or when the outcome string is unrecognized.
    """
    # The user wants an encounter with the NPC they clicked on.
    current_npc = self.npcs.get_npc(self.player.x, self.player.y)
    if current_npc is None:
        s = "Error! There should be an NPC here, but there isn't."
        raise ValueError(s)
    print("This is the amount of gold the player has before he has the INTERACTION: {}".format(self.player.gold))
    result, myinventory, player_gold = current_npc.have_interaction(self.environment.events, self.player)
    # ----
    # The following line is executed when the user exists out of an NPC encounter.
    if result is None and myinventory is None: return False
    # A half-populated result is inconsistent — treat it as a hard error.
    if result is None or myinventory is None:
        raise ValueError("Error")
    print("This is the amount of gold the player has BEFORE: {}".format(self.player.gold))
    self.player.gold = player_gold
    print("This is the amount of gold the player has AFTER: {}".format(self.player.gold))
    self.player.inventory = myinventory
    # ----
    if result == "end game":
        self.player_died()
    elif result == "load next map":
        self.map_name = utils.get_next_map_name(self.map_name)
        utils.set_user_data(self.player.name, self.zone_name, self.map_name, self.player.profession)
        self.read_data()
    elif result in ["end conversation", "continue", "completed"]:
        # player goes on about their day
        pass
    else:
        s = "I don't understand this: {}".format(result)
        raise ValueError(s)
def handle_events(self):
    """Drain the pygame event queue and react to quit/keyboard input.

    Returns:
        True  when the game should quit (window close or ESC; state saved first),
        False when input is blocked (player dead, or an action's conditions failed),
        ""    after a tile action / persistent object was handled (display reset),
        None  otherwise.
    """
    # catch all events here
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            self.keep_looping = False
            self.save_data()
            return True
        if event.type == pygame.KEYDOWN:
            print("self.player coords: x,y: {},{}: ".format(self.player.x, self.player.y))
            if event.key == pygame.K_ESCAPE:
                self.keep_looping = False
                self.save_data()
                return True
            # A dead player can't act; swallow all remaining input.
            if self.player.is_dead() == True: return False
            # ---- Movement keys (arrows / WASD): direction + sprite updated only when the move succeeds ----
            if event.key == pygame.K_LEFT or event.key == pygame.K_a:
                if self.player.move(dx=-1, dy=0, obstacles=self.environment.obstacles) == True:
                    # self.player.image = self.player.image_left
                    self.player.direction = constants.LEFT
                    self.player.my_update_image()
            elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:
                if self.player.move(dx=1, obstacles=self.environment.obstacles) == True:
                    self.player.direction = constants.RIGHT
                    self.player.my_update_image()
            elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
                if self.player.move(dy=1, obstacles=self.environment.obstacles) == True:
                    self.player.direction = constants.DOWN
                    self.player.my_update_image()
            elif event.key == pygame.K_UP or event.key == pygame.K_w:
                if self.player.move(dy=-1, obstacles=self.environment.obstacles) == True:
                    self.player.direction = constants.UP
                    self.player.my_update_image()
            # ===============================================
            elif event.key == pygame.K_h:
                # 'h' = interact with whatever occupies the player's tile:
                # NPC first, then one-shot action tiles, then persistent objects.
                if self.there_is_an_npc_on_this_tile(self.player.x, self.player.y) == True:
                    message, self.player.inventory = self.npcs.do_something(self.player)
                    if message == "load next map":
                        print("current map name: {}".format(self.map_name))
                        self.map_name = utils.get_next_map_name(self.map_name)
                        print("moving to map name: {}".format(self.map_name))
                        self.load_map(zone_name=self.zone_name,
                                      map_name=self.map_name,
                                      dialog_text="")
                elif self.there_is_an_action_on_this_tile(self.player.x, self.player.y) == True:
                    # After an action is used, the tile is removed.
                    print("There is an action on this tile.")
                    if self.environment.actions.conditions_passed(self.player) == False:
                        return False
                    current_action = self.environment.actions.get_action(self.player.x, self.player.y)
                    self.do_action(current_action)
                    self.environment.actions.remove_tile(self.player.x, self.player.y)
                    self.init_pygame()
                    self.all_sprites = pygame.sprite.Group()
                    return ""
                elif self.there_is_a_persistent_object_on_this_tile(self.player.x, self.player.y) == True:
                    # After a persistent object is used, it remains.
                    # (I could probably just make 'persistent' and 'temporary' values of a
                    # field for an Actions object rather than make a separate class.)
                    print("This is a persistent object on this tile.")
                    # if self.environment.persistents.conditions_passed(self.player, self.environment.events) == False:
                    #     return False
                    if self.environment.persistents.conditions_passed(self.player) == False:
                        return False
                    persistent_object = self.environment.persistents.get_persistent_object(self.player.x, self.player.y)
                    if persistent_object is None: raise ValueError("Error")
                    self.do_persistent(persistent_object)
                    self.init_pygame()
                    return ""
            # ===============================================
            elif event.key == pygame.K_i:  # Inventory
                if len(self.player.inventory) == 0:
                    raise ValueError("The player has lost their inventory!")
                mydialog = DialogPlayerInventory(self.player)
                mydialog.main()
                self.init_pygame()
            elif event.key == pygame.K_p:
                # Player info sheet.
                mydialog = DialogPlayerInfo(self.player)
                mydialog.main()
                self.init_pygame()
            # elif event.key == pygame.K_u:
            #     mydialog = DialogUseItemInInventory(self.player)
            #     mydialog.main()
            #     self.init_pygame()
            elif event.key == pygame.K_q:
                # Quest log.
                mydialog = DialogShowQuests()
                mydialog.read_data()
                mydialog.main()
            else:
                print("I don't recognize this event.key in handle_events: {}".format(event.key))
            # ------------------------------------------------------
            # NOTE(review): indentation was reconstructed; check_for_trigger is
            # assumed to run after every handled keypress — confirm placement.
            self.check_for_trigger()
def check_for_trigger(self):
if self.environment.triggers is None:
print("There is no TRIGGER on this tile.")
return False
current_trigger = self.environment.triggers.get_trigger(self.player.x, self.player.y)
if not current_trigger is None:
self.do_trigger(current_trigger)
# self.npcs.debug_print()
# raise NotImplemented
def show_text(self, current_trigger):
    """Show each text entry from the trigger's text file in a modal dialog.

    current_trigger.data names a file (without the .txt extension) under the
    current zone/map's texts directory.
    """
    # print("debugging: in def load_map(self, current_trigger")
    filename = "{}.txt".format(current_trigger.data)
    filepath = os.path.join("data", "zones", self.zone_name, self.map_name, "texts", filename)
    mydict = utils.read_file(filepath)[0]
    for key, value in mydict.items():
        # print(key, value)
        mytextdialog = DialogText(value)
        mytextdialog.main()
    # NOTE(review): indentation was reconstructed; init_pygame is assumed to
    # run once after all dialogs — confirm it shouldn't be inside the loop.
    self.init_pygame()
def fire_attack(self, attack_strength):
self.player.hit_points -= attack_strength
if self.player.hit_points <= 0:
self.player_died()
def do_trigger(self, current_trigger):
    """Execute a map trigger the player stepped on.

    Supported commands: load_map, show_text, fire_attack / fire_attack_big,
    change_npc_passive (unimplemented), change_npc_agro.

    Returns:
        True when a command was dispatched; False when the trigger's conditions
        failed or a named NPC could not be found.

    Raises:
        ValueError: unknown command, malformed trigger data, or an NPC name not
            present in the zone.
        NotImplementedError: change_npc_passive is not implemented yet.
    """
    # print("There is a current trigger")
    # print("current_trigger.command = {}".format(current_trigger.command))
    # current_trigger.debug_print()
    if current_trigger.command == "load_map":
        if current_trigger.conditions_fulfilled(self.zone_name,
                                                self.map_name,
                                                self.player.inventory) == False:
            return False
        # ----
        self.save_data()
        zone_name, map_name = current_trigger.parse_data()
        self.zone_name = zone_name
        self.map_name = map_name
        utils.set_user_data(self.player.name, self.zone_name, self.map_name, self.player.profession)
        # ----
        self.read_data()
    elif current_trigger.command == "show_text":
        self.show_text(current_trigger)
    elif current_trigger.command in ("fire_attack", "fire_attack_big"):
        # Both variants apply their integer `data` as damage; the branches were
        # previously duplicated verbatim, so they are merged here.
        if not utils.is_int(current_trigger.data):
            raise ValueError("Error")
        self.fire_attack(int(current_trigger.data))
    elif current_trigger.command == "change_npc_passive":
        # check to make sure that the name given in data is that of an npc
        # so check the npc_name_lookup.txt file in the zone directory.
        # if this goes through then turn that npc passive.
        if utils.npc_exists_in_zone(npc_name=current_trigger.data,
                                    zone_name=self.zone_name,
                                    map_name=self.map_name) == False:
            s = "Error! That npc ({}) does not exist in this zone ({})."
            s = s.format(current_trigger.data.replace(".txt", ""), self.zone_name)
            raise ValueError(s)
        # BUG FIX: `raise NotImplemented` raised a TypeError at runtime
        # (NotImplemented is a constant, not an exception class).
        raise NotImplementedError
    elif current_trigger.command == "change_npc_agro":
        # Validate the NPC name against the zone lookup, then flip it aggressive.
        if utils.npc_exists_in_zone(npc_name=current_trigger.data,
                                    zone_name=self.zone_name,
                                    map_name=self.map_name) == False:
            s = "Error! That npc ({}) does not exist in this zone ({}).".format(current_trigger.data, self.zone_name)
            raise ValueError(s)
        the_npc = self.npcs.get_npc_by_name(current_trigger.data)
        if the_npc is None:
            s = "This name ({}) is not the name of a current npc.".format(current_trigger.data.replace(" ", "_"))
            print(s)
            return False
            # raise ValueError(s)
        the_npc.agro_level = "agro"
    else:
        current_trigger.debug_print()
        s = "I couldn't find that: {}".format(current_trigger.command)
        raise ValueError(s)
    return True
def load_map(self, zone_name, map_name, dialog_text=""):
"""This loads both a zone and a map."""
if not zone_name in constants.ZONE_NAMES:
s | |
# leiaapi/generated/api/application_admin_api.py
# coding: utf-8
"""
LEIA RESTful API for AI
Leia API # noqa: E501
OpenAPI spec version: 1.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from leiaapi.generated.api_client import ApiClient
class ApplicationAdminApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def admin_create_application(self, token, **kwargs): # noqa: E501
"""Adds a new application to the system (admin only) # noqa: E501
Adds a new application to the system. This method is only accessible to admins. An API key will be generated for the new application when calling this method. Note or store it carefully, it will not be recoverable after this call. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.admin_create_application(token, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: The login token obtained via GET /login/{api_key} (required)
:param Application body:
:return: Application
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.admin_create_application_with_http_info(token, **kwargs) # noqa: E501
else:
(data) = self.admin_create_application_with_http_info(token, **kwargs) # noqa: E501
return data
def admin_create_application_with_http_info(self, token, **kwargs):  # noqa: E501
    """Adds a new application to the system (admin only)  # noqa: E501

    Adds a new application to the system. This method is only accessible to admins. An API key will be generated for the new application when calling this method. Note or store it carefully, it will not be recoverable after this call.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.admin_create_application_with_http_info(token, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str token: The login token obtained via GET /login/{api_key} (required)
    :param Application body:
    :return: Application
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of accepted keyword arguments for this endpoint.
    all_params = ['token', 'body']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject any unknown keyword argument early, then flatten kwargs into params.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method admin_create_application" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'token' is set
    if ('token' not in params or
            params['token'] is None):
        raise ValueError("Missing the required parameter `token` when calling `admin_create_application`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    # The login token travels in a request header.
    header_params = {}
    if 'token' in params:
        header_params['token'] = params['token']  # noqa: E501

    form_params = []
    local_var_files = {}

    # Optional Application payload is sent as the JSON request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/admin/application', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Application',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def admin_delete_always_on_schedule(self, token, application_id, always_on_schedule_id, **kwargs): # noqa: E501
"""Removes a schedule from an application # noqa: E501
Removes a schedule from an application # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.admin_delete_always_on_schedule(token, application_id, always_on_schedule_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str application_id: The id of the application (required)
:param str always_on_schedule_id: The id of the schedule to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.admin_delete_always_on_schedule_with_http_info(token, application_id, always_on_schedule_id, **kwargs) # noqa: E501
else:
(data) = self.admin_delete_always_on_schedule_with_http_info(token, application_id, always_on_schedule_id, **kwargs) # noqa: E501
return data
def admin_delete_always_on_schedule_with_http_info(self, token, application_id, always_on_schedule_id, **kwargs):  # noqa: E501
    """Removes a schedule from an application  # noqa: E501

    Removes a schedule from an application  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.admin_delete_always_on_schedule_with_http_info(token, application_id, always_on_schedule_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str token: The login token obtained via GET /login/{api_key} (required)
    :param str application_id: The id of the application (required)
    :param str always_on_schedule_id: The id of the schedule to delete (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of accepted keyword arguments for this endpoint.
    all_params = ['token', 'application_id', 'always_on_schedule_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject any unknown keyword argument early, then flatten kwargs into params.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method admin_delete_always_on_schedule" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'token' is set
    if ('token' not in params or
            params['token'] is None):
        raise ValueError("Missing the required parameter `token` when calling `admin_delete_always_on_schedule`")  # noqa: E501
    # verify the required parameter 'application_id' is set
    if ('application_id' not in params or
            params['application_id'] is None):
        raise ValueError("Missing the required parameter `application_id` when calling `admin_delete_always_on_schedule`")  # noqa: E501
    # verify the required parameter 'always_on_schedule_id' is set
    if ('always_on_schedule_id' not in params or
            params['always_on_schedule_id'] is None):
        raise ValueError("Missing the required parameter `always_on_schedule_id` when calling `admin_delete_always_on_schedule`")  # noqa: E501

    collection_formats = {}

    # Both ids are interpolated into the URL path template below.
    path_params = {}
    if 'application_id' in params:
        path_params['application_id'] = params['application_id']  # noqa: E501
    if 'always_on_schedule_id' in params:
        path_params['always_on_schedule_id'] = params['always_on_schedule_id']  # noqa: E501

    query_params = []

    # The login token travels in a request header.
    header_params = {}
    if 'token' in params:
        header_params['token'] = params['token']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/admin/application/{application_id}/always_on_schedules/{always_on_schedule_id}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def admin_delete_application(self, token, application_id, **kwargs): # noqa: E501
"""Deletes an application (admin only) # noqa: E501
Retrieves a new application from the system. This method is only accessible to admins # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.admin_delete_application(token, application_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str application_id: The id of the application to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.admin_delete_application_with_http_info(token, application_id, **kwargs) # noqa: E501
else:
(data) = self.admin_delete_application_with_http_info(token, application_id, **kwargs) # noqa: E501
return data
def admin_delete_application_with_http_info(self, token, application_id, **kwargs): # noqa: E501
"""Deletes an application (admin only) # noqa: E501
Retrieves a new application from the system. This method is only accessible to admins # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.admin_delete_application_with_http_info(token, application_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str application_id: The id of the application to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['token', 'application_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method admin_delete_application" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'token' is set
if ('token' not in params or
params['token'] is None):
raise ValueError("Missing the required parameter `token` when calling `admin_delete_application`") # noqa: E501
# verify the required parameter 'application_id' is set
if ('application_id' not in params or
params['application_id'] is None):
raise ValueError("Missing the required parameter `application_id` when calling `admin_delete_application`") # noqa: E501
collection_formats = {}
path_params = {}
if 'application_id' in params:
path_params['application_id'] | |
"""Library implementing convolutional neural networks.
Authors
* <NAME> 2020
* <NAME> 2020
* <NAME> 2021
* <NAME> 2021
"""
import math
import torch
import logging
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple
logger = logging.getLogger(__name__)
class SincConv(nn.Module):
    """This function implements SincConv (SincNet).

    <NAME>, <NAME>, "Speaker Recognition from raw waveform with
    SincNet", in Proc. of SLT 2018 (https://arxiv.org/abs/1808.00158)

    Arguments
    ---------
    out_channels : int
        It is the number of output channels (i.e. number of sinc filters).
    kernel_size: int
        Kernel size of the convolutional filters. Must be odd (the filters
        are symmetric around a central tap).
    input_shape : tuple
        The shape of the input. Alternatively use ``in_channels``.
    in_channels : int
        The number of input channels. Alternatively use ``input_shape``.
    stride : int
        Stride factor of the convolutional filters. When the stride factor > 1,
        a decimation in time is performed.
    dilation : int
        Dilation factor of the convolutional filters.
    padding : str
        (same, valid, causal). If "valid", no padding is performed.
        If "same" and stride is 1, output shape is the same as the input shape.
        "causal" results in causal (dilated) convolutions.
    padding_mode : str
        This flag specifies the type of padding. See torch.nn documentation
        for more information.
    sample_rate : int
        Sampling rate of the input signals. It is only used for sinc_conv.
    min_low_hz : float
        Lowest possible frequency (in Hz) for a filter.
    min_band_hz : float
        Lowest possible value (in Hz) for a filter bandwidth.

    Example
    -------
    >>> inp_tensor = torch.rand([10, 16000])
    >>> conv = SincConv(input_shape=inp_tensor.shape, out_channels=25, kernel_size=11)
    >>> out_tensor = conv(inp_tensor)
    >>> out_tensor.shape
    torch.Size([10, 16000, 25])
    """

    def __init__(
        self,
        out_channels,
        kernel_size,
        input_shape=None,
        in_channels=None,
        stride=1,
        dilation=1,
        padding="same",
        padding_mode="reflect",
        sample_rate=16000,
        min_low_hz=50,
        min_band_hz=50,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.dilation = dilation
        self.padding = padding
        self.padding_mode = padding_mode
        self.sample_rate = sample_rate
        self.min_low_hz = min_low_hz
        self.min_band_hz = min_band_hz
        # input shape inference
        if input_shape is None and in_channels is None:
            raise ValueError("Must provide one of input_shape or in_channels")
        if in_channels is None:
            # NOTE(review): in_channels is validated here but never stored;
            # the sinc filterbank below is built for a single input channel.
            in_channels = self._check_input_shape(input_shape)
        # Initialize Sinc filters
        self._init_sinc_conv()

    def forward(self, x):
        """Returns the output of the sinc convolution.

        Arguments
        ---------
        x : torch.Tensor (batch, time) or (batch, time, channel)
            input to convolve. 2d or 3d tensors are expected.
        """
        # Move the channel axis next to the batch axis (conv1d convention).
        x = x.transpose(1, -1)
        # Remember the device so the (non-parameter) buffers n_ and window_
        # can be moved alongside the input in _get_sinc_filters.
        self.device = x.device
        unsqueeze = x.ndim == 2
        if unsqueeze:
            # Add a singleton channel dim for 2d (batch, time) inputs.
            x = x.unsqueeze(1)
        if self.padding == "same":
            x = self._manage_padding(
                x, self.kernel_size, self.dilation, self.stride
            )
        elif self.padding == "causal":
            # Pad only on the left so no future samples leak into the output.
            num_pad = (self.kernel_size - 1) * self.dilation
            x = F.pad(x, (num_pad, 0))
        elif self.padding == "valid":
            pass
        else:
            raise ValueError(
                "Padding must be 'same', 'valid' or 'causal'. Got %s."
                % (self.padding)
            )
        # Filters are re-synthesized from the learnable cutoff parameters at
        # every forward pass.
        sinc_filters = self._get_sinc_filters()
        wx = F.conv1d(
            x,
            sinc_filters,
            stride=self.stride,
            padding=0,
            dilation=self.dilation,
        )
        if unsqueeze:
            wx = wx.squeeze(1)
        # Back to (batch, time, channel).
        wx = wx.transpose(1, -1)
        return wx

    def _check_input_shape(self, shape):
        """Checks the input shape and returns the number of input channels.

        Both 2d (batch, time) and 3d inputs map to one input channel here.
        """
        if len(shape) == 2:
            in_channels = 1
        elif len(shape) == 3:
            in_channels = 1
        else:
            raise ValueError(
                "sincconv expects 2d or 3d inputs. Got " + str(len(shape))
            )
        # Kernel size must be odd
        if self.kernel_size % 2 == 0:
            raise ValueError(
                "The field kernel size must be an odd number. Got %s."
                % (self.kernel_size)
            )
        return in_channels

    def _get_sinc_filters(self,):
        """This functions creates the sinc-filters to used for sinc-conv.

        Builds a (out_channels, 1, kernel_size) band-pass filterbank from the
        learnable per-filter low cutoff (``low_hz_``) and bandwidth
        (``band_hz_``) parameters.
        """
        # Computing the low frequencies of the filters
        low = self.min_low_hz + torch.abs(self.low_hz_)
        # Setting minimum band and minimum freq; high cutoff is clamped to
        # the Nyquist frequency (sample_rate / 2).
        high = torch.clamp(
            low + self.min_band_hz + torch.abs(self.band_hz_),
            self.min_low_hz,
            self.sample_rate / 2,
        )
        band = (high - low)[:, 0]
        # Passing from n_ to the corresponding f_times_t domain
        self.n_ = self.n_.to(self.device)
        self.window_ = self.window_.to(self.device)
        f_times_t_low = torch.matmul(low, self.n_)
        f_times_t_high = torch.matmul(high, self.n_)
        # Left part of the filters (difference of two low-pass sincs,
        # windowed by the Hamming window).
        band_pass_left = (
            (torch.sin(f_times_t_high) - torch.sin(f_times_t_low))
            / (self.n_ / 2)
        ) * self.window_
        # Central element of the filter
        band_pass_center = 2 * band.view(-1, 1)
        # Right part of the filter (sinc filters are symmetric)
        band_pass_right = torch.flip(band_pass_left, dims=[1])
        # Combining left, central, and right part of the filter
        band_pass = torch.cat(
            [band_pass_left, band_pass_center, band_pass_right], dim=1
        )
        # Amplitude normalization
        band_pass = band_pass / (2 * band[:, None])
        # Setting up the filter coefficients
        filters = band_pass.view(self.out_channels, 1, self.kernel_size)
        return filters

    def _init_sinc_conv(self):
        """Initializes the parameters of the sinc_conv layer."""
        # Initialize filterbanks such that they are equally spaced in Mel scale
        high_hz = self.sample_rate / 2 - (self.min_low_hz + self.min_band_hz)
        mel = torch.linspace(
            self._to_mel(self.min_low_hz),
            self._to_mel(high_hz),
            self.out_channels + 1,
        )
        hz = self._to_hz(mel)
        # Filter lower frequency and bands (consecutive mel-spaced edges)
        self.low_hz_ = hz[:-1].unsqueeze(1)
        self.band_hz_ = (hz[1:] - hz[:-1]).unsqueeze(1)
        # Making freq and bands learnable
        self.low_hz_ = nn.Parameter(self.low_hz_)
        self.band_hz_ = nn.Parameter(self.band_hz_)
        # Hamming window (only the first half; the filter is symmetric)
        n_lin = torch.linspace(
            0, (self.kernel_size / 2) - 1, steps=int((self.kernel_size / 2))
        )
        self.window_ = 0.54 - 0.46 * torch.cos(
            2 * math.pi * n_lin / self.kernel_size
        )
        # Time axis (only half is needed due to symmetry)
        n = (self.kernel_size - 1) / 2.0
        self.n_ = (
            2 * math.pi * torch.arange(-n, 0).view(1, -1) / self.sample_rate
        )

    def _to_mel(self, hz):
        """Converts frequency in Hz to the mel scale."""
        return 2595 * np.log10(1 + hz / 700)

    def _to_hz(self, mel):
        """Converts frequency in the mel scale to Hz."""
        return 700 * (10 ** (mel / 2595) - 1)

    def _manage_padding(
        self, x, kernel_size: int, dilation: int, stride: int,
    ):
        """This function performs zero-padding on the time axis
        such that their lengths is unchanged after the convolution.

        Arguments
        ---------
        x : torch.Tensor
            Input tensor.
        kernel_size : int
            Size of kernel.
        dilation : int
            Dilation used.
        stride : int
            Stride.
        """
        # Detecting input shape
        L_in = x.shape[-1]
        # Time padding amount is computed by the module-level helper
        # get_padding_elem (defined elsewhere in this file).
        padding = get_padding_elem(L_in, stride, kernel_size, dilation)
        # Applying padding (padding_mode, e.g. "reflect", chosen at init)
        x = F.pad(x, padding, mode=self.padding_mode)
        return x
class Conv1d(nn.Module):
"""This function implements 1d convolution.
Arguments
---------
out_channels : int
It is the number of output channels.
kernel_size : int
Kernel size of the convolutional filters.
input_shape : tuple
The shape of the input. Alternatively use ``in_channels``.
in_channels : int
The number of input channels. Alternatively use ``input_shape``.
stride : int
Stride factor of the convolutional filters. When the stride factor > 1,
a decimation in time is performed.
dilation : int
Dilation factor of the convolutional filters.
padding : str
(same, valid, causal). If "valid", no padding is performed.
If "same" and stride is 1, output shape is the same as the input shape.
"causal" results in causal (dilated) convolutions.
groups: int
Number of blocked connections from input channels to output channels.
padding_mode : str
This flag specifies the type of padding. See torch.nn documentation
for more information.
skip_transpose : bool
If False, uses batch x time x channel convention of speechbrain.
If True, uses batch x channel x time convention.
Example
-------
>>> inp_tensor = torch.rand([10, 40, 16])
>>> cnn_1d = Conv1d(
... input_shape=inp_tensor.shape, out_channels=8, kernel_size=5
... )
>>> out_tensor = cnn_1d(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 40, 8])
"""
def __init__(
self,
out_channels,
kernel_size,
input_shape=None,
in_channels=None,
stride=1,
dilation=1,
padding="same",
groups=1,
bias=True,
padding_mode="reflect",
skip_transpose=False,
):
super().__init__()
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.padding = padding
self.padding_mode = padding_mode
self.unsqueeze = False
self.skip_transpose = skip_transpose
if input_shape is None and in_channels is None:
raise ValueError("Must provide one of input_shape or in_channels")
if in_channels is None:
in_channels = self._check_input_shape(input_shape)
self.conv = nn.Conv1d(
in_channels,
out_channels,
self.kernel_size,
stride=self.stride,
dilation=self.dilation,
padding=0,
groups=groups,
bias=bias,
)
def forward(self, x):
"""Returns the output of the convolution.
Arguments
---------
x : torch.Tensor (batch, time, channel)
input to convolve. 2d or 4d tensors are expected.
"""
if not self.skip_transpose:
x = x.transpose(1, -1)
if self.unsqueeze:
x = x.unsqueeze(1)
if self.padding == "same":
x = self._manage_padding(
x, self.kernel_size, self.dilation, self.stride
)
elif self.padding == "causal":
num_pad = (self.kernel_size - 1) * self.dilation
x = F.pad(x, (num_pad, 0))
elif self.padding == "valid":
pass
else:
raise ValueError(
"Padding | |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch
import numpy as np
import matplotlib.pyplot as plt
import csv
from PIL import Image
import matplotlib as mpl
from tqdm import tqdm
from sklearn.manifold import TSNE
import umap
from sklearn.metrics import silhouette_score , silhouette_samples
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# # Visualizing the Disregarding classes
#
# ### Load data
# In[2]:
def access_data(letter, shot):
    """Load the cached tensors for one backbone (`letter`) / shot setting.

    Returns (features, classifier, accuracy, index) tensors, all mapped to
    CPU regardless of where they were saved.
    """
    cpu = torch.device('cpu')
    n = str(shot)
    feat = torch.load('features' + letter + n, map_location=cpu)
    classifier = torch.load('classifier' + letter, map_location=cpu)
    accuracy = torch.load('complete_class_accuracy' + letter + n + 'shots', map_location=cpu)
    idx = torch.load('complete_class_accuracy' + letter + 'idx' + n + 'shots', map_location=cpu)
    return feat, classifier, accuracy, idx
# In[3]:
# Load backbone A: per-class features, classifier weights, per-run accuracy
# bookkeeping and sample indices (5-shot setting).
shot=5
letter='A'
feat,classifier,acc,idx = access_data(letter,shot)
print(acc.shape)
print(feat.shape)
print(classifier.shape)
print(idx.shape)
# In[4]:
# Same loading for backbone B.
shot=5
letter='B'
featB,classifierB,accB,idxB = access_data(letter,shot)
print(accB.shape)
print(featB.shape)
print(classifierB.shape)
print(idxB.shape)
# In[5]:
# Mean feature vector of each of the first 64 classes (miniImageNet base
# classes) — used as the class "templates" for the projections below.
base_mean = feat[:64].mean(-2)
base_meanB = featB[:64].mean(-2)
print(base_mean.shape)
# In[6]:
def sphering(features):
    """L2-normalize each feature vector (unit norm along dim 2)."""
    norms = torch.norm(features, p=2, dim=2, keepdim=True)
    return features / norms
def centering(train_features, features):
    """Subtract the global mean feature vector of ``train_features``."""
    dim = train_features.shape[2]
    mean_vec = train_features.reshape(-1, dim).mean(dim=0)
    return features - mean_vec.unsqueeze(0).unsqueeze(0)
# Normalize, center on the base-class mean, then re-normalize: the standard
# sphering/centering preprocessing applied to the whole feature tensor.
feat_processed = sphering(centering(sphering(feat)[:64],sphering(feat) ))
# In[7]:
def proj_class(i, test_features, letter='A', base=None):
    """Project features onto the orthogonal complement of one base class.

    Removes from ``test_features`` the component aligned with the i-th base
    class mean, i.e. returns the residual after projecting onto that class
    direction.

    Parameters
    ----------
    i : int
        Index of the base-class direction to remove.
    test_features : torch.Tensor (..., d)
        Features whose last dimension matches the class-mean dimension.
    letter : str
        'A' selects the module-level ``base_mean``, anything else selects
        ``base_meanB`` (kept for backward compatibility).
    base : torch.Tensor (n_classes, d), optional
        Explicit class-mean matrix; when given, ``letter`` is ignored.

    Returns
    -------
    torch.Tensor with the same shape as ``test_features``.
    """
    if base is None:
        # Original behaviour: read the module-level class-mean globals.
        base = base_mean if letter == 'A' else base_meanB
    w = base[i]  # direction (class mean) to project out
    # Projection coefficient, normalized by ||w||^2.
    proj = torch.matmul(test_features, w) / torch.norm(w) ** 2
    # Broadcasting replaces the original hard-coded repeat(..., 640) and the
    # try/except over 2d vs 3d inputs: (..., 1) * (d,) -> (..., d).
    projection_ortho = proj.unsqueeze(-1) * w
    # Residual in the orthogonal complement of w.
    return test_features - projection_ortho
# In[8]:
filenametrain = '/home/r21lafar/Documents/dataset/miniimagenetimages/train.csv'
filenametest = '/home/r21lafar/Documents/dataset/miniimagenetimages/test.csv'
directory = '/home/r21lafar/Documents/dataset/miniimagenetimages/images/'
def opencsv(filename):
    """Read a miniImageNet split csv and return its data rows.

    The header row is consumed and echoed to stdout (as in the original
    implementation); every remaining row is returned as a list of strings.

    Fixes: the file handle is now closed via a context manager (it leaked
    before), and the unused ``rowstrain`` / duplicate header init are gone.
    """
    with open(filename) as file:
        csvreader = csv.reader(file)
        header = next(csvreader)
        print(header)
        rows = list(csvreader)
    return rows
# Load the test and train split rows (each row: [image filename, class id]).
test = opencsv(filenametest)
train = opencsv(filenametrain)
def openimg(cl,title):
    # Display a random image from class `cl`: base classes (<64) come from
    # the train split, novel classes (>=80) from the test split (shifted by
    # 80 to index that split's 600-image-per-class layout).
    # NOTE(review): 64 <= cl < 80 leaves `src` unbound and would raise
    # NameError — presumably the validation range is intentionally
    # unsupported; confirm.
    if cl<64:
        src=train
    if cl>=80:
        src=test
        cl-=80
    if type(cl)==int:
        plt.figure(figsize=(5,5))
        # Each class occupies 600 consecutive rows; pick near the middle
        # with a +/-100 random offset.
        idx=int((cl+0.5)*600)+np.random.randint(-100,100)
        filename=src[idx][0]
        im = Image.open(directory +filename)
        plt.title(title)
        plt.imshow(np.array(im))
# In[9]:
def distance_from_base(proj,run,plot=False,letter='A'):
    # Distance between the few-shot prototypes of one run and every base
    # class mean, optionally after projecting out base class `proj-1`
    # (proj == 0 means "no projection"); proj == run == -1 compares the raw
    # novel-class means instead of run prototypes.
    # Relies on module-level globals: feat/featB, acc, idx, shot,
    # base_mean/base_meanB.
    if letter=='A':
        fs_run = feat[acc[0,0,run].long()]
    else:
        fs_run = featB[acc[0,0,run].long()]
    if proj==-1 and run ==-1:
        # Special case: prototypes = means of the 20 novel classes.
        if letter=='A':
            proto_fs = feat[-20:].mean(1)
        else:
            proto_fs = featB[-20:].mean(1)
    else:
        # Reorder each class's samples by the stored run indices, then
        # average the first `shot` support samples into a prototype.
        # NOTE(review): feature dim 640 is hard-coded here.
        fs_run = torch.gather(fs_run,dim=1,index=idx[0,run].unsqueeze(-1).repeat(1,1,640).long())
        proto_fs = fs_run[:,:shot].mean(1)
    if proj!=0:
        proto_fs=proj_class(proj-1,proto_fs,letter=letter)
    if letter=='A':
        D = torch.cdist(proto_fs,base_mean)
    else:
        D = torch.cdist(proto_fs,base_meanB)
    if plot:
        plt.figure()
        plt.imshow(D.detach().numpy(),aspect='auto')
        plt.colorbar()
        plt.title('distance between FS class mean and base class '+letter+' mean \n (whole base dataset) projection ' +str(proj) + ' (0 is no projection)')
        plt.xlabel('64 base class mean')
        plt.ylabel('FS prototype of class')
    return D
# ## Create FS scenarii or runs
# ### 2 ways
# In[10]:
# Episode configuration shared (as globals) by the ncm/generate_runs helpers:
# 20 episodes processed in batches of 10, 2-way classification.
n_runs, batch_few_shot_runs = 20,10
n_ways=2
def ncm(train_features, features, run_classes, run_indices, n_shots,i_proj):
    # Nearest-class-mean evaluation over all episodes.
    # Returns per-query accuracy, per-run prototypes, and per-run variances
    # (intra-class per way, plus inter-class appended as a last column).
    # Uses globals n_ways, n_runs, batch_few_shot_runs; `train_features` and
    # `i_proj` are currently unused (preprocessing is commented out).
    with torch.no_grad():
        dim = features.shape[2]
        targets = torch.arange(n_ways).unsqueeze(1).unsqueeze(0)
        #features = preprocess(train_features, features)
        scores = []
        score=0
        for batch_idx in range(n_runs // batch_few_shot_runs):
            runs = generate_runs(features, run_classes, run_indices, batch_idx)
            # Prototypes: mean of the n_shots support samples of each way.
            means = torch.mean(runs[:,:,:n_shots], dim = 2)
            var_intra = runs[:,:,:n_shots].var(2).mean(-1)
            var_inter = runs[:,:,:n_shots].mean(2).var(1).mean(-1).unsqueeze(1)
            var = torch.cat((var_intra,var_inter),dim=1)
            # Euclidean distance of every query to every prototype; the
            # nearest prototype wins.
            distances = torch.norm(runs[:,:,n_shots:].reshape(batch_few_shot_runs, n_ways, 1, -1, dim) - means.reshape(batch_few_shot_runs, 1, n_ways, 1, dim), dim = 4, p = 2)
            winners = torch.min(distances, dim = 2)[1]
            accuracy = (winners == targets)
            # Accumulate batch results along the run dimension.
            if batch_idx==0:
                full_accuracy=accuracy
                full_mean=means
                full_var = var
            else:
                full_accuracy=torch.cat((full_accuracy,accuracy),dim=0)
                full_mean=torch.cat((full_mean,means),dim=0)
                full_var=torch.cat((full_var,var),dim=0)
        return full_accuracy,full_mean,full_var
def generate_runs(data, run_classes, run_indices, batch_idx):
    # Slice one batch of episodes out of (n_runs, ...) episode definitions
    # and gather the corresponding samples from `data`
    # (num_classes, samples_per_class, dim) into
    # (batch_few_shot_runs, n_ways, n_samples, dim).
    # Uses the global batch_few_shot_runs; the three locals below shadow the
    # module-level globals and are otherwise unused.
    n_runs, n_ways, n_samples = run_classes.shape[0], run_classes.shape[1], run_indices.shape[2]
    run_classes = run_classes[batch_idx * batch_few_shot_runs : (batch_idx + 1) * batch_few_shot_runs]
    run_indices = run_indices[batch_idx * batch_few_shot_runs : (batch_idx + 1) * batch_few_shot_runs]
    # Expand episode class ids / sample ids so torch.gather can select along
    # the class axis first, then the sample axis.
    run_classes = run_classes.unsqueeze(2).unsqueeze(3).repeat(1,1,data.shape[1], data.shape[2])
    run_indices = run_indices.unsqueeze(3).repeat(1, 1, 1, data.shape[2])
    datas = data.unsqueeze(0).repeat(batch_few_shot_runs, 1, 1, 1)
    cclasses = torch.gather(datas, 1, run_classes.to(torch.int64))
    res = torch.gather(cclasses, 2, run_indices)
    return res
def define_runs(n_ways, n_shots, n_queries, num_classes, elements_per_class, num_runs=None):
    """Sample random few-shot episodes.

    Parameters
    ----------
    n_ways : int
        Classes per episode.
    n_shots, n_queries : int
        Support / query samples per class (n_shots + n_queries indices are
        drawn per way).
    num_classes : int
        Number of classes to sample ways from.
    elements_per_class : sequence of int
        Available samples for each class.
    num_runs : int, optional
        Number of episodes to draw; defaults to the module-level global
        ``n_runs`` for backward compatibility.

    Returns
    -------
    (run_classes, run_indices) : LongTensors of shape
        (runs, n_ways) and (runs, n_ways, n_shots + n_queries).
    """
    runs = n_runs if num_runs is None else num_runs
    run_classes = torch.LongTensor(runs, n_ways)
    run_indices = torch.LongTensor(runs, n_ways, n_shots + n_queries)
    for i in range(runs):
        # Distinct classes per episode, then distinct sample indices per way.
        run_classes[i] = torch.randperm(num_classes)[:n_ways]
        for j in range(n_ways):
            run_indices[i, j] = torch.randperm(elements_per_class[run_classes[i, j]])[:n_shots + n_queries]
    return run_classes, run_indices
# Draw 2-way episodes over the 20 novel classes (5 shots, 500 queries).
run_classes, run_indices = define_runs(n_ways, 5, 500,20, [600 for i in range(20)])
# In[11]:
# Baseline NCM accuracy per run for both backbones, without any projection.
A,_,full_var = ncm(feat[:64], feat[-20:], run_classes, run_indices, 5,0)
B,_,full_var = ncm(featB[:64], featB[-20:],run_classes, run_indices, 5,0)
plt.plot(A.float().mean(-1).mean(-1),label='backbone A')
plt.plot(B.float().mean(-1).mean(-1),label='backbone B')
plt.legend()
plt.xlabel('run')
plt.ylabel('accuracy')
plt.title('no projection')
# In[12]:
# Sweep all 65 projection variants: i == 0 is the unprojected baseline,
# i = 1..64 removes base-class direction i-1 before NCM evaluation.
# Results are stacked along a leading "projection" axis:
# fullA/fullB (accuracy), fullmean* (prototypes), fullvar* (variances).
for i in tqdm(range(65)):
    if i!=0:
        feature=proj_class(i-1,feat,'A')
        featureB=proj_class(i-1,featB,'B')
    else:
        feature =feat
        featureB =featB
    A,meanA,varA = ncm(feature[:64], feature[-20:], run_classes, run_indices, 5,0)
    B,meanB,varB = ncm(featureB[:64], featureB[-20:],run_classes, run_indices, 5,0)
    if i==0:
        fullA = A.unsqueeze(0)
        fullB = B.unsqueeze(0)
        fullmeanA = meanA.unsqueeze(0)
        fullmeanB = meanB.unsqueeze(0)
        fullvarA = varA.unsqueeze(0)
        fullvarB = varB.unsqueeze(0)
    else:
        fullA = torch.cat((fullA, A.unsqueeze(0)) ,dim = 0)
        fullB = torch.cat((fullB, B.unsqueeze(0)) ,dim = 0)
        fullmeanA = torch.cat((fullmeanA, meanA.unsqueeze(0)) ,dim = 0)
        fullmeanB = torch.cat((fullmeanB, meanB.unsqueeze(0)) ,dim = 0)
        fullvarA = torch.cat((fullvarA, varA.unsqueeze(0)) ,dim = 0)
        fullvarB = torch.cat((fullvarB, varB.unsqueeze(0)) ,dim = 0)
# In[12]:
def what_proj(run):
    # Rank the 65 projection variants of one run by mean accuracy
    # (ascending); subtracting 1 maps positions back to base-class ids,
    # with -1 denoting the unprojected baseline. Reads the global fullA.
    return fullA[:,run].float().mean(-1).mean(-1).argsort()-1
# In[13]:
# Notebook-style inspection cells: bare expressions below were displayed
# interactively and have no effect when run as a script.
fullA[0,2,0].float().mean(-1)
# In[14]:
run=0
# Intra-class minus inter-class variance for one run (columns :2 are the
# per-way intra-class variances, column 2 the inter-class variance).
fullvarA[0,run,:2].mean(-1)-fullvarA[0,run,2]
# In[15]:
# Accuracy vs. (intra - inter) variance for the first few projections.
for prj in [0,1,2,3]:
    plt.plot(fullvarA[prj,:,:2].mean(-1)-fullvarA[prj,:,2],fullA[prj,:,:].float().mean(-1).mean(-1),'.',label='projection '+ str(prj))
plt.xlabel('intraclass var -(minus)- interclass var')
plt.ylabel('accuracy of run')
plt.legend()
plt.title('20 runs')
# In[16]:
# Best/worst achievable accuracy change per run relative to no projection.
best_boost =fullA.float().mean(-1).mean(-1).max(0)[0] - fullA[0,:,:].float().mean(-1).mean(-1)
worst_boost =fullA.float().mean(-1).mean(-1).min(0)[0] - fullA[0,:,:].float().mean(-1).mean(-1)
# In[17]:
# Which projection index achieves the best/worst accuracy per run.
best_boost_id = fullA[:,:,:].float().mean(-1).mean(-1).max(0)[1]
worst_boost_id = fullA[:,:,:].float().mean(-1).mean(-1).min(0)[1]
# In[18]:
# (intra - inter) criterion for every projection/run, and the projection
# minimizing/maximizing it per run.
intrater = fullvarA[:,:,:2].mean(-1)-fullvarA[:,:,2]
intrater_min = intrater.min(0)[1]
intrater_max = intrater.max(0)[1]
# In[19]:
# Accuracy change obtained by picking the projection with minimal criterion.
boost = torch.zeros(intrater_min.shape)
for i in range(intrater_min.shape[0]):
    boost[i] = fullA[intrater_min[i],i].float().mean(-1).mean(-1)-fullA[0,i].float().mean(-1).mean(-1)
# In[20]:
# Same, picking the projection with maximal criterion.
boost_max = torch.zeros(intrater_min.shape)
for i in range(intrater_min.shape[0]):
    boost_max[i] = fullA[intrater_max[i],i].float().mean(-1).mean(-1)-fullA[0,i].float().mean(-1).mean(-1)
# In[21]:
fullA.shape
# In[ ]:
# In[22]:
# Compare criterion-selected boosts against the oracle best boost per run.
plt.hlines(y=0 ,xmin=0,xmax = 20)
plt.plot(boost,'.',label='proj with min intra - inter')
plt.plot(boost_max,'.',label='proj with max intra - inter')
plt.plot(best_boost,'.',label='best boost')
plt.xlabel('run')
plt.ylabel('boost')
plt.legend()
# In[23]:
# Criterion value of the projection the oracle would have chosen.
intrater_best_boost = torch.zeros(intrater_min.shape)
for i in range(intrater_min.shape[0]):
    intrater_best_boost[i] = intrater[best_boost_id[i],i]
# In[24]:
plt.plot(intrater_best_boost,'.', label = 'best boost')
plt.plot(intrater.mean(0),'.', label = 'mean intra -inter')
plt.plot(intrater.min(0)[0],'.', label = 'minimum intra -inter')
plt.plot(intrater.max(0)[0],'.', label = 'maximum intra -inter')
plt.ylabel('intra-class - interclass variance')
plt.xlabel('run')
plt.legend()
# In[25]:
intrater.min(dim=0)[0]
# In[ ]:
# In[26]:
# UMAP visualization of one run: base-class means, the run's two prototypes
# and samples from its two novel classes, annotated with which projection
# helps (green) or hurts (red).
get_ipython().run_line_magic('matplotlib', 'inline')
run = 12
nb_sample=30
mk_size=4
plt.figure()
plt.plot(fullA[:,run].float().mean(-1).mean(-1))
plt.figure()
plt.plot(fullvarA[:,run].float().mean(-1).mean(-1))
# Rows 0-63: base means; 64-65: the run's prototypes; then raw samples of
# the two episode classes (novel classes live at offset 80 in feat).
FULLumap = torch.cat((base_mean,fullmeanA[0,run],feat[80+run_classes[run],:nb_sample].reshape(n_ways*nb_sample,640) ))
umapA=umap.UMAP().fit_transform(FULLumap)
plt.figure()
plt.plot(umapA[:64,0],umapA[:64,1],'o',label='base', c='b')
plt.plot(umapA[64,0],umapA[64,1],'*',label='proto 0', c='purple',markersize=20)
plt.plot(umapA[65,0],umapA[65,1],'*',label='proto 1', c='k',markersize=20)
# NOTE(review): samples are indexed from row 69 (= 64+5) here although only
# 66 rows precede them in FULLumap — looks like an off-by-a-few carried over
# from a 5-way variant; confirm before reuse.
plt.plot(umapA[69:69+nb_sample,0],umapA[64+5:69+nb_sample,1],'.',label='samples 0',markersize=mk_size, c='purple')
plt.plot(umapA[64+5+nb_sample:69+nb_sample*2,0],umapA[64+5+nb_sample:69+nb_sample*2,1],'.',label='samples 1',markersize=mk_size, c='k')
plt.legend()
boost = fullA[:,run].float().mean(-1).mean(-1)-fullA[0,run].float().mean(-1).mean(-1)
example = what_proj(run)
signboost = boost>=0.
label = [str(i) for i in range(65)]
couleur = ['red','green']
for i in range(len(label)):
    plt.annotate(label[i], (umapA[example[i],0], umapA[example[i],1]), color = couleur[signboost[example[i]]*1])
# In[27]:
# Per-projection accuracy curves of one run for both backbones.
get_ipython().run_line_magic('matplotlib', 'inline')
run = 0
plt.plot(fullA[:,run].float().mean(-1).mean(-1),label='backbone A')
plt.plot(fullB[:,run].float().mean(-1).mean(-1),label='backbone B')
plt.legend()
plt.xlabel('projection')
plt.ylabel('accuracy')
print(fullA[:,run].shape)
# In[28]:
feat.shape
# In[29]:
# Silhouette analysis over the first 100 samples of each of the 100 classes.
nb_samples = 100
feat_sil = feat[:,:nb_samples].reshape(-1,640)
# In[30]:
labels = torch.arange(0,100).unsqueeze(1).repeat(1,nb_samples).reshape(-1)
# In[31]:
sil = silhouette_samples(feat_sil,labels)
# In[32]:
sil_r = sil.reshape(100,nb_samples)
# In[33]:
# Mean silhouette per class; vertical lines mark the base/val/novel splits.
plt.plot(sil_r.mean(1),'.')
plt.xlabel('class')
plt.ylabel('silhouette')
plt.vlines(x=64,ymin=sil_r.mean(1).min(),ymax = sil_r.mean(1).max())
plt.vlines(x=64+20,ymin=sil_r.mean(1).min(),ymax = sil_r.mean(1).max())
# In[34]:
feat.shape
# In[35]:
# Intra-class variance per class vs. the overall inter-class variance.
plt.plot(feat.var(1).mean(1),'.',label='intra class variance')
plt.hlines(y=feat.mean(1).var(0).mean(),xmin=0,xmax=100,label='interclass variance')
plt.legend()
plt.xlabel('class')
plt.ylabel('mean variance over features')
plt.title('whole dataset')
# ## Project on aligning vector
# In[11]:
def proj_vec(v_proj, feats=None):
    """Return the normalized projection coefficients of features onto a vector.

    Parameters
    ----------
    v_proj : torch.Tensor (d,)
        Direction to project onto.
    feats : torch.Tensor (..., d), optional
        Features to project. Defaults to the module-level global
        ``features`` — the original body read that bare name, which is not
        defined anywhere in this script and raised NameError when called;
        passing ``feats`` explicitly avoids the hidden-global dependency.

    Returns
    -------
    torch.Tensor (...,) of coefficients scaled by 1 / ||v_proj||^2.
    """
    if feats is None:
        feats = features  # original implicit global lookup, kept for compat
    proj = torch.matmul(feats, v_proj) / torch.norm(v_proj) ** 2
    return proj
# In[12]:
# Fresh episodes and baseline NCM results used by the projection tests below.
run_classes, run_indices = define_runs(n_ways, 5, 500,20, [600 for i in range(20)])
A,full_meanA,full_varA = ncm(feat[:64], feat[-20:], run_classes, run_indices, 5,0)
B,full_meanB,full_var = ncm(featB[:64], featB[-20:],run_classes, run_indices, 5,0)
# In[13]:
def ncm_proj(train_features, features, run_classes, run_indices, n_shots):
    # NCM variant that first projects every sample onto the 1-d axis joining
    # the two prototypes (v_diff) and classifies in that scalar space.
    # Same return contract and globals as ncm(); `train_features` unused.
    with torch.no_grad():
        dim = features.shape[2]
        targets = torch.arange(n_ways).unsqueeze(1).unsqueeze(0)
        #features = preprocess(train_features, features)
        scores = []
        score=0
        for batch_idx in range(n_runs // batch_few_shot_runs):
            runs = generate_runs(features, run_classes, run_indices, batch_idx)
            means = torch.mean(runs[:,:,:n_shots], dim = 2)
            # Axis joining prototype 0 and prototype 1 (2-way assumption).
            v_diff = (means[:,0]-means[:,1])
            #v_diff = torch.randn(means[:,0].shape)
            var_intra = runs[:,:,:n_shots].var(2).mean(-1)
            var_inter = runs[:,:,:n_shots].mean(2).var(1).mean(-1).unsqueeze(1)
            var = torch.cat((var_intra,var_inter),dim=1)
            # Scalar coordinates of samples and prototypes along v_diff.
            proj_means =torch.zeros(means[:,:,0].shape)
            proj_runs =torch.zeros(runs[:,:,:,0].shape)
            for i in range(batch_few_shot_runs):
                proj_runs[i] = torch.matmul(v_diff[i], torch.swapaxes(runs[i],-1,-2))
                proj_means[i] = torch.matmul(v_diff[i], torch.swapaxes(means[i],-1,-2))
            distances = torch.norm(proj_runs[:,:,n_shots:].reshape(batch_few_shot_runs, n_ways, 1, -1, 1) - proj_means.reshape(batch_few_shot_runs, 1, n_ways, 1, 1), dim = 4, p = 2)
            winners = torch.min(distances, dim = 2)[1]
            accuracy = (winners == targets)
            if batch_idx==0:
                full_accuracy=accuracy
                full_mean=means
                full_var = var
            else:
                full_accuracy=torch.cat((full_accuracy,accuracy),dim=0)
                full_mean=torch.cat((full_mean,means),dim=0)
                full_var=torch.cat((full_var,var),dim=0)
        return full_accuracy,full_mean,full_var
# In[ ]:
# Larger evaluation: 1000 episodes for the projected vs. plain comparison.
n_runs = 1000
run_classes, run_indices = define_runs(n_ways, 5, 500,20, [600 for i in range(20)])
n_shots=5
# In[105]:
a,b,c = ncm_proj(feat[:64], feat[-20:], run_classes, run_indices, n_shots)
print(a.float().mean())
# In[104]:
a,b,c = ncm(feat[:64], feat[-20:], run_classes, run_indices, n_shots,0)
print(a.float().mean())
# With and without projection onto the axis joining the two prototypes
# (templates): performance stays the same.
# ## Test: removing the base class orthogonal to v_diff
# In[101]:
def ncm_del_otho_vdif(train_features, features, run_classes, run_indices, n_shots):
    # NCM variant that, three times in a row, removes the base-class
    # direction most orthogonal to the prototype axis (see
    # remove_the_class) before classifying. Same contract as ncm();
    # `train_features` unused.
    with torch.no_grad():
        dim = features.shape[2]
        targets = torch.arange(n_ways).unsqueeze(1).unsqueeze(0)
        #features = preprocess(train_features, features)
        scores = []
        score=0
        for batch_idx in tqdm(range(n_runs // batch_few_shot_runs)):
            runs = generate_runs(features, run_classes, run_indices, batch_idx)
            # Iteratively project out 3 base-class directions per batch.
            for i in range(3):
                runs,means = remove_the_class(runs)
            var_intra = runs[:,:,:n_shots].var(2).mean(-1)
            var_inter = runs[:,:,:n_shots].mean(2).var(1).mean(-1).unsqueeze(1)
            var = torch.cat((var_intra,var_inter),dim=1)
            distances = torch.norm(runs[:,:,n_shots:].reshape(batch_few_shot_runs, n_ways, 1, -1, dim) - means.reshape(batch_few_shot_runs, 1, n_ways, 1, dim), dim = 4, p = 2)
            winners = torch.min(distances, dim = 2)[1]
            accuracy = (winners == targets)
            if batch_idx==0:
                full_accuracy=accuracy
                full_mean=means
                full_var = var
            else:
                full_accuracy=torch.cat((full_accuracy,accuracy),dim=0)
                full_mean=torch.cat((full_mean,means),dim=0)
                full_var=torch.cat((full_var,var),dim=0)
        return full_accuracy,full_mean,full_var
def remove_the_class(runs):
    # For each episode in the batch, find the base class whose mean is most
    # orthogonal to the prototype axis v_diff and project it out of every
    # sample; returns the modified runs and recomputed prototypes.
    # Uses globals n_shots, batch_few_shot_runs, base_mean, proj_class.
    means = torch.mean(runs[:,:,:n_shots], dim = 2)
    v_diff = (means[:,0]-means[:,1]) #axis between proto 0 and proto 1
    # Normalized alignment of v_diff with every base-class mean.
    proj_base = torch.zeros(batch_few_shot_runs,base_mean.shape[0])
    for j in range(batch_few_shot_runs):
        for i in range(base_mean.shape[0]):
            w = base_mean[i]
            # NOTE: torch.torch is the torch module itself, so this is just
            # torch.matmul (kept as written).
            proj_base[j,i] = torch.torch.matmul(v_diff[j], w)/torch.norm(w)
    # Pick the least-aligned (most orthogonal) base class per episode.
    id_proj = abs(proj_base).min(1)[1]
    for j in range(batch_few_shot_runs):
        runs[j] = proj_class(id_proj[j],runs[j])
    means = torch.mean(runs[:,:,:n_shots], dim = 2)
    return runs,means
# In[ ]:
# In[99]:
# Accuracy with orthogonal-class removal on raw features...
a,b,c = ncm_del_otho_vdif(feat[:64], feat[-20:], run_classes, run_indices, n_shots)
print(a.float().mean().item())
# In[100]:
# ...and on sphered/centered features, against the plain NCM baseline.
a,b,c = ncm(feat_processed[:64], feat_processed[-20:], run_classes, run_indices, n_shots,0)
print(a.float().mean())
a,b,c = ncm_del_otho_vdif(feat_processed[:64], feat_processed[-20:], run_classes, run_indices, n_shots)
print(a.float().mean())
# ## LDA / shrinkage test
# In[15]:
def LDA(run,**kwargs):
    # Fit a scikit-learn LinearDiscriminantAnalysis on the support samples of
    # one episode (2-way assumed: targets are n_shots zeros then n_shots
    # ones) and transform the whole episode into the discriminant space.
    # kwargs are forwarded to LinearDiscriminantAnalysis (e.g. shrinkage).
    # Uses the global n_shots.
    s = run.shape
    s_shots = run[:,:n_shots].shape
    # Flatten (ways, samples, dim) to (ways*samples, dim) for sklearn.
    run_reshaped = run.reshape(s[0]*s[1],-1)
    run_reshaped_shots = run[:,:n_shots].reshape(s_shots[0]*s_shots[1],-1)
    target = torch.cat((torch.zeros(n_shots),torch.ones(n_shots)))
    clf = LinearDiscriminantAnalysis(**kwargs)
    clf.fit(run_reshaped_shots, target)
    out = torch.tensor(clf.transform(run_reshaped))
    # Back to (ways, samples, reduced_dim).
    out = out.reshape((s[0],s[1],-1))
    return out
# In[16]:
def ncm_lda(train_features, features, run_classes, run_indices, n_shots,dictlda):
with torch.no_grad():
dim = features.shape[2]
targets = torch.arange(n_ways).unsqueeze(1).unsqueeze(0)
#features = preprocess(train_features, features)
scores = []
score=0
for batch_idx in tqdm(range(n_runs // batch_few_shot_runs)):
runs = generate_runs(features, run_classes, run_indices, batch_idx)
runs_reduced = torch.zeros((runs.shape[0],runs.shape[1],runs.shape[2],n_components))
for i,run in enumerate(runs):
runs_reduced[i] = LDA(run,**dictlda)
means = torch.mean(runs_reduced[:,:,:n_shots], dim = 2)
distances = torch.norm(runs_reduced[:,:,n_shots:].reshape(batch_few_shot_runs, n_ways, 1, -1, n_components) | |
# -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class PropDeliveryStreamAmazonopensearchserviceRetryOptions(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.AmazonopensearchserviceRetryOptions"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-amazonopensearchserviceretryoptions.html

    Property Document:

    - ``p_DurationInSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-amazonopensearchserviceretryoptions.html#cfn-kinesisfirehose-deliverystream-amazonopensearchserviceretryoptions-durationinseconds
    """
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.AmazonopensearchserviceRetryOptions"

    # Optional retry duration; the validator enforces int when a value is set.
    p_DurationInSeconds: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "DurationInSeconds"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-amazonopensearchserviceretryoptions.html#cfn-kinesisfirehose-deliverystream-amazonopensearchserviceretryoptions-durationinseconds"""
@attr.s
class PropDeliveryStreamHiveJsonSerDe(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.HiveJsonSerDe"

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-hivejsonserde.html

    Property Document:

    - ``p_TimestampFormats``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-hivejsonserde.html#cfn-kinesisfirehose-deliverystream-hivejsonserde-timestampformats
    """
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.HiveJsonSerDe"

    # Optional list of timestamp format strings; the deep validator checks
    # each member is an intrinsic-string type and the container is a list.
    p_TimestampFormats: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "TimestampFormats"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-hivejsonserde.html#cfn-kinesisfirehose-deliverystream-hivejsonserde-timestampformats"""
@attr.s
class PropDeliveryStreamSchemaConfiguration(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.SchemaConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-schemaconfiguration.html
    Property Document:
    
    - ``p_CatalogId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-schemaconfiguration.html#cfn-kinesisfirehose-deliverystream-schemaconfiguration-catalogid
    - ``p_DatabaseName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-schemaconfiguration.html#cfn-kinesisfirehose-deliverystream-schemaconfiguration-databasename
    - ``p_Region``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-schemaconfiguration.html#cfn-kinesisfirehose-deliverystream-schemaconfiguration-region
    - ``p_RoleARN``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-schemaconfiguration.html#cfn-kinesisfirehose-deliverystream-schemaconfiguration-rolearn
    - ``p_TableName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-schemaconfiguration.html#cfn-kinesisfirehose-deliverystream-schemaconfiguration-tablename
    - ``p_VersionId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-schemaconfiguration.html#cfn-kinesisfirehose-deliverystream-schemaconfiguration-versionid
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.SchemaConfiguration"
    # All six properties are optional: each validator is wrapped in
    # attr.validators.optional(), so None (the default) is accepted.
    p_CatalogId: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "CatalogId"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-schemaconfiguration.html#cfn-kinesisfirehose-deliverystream-schemaconfiguration-catalogid"""
    p_DatabaseName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "DatabaseName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-schemaconfiguration.html#cfn-kinesisfirehose-deliverystream-schemaconfiguration-databasename"""
    p_Region: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Region"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-schemaconfiguration.html#cfn-kinesisfirehose-deliverystream-schemaconfiguration-region"""
    p_RoleARN: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "RoleARN"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-schemaconfiguration.html#cfn-kinesisfirehose-deliverystream-schemaconfiguration-rolearn"""
    p_TableName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "TableName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-schemaconfiguration.html#cfn-kinesisfirehose-deliverystream-schemaconfiguration-tablename"""
    p_VersionId: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "VersionId"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-schemaconfiguration.html#cfn-kinesisfirehose-deliverystream-schemaconfiguration-versionid"""
@attr.s
class PropDeliveryStreamSplunkRetryOptions(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.SplunkRetryOptions"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-splunkretryoptions.html
    Property Document:
    
    - ``p_DurationInSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-splunkretryoptions.html#cfn-kinesisfirehose-deliverystream-splunkretryoptions-durationinseconds
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.SplunkRetryOptions"
    # Optional int (validator wrapped in optional(); None default is accepted).
    p_DurationInSeconds: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "DurationInSeconds"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-splunkretryoptions.html#cfn-kinesisfirehose-deliverystream-splunkretryoptions-durationinseconds"""
@attr.s
class PropDeliveryStreamHttpEndpointConfiguration(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.HttpEndpointConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-httpendpointconfiguration.html
    Property Document:
    
    - ``rp_Url``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-httpendpointconfiguration.html#cfn-kinesisfirehose-deliverystream-httpendpointconfiguration-url
    - ``p_AccessKey``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-httpendpointconfiguration.html#cfn-kinesisfirehose-deliverystream-httpendpointconfiguration-accesskey
    - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-httpendpointconfiguration.html#cfn-kinesisfirehose-deliverystream-httpendpointconfiguration-name
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.HttpEndpointConfiguration"
    # Required ("rp_" prefix): the validator is NOT wrapped in optional(), so
    # leaving the default None in place fails validation at construction time.
    rp_Url: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Url"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-httpendpointconfiguration.html#cfn-kinesisfirehose-deliverystream-httpendpointconfiguration-url"""
    # Optional properties below ("p_" prefix): validators accept None.
    p_AccessKey: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "AccessKey"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-httpendpointconfiguration.html#cfn-kinesisfirehose-deliverystream-httpendpointconfiguration-accesskey"""
    p_Name: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Name"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-httpendpointconfiguration.html#cfn-kinesisfirehose-deliverystream-httpendpointconfiguration-name"""
@attr.s
class PropDeliveryStreamCopyCommand(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.CopyCommand"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-copycommand.html
    Property Document:
    
    - ``rp_DataTableName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-copycommand.html#cfn-kinesisfirehose-deliverystream-copycommand-datatablename
    - ``p_CopyOptions``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-copycommand.html#cfn-kinesisfirehose-deliverystream-copycommand-copyoptions
    - ``p_DataTableColumns``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-copycommand.html#cfn-kinesisfirehose-deliverystream-copycommand-datatablecolumns
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.CopyCommand"
    # Required ("rp_" prefix): validator is not wrapped in optional(), so the
    # default None fails validation at construction time.
    rp_DataTableName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "DataTableName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-copycommand.html#cfn-kinesisfirehose-deliverystream-copycommand-datatablename"""
    # Optional properties below ("p_" prefix): validators accept None.
    p_CopyOptions: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "CopyOptions"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-copycommand.html#cfn-kinesisfirehose-deliverystream-copycommand-copyoptions"""
    p_DataTableColumns: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "DataTableColumns"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-copycommand.html#cfn-kinesisfirehose-deliverystream-copycommand-datatablecolumns"""
@attr.s
class PropDeliveryStreamOpenXJsonSerDe(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.OpenXJsonSerDe"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-openxjsonserde.html
    Property Document:
    
    - ``p_CaseInsensitive``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-openxjsonserde.html#cfn-kinesisfirehose-deliverystream-openxjsonserde-caseinsensitive
    - ``p_ColumnToJsonKeyMappings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-openxjsonserde.html#cfn-kinesisfirehose-deliverystream-openxjsonserde-columntojsonkeymappings
    - ``p_ConvertDotsInJsonKeysToUnderscores``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-openxjsonserde.html#cfn-kinesisfirehose-deliverystream-openxjsonserde-convertdotsinjsonkeystounderscores
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.OpenXJsonSerDe"
    # All properties are optional: each validator is wrapped in optional().
    p_CaseInsensitive: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "CaseInsensitive"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-openxjsonserde.html#cfn-kinesisfirehose-deliverystream-openxjsonserde-caseinsensitive"""
    # Mapping validated key-by-key: keys must be str, values must pass
    # TypeCheck.intrinsic_str_type (note: no iterable/mapping type check on the
    # container itself beyond deep_mapping's own requirements).
    p_ColumnToJsonKeyMappings: typing.Dict[str, TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type))),
        metadata={AttrMeta.PROPERTY_NAME: "ColumnToJsonKeyMappings"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-openxjsonserde.html#cfn-kinesisfirehose-deliverystream-openxjsonserde-columntojsonkeymappings"""
    p_ConvertDotsInJsonKeysToUnderscores: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "ConvertDotsInJsonKeysToUnderscores"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-openxjsonserde.html#cfn-kinesisfirehose-deliverystream-openxjsonserde-convertdotsinjsonkeystounderscores"""
@attr.s
class PropDeliveryStreamOrcSerDe(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.OrcSerDe"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html
    Property Document:
    
    - ``p_BlockSizeBytes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-blocksizebytes
    - ``p_BloomFilterColumns``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-bloomfiltercolumns
    - ``p_BloomFilterFalsePositiveProbability``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-bloomfilterfalsepositiveprobability
    - ``p_Compression``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-compression
    - ``p_DictionaryKeyThreshold``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-dictionarykeythreshold
    - ``p_EnablePadding``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-enablepadding
    - ``p_FormatVersion``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-formatversion
    - ``p_PaddingTolerance``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-paddingtolerance
    - ``p_RowIndexStride``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-rowindexstride
    - ``p_StripeSizeBytes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-stripesizebytes
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.OrcSerDe"
    # All properties are optional: each validator is wrapped in optional().
    # NOTE(review): float-typed fields use a strict instance_of(float), so an
    # int literal (e.g. 0) would be rejected — pass a float (e.g. 0.0).
    p_BlockSizeBytes: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "BlockSizeBytes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-blocksizebytes"""
    p_BloomFilterColumns: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "BloomFilterColumns"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-bloomfiltercolumns"""
    p_BloomFilterFalsePositiveProbability: float = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(float)),
        metadata={AttrMeta.PROPERTY_NAME: "BloomFilterFalsePositiveProbability"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-bloomfilterfalsepositiveprobability"""
    p_Compression: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Compression"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-compression"""
    p_DictionaryKeyThreshold: float = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(float)),
        metadata={AttrMeta.PROPERTY_NAME: "DictionaryKeyThreshold"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-dictionarykeythreshold"""
    p_EnablePadding: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "EnablePadding"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-enablepadding"""
    p_FormatVersion: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "FormatVersion"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-formatversion"""
    p_PaddingTolerance: float = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(float)),
        metadata={AttrMeta.PROPERTY_NAME: "PaddingTolerance"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-paddingtolerance"""
    p_RowIndexStride: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "RowIndexStride"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-rowindexstride"""
    p_StripeSizeBytes: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "StripeSizeBytes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-orcserde.html#cfn-kinesisfirehose-deliverystream-orcserde-stripesizebytes"""
@attr.s
class PropDeliveryStreamElasticsearchBufferingHints(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.ElasticsearchBufferingHints"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-elasticsearchbufferinghints.html
    Property Document:
    
    - ``p_IntervalInSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-elasticsearchbufferinghints.html#cfn-kinesisfirehose-deliverystream-elasticsearchbufferinghints-intervalinseconds
    - ``p_SizeInMBs``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-elasticsearchbufferinghints.html#cfn-kinesisfirehose-deliverystream-elasticsearchbufferinghints-sizeinmbs
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.ElasticsearchBufferingHints"
    # Both properties are optional ints (validators wrapped in optional()).
    p_IntervalInSeconds: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "IntervalInSeconds"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-elasticsearchbufferinghints.html#cfn-kinesisfirehose-deliverystream-elasticsearchbufferinghints-intervalinseconds"""
    p_SizeInMBs: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "SizeInMBs"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-elasticsearchbufferinghints.html#cfn-kinesisfirehose-deliverystream-elasticsearchbufferinghints-sizeinmbs"""
@attr.s
class PropDeliveryStreamCloudWatchLoggingOptions(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.CloudWatchLoggingOptions"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-cloudwatchloggingoptions.html
    Property Document:
    
    - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-cloudwatchloggingoptions.html#cfn-kinesisfirehose-deliverystream-cloudwatchloggingoptions-enabled
    - ``p_LogGroupName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-cloudwatchloggingoptions.html#cfn-kinesisfirehose-deliverystream-cloudwatchloggingoptions-loggroupname
    - ``p_LogStreamName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-cloudwatchloggingoptions.html#cfn-kinesisfirehose-deliverystream-cloudwatchloggingoptions-logstreamname
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.CloudWatchLoggingOptions"
    # All properties are optional: each validator is wrapped in optional().
    p_Enabled: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-cloudwatchloggingoptions.html#cfn-kinesisfirehose-deliverystream-cloudwatchloggingoptions-enabled"""
    p_LogGroupName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "LogGroupName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-cloudwatchloggingoptions.html#cfn-kinesisfirehose-deliverystream-cloudwatchloggingoptions-loggroupname"""
    p_LogStreamName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "LogStreamName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-cloudwatchloggingoptions.html#cfn-kinesisfirehose-deliverystream-cloudwatchloggingoptions-logstreamname"""
@attr.s
class PropDeliveryStreamBufferingHints(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.BufferingHints"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-bufferinghints.html
    Property Document:
    
    - ``p_IntervalInSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-bufferinghints.html#cfn-kinesisfirehose-deliverystream-bufferinghints-intervalinseconds
    - ``p_SizeInMBs``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-bufferinghints.html#cfn-kinesisfirehose-deliverystream-bufferinghints-sizeinmbs
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.BufferingHints"
    # Both properties are optional ints (validators wrapped in optional()).
    p_IntervalInSeconds: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "IntervalInSeconds"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-bufferinghints.html#cfn-kinesisfirehose-deliverystream-bufferinghints-intervalinseconds"""
    p_SizeInMBs: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "SizeInMBs"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-bufferinghints.html#cfn-kinesisfirehose-deliverystream-bufferinghints-sizeinmbs"""
@attr.s
class PropDeliveryStreamProcessorParameter(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.ProcessorParameter"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-processorparameter.html
    Property Document:
    
    - ``rp_ParameterName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-processorparameter.html#cfn-kinesisfirehose-deliverystream-processorparameter-parametername
    - ``rp_ParameterValue``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-processorparameter.html#cfn-kinesisfirehose-deliverystream-processorparameter-parametervalue
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.ProcessorParameter"
    # Both properties are required ("rp_" prefix): validators are not wrapped
    # in optional(), so the default None fails validation at construction time.
    rp_ParameterName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ParameterName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-processorparameter.html#cfn-kinesisfirehose-deliverystream-processorparameter-parametername"""
    rp_ParameterValue: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "ParameterValue"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-processorparameter.html#cfn-kinesisfirehose-deliverystream-processorparameter-parametervalue"""
@attr.s
class PropDeliveryStreamAmazonopensearchserviceBufferingHints(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.AmazonopensearchserviceBufferingHints"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-amazonopensearchservicebufferinghints.html
    Property Document:
    
    - ``p_IntervalInSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-amazonopensearchservicebufferinghints.html#cfn-kinesisfirehose-deliverystream-amazonopensearchservicebufferinghints-intervalinseconds
    - ``p_SizeInMBs``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-amazonopensearchservicebufferinghints.html#cfn-kinesisfirehose-deliverystream-amazonopensearchservicebufferinghints-sizeinmbs
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.AmazonopensearchserviceBufferingHints"
    # Both properties are optional ints (validators wrapped in optional()).
    p_IntervalInSeconds: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "IntervalInSeconds"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-amazonopensearchservicebufferinghints.html#cfn-kinesisfirehose-deliverystream-amazonopensearchservicebufferinghints-intervalinseconds"""
    p_SizeInMBs: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "SizeInMBs"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-amazonopensearchservicebufferinghints.html#cfn-kinesisfirehose-deliverystream-amazonopensearchservicebufferinghints-sizeinmbs"""
@attr.s
class PropDeliveryStreamDeliveryStreamEncryptionConfigurationInput(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.DeliveryStreamEncryptionConfigurationInput"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-deliverystreamencryptionconfigurationinput.html
    Property Document:
    
    - ``rp_KeyType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-deliverystreamencryptionconfigurationinput.html#cfn-kinesisfirehose-deliverystream-deliverystreamencryptionconfigurationinput-keytype
    - ``p_KeyARN``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-deliverystreamencryptionconfigurationinput.html#cfn-kinesisfirehose-deliverystream-deliverystreamencryptionconfigurationinput-keyarn
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.DeliveryStreamEncryptionConfigurationInput"
    # Required ("rp_" prefix): validator is not wrapped in optional(), so the
    # default None fails validation at construction time.
    rp_KeyType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "KeyType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-deliverystreamencryptionconfigurationinput.html#cfn-kinesisfirehose-deliverystream-deliverystreamencryptionconfigurationinput-keytype"""
    # Optional ("p_" prefix): validator accepts None.
    p_KeyARN: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "KeyARN"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-deliverystreamencryptionconfigurationinput.html#cfn-kinesisfirehose-deliverystream-deliverystreamencryptionconfigurationinput-keyarn"""
@attr.s
class PropDeliveryStreamElasticsearchRetryOptions(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.ElasticsearchRetryOptions"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-elasticsearchretryoptions.html
    Property Document:
    
    - ``p_DurationInSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-elasticsearchretryoptions.html#cfn-kinesisfirehose-deliverystream-elasticsearchretryoptions-durationinseconds
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.ElasticsearchRetryOptions"
    # Optional int (validator wrapped in optional(); None default is accepted).
    p_DurationInSeconds: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "DurationInSeconds"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-elasticsearchretryoptions.html#cfn-kinesisfirehose-deliverystream-elasticsearchretryoptions-durationinseconds"""
@attr.s
class PropDeliveryStreamKMSEncryptionConfig(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.KMSEncryptionConfig"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-kmsencryptionconfig.html
    Property Document:
    
    - ``rp_AWSKMSKeyARN``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-kmsencryptionconfig.html#cfn-kinesisfirehose-deliverystream-kmsencryptionconfig-awskmskeyarn
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.KMSEncryptionConfig"
    # Required ("rp_" prefix): validator is not wrapped in optional(), so the
    # default None fails validation at construction time.
    rp_AWSKMSKeyARN: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "AWSKMSKeyARN"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-kmsencryptionconfig.html#cfn-kinesisfirehose-deliverystream-kmsencryptionconfig-awskmskeyarn"""
@attr.s
class PropDeliveryStreamDeserializer(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.Deserializer"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-deserializer.html
    Property Document:
    
    - ``p_HiveJsonSerDe``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-deserializer.html#cfn-kinesisfirehose-deliverystream-deserializer-hivejsonserde
    - ``p_OpenXJsonSerDe``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-deserializer.html#cfn-kinesisfirehose-deliverystream-deserializer-openxjsonserde
    """
    # CloudFormation type name rendered for this sub-property.
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.Deserializer"
    # Both optional. Callers may pass either a Prop... instance or a plain dict:
    # the converter (Prop...from_dict) runs before the validator, normalizing a
    # dict into the corresponding Property instance.
    p_HiveJsonSerDe: typing.Union['PropDeliveryStreamHiveJsonSerDe', dict] = attr.ib(
        default=None,
        converter=PropDeliveryStreamHiveJsonSerDe.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropDeliveryStreamHiveJsonSerDe)),
        metadata={AttrMeta.PROPERTY_NAME: "HiveJsonSerDe"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-deserializer.html#cfn-kinesisfirehose-deliverystream-deserializer-hivejsonserde"""
    p_OpenXJsonSerDe: typing.Union['PropDeliveryStreamOpenXJsonSerDe', dict] = attr.ib(
        default=None,
        converter=PropDeliveryStreamOpenXJsonSerDe.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropDeliveryStreamOpenXJsonSerDe)),
        metadata={AttrMeta.PROPERTY_NAME: "OpenXJsonSerDe"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-deserializer.html#cfn-kinesisfirehose-deliverystream-deserializer-openxjsonserde"""
@attr.s
class PropDeliveryStreamKinesisStreamSourceConfiguration(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.KinesisStreamSourceConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-kinesisstreamsourceconfiguration.html
    Property Document:
    - ``rp_KinesisStreamARN``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-kinesisstreamsourceconfiguration.html#cfn-kinesisfirehose-deliverystream-kinesisstreamsourceconfiguration-kinesisstreamarn
    - ``rp_RoleARN``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-kinesisstreamsourceconfiguration.html#cfn-kinesisfirehose-deliverystream-kinesisstreamsourceconfiguration-rolearn
    """
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.KinesisStreamSourceConfiguration"
    # Both properties are required ("rp_"): validators are not optional(),
    # so a None default fails validation unless a value is supplied.
    rp_KinesisStreamARN: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "KinesisStreamARN"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-kinesisstreamsourceconfiguration.html#cfn-kinesisfirehose-deliverystream-kinesisstreamsourceconfiguration-kinesisstreamarn"""
    rp_RoleARN: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "RoleARN"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-kinesisstreamsourceconfiguration.html#cfn-kinesisfirehose-deliverystream-kinesisstreamsourceconfiguration-rolearn"""
@attr.s
class PropDeliveryStreamRedshiftRetryOptions(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.RedshiftRetryOptions"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-redshiftretryoptions.html
    Property Document:
    - ``p_DurationInSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-redshiftretryoptions.html#cfn-kinesisfirehose-deliverystream-redshiftretryoptions-durationinseconds
    """
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.RedshiftRetryOptions"
    # Optional ("p_") integer property; None is accepted via optional().
    p_DurationInSeconds: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "DurationInSeconds"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-redshiftretryoptions.html#cfn-kinesisfirehose-deliverystream-redshiftretryoptions-durationinseconds"""
@attr.s
class PropDeliveryStreamRetryOptions(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.RetryOptions"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-retryoptions.html
    Property Document:
    - ``p_DurationInSeconds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-retryoptions.html#cfn-kinesisfirehose-deliverystream-retryoptions-durationinseconds
    """
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.RetryOptions"
    # Optional ("p_") integer property; None is accepted via optional().
    p_DurationInSeconds: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "DurationInSeconds"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-retryoptions.html#cfn-kinesisfirehose-deliverystream-retryoptions-durationinseconds"""
@attr.s
class PropDeliveryStreamParquetSerDe(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.ParquetSerDe"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-parquetserde.html
    Property Document:
    - ``p_BlockSizeBytes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-parquetserde.html#cfn-kinesisfirehose-deliverystream-parquetserde-blocksizebytes
    - ``p_Compression``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-parquetserde.html#cfn-kinesisfirehose-deliverystream-parquetserde-compression
    - ``p_EnableDictionaryCompression``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-parquetserde.html#cfn-kinesisfirehose-deliverystream-parquetserde-enabledictionarycompression
    - ``p_MaxPaddingBytes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-parquetserde.html#cfn-kinesisfirehose-deliverystream-parquetserde-maxpaddingbytes
    - ``p_PageSizeBytes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-parquetserde.html#cfn-kinesisfirehose-deliverystream-parquetserde-pagesizebytes
    - ``p_WriterVersion``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-parquetserde.html#cfn-kinesisfirehose-deliverystream-parquetserde-writerversion
    """
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.ParquetSerDe"
    # All properties below are optional ("p_"): every validator is wrapped in
    # attr.validators.optional(), so the None defaults pass validation.
    p_BlockSizeBytes: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "BlockSizeBytes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-parquetserde.html#cfn-kinesisfirehose-deliverystream-parquetserde-blocksizebytes"""
    p_Compression: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Compression"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-parquetserde.html#cfn-kinesisfirehose-deliverystream-parquetserde-compression"""
    p_EnableDictionaryCompression: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "EnableDictionaryCompression"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-parquetserde.html#cfn-kinesisfirehose-deliverystream-parquetserde-enabledictionarycompression"""
    p_MaxPaddingBytes: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "MaxPaddingBytes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-parquetserde.html#cfn-kinesisfirehose-deliverystream-parquetserde-maxpaddingbytes"""
    p_PageSizeBytes: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "PageSizeBytes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-parquetserde.html#cfn-kinesisfirehose-deliverystream-parquetserde-pagesizebytes"""
    p_WriterVersion: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "WriterVersion"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-parquetserde.html#cfn-kinesisfirehose-deliverystream-parquetserde-writerversion"""
@attr.s
class PropDeliveryStreamVpcConfiguration(Property):
    """
    AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.VpcConfiguration"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-vpcconfiguration.html
    Property Document:
    - ``rp_RoleARN``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-vpcconfiguration.html#cfn-kinesisfirehose-deliverystream-vpcconfiguration-rolearn
    - ``rp_SecurityGroupIds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-vpcconfiguration.html#cfn-kinesisfirehose-deliverystream-vpcconfiguration-securitygroupids
    - ``rp_SubnetIds``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-vpcconfiguration.html#cfn-kinesisfirehose-deliverystream-vpcconfiguration-subnetids
    """
    AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.VpcConfiguration"
    # All three properties are required ("rp_"); the list-valued ones use
    # deep_iterable so both the container (list) and each member are checked.
    rp_RoleARN: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "RoleARN"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-vpcconfiguration.html#cfn-kinesisfirehose-deliverystream-vpcconfiguration-rolearn"""
    rp_SecurityGroupIds: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "SecurityGroupIds"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-vpcconfiguration.html#cfn-kinesisfirehose-deliverystream-vpcconfiguration-securitygroupids"""
    rp_SubnetIds: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "SubnetIds"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-vpcconfiguration.html#cfn-kinesisfirehose-deliverystream-vpcconfiguration-subnetids"""
@attr.s
class PropDeliveryStreamHttpEndpointCommonAttribute(Property):
"""
AWS Object Type = "AWS::KinesisFirehose::DeliveryStream.HttpEndpointCommonAttribute"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-httpendpointcommonattribute.html
Property Document:
- ``rp_AttributeName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-httpendpointcommonattribute.html#cfn-kinesisfirehose-deliverystream-httpendpointcommonattribute-attributename
- ``rp_AttributeValue``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-httpendpointcommonattribute.html#cfn-kinesisfirehose-deliverystream-httpendpointcommonattribute-attributevalue
"""
AWS_OBJECT_TYPE = "AWS::KinesisFirehose::DeliveryStream.HttpEndpointCommonAttribute"
rp_AttributeName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "AttributeName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-httpendpointcommonattribute.html#cfn-kinesisfirehose-deliverystream-httpendpointcommonattribute-attributename"""
rp_AttributeValue: TypeHint.intrinsic_str = attr.ib(
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 10 15:51:15 2021
@author: rosariouceda-sosa
"""
###########################################
# Extraction of Propbank, Verbnet and mappings
# It requires verbnet3.4, verbnet3.3 and verbnet3.2 in nltk_data directory,
# as well as the latest version of propbank
#
# In particular, it does require the last
###########################################
import json
import re
from nltk.corpus import treebank
from nltk.corpus.util import LazyCorpusLoader
from VerbnetCorpusReaderEx import VerbnetCorpusReaderEx
from nltk.corpus import PropbankCorpusReader
from semlinkEx import query_pb_vn_mapping, query_pb_vn_mapping_1_2
from xml.etree import ElementTree
from propbank_readerEx import PropbankCorpusReaderEx
#from nltk.corpus import propbank
# PropBank corpus reader over the locally installed "propbank-latest" data;
# file ids are normalized by stripping the "wsj/NN/" prefix.
propbank = LazyCorpusLoader(
    "propbank-latest",
    PropbankCorpusReaderEx,
    "prop.txt",
    r"frames/.*\.xml",
    "verbs.txt",
    lambda filename: re.sub(r"^wsj/\d\d/", "", filename),
    treebank,
)  # Must be defined *after* treebank corpus.
# The three VerbNet releases that can be queried; each must be installed in
# the nltk_data directory (see module header).
vn_dict = {
    "verbnet3.2": LazyCorpusLoader("verbnet3.2", VerbnetCorpusReaderEx, r"(?!\.).*\.xml"),
    "verbnet3.3": LazyCorpusLoader("verbnet3.3", VerbnetCorpusReaderEx, r"(?!\.).*\.xml"),
    "verbnet3.4": LazyCorpusLoader("verbnet3.4", VerbnetCorpusReaderEx, r"(?!\.).*\.xml")
}
#The default is 3.4
current_vn = vn_dict["verbnet3.4"]
# NOTE(review): hard-coded, user-specific paths; adapt to the local
# installation before running.
VN_FILES = "/Users/rosariouceda-sosa/Documents/usr/SemanticsSvces/verbnet/verbnet-master/verbnet3.4"
VN_DIR = "/Users/rosariouceda-sosa/Documents/usr/SemanticsSvces/verbnet/"
PB_DIR = "/Users/rosariouceda-sosa/Documents/usr/SemanticsSvces/propbank/"
outputFile = ""
logFile = ""
processedGroupingsVN = {}
processedMaps = []
#key is entity and the list of mappings they have. Keys for Verbnet, Propbank and WN are their id's
memberToMap = {}
#inverse of memberToMap
mapToMember = {}
#Each propbank has: [roleSet] : name, arguments, lemmas, provenance
pb_index = {}
#Each verbnet has: [CODE] : name, [arguments] variableName, variableType, lemmas, provenance,
vn_index = {}
#{roleset} admire-31.2': {'provenance': 'verbnet3.4', 'arguments' : {'ARG0' : {'description' : "XXX" , 'vnArg' : Agent}}]
map_index = {}
extended_semnlink_index = []
#
#vnCodeToLemma = {}
###### LOG
# NOTE(review): opened at import time with a hard-coded path and never
# explicitly closed; the handle lives for the whole run.
outLog = open("/Users/rosariouceda-sosa/Downloads/OutLog_ULKB_Clean.txt", "w")
###########################################################
# AUXILIARY FUNCTIONS
###########################################################
#IMPORTANT: To standardize both verbnet names and codes,
def vn_standard(_verb: str) -> str:
    """Normalization hook for VerbNet names/codes.

    Currently the identity function; an earlier variant replaced '.' with
    '-' (kept below for reference), but normalization is disabled.
    """
    # Previously: return _verb.replace(".", "-")
    return _verb
def count_dict() -> int:
    """Return the length of the largest mapping list in ``mapToMember``.

    Returns 0 when ``mapToMember`` is empty (the old loop returned 0 too).
    """
    # max() over value lengths replaces the manual running-maximum loop;
    # default=0 preserves the empty-dict result.
    return max((len(members) for members in mapToMember.values()), default=0)
def compare_strs(_first: str, _second: str) -> bool:
    """Case-insensitively compare two strings.

    Returns a real bool now; the old body returned 1/0 despite the bool
    annotation.  Since ``bool`` is a subclass of ``int`` (True == 1,
    False == 0), callers using truth tests or arithmetic are unaffected.
    """
    return _first.lower() == _second.lower()
def checkKeyStr(_dict: dict, _key: str) -> str:
    """Return ``_dict[_key]`` if the key is present, else the empty string."""
    # dict.get with a default replaces the membership-test-then-index idiom
    # (one lookup instead of two).
    return _dict.get(_key, "")
def toRDFStr(_in: str) -> str:
    """Sanitize a string for use as an RDF identifier fragment.

    Applies a fixed, ORDER-SENSITIVE sequence of substitutions: '.' first
    becomes '-', and at the end every remaining '-' becomes '_', so dots
    ultimately map to underscores.
    """
    substitutions = (
        ("/", "_"), (":", "-"), (" ", ""), ("(", "_"), (")", "_"),
        ("'", ""), (".", "-"), (",", "_"), ("__", "_"), (">", ""),
        ("<", ""), ("#", "-"), ("%", "_"), ("?", "_"),
        # Collapse hyphens last: VerbNet naming is inconsistent between
        # SemLink and NLTK, so all remaining '-' become '_'.
        ("-", "_"),
    )
    for old, new in substitutions:
        _in = _in.replace(old, new)
    return _in
def checkArray(_dict: dict, _name: str) -> list:
    """Return ``_dict[_name]`` if the key is present, else an empty list."""
    # dict.get with a default replaces membership test + .get() (one lookup).
    return _dict.get(_name, [])
# Whether the mapping points to 'nothing'
def wrong_mapping(_term: str) -> bool:
    """Return True when a SemLink mapping target is effectively 'nothing'.

    Empty/whitespace-only strings and the placeholder tokens NP/NM
    (all-upper or all-lower, with or without a trailing '>') count as
    'nothing'.  Mixed case like 'Np' does NOT match, same as before.
    """
    stripped = _term.strip()
    if not stripped:
        return True
    return stripped in {'NP', 'np', 'NP>', 'np>', 'NM', 'nm', 'NM>', 'nm>'}
def clean_text(_text: str, _oneLine: bool) -> str:
    """Escape quotes (and normalize backslash-slash sequences) so the text
    can be embedded inside quoted output; optionally strip newlines.

    The backtick is replaced by a space; when ``_oneLine`` is true all
    newlines are removed.
    """
    _text = _text.replace("\"", "\\\"")
    _text = _text.replace("'", "\\'")
    # r"\/" makes the backslash explicit: the old literal "\/" only worked
    # because Python leaves unknown escapes alone (a DeprecationWarning
    # today, a SyntaxError in future versions).
    _text = _text.replace(r"\/", "-")
    _text = _text.replace("`", " ")
    if _oneLine:
        _text = _text.replace("\n", "")
    return _text
def chunk(mappings: str) -> list:
    """Split a comma-separated mapping string into a list of trimmed items.

    Bug fix: the old loop rebound the loop variable (``item = item.strip()``),
    which never modified the list, so items kept their surrounding
    whitespace.  The comprehension actually strips each item.
    """
    return [item.strip() for item in mappings.split(',')]
# from admire-31.2 to admire
def get_vn_lemma(_verb: str) -> str:
    """Return the lemma of a VerbNet id, e.g. 'admire-31.2' -> 'admire'."""
    lemma, _sep, _rest = _verb.partition('-')
    return lemma
# from admire-31.2 to 31.2 -- The first hyphen is the one that counts
def get_vn_code(_verb: str) -> str:
    """Return the class code of a VerbNet id, e.g. 'admire-31.2' -> '31.2'.

    Only the FIRST hyphen splits lemma from code.  As before, an id with
    no hyphen raises IndexError.
    """
    return vn_standard(_verb).split('-', 1)[1]
#from admire.01 to admire
def get_pb_lemma(_verb: str) -> str:
    """Return the lemma of a PropBank roleset, e.g. 'admire.01' -> 'admire'."""
    lemma, _sep, _rest = _verb.partition('.')
    return lemma
def get_vn_varName(_var: str) -> str:
    """Strip a single leading '?' from a VerbNet variable name, if present."""
    return _var[1:] if _var.startswith('?') else _var
def match_vn_codes(_first: str, _second: str) -> bool:
    """True when two VerbNet codes are equal after RDF sanitization."""
    return toRDFStr(_first) == toRDFStr(_second)
def matchRDF(_item, _dict: dict) -> str:
    """Return the first key of ``_dict`` whose sanitized (RDF) form equals
    the sanitized form of ``_item``, or '' when nothing matches."""
    toMatch = toRDFStr(_item)
    return next((keyItem for keyItem in _dict if toRDFStr(keyItem) == toMatch), "")
# Also consider one start with another
def vn_in_dict(_item: str, _dict: dict, _name: str) -> bool:
    """True when ``_item`` equals or prefix-matches an entry of ``_dict``.

    When ``_name`` is empty the dict KEYS are compared; otherwise each
    entry's ``_name`` field is compared.

    Bug fix: the old code also tested ``compareTo.startswith(_name)``.
    With an empty ``_name``, ``startswith("")`` is always True, so the
    function returned True for ANY non-empty dict; with a non-empty
    ``_name`` it compared values against the field name itself, which is
    meaningless.  That clause is removed.
    """
    for keyItem in _dict:
        compareTo = keyItem if len(_name) == 0 else _dict[keyItem][_name]
        if compareTo == _item or compareTo.startswith(_item):
            return True
    return False
def vn_to_swap(_item:str, _dict: {}, _name:str) -> str :
    """Find an entry of ``_dict`` whose VerbNet code equals or
    prefix-matches ``_item``'s code but comes from an OLDER VerbNet
    release; return that entry, or "" when there is none.

    When ``_name`` is empty the dict keys are the candidate ids; otherwise
    each entry's ``_name`` field is.

    NOTE(review): versions are compared as strings ('2' < '4'); fine for
    the single-digit minors present here (3.2/3.3/3.4) but would misorder
    a hypothetical '3.10'.
    """
    # code portion of the id, e.g. '31.2' from 'admire-31.2'
    _itemCode = get_vn_code(_item)
    if _itemCode not in vn_index :
        return ""
    # provenance looks like 'verbnet3.4'; keep the part after the dot ('4')
    _itemProvenance = vn_index[_itemCode]['provenance']
    _itemVersion = _itemProvenance.split('.',1)[1]
    for keyItem in _dict:
        if len(_name) == 0 :
            compareTo = keyItem
        else :
            compareTo = _dict[keyItem][_name]
        compareToCode = get_vn_code(compareTo)
        # prefix match in either direction covers sub-classes, e.g. 31.2 vs 31.2.1
        if _itemCode == compareToCode or compareToCode.startswith(_itemCode) or _itemCode.startswith(compareToCode) :
            if compareToCode in vn_index :
                compareToProvenance = vn_index[compareToCode]['provenance']
                compareToVersion = compareToProvenance.split('.', 1)[1]
                if compareToVersion < _itemVersion :
                    return compareTo
    return ""
def unmatched_roles(_have: list, _want: list) -> list:
    """Return the lower-cased entries of ``_have`` that do not appear in
    ``_want`` (comparison is case-insensitive)."""
    # Build the lookup set once instead of re-scanning _want per entry
    # (O(n+m) instead of O(n*m)).
    wanted = {wantEntry.lower() for wantEntry in _want}
    return [haveEntry.lower() for haveEntry in _have
            if haveEntry.lower() not in wanted]
def wrongly_matched_roles(_have: list, _want: list) -> list:
    """Return the lower-cased entries of ``_have`` absent from ``_want``
    (case-insensitive).

    NOTE(review): this body is currently byte-identical in behavior to
    unmatched_roles(); presumably it was meant to diverge -- confirm
    against the callers.
    """
    result = []
    for haveEntry in _have:
        lowered = haveEntry.lower()
        if all(wantEntry.lower() != lowered for wantEntry in _want):
            result.append(lowered)
    return result
def getRoleStrFrom(_list: list) -> str:
    """Render a role list as 'a, b, c': strip one leading '?' from each
    entry and drop duplicates (first occurrence wins)."""
    noDupList = []
    for item in _list:
        if item.startswith("?"):
            item = item[1:]
        if item not in noDupList:
            noDupList.append(item)
    # str.join replaces the manual first-item/separator bookkeeping.
    return ", ".join(noDupList)
def getRoleListFrom(_list: list) -> list:
    """Return the role list with one leading '?' stripped from each entry
    and duplicates removed, preserving first-seen order.

    (Removed the unused ``resultStr`` local left over from the string
    variant of this helper.)
    """
    noDupList = []
    for item in _list:
        if item.startswith("?"):
            item = item[1:]
        if item not in noDupList:
            noDupList.append(item)
    return noDupList
######################################################################
# SEMLINK INGESTION
######################################################################
#maps from verbnet class + argument a URL
#Check the variables that have been already mapped through Verbnet
def map_to_url(_class: str, _param : str) -> []:
    """Map a VerbNet class + argument name to its recorded URL value(s).

    Looks ``_param`` up case-insensitively among the arguments recorded
    for ``_class`` in the module-level ``vnClassToVars`` table; returns a
    list of matching values, or an empty list when the class is unknown.

    NOTE(review): ``vnClassToVars`` / ``framesToVars`` are not defined in
    this part of the script -- confirm they are populated before this is
    called.
    """
    global vnClassToVars, framesToVars
    resultList = []
    if _class not in vnClassToVars :
        return resultList
    argList = vnClassToVars[_class]
    for argKey in argList :
        if argKey.lower() == _param.lower():
            resultList.append(argList[argKey])
    # The frames fallback below was disabled; kept for reference.
    # elif _class in framesToVars : #try the frames
    #     argList = framesToVars[_class]
    #     for frameKey in argList :
    #         for argKey in argList[frameKey] :
    #             if argKey.lower() == _param.lower() :
    #                 resultList.append(argList[frameKey][argKey])
    return resultList
def process_semlink_1_2() :
global provenance, pbToMap_params, pbToMap, semLinkFromPB
# from {'mapping': '51.2', 'source': 'verbnet3.4', 'arguments': {'ARG0': 'Theme'}}
# TO map_index
#[{'vnVerb': 'admire-31.2', 'provenance': 'verbnet3.4', 'arguments' : {'ARG0' : {'description' : "XXX" , 'vnArg' : Agent}}]
oldProvenance = provenance
provenance = "semlink 1.2.2"
for roleset in pb_index :
if "abound.01" in roleset:
print("DEBUG " + roleset)
semLinkmappingList = query_pb_vn_mapping_1_2(roleset)
#If there is no mapping, ignore.
if not semLinkmappingList or len(semLinkmappingList) == 0 :
# if outLog is not None :
# outLog.write("PROPBANK NO_SEMLINK_1_2," + roleset + "\n")
# outLog.flush()
continue
#If there is a mapping BUT we don't have the roleset, it's an issue.
if roleset not in map_index :
if outLog is not None :
outLog.write("NO_PROPBANK SEMLINK_1_2," + roleset + "\n")
outLog.flush()
map_index[roleset] = {}
#Grab the current map_index entry. We know it's there
ourMappings = map_index[roleset]
for mapping in semLinkmappingList :
vnRawCode = mapping['mapping']
vnRawCode = vn_standard(vnRawCode)
vnRawName = ""
if vnRawCode in vn_index :
vnRawName = vn_index[vnRawCode]['name']
else : # use a hack to substitute the first hyphen by a dot. Oh brother...
if outLog is not None :
outLog.write("NO VERBNET SEMLINK_1_2," + vnRawName + "," + vnRawCode + "\n")
outLog.flush()
continue #go to the next mapping
#If the verbnet | |
#
"""handle input Text for Larch -- inclides translation to Python text
"""
from __future__ import print_function
from utils import isValidName, isNumber, isLiteralStr, strip_comments, find_delims
def get_DefVar(text):
    """
    looks for a defined-variable statement, of the form
       >> def varname = expression
    returns (varname, expression) if this is a valid defvar statement
    or (None, None) if not a valid defvar statement
    """
    # must contain '=' (not at position 0) and start with the 'def ' keyword
    if text.find('=') > 0 and text.startswith('def '):
        # pad '=' with spaces so split() reliably isolates it as a token
        t = text[4:].replace('=',' = ').strip()
        words = t.split()
        if len(words) > 2 and words[1] == '=':
            iequal = t.find('=')
            iparen = t.find('(')
            icolon = t.find(':')
            # sentinel past end-of-string: a missing '(' or ':' can then
            # never come before the '='
            if iparen < 0 :
                iparen = len(t)+1
            if icolon < 0 :
                icolon = len(t)+1
            # print iequal, iparen, icolon, words[0], isValidName(words[0])
            # '=' before any '(' or ':' means this is an assignment, not a
            # function def; the target must also be a valid name
            if (iequal < iparen and iequal < icolon and
                isValidName(words[0])):
                return words[0], t[iequal+1:].strip()
    return None, None
class InputText:
"""Input Larch Code: handles loading and reading code text, and
providing blocks of compile-able python code to be converted to AST.
InputText accepts and stores single or multiple lines of input text,
including as from an interactive prompt, watching for blocks of code,
and keepin track of whether a block are complete.
When asked for the next block of code, it emits blocks of valid
(hopefully!) python code ready to parsed by 'ast.parse'
Uses a FIFO (First In, First Out) buffer to allow mixing of input/output.
Can read from a specified input source for 'interactive' mode
usage:
>>> text = InputText()
>>> s = 'a statement'
>>> text.put(s) # add lines of text
>>> text.get(s) # get complete code block, ready for Compiler.eval
the following translations are made from valid Larch to valid Python:
1. Block Indentation:
larch blocks may end with one of the tokens:
('end', 'endXXX', '#end', '#endXXX')
for an 'XXX' block (one of 'if', 'for', 'while', 'try', and 'def')
where the token starts a line of text, followed by whitespace or
a comment starting with a '#'.
2. Defined Variables:
larch uses 'def VarName = Expression' for a Defined Variable
(the expression is stored and accessing the VarName causes the
expression to be re-evaluated)
The tokens are found with "def" "varname" "=" "text of expression"
and then translated into
_builtin._definevar_("varname", "text of expression")
3. Command Syntax:
larch allows lines of code which execute a function without return
value to be viewed as "commands" and written without parentheses,
so that the function call
function(x, y)
can be written as
function x, y
4. Print:
as a special case of rule 3, and because Python is going through
changes in the syntax of "print", print statements are translated
from either "print(list)" or "print list" to
_builtin._print(list)
"""
indent = ' '*4
ps1 = ' >'
ps2 = '....>'
block_friends = {'if': ('else', 'elif'),
'for': ('else'),
'def': (),
'try': ('else', 'except', 'finally'),
'while': ('else') }
parens = {'{':'}', '(':')', '[':']'}
fcn_defvar = "_builtin.definevar"
fcn_print = "_builtin._print_"
nonkey = 'NONKEY'
empty_frame = (None, None, -1)
    def __init__(self, prompt=None, interactive=True, input=None,
                 filename=None, _larch=None):
        """Set up an empty input buffer.

        prompt: initial prompt string (defaults to class attr ps1)
        interactive: when True, incomplete statements pull more lines
            from the input callable
        input: callable returning the next line of text (interactive
            mode only; falls back to self.__defaultInput, defined
            elsewhere in this class)
        filename: name used in emitted (text, filename, lineno) frames
        _larch: interpreter used for error reporting, or None to raise
        """
        self.prompt = prompt or self.ps1
        self.input = None
        self._larch = _larch
        self.interactive = interactive
        self.lineno = 0
        self.filename = filename or '<stdin>'
        if interactive:
            self.input = input or self.__defaultInput
        # two-stage FIFO: new complete blocks are appended to _fifo[1],
        # consumed from _fifo[0] (see get())
        self._fifo = [[], []]
        # lines of the block currently being assembled
        self.block = []
        # stack of currently-open block keywords ('if', 'for', ...)
        self.keys = []
        self.current = None
        self.endkeys = ()
        self.friends = ()
        # parser state for multi-line statements/strings
        self.delims = []
        self.eos = ''
        self.in_string = False
        self.input_buff = []
        self.input_complete = True
def readfile(self, fname):
fh = open(fname, 'r')
self.put(fh.read(), filename=fname, lineno=0)
fh.close()
    def put(self, text, filename=None, lineno=None ):
        """add line(s) of input code text

        Splits ``text`` on newlines, appends each line (with its
        completeness state) to self.input_buff, and -- in interactive
        mode -- keeps pulling lines from self.input until the statement
        is complete.  Complete input is then converted and queued.
        Returns the final completeness flag.
        """
        fname = filename or self.filename or '<stdin>'
        if lineno is not None:
            self.lineno = lineno

        # closure: record one raw line plus parser state for convert()
        def addTextInput(thisline, fname):
            self.input_complete = self.__isComplete(thisline)
            self.input_buff.append((thisline, self.input_complete,
                                    self.eos, fname, self.lineno))
            self.lineno += 1

        # consume the lines in original order via reverse + pop
        text = text.split('\n')
        text.reverse()
        while len(text) > 0:
            addTextInput(text.pop(), fname)
        if self.interactive:
            # continuation prompt while the statement is still open
            self.prompt = self.ps2
            while not self.input_complete:
                t = self.input()
                t0 = t.strip()
                # blank interactive lines are ignored
                if len(t0) > 0:
                    addTextInput(t, fname)
        if self.input_complete:
            self.prompt = self.ps1
            nkeys, nblock = self.convert()
        return self.input_complete
    def get(self):
        """get compile-able block of python code

        Pops the next (text, filename, lineno) frame from the two-stage
        FIFO; when the consumer stage (_fifo[0]) is empty, the producer
        stage (_fifo[1]) is swapped in and reversed so pop() yields
        frames in insertion order.  Returns self.empty_frame when
        nothing is available.  Relies on __len__ (defined elsewhere in
        this class) for the emptiness test.
        """
        if len(self) > 0:
            if not self._fifo[0]:
                self._fifo.reverse()
                self._fifo[0].reverse()
            try:
                return self._fifo[0].pop()
            except IndexError:
                # len(self) promised data but both stages were empty
                msg = 'InputText out of complete text'
                if self._larch is None:
                    raise IndexError(msg)
                else:
                    self._larch.raise_exception(None, exc=IndexError, msg=msg)
        return self.empty_frame
def convert(self):
"""
Convert input buff (in self.input_buff) to valid python code
and stores this (text, filename, lineno) into _fifo buffer
"""
indent_level = 0
oneliner = False
startkeys = self.block_friends.keys()
self.input_buff.reverse()
while self.input_buff:
text, complete, eos, fname, lineno = self.input_buff.pop()
long_text = eos in '"\''
sindent = self.indent*(indent_level+1)
while not complete:
tnext, complete, xeos, fname, lineno2 = self.input_buff.pop()
if long_text:
text = "%s\n%s" % (text, tnext)
else:
text = "%s\n %s%s" % (text, sindent, tnext)
text = text.strip().rstrip()
txt = text.replace('(', ' (').replace(')', ' )')
if text.startswith('"') or text.startswith("'"):
delim = text[0]
if text[0:3] == text[0]*3:
delim = text[0:3]
while not find_delims(text, delim=delim)[0]:
tnext, complete, eos, fname, lineno2 = self.input_buff.pop()
text = "%s\n %s%s" % (text, sindent, tnext)
# note here the trick of replacing '#end' with '&end' so
# that it is not removed by strip_comments. then below,
# we look for '&end' as an end-of-block token.
if txt.startswith('#end'):
txt = '&end%s' % txt[4:]
txt = strip_comments(txt)
# thiskey, word2 = (txt.split() + [''])[0:2]
words = txt.split(' ', 1)
thiskey = words.pop(0).strip()
word2 = ''
if len(words) > 0:
word2 = words[0].replace(',', ' ').split()[0]
if thiskey.endswith(':'):
thiskey = thiskey[:-1]
prefix, oneliner = '', False
if thiskey in startkeys:
# check for defined variables
if thiskey == 'def':
dname, dexpr = get_DefVar(text)
if dname is not None and dexpr is not None:
if "'" in dexpr:
dexpr.replace("'", "\'")
text = "%s('%s', '%s')" % (self.fcn_defvar,
dname, dexpr)
thiskey = self.nonkey
# note that we **re-test** here,
# as thiskey may have changed above for defined variables
if thiskey in startkeys:
if text.find(':') < 1:
msg = "%s statement needs a ':' at\n %s" % (thiskey,
text)
if self._larch is None:
raise SyntaxError(msg)
else:
self._larch.raise_exception(None, exc=SyntaxError, msg=msg, expr=text)
elif text.endswith(':'):
self.current = thiskey
self.keys.append(thiskey)
self.friends = self.block_friends[thiskey]
self.endkeys = ('end', 'end%s'% thiskey,
'&end', '&end%s'% thiskey)
else: # one-liner form
oneliner = True
elif thiskey in self.endkeys: # end of block
if not thiskey.startswith('&'):
prefix = '#'
if len(self.keys) != 0:
self.current = None
self.friends = ()
self.keys.pop()
if len(self.keys)>0:
self.current = self.keys[-1]
self.friends = self.block_friends[self.current]
self.endkeys = ('end', 'end%s'%self.current,
'&end', '&end%s'%self.current)
elif not text.endswith(')') and self.__isCommand(thiskey, word2):
# handle 'command format', including 'print'
text = '%s(%s)' % (thiskey, text[len(thiskey):].strip())
indent_level = len(self.keys)
if (not oneliner and len(thiskey)>0 and
(thiskey == self.current or thiskey in self.friends)):
indent_level = indent_level - 1
if indent_level < 0:
msg = 'impossible indent level!'
if self._larch is None:
raise SyntaxError(msg)
else:
self._larch.raise_exception(None, exc=SyntaxError, msg=msg)
self.block.append('%s%s%s' % (self.indent*indent_level,
prefix, text))
if len(self.keys) == 0:
outtext = '\n'.join(self.block)
if '\n' in outtext: outtext = outtext + '\n'
self._fifo[1].append((outtext, fname,
1+lineno-len(self.block)))
self.block = []
return len(self.keys), len(self.block)
    def clear(self):
        "clear the input: discard both stages of the block FIFO"
        self._fifo = [[], []]
def __isCommand(self, key, word2):
""" decide if a keyword and next word are of the form
'command arg, ...'
which will get translated to
'command(arg, ...)'
to allow 'command syntax'
"""
# this could be in one long test, but we simplify:
# first test key:
if (not isValidName(key) or
key in self.friends or
key.startswith('#') or
len(key) < 1 or len(word2) < 1):
return False
# next test word2
return (isValidName(word2) or isNumber(word2) or
isLiteralStr(word2) )
def __isComplete(self, text):
"""returns whether input text is a complete:
that is: does not contains unclosed parens or quotes
and does not end with a backslash
stores state information from previous textline in
self.eos = char(s) to look for 'end of string' ("" == string complete)
self.delims = current list of closing delims being waited for
"""
parens = self.parens
opens = ''.join(parens.keys())
closes = ''.join(parens.values())
quotes, bslash = '\'"', '\\'
prev_char = ''
# txt = strip_comments(text)
txt = text
| |
"""
This is the core file in the `gradio` package, and defines the Interface class, including methods for constructing the
interface using the input and output types.
"""
import copy
import csv
import getpass
import inspect
import markdown2
import numpy as np
import os
import pkg_resources
import requests
import random
import sys
import time
import warnings
import webbrowser
import weakref
from gradio import networking, strings, utils, encryptor, queue
from gradio.inputs import get_input_instance
from gradio.outputs import get_output_instance
from gradio.interpretation import quantify_difference_in_label, get_regression_or_classification_value
from gradio.external import load_interface, load_from_pipeline
class Interface:
"""
Interfaces are created with Gradio by constructing a `gradio.Interface()` object or by calling `gradio.Interface.load()`.
"""
instances = weakref.WeakSet() # stores references to all currently existing Interface instances
@classmethod
def get_instances(cls):
"""
:return: list of all current instances.
"""
return list(Interface.instances)
@classmethod
def load(cls, name, src=None, api_key=None, alias=None, **kwargs):
"""
Class method to construct an Interface from an external source repository, such as huggingface.
Parameters:
name (str): the name of the model (e.g. "gpt2"), can include the `src` as prefix (e.g. "huggingface/gpt2")
src (str): the source of the model: `huggingface` or `gradio` (or empty if source is provided as a prefix in `name`)
api_key (str): optional api key for use with Hugging Face Model Hub
alias (str): optional, used as the name of the loaded model instead of the default name
Returns:
(gradio.Interface): a Gradio Interface object for the given model
"""
interface_info = load_interface(name, src, api_key, alias)
# create a dictionary of kwargs without overwriting the original interface_info dict because it is mutable
# and that can cause some issues since the internal prediction function may rely on the original interface_info dict
kwargs = dict(interface_info, **kwargs)
interface = cls(**kwargs)
interface.api_mode = True # set api mode to true so that the interface will not preprocess/postprocess
return interface
@classmethod
def from_pipeline(cls, pipeline, **kwargs):
"""
Class method to construct an Interface from a Hugging Face transformers.Pipeline.
pipeline (transformers.Pipeline):
Returns:
(gradio.Interface): a Gradio Interface object from the given Pipeline
"""
interface_info = load_from_pipeline(pipeline)
kwargs = dict(interface_info, **kwargs)
interface = cls(**kwargs)
return interface
def __init__(self, fn, inputs=None, outputs=None, verbose=None, examples=None,
examples_per_page=10, live=False, layout="unaligned", show_input=True, show_output=True,
capture_session=None, interpretation=None, num_shap=2.0, theme=None, repeat_outputs_per_model=True,
title=None, description=None, article=None, thumbnail=None,
css=None, height=500, width=900, allow_screenshot=True, allow_flagging=None, flagging_options=None,
encrypt=False, show_tips=None, flagging_dir="flagged", analytics_enabled=None, enable_queue=None, api_mode=None):
"""
Parameters:
fn (Callable): the function to wrap an interface around.
inputs (Union[str, List[Union[str, InputComponent]]]): a single Gradio input component, or list of Gradio input components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of input components should match the number of parameters in fn.
outputs (Union[str, List[Union[str, OutputComponent]]]): a single Gradio output component, or list of Gradio output components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of output components should match the number of values returned by fn.
verbose (bool): DEPRECATED. Whether to print detailed information during launch.
examples (Union[List[List[Any]], str]): sample inputs for the function; if provided, appears below the UI components and can be used to populate the interface. Should be nested list, in which the outer list consists of samples and each inner list consists of an input corresponding to each input component. A string path to a directory of examples can also be provided. If there are multiple input components and a directory is provided, a log.csv file must be present in the directory to link corresponding inputs.
examples_per_page (int): If examples are provided, how many to display per page.
live (bool): whether the interface should automatically reload on change.
layout (str): Layout of input and output panels. "horizontal" arranges them as two columns of equal height, "unaligned" arranges them as two columns of unequal height, and "vertical" arranges them vertically.
capture_session (bool): DEPRECATED. If True, captures the default graph and session (needed for Tensorflow 1.x)
interpretation (Union[Callable, str]): function that provides interpretation explaining prediction output. Pass "default" to use simple built-in interpreter, "shap" to use a built-in shapley-based interpreter, or your own custom interpretation function.
num_shap (float): a multiplier that determines how many examples are computed for shap-based interpretation. Increasing this value will increase shap runtime, but improve results. Only applies if interpretation is "shap".
title (str): a title for the interface; if provided, appears above the input and output components.
description (str): a description for the interface; if provided, appears above the input and output components.
article (str): an expanded article explaining the interface; if provided, appears below the input and output components. Accepts Markdown and HTML content.
thumbnail (str): path to image or src to use as display picture for models listed in gradio.app/hub
theme (str): Theme to use - one of "default", "huggingface", "grass", "peach". Add "dark" prefix, e.g. "darkpeach" or "darkdefault" for darktheme.
css (str): custom css or path to custom css file to use with interface.
allow_screenshot (bool): if False, users will not see a button to take a screenshot of the interface.
allow_flagging (bool): if False, users will not see a button to flag an input and output.
flagging_options (List[str]): if not None, provides options a user must select when flagging.
encrypt (bool): If True, flagged data will be encrypted by key provided by creator at launch
flagging_dir (str): what to name the dir where flagged data is stored.
show_tips (bool): DEPRECATED. if True, will occasionally show tips about new Gradio features
enable_queue (bool): DEPRECATED. if True, inference requests will be served through a queue instead of with parallel threads. Required for longer inference times (> 1min) to prevent timeout.
api_mode (bool): DEPRECATED. If True, will skip preprocessing steps when the Interface is called() as a function (should remain False unless the Interface is loaded from an external repo)
"""
if not isinstance(fn, list):
fn = [fn]
if not isinstance(inputs, list):
inputs = [inputs]
if not isinstance(outputs, list):
outputs = [outputs]
self.input_components = [get_input_instance(i) for i in inputs]
self.output_components = [get_output_instance(o) for o in outputs]
if repeat_outputs_per_model:
self.output_components *= len(fn)
if interpretation is None or isinstance(interpretation, list) or callable(interpretation):
self.interpretation = interpretation
elif isinstance(interpretation, str):
self.interpretation = [interpretation.lower() for _ in self.input_components]
else:
raise ValueError("Invalid value for parameter: interpretation")
self.predict = fn
self.predict_durations = [[0, 0]] * len(fn)
self.function_names = [func.__name__ for func in fn]
self.__name__ = ", ".join(self.function_names)
if verbose is not None:
warnings.warn("The `verbose` parameter in the `Interface` is deprecated and has no effect.")
self.status = "OFF"
self.live = live
self.layout = layout
self.show_input = show_input
self.show_output = show_output
self.flag_hash = random.getrandbits(32)
self.capture_session = capture_session
if capture_session is not None:
warnings.warn("The `capture_session` parameter in the `Interface` will be deprecated in the near future.")
self.session = None
self.title = title
self.description = description
if article is not None:
article = utils.readme_to_html(article)
article = markdown2.markdown(
article, extras=["fenced-code-blocks"])
self.article = article
self.thumbnail = thumbnail
theme = theme if theme is not None else os.getenv("GRADIO_THEME", "default")
if theme not in ("default", "huggingface", "grass", "peach", "darkdefault", "darkhuggingface", "darkgrass", "darkpeach"):
raise ValueError("Invalid theme name.")
self.theme = theme
self.height = height
self.width = width
if css is not None and os.path.exists(css):
with open(css) as css_file:
self.css = css_file.read()
else:
self.css = css
if examples is None or isinstance(examples, str) or (isinstance(examples, list) and (len(examples) == 0 or isinstance(examples[0], list))):
self.examples = examples
elif isinstance(examples, list) and len(self.input_components) == 1: # If there is only one input component, examples can be provided as a regular list instead of a list of lists
self.examples = [[e] for e in examples]
else:
raise ValueError(
"Examples argument must either be a directory or a nested list, where each sublist represents a set of inputs.")
self.num_shap = num_shap
self.examples_per_page = examples_per_page
self.simple_server = None
self.allow_screenshot = allow_screenshot
# For allow_flagging and analytics_enabled: (1) first check for parameter, (2) check for environment variable, (3) default to True
self.allow_flagging = allow_flagging if allow_flagging is not None else os.getenv("GRADIO_ALLOW_FLAGGING", "True")=="True"
self.analytics_enabled = analytics_enabled | |
<filename>src/app/services/plot_service.py
# MIT License
#
# Copyright (c) 2020 OdinLabs IO
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import itertools
from collections import defaultdict
from math import pi
from typing import Dict
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, FactorRange, Range1d, LinearAxis, PreText
from bokeh.palettes import Spectral11
from bokeh.plotting import figure
from bokeh.transform import cumsum
from app.model import is_valid_aggregate, is_valid_dimension
from app.model.model import CHART_PARAMETERS_AXIS, is_valid_filter
TOOLS = "hover,crosshair,pan,wheel_zoom,zoom_in,zoom_out,box_zoom,undo,redo,reset,tap,save,box_select,poly_select,lasso_select,"
PALETTE = Spectral11
ALPHA = 0.8
class PlotService:
def __init__(self):
self._analytics_service = None
self._dashboard_service = None
self._axis_order = defaultdict(lambda: 0, {})
def init_app(self, analytics_service, dashboard_service):
self._analytics_service = analytics_service
self._dashboard_service = dashboard_service
self._axis_order = defaultdict(lambda: len(CHART_PARAMETERS_AXIS),
{axe: i for i, axe in enumerate(CHART_PARAMETERS_AXIS, 0)})
def _axis_index(self, axis, sort_by):
axis_order = self._axis_order
axis = sorted([(axe, axis_order[axe[sort_by]]) for axe in axis], key=lambda x: x[1])
return [axe[0] for axe in axis]
def _generate_value_tooltips(self, fields, fields_legend):
return [(legend, "@" + field) for field, legend in zip(fields, fields_legend)]
def _color(self, values):
return self._color_def(values, color_dict=dict())
def _color_def(self, values, color_dict):
color_palette = itertools.cycle(PALETTE)
colors = []
for c in values:
color = color_dict.get(c)
if not color:
color_dict[c] = next(color_palette)
colors.append(color_dict[c])
return colors
    def _bar_chart(self, labels: Dict[str, str], df):
        """Render an aggregated DataFrame as a (possibly stacked) bar chart.

        Assumes `df` is a pandas-style DataFrame whose index levels are the
        chart dimensions and whose columns are the aggregates (the
        unstack()/index usage below relies on this). Layout depends on the
        number of dimensions:
          * 1: plain vbar of the first aggregate; further aggregates overlay
               as lines, each on its own extra y-range.
          * 2: bars stacked by the last index level.
          * 3-4: grouped categories (FactorRange) stacked by the last level.
          * otherwise: a PreText widget explaining the limitation.

        :param labels: dimension name -> display label for legend titles.
        :param df: aggregated data (dimensions in index, aggregates as columns).
        :return: a bokeh figure, or PreText for unsupported dimension counts.
        """
        dimensions = df.index.names
        aggregates = df.columns
        if len(dimensions) == 1:
            aggr_0 = aggregates[0]
            # normal vbar and line for other aggregates
            factors = [factor for factor in df.index.values]
            data = dict({'x': factors, 'aggr_0': [v for v in df[aggr_0].values]})
            data['color'] = self._color(factors)
            tooltips_var = []
            tooltips_label = []
            extra_y_ranges = {}
            if len(aggregates) > 1:
                # Each secondary aggregate gets its own y-range (padded by 10
                # at both ends) so lines on different scales stay readable.
                for i in range(1, len(aggregates)):
                    aggr_n = aggregates[i]
                    aggr_n_name = 'aggr_' + str(i)
                    tooltips_var.append(aggr_n)
                    tooltips_label.append(aggr_n_name)
                    data[aggr_n_name] = df[aggr_n]
                    extra_y_ranges[aggr_n_name] = Range1d(start=df[aggr_n].min() - 10, end=df[aggr_n].max() + 10)
            p = figure(x_range=factors, tools=TOOLS,
                       tooltips=[('Value', "@x: @aggr_0")] + self._generate_value_tooltips(tooltips_label,
                                                                                          tooltips_var))
            source = ColumnDataSource(data=data)
            p.vbar(x='x', top='aggr_0', source=source, width=0.9, alpha=ALPHA, legend_field='x',
                   fill_color='color')
            if len(aggregates) > 1:
                # One line per secondary aggregate; the axis tick labels are
                # coloured to match the line so the pairing is visible.
                color_palette = itertools.cycle(PALETTE)
                p.extra_y_ranges = extra_y_ranges
                for i in range(1, len(aggregates)):
                    aggr_n = aggregates[i]
                    aggr_n_name = 'aggr_' + str(i)
                    color = next(color_palette)
                    p.line(x='x', y=aggr_n_name, color=color, y_range_name=aggr_n_name,
                           source=source)
                    p.add_layout(LinearAxis(y_range_name=aggr_n_name, axis_label=aggr_n), 'left')
                    p.yaxis[i].major_label_text_color = color
            p.yaxis[0].axis_label = aggr_0
            p.legend.title = labels.get(dimensions[0], dimensions[0])
            p.add_layout(p.legend[0], 'right')
            p.xaxis.major_label_orientation = 1
            p.xgrid.grid_line_color = None
            return p
        elif len(dimensions) == 2:
            # stacked bar chart
            aggr_0 = aggregates[0]
            unstacked = df.unstack(-1)
            factors = [factor for factor in unstacked.index]
            data = dict({'x': factors})
            aggr_column = unstacked[aggr_0]
            unstacked_values = [v for v in aggr_column.columns]
            color = []
            colors = itertools.cycle(PALETTE)
            for memb in unstacked_values:
                # NaN -> 0 so missing (x, member) combinations don't break the stack.
                data[memb] = [v for v in aggr_column[memb].fillna(0).values]
                color.append(next(colors))
            p = figure(x_range=factors, tools=TOOLS, tooltips="$name @x: @$name")
            source = ColumnDataSource(data=data)
            p.vbar_stack(unstacked_values, x='x', source=source, color=color, width=0.9, alpha=ALPHA,
                         legend_label=unstacked_values)
            p.legend.title = labels.get(dimensions[-1], dimensions[-1])
            p.add_layout(p.legend[0], 'right')
            p.yaxis.axis_label = aggr_0
            p.xaxis.major_label_orientation = 1
            p.xgrid.grid_line_color = None
            return p
        elif len(dimensions) == 3 or len(dimensions) == 4:
            # group by n-1 first and stack by last dimension
            aggr_0 = aggregates[0]
            unstacked = df.unstack(-1)
            factors = [factor for factor in unstacked.index]
            data = dict({'x': factors})
            aggr_column = unstacked[aggr_0]
            unstacked_values = [v for v in aggr_column.columns]
            color = []
            colors = itertools.cycle(PALETTE)
            for memb in unstacked_values:
                data[memb] = [v for v in aggr_column[memb].fillna(0).values]
                color.append(next(colors))
            source = ColumnDataSource(data=data)
            # FactorRange renders the remaining index levels as grouped categories.
            p = figure(x_range=FactorRange(*factors), tools=TOOLS, tooltips="$name @x: @$name")
            p.vbar_stack(unstacked_values, x='x', source=source, color=color, width=0.9, alpha=ALPHA,
                         legend_label=unstacked_values)
            p.legend.title = labels.get(dimensions[-1], dimensions[-1])
            p.add_layout(p.legend[0], 'right')
            p.yaxis.axis_label = aggr_0
            p.xaxis.major_label_orientation = 1
            p.xgrid.grid_line_color = None
            return p
        else:
            # More than 4 dimensions is not representable in this layout.
            p = PreText(text="""Bar Chart accepts 1 to 4 dimensions.""", width=500, height=100)
            return p
def _pie_chart(self, labels: Dict[str, str], df):
dimensions = df.index.names
aggregates = df.columns
if len(dimensions) == 1 and len(aggregates) != 0:
aggr_0 = aggregates[0]
category = dimensions[0]
data = dict()
df = df.reset_index()
aggr_0_sum = df[aggr_0].sum()
data['value'] = df[aggr_0]
data['angle'] = df[aggr_0] / aggr_0_sum * 2 * pi
data['percentage'] = df[aggr_0] / aggr_0_sum * 100
data['color'] = self._color(df[category])
data['category'] = df[category]
data['legend'] = [l + ": {:.2f} %".format(v) for l, v in zip(data['category'], data['percentage'])]
source = ColumnDataSource(data=data)
p = figure(tools=TOOLS, tooltips="@category:@value / @percentage")
p.wedge(x=0, y=0, radius=0.8, start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),
line_color="white", fill_color='color', legend_field='legend', source=source)
p.legend.title = labels.get(category, category)
p.add_layout(p.legend[0], 'right')
p.xaxis.major_label_orientation = 1
p.xgrid.grid_line_color = None
p.toolbar.autohide = True
return p
else:
p = PreText(text="""Pie chart accepts at most one aggregate and a dimension.""", width=500, height=100)
return p
def _scatter_chart(self, labels: Dict[str, str], df):
dimensions = df.index.names
aggregates = df.columns
if len(aggregates) == 1:
aggr_0 = aggregates[0]
if len(dimensions) == 1:
factors = [factor for factor in df.index]
p = figure(x_range=factors, tools=TOOLS, tooltips="@x: @aggr_0")
category = dimensions[0]
data = dict({'x': factors, 'aggr_0': [v for v in df[aggr_0].values]})
p.line(x='x', y='aggr_0', source=ColumnDataSource(data=data))
p.xaxis.axis_label = labels.get(category, category)
p.yaxis.axis_label = aggr_0
p.xaxis.major_label_orientation = 1
p.xgrid.grid_line_color = None
p.toolbar.autohide = True
return p
elif len(dimensions) == 2 or len(dimensions) == 3 or len(dimensions) == 4:
# unstack first
unstacked = df.stack().unstack(-2).unstack(-1)
p = figure(x_range=FactorRange(*unstacked.index), tools=TOOLS,
tooltips=self._generate_value_tooltips(['x', 'aggr_0', 'category'],
["", aggr_0, labels.get(dimensions[-1])]))
color_dict = dict()
stacked_columns = list(set([i[0] for i in unstacked.columns]))
self._color_def(stacked_columns, color_dict=color_dict)
aggr_0_values = []
factors = []
categories = []
for cat in stacked_columns:
cat_column = unstacked[cat].dropna()
aggr_0_values += cat_column[aggr_0].values.tolist()
factors += [i for i in cat_column.index]
categories += [cat for i in range(len(cat_column))]
colors = [color_dict[cat_val] for cat_val in categories]
data = dict({'x': factors, 'aggr_0': aggr_0_values, 'category': categories,
'color': colors})
p.scatter(x='x', y='aggr_0', size=10,
fill_color='color',
fill_alpha=ALPHA,
source=ColumnDataSource(data),
legend_group='category')
p.legend.title = labels.get(dimensions[-1], dimensions[-1])
p.add_layout(p.legend[0], 'right')
p.yaxis.axis_label = aggr_0
p.xaxis.axis_label = "/".join([labels.get(dim, dim) for dim in dimensions[:-1]])
p.xaxis.major_label_orientation = 1
p.xgrid.grid_line_color = None
p.toolbar.autohide = True
return p
else:
p = PreText(text="""Scatter Plot 1 aggregates accepts at most 4 dimensions.""", width=500, height=100)
return p
elif len(aggregates) == 2:
aggr_0 = aggregates[0]
aggr_1 = aggregates[1]
if len(dimensions) == 1:
df = df.reset_index()
category = dimensions[0]
data = dict()
data['aggr_0'] = df[aggr_0]
data['aggr_1'] = df[aggr_1]
data['category'] = df[category]
data['color'] = self._color(df[category])
p = figure(tools=TOOLS, tooltips=self._generate_value_tooltips(['aggr_0', 'aggr_1', 'category'],
[aggr_0, aggr_1,
labels.get(category, category)]))
p.scatter(x='aggr_0', y='aggr_1',
source=ColumnDataSource(data=data),
fill_color='color',
fill_alpha=ALPHA,
legend_group='category')
p.legend.title = labels.get(category, category)
p.add_layout(p.legend[0], 'right')
p.xaxis.axis_label = aggr_0
p.yaxis.axis_label = aggr_1
p.add_layout(p.legend[0], 'right')
p.xaxis.major_label_orientation = 1
p.xgrid.grid_line_color = None
p.toolbar.autohide = True
return p
elif len(dimensions) == 2 or len(dimensions) == 3 or len(dimensions) == 4:
unstacked = df.stack().unstack(-2).unstack(-1)
p = figure(x_range=FactorRange(*unstacked.index), tools=TOOLS,
tooltips=self._generate_value_tooltips(['x', 'aggr_0', 'aggr_1', 'category'],
["", aggr_0, aggr_1, labels.get(dimensions[-1])]))
color_dict = dict()
stacked_columns = list(set([i[0] for i in unstacked.columns]))
self._color_def(stacked_columns, color_dict=color_dict)
aggr_0_values = []
aggr_1_values = []
factors = []
categories = []
for cat in stacked_columns:
cat_column = unstacked[cat].dropna()
aggr_0_values += cat_column[aggr_0].values.tolist()
aggr_1_values += cat_column[aggr_1].values.tolist()
factors += [i for i in cat_column.index]
categories += [cat for i in range(len(cat_column))]
colors = [color_dict[cat_val] for cat_val in categories]
data = dict({'x': factors, 'aggr_0': aggr_0_values, 'aggr_1': aggr_1_values, 'category': categories,
'color': colors})
p.scatter(x='x', y='aggr_0', size='aggr_1',
fill_color='color',
fill_alpha=ALPHA,
source=ColumnDataSource(data),
legend_group='category')
p.legend.title = labels.get(dimensions[-1], dimensions[-1])
p.add_layout(p.legend[0], 'right')
p.yaxis.axis_label = aggr_0
p.xaxis.axis_label = "/".join([labels.get(dim, dim) for dim in dimensions[:-1]])
p.xaxis.major_label_orientation = 1
p.xgrid.grid_line_color = None
p.toolbar.autohide = True
return p
else:
# get first dimension (more than one dimension => duplicates in plot)
df = df.reset_index()
category = dimensions[0]
data = dict()
data['aggr_0'] = df[aggr_0]
data['aggr_1'] = df[aggr_1]
data['category'] = df[category]
data['color'] = self._color(df[category])
p = figure(tools=TOOLS,
tooltips=self._generate_value_tooltips(['aggr_0', 'aggr_1', 'category'],
[aggr_0, aggr_1, labels.get(category, category)]))
p.scatter(x='aggr_0', y='aggr_1',
source=ColumnDataSource(data=data),
fill_color='color',
fill_alpha=ALPHA, legend_group='category')
p.legend.title = labels.get(category, category)
p.xaxis.axis_label = aggr_0
| |
<filename>catkit/hardware/boston/BostonDmController.py
import os
import sys
import threading
import numpy as np
from catkit.interfaces.DeformableMirrorController import DeformableMirrorController
from catkit.hardware.boston.DmCommand import DmCommand, convert_dm_image_to_command
from catkit.multiprocessing import SharedMemoryManager
# BMC is Boston's library and it only works on windows.
try:
sdk_path = os.environ.get('CATKIT_BOSTON_SDK_PATH')
if sdk_path is not None:
sys.path.append(sdk_path)
import bmc
else:
bmc = None
except ImportError:
bmc = None
"""Interface for Boston Micro-machines deformable mirror controller that can control 2 DMs.
It does so by interpreting the first half of the command for DM1, and the second for DM2.
This controller cannot control the two DMs independently, it will always send a command to both."""
class BostonDmController(DeformableMirrorController):
instrument_lib = bmc
def _clear_state(self):
self.dm1_command = None
self.dm2_command = None
self.dm1_command_object = None
self.dm2_command_object = None
self.channels = {}
def initialize(self, serial_number, command_length, dac_bit_width):
""" Initialize dm manufacturer specific object - this does not, nor should it, open a connection."""
self.log.info("Opening DM connection")
# Create class attributes for storing individual DM commands.
self._clear_state()
self.serial_num = serial_number
self.command_length = command_length
self.dac_bit_width = dac_bit_width
self.lock = threading.Lock()
def send_data(self, data):
# The DM controller expects the command to be unitless (normalized Volts): 0.0 - 1.0, where 1.0 := max_volts
data_min = np.min(data)
data_max = np.max(data)
if data_min < 0 or data_max > 1:
self.log.warning(f"DM command out of range and will be clipped by hardware. min:{data_min}, max:{data_max}")
status = self.instrument.send_data(data)
if status != self.instrument_lib.NO_ERR:
raise Exception("{}: Failed to send data - {}".format(self.config_id,
self.instrument.error_string(status)))
    def _open(self):
        """Open the connection to the DM controller and zero both DMs.

        Validates that the configured command_length matches the actuator
        count reported by the hardware, then sends an all-zero command.
        Returns the opened SDK device object; raises on connection failure,
        configuration mismatch, or a failed zeroing command.
        """
        self._clear_state()
        dm = self.instrument_lib.BmcDm()
        status = dm.open_dm(self.serial_num)
        if status != self.instrument_lib.NO_ERR:
            raise Exception("{}: Failed to connect - {}.".format(self.config_id,
                                                                 dm.error_string(status)))
        # If we get this far, a connection has been successfully opened.
        # Set self.instrument so that we can close if anything here subsequently fails.
        self.instrument = dm
        hardware_command_length = dm.num_actuators()
        if self.command_length != hardware_command_length:
            raise ValueError("config.ini error - '{}':'command_length' = {} but hardware gives {}.".format(self.config_id,
                                                                                                           self.command_length,
                                                                                                           hardware_command_length))
        # Initialize the DM to zeros.
        zeros = np.zeros(self.command_length, dtype=float)
        try:
            self.send_data(zeros)  # TODO: call self.apply_shape_to_both()
        except Exception:
            # Zeroing failed: don't advertise stale command state.
            self._clear_state()
            raise
        else:
            # Store the current dm_command values in class attributes.
            self.dm1_command = zeros
            self.dm2_command = zeros.copy()  # dm 1 & 2 should NOT be using the same memory
        self.dm_controller = self.instrument  # For legacy API purposes
        return self.instrument
    def _close(self):
        """Close the DM connection safely.

        Zeros the DM first (best effort), then closes the SDK handle; local
        command state is cleared no matter which step fails.
        """
        try:
            try:
                self.log.info("Closing DM connection")
                # FIXME: I'm pretty sure the new SDK does this under the hood.
                # Set the DM to zeros.
                zeros = np.zeros(self.command_length, dtype=float)
                self.send_data(zeros)
            finally:
                # Always release the hardware handle, even if zeroing failed.
                self.instrument.close_dm()
        finally:
            # And always drop local state, even if close_dm() raised.
            self.instrument = None
            self._clear_state()
    def apply_shape_to_both(self, dm1_shape, dm2_shape,
                            flat_map=True,
                            bias=False,
                            as_voltage_percentage=False,
                            as_volts=False,
                            sin_specification=None,
                            output_path=None,
                            channel=None,
                            do_logging=True):
        """ Combines both commands and sends to the controller to produce a shape on each DM.
        The concept of channels is optional. If no channel is supplied, the DM shapes are applied directly
        onto the DM. However, if channels are used, each channel acts as an independent contribution to the
        total shape that is on the DM. Each contribution will be updated (= replaced) by calling apply_shape_to_both()
        with the name of that channel. In this way, the current contribution from each channel can be read out
        using the BOSTON_DM.channels[channel_name] attribute.
        While individual contributions can be added as delta contributions to a running total, this approach was
        not taken for code clarity. This comes at the cost of a few microseconds of runtime for each sent DM command.
        Note: if channels are used, the dm shapes are required to be numpy arrays. In this case, DmCommand objects are
        not allowed. A TypeError will be thrown if this is the case.
        :param dm<1|2>_shape: catkit.hardware.boston.DmCommand.DmCommand or numpy array of the following shapes: 34x34, 1x952,
                        1x2048, 1x4096. Interpreted by default as the desired DM surface height in units of meters, but
                        see parameters as_volts and as_voltage_percentage. When using channels, this should be a numpy array,
                        and they should have the same shape for each channel.
        :param flat_map: If true, add flat map correction to the data before outputting commands
        :param bias: If true, add bias to the data before outputting commands
        :param as_voltage_percentage: Interpret the data as a voltage percentage instead of meters; Deprecated.
        :param as_volts: If true, interpret the data as volts instead of meters
        :param sin_specification: Add this sine to the data
        :param output_path: str, Path to save commands to if provided. Default `None` := don't save.
        :param channel: str or None, the DM channel on which to write this shape. Default `None` := set the entire shape.
        :param do_logging: boolean. Whether to emit a logging message. In fast (>100Hz) loops, the logs can be overwhelmed by
                           log messages from the DM. Setting this to False doesn't emit a log message. Default: True.
        """
        # Serialize with any other thread applying shapes to this controller.
        with self.lock:
            if do_logging:
                if channel is None:
                    self.log.info("Applying shape to both DMs")
                else:
                    self.log.info(f'Applying shape to both DMs in channel {channel}.')
            if channel is None:
                # A direct (channel-less) write invalidates all channel
                # bookkeeping, so warn and reset it.
                if self.channels:
                    self.log.warn('A channel was not supplied while channels were used previously. ' +
                                  'All channels will be reset. This may not be what you want.')
                    self.channels = {}
            else:
                if isinstance(dm1_shape, DmCommand) or isinstance(dm2_shape, DmCommand):
                    # DmCommand objects cannot be added together, yet.
                    raise TypeError('DM shapes cannot be DmCommands when using channels.')
                # Check if dm{1,2}_shape is 2D, then convert to 1D.
                # This standardizes the shape stored in the channels attribute.
                if dm1_shape.ndim == 2:
                    dm1_shape = convert_dm_image_to_command(dm1_shape)
                if dm2_shape.ndim == 2:
                    dm2_shape = convert_dm_image_to_command(dm2_shape)
                self.channels[channel] = (dm1_shape, dm2_shape)
                # Add contributions for each channel, and use that as the dm command.
                dm1_shape = 0
                dm2_shape = 0
                for dm1, dm2 in self.channels.values():
                    dm1_shape += dm1
                    dm2_shape += dm2
            # Wrap raw arrays into DmCommand objects so the unit handling
            # (flat map, bias, volts, sine) is applied consistently.
            if not isinstance(dm1_shape, DmCommand):
                dm1_shape = DmCommand(data=dm1_shape,
                                      dm_num=1,
                                      flat_map=flat_map,
                                      bias=bias,
                                      as_voltage_percentage=as_voltage_percentage,
                                      as_volts=as_volts,
                                      sin_specification=sin_specification)
            if not isinstance(dm2_shape, DmCommand):
                dm2_shape = DmCommand(data=dm2_shape,
                                      dm_num=2,
                                      flat_map=flat_map,
                                      bias=bias,
                                      as_voltage_percentage=as_voltage_percentage,
                                      as_volts=as_volts,
                                      sin_specification=sin_specification)
            # Ensure that the correct dm_num is set.
            dm1_shape.dm_num = 1
            dm2_shape.dm_num = 2
            if output_path is not None:
                dm1_shape.export_fits(output_path)
                dm2_shape.export_fits(output_path)
            # Use DmCommand class to format the commands correctly (with zeros for other DM).
            dm1_command = dm1_shape.to_dm_command()
            dm2_command = dm2_shape.to_dm_command()
            # Add both arrays together (first half and second half) and send to DM.
            full_command = dm1_command + dm2_command
            try:
                self.send_data(full_command)
            except Exception:
                # We shouldn't guarantee the state of the DM.
                self._clear_state()
                raise
            else:
                # Update both dm_command class attributes.
                self.dm1_command = dm1_command
                self.dm2_command = dm2_command
                self.dm1_command_object = dm1_shape
                self.dm2_command_object = dm2_shape
def apply_shape(self, dm_shape, dm_num,
flat_map=True,
bias=False,
as_voltage_percentage=False,
as_volts=False,
sin_specification=None,
output_path=None):
""" Forms a command for a single DM, and re-sends the existing shape to other DM.
:param dm_shape: catkit.hardware.boston.DmCommand.DmCommand or numpy array of the following shapes: 34x34, 1x952,
1x2048, 1x4096. Interpreted by default as the desired DM surface height in units of meters, but
see parameters as_volts and as_voltage_percentage.
:param dm_num: Which DM to apply the shape to. Valid values are 1, 2.
:param flat_map: If true, add flat map correction to the data before outputting commands
:param bias: If true, add bias to the data before outputting commands
:param as_voltage_percentage: Interpret the data as a voltage percentage instead of meters; Deprecated.
:param as_volts: If true, interpret the data as volts instead of meters
:param sin_specification: Add this sine to the data
:param output_path: str, Path to save commands to if provided. Default `None` := don't save.
"""
self.log.info("Applying shape to DM " + str(dm_num))
if not isinstance(dm_shape, DmCommand):
dm_shape = DmCommand(data=dm_shape,
dm_num=dm_num,
flat_map=flat_map,
bias=bias,
as_voltage_percentage=as_voltage_percentage,
as_volts=as_volts,
sin_specification=sin_specification)
# Ensure the dm_num is correct.
dm_shape.dm_num = dm_num
if output_path is not None:
dm_shape.export_fits(output_path)
other_dm_command_object = self.dm2_command_object if dm_num == 1 else self.dm1_command_object
other_dm_command_object.export_fits(output_path)
# Use DmCommand class to format the single command correctly (with zeros for other DM).
dm_command = dm_shape.to_dm_command()
# Grab the other DM's currently applied shape.
other_dm_command = self.dm2_command if dm_num == 1 else self.dm1_command
# Add both arrays together (first half and second half) and send to DM.
full_command = dm_command + other_dm_command
try:
self.send_data(full_command)
except Exception:
# We shouldn't guarantee the state of the DM.
self._clear_state()
raise
else:
# Update the dm_command | |
<reponame>wenting-zhao/sgnmt
# -*- coding: utf-8 -*-
# coding=utf-8
# Copyright 2019 The SGNMT Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains predictors that deal wit the length of the
target sentence. The ``NBLengthPredictor`` assumes a negative binomial
distribution on the target sentence lengths, where the parameters r and
p are linear combinations of features extracted from the source
sentence. The ``WordCountPredictor`` adds the number of words as cost,
which can be used to prevent hypotheses from getting too short when
using a language model.
"""
import logging
import math
from scipy.special import logsumexp
from scipy.special import gammaln
from cam.sgnmt import utils
from cam.sgnmt.misc.trie import SimpleTrie
from cam.sgnmt.predictors.core import Predictor, UnboundedVocabularyPredictor
import numpy as np
NUM_FEATURES = 5
EPS_R = 0.1;
def load_external_lengths(path):
    """Loads a length distribution from a plain text file.

    Each line corresponds to one sentence and contains blank separated
    <length>:<score> pairs. A bare <length> token without a score is
    read with a score of 0.0.

    Args:
        path (string): Path to the length file.

    Returns:
        list of dicts mapping a length to its scores, one dict for each
        sentence.
    """
    def _parse_token(token):
        # "<length>:<score>" -> (int, float); bare "<length>" scores 0.0
        if ':' in token:
            length_str, score_str = token.split(':')
            return int(length_str), float(score_str)
        return int(token), 0.0

    with open(path) as f:
        return [dict(_parse_token(tok) for tok in line.strip().split())
                for line in f]
def load_external_ids(path):
    """Read a file containing one integer id per line.

    Args:
        path (string): Path to the id file.

    Returns:
        list of int ids in file order.
    """
    ids = []
    with open(path) as f:
        for line in f:
            ids.append(int(line.strip()))
    return ids
class NBLengthPredictor(Predictor):
    """This predictor assumes that target sentence lengths are
    distributed according to a negative binomial distribution with
    parameters r,p. r is linear in features, p is the logistic of a
    linear function over the features. Weights can be trained using
    the Matlab script ``estimate_length_model.m``

    Let w be the model_weights. All features are extracted from the
    src sentence::

        r = w0 * #char
        + w1 * #words
        + w2 * #punctuation
        + w3 * #char/#words
        + w4 * #punct/#words
        + w10

        p = logistic(w5 * #char
        + w6 * #words
        + w7 * #punctuation
        + w8 * #char/#words
        + w9 * #punct/#words
        + w11)

        target_length ~ NB(r,p)

    The biases w10 and w11 are optional.

    The predictor predicts EOS with NB(#consumed_words,r,p)
    """

    def __init__(self, text_file, model_weights, use_point_probs, offset = 0):
        """Creates a new target sentence length model predictor.

        Args:
            text_file (string): Path to the text file with the
                                unindexed source sentences, i.e. not
                                using word ids
            model_weights (list): Weights w0 to w11 of the length
                                  model. See class docstring for more
                                  information
            use_point_probs (bool): Use point estimates for EOS token,
                                    0.0 otherwise
            offset (int): Subtract this from hypothesis length before
                          applying the NB model
        """
        super(NBLengthPredictor, self).__init__()
        self.use_point_probs = use_point_probs
        self.offset = offset
        if len(model_weights) == 2*NUM_FEATURES: # add biases
            model_weights.append(0.0)
            model_weights.append(0.0)
        # NOTE(review): logging.fatal only logs, it does not abort; with a
        # wrong weight count the slicing below still runs -- confirm intended.
        if len(model_weights) != 2*NUM_FEATURES+2:
            logging.fatal("Number of length model weights has to be %d or %d"
                % (2*NUM_FEATURES, 2*NUM_FEATURES+2))
        # Split the flat weight list into r-model and p-model weights,
        # each with its bias appended as the last element.
        self.r_weights = model_weights[0:NUM_FEATURES] + [model_weights[-2]]
        self.p_weights = model_weights[NUM_FEATURES:2*NUM_FEATURES] + [model_weights[-1]]
        self.src_features = self._extract_features(text_file)
        self.n_consumed = 0

    def _extract_features(self, file_name):
        """Extract all features from the source sentences, one feature
        vector per line of ``file_name``. """
        feats = []
        with open(file_name) as f:
            for line in f:
                feats.append(self._analyse_sentence(line.strip()))
        return feats

    def _analyse_sentence(self, sentence):
        """Extract features for a single source sentence.

        Args:
            sentence (string): Source sentence string

        Returns:
            5-tuple of features as described in the class docstring
        """
        n_char = len(sentence) + 0.0
        n_words = len(sentence.split()) + 0.0
        # punctuation is counted per character over this fixed set
        n_punct = sum([sentence.count(s) for s in ",.:;-"]) + 0.0
        return [n_char, n_words, n_punct, n_char/n_words, n_punct/n_words]

    def get_unk_probability(self, posterior):
        """If we use point estimates, return 0 (=1). Otherwise, return
        the 1-p(EOS), with p(EOS) fetched from ``posterior``
        """
        if self.use_point_probs:
            if self.n_consumed == 0:
                return self.max_eos_prob
            return 0.0
        if self.n_consumed == 0:
            return 0.0
        return np.log(1.0 - np.exp(posterior[utils.EOS_ID]))

    def predict_next(self):
        """Returns a dictionary with single entry for EOS. """
        if self.n_consumed == 0:
            # never allow empty hypotheses
            return {utils.EOS_ID : utils.NEG_INF}
        return {utils.EOS_ID : self._get_eos_prob()}

    def _get_eos_prob(self):
        """Get loglikelihood according to cur_p, cur_r, and n_consumed. """
        eos_point_prob = self._get_eos_point_prob(max(
                                            1, 
                                            self.n_consumed - self.offset))
        if self.use_point_probs:
            # normalize by the mode of the NB distribution
            return eos_point_prob - self.max_eos_prob
        if not self.prev_eos_probs:
            self.prev_eos_probs.append(eos_point_prob)
            return eos_point_prob
        # bypass utils.log_sum because we always want to use logsumexp here
        prev_sum = logsumexp(np.asarray([p for p in self.prev_eos_probs]))
        self.prev_eos_probs.append(eos_point_prob)
        # Desired prob is eos_point_prob / (1-last_eos_probs_sum)
        return eos_point_prob - np.log(1.0-np.exp(prev_sum))

    def _get_eos_point_prob(self, n):
        # Negative binomial log pmf:
        # log C(n+r-1, n) + n*log(p) + r*log(1-p) expressed via gammaln
        return gammaln(n + self.cur_r) \
               - gammaln(n + 1) \
               - gammaln(self.cur_r) \
               + n * np.log(self.cur_p) \
               + self.cur_r * np.log(1.0-self.cur_p)

    def _get_max_eos_prob(self):
        """Get the maximum loglikelihood according to cur_p, cur_r
        
        TODO: replace this brute force impl. with something smarter
        """
        max_prob = utils.NEG_INF
        n_prob = max_prob
        n = 0
        # walk up n until the point probability stops increasing
        while n_prob == max_prob:
            n += 1
            n_prob = self._get_eos_point_prob(n)
            max_prob = max(max_prob, n_prob)
        return max_prob

    def initialize(self, src_sentence):
        """Extract features for the source sentence. Note that this
        method does not use ``src_sentence`` as we need the string
        representation of the source sentence to extract features.
        
        Args:
            src_sentence (list): Not used
        """
        # append 1.0 so the bias weight acts as a constant feature
        feat = self.src_features[self.current_sen_id] + [1.0]
        self.cur_r = max(EPS_R, np.dot(feat, self.r_weights));
        p = np.dot(feat, self.p_weights)
        p = 1.0 / (1.0 + math.exp(-p))
        # clamp p away from 0 and 1 to keep the log pmf finite
        self.cur_p = max(utils.EPS_P, min(1.0 - utils.EPS_P, p))
        self.n_consumed = 0
        self.prev_eos_probs = []
        if self.use_point_probs:
            self.max_eos_prob = self._get_max_eos_prob()

    def consume(self, word):
        """Increases the current history length
        
        Args:
            word (int): Not used
        """
        self.n_consumed = self.n_consumed + 1

    def get_state(self):
        """State consists of the number of consumed words, and the
        accumulator for previous EOS probability estimates if we 
        don't use point estimates.
        """
        return self.n_consumed,self.prev_eos_probs

    def set_state(self, state):
        """Set the predictor state """
        self.n_consumed,self.prev_eos_probs = state

    def is_equal(self, state1, state2):
        """Returns true if the number of consumed words is the same """
        n1,_ = state1
        n2,_ = state2
        return n1 == n2
class WordCountPredictor(Predictor):
    """Adds the (negative) number of words as a feature.

    With a positive predictor weight this encourages shorter
    hypotheses; with ``negative_wc=False`` it rewards longer ones.
    """

    def __init__(self, word=-1,
                 nonterminal_penalty=False,
                 nonterminal_ids=None,
                 min_terminal_id=0,
                 max_terminal_id=30003,
                 negative_wc=True,
                 vocab_size=30003):
        """Creates a new word count predictor instance.

        Args:
            word (int): If this is non-negative we count only the
                        number of the specified word. If its
                        negative, count all words
            nonterminal_penalty (bool): If true, apply penalty only to
                                        tokens outside the
                                        [min_terminal_id, max_terminal_id]
                                        range
            nonterminal_ids: file containing ids of nonterminal tokens
            min_terminal_id: lower bound of tokens *not* to penalize,
                             if nonterminal_penalty selected
            max_terminal_id: upper bound of tokens *not* to penalize,
                             if nonterminal_penalty selected
            negative_wc: If true, the score of this predictor is the 
                         negative word count.
            vocab_size: upper bound of tokens, used to find nonterminal range
        """
        super(WordCountPredictor, self).__init__()
        val = -1.0 if negative_wc else 1.0
        if nonterminal_penalty:
            # Penalize nonterminals only; terminals, EOS and UNK are free.
            if nonterminal_ids:
                nts = load_external_ids(nonterminal_ids)
            else:
                nts = [t for t in range(vocab_size)
                       if t < min_terminal_id or t > max_terminal_id]
            self.posterior = dict.fromkeys(nts, val)
            self.posterior[utils.EOS_ID] = 0.0
            self.posterior[utils.UNK_ID] = 0.0
            self.unk_prob = 0.0
        elif word < 0:
            # Count every word: all tokens cost val except EOS.
            self.posterior = {utils.EOS_ID : 0.0}
            self.unk_prob = val
        else:
            # Count occurrences of a single word id only.
            self.posterior = {word : val}
            self.unk_prob = 0.0

    def get_unk_probability(self, posterior):
        """Cost for every token not explicitly listed in the posterior."""
        return self.unk_prob

    def predict_next(self):
        """Return the constant per-token cost table."""
        return self.posterior

    def initialize(self, src_sentence):
        """Empty."""
        pass

    def consume(self, word):
        """Empty."""
        pass

    def get_state(self):
        """This predictor is stateless."""
        return True

    def set_state(self, state):
        """Empty."""
        pass

    def is_equal(self, state1, state2):
        """Stateless predictor: all states are equal."""
        return True
class WeightNonTerminalPredictor(Predictor):
"""This wrapper multiplies the weight of given tokens (those outside
the min/max terminal range) by a factor."""
def __init__(self, slave_predictor,
penalty_factor=1.0,
nonterminal_ids=None,
min_terminal_id=0,
max_terminal_id=30003,
vocab_size=30003):
"""Creates a new id-weighting wrapper for a predictor
Args:
slave_predictor: predictor to | |
"""
Authors: <NAME>, <NAME>
TUM, 2020
In order to guarantee transferability of models, Network models should follow the following conventions.
Classes should be called
Node
Edge
Network
in order to guarantee correct import in other modules.
"""
# -------------------------------------------------------------------------------------------------------------------- #
# standard distribution imports
# -----------------------------
import os
import logging
# additional module imports (> requirements)
# ------------------------------------------
import pandas as pd
import numpy as np
from pyproj import Transformer
# src imports
# -----------
from src.routing.NetworkBase import NetworkBase
from src.routing.routing_imports.Router import Router
# -------------------------------------------------------------------------------------------------------------------- #
# global variables
# ----------------
from src.misc.globals import *
LOG = logging.getLogger(__name__)
# import os
# import pandas as pd
# import imports.Router as Router
def read_node_line(columns):
    """Build a Node object from one row of the nodes.csv table."""
    index = int(columns["node_index"])
    stop_only = int(columns["is_stop_only"])
    x = float(columns["pos_x"])
    y = float(columns["pos_y"])
    return Node(index, stop_only, x, y)
class Node():
    """A single vertex of the routing graph.

    Holds the static position, adjacency information, cached travel
    infos per neighbour and the bookkeeping attributes used by the
    (bidirectional) dijkstra implementations.
    """

    def __init__(self, node_index, is_stop_only, pos_x, pos_y, node_order=None):
        # static attributes
        self.node_index = node_index
        self.is_stop_only = is_stop_only
        self.pos_x = pos_x
        self.pos_y = pos_y
        # adjacency: neighbour Node object -> Edge object
        self.edges_to = {}
        self.edges_from = {}
        # cached (travel time, distance) per neighbour node_index
        self.travel_infos_from = {}
        self.travel_infos_to = {}
        # flag set and reset during computeFromNodes
        self.is_target_node = False
        # forward dijkstra bookkeeping
        self.prev = None
        self.settled = 1
        self.cost_index = -1
        self.cost = None
        # backward dijkstra bookkeeping (bidirectional dijkstra)
        self.next = None
        self.settled_back = 1
        self.cost_index_back = -1
        self.cost_back = None

    def __str__(self):
        return f"{self.node_index}"

    def must_stop(self):
        """:return: truthy if vehicles may only stop (not pass through) here."""
        return self.is_stop_only

    def get_position(self):
        """:return: (x, y) coordinate tuple of this node."""
        return (self.pos_x, self.pos_y)

    def get_next_node_edge_pairs(self, ch_flag=False):
        """
        :return: (node, edge) pairs [references to objects] in forward direction
        """
        return self.edges_to.items()

    def get_prev_node_edge_pairs(self, ch_flag=False):
        """
        :return: (node, edge) pairs [references to objects] in backward direction
        """
        return self.edges_from.items()

    def add_next_edge_to(self, other_node, edge):
        """Register an outgoing edge and cache its (tt, dis) info."""
        self.edges_to[other_node] = edge
        self.travel_infos_to[other_node.node_index] = edge.get_tt_distance()

    def add_prev_edge_from(self, other_node, edge):
        """Register an incoming edge and cache its (tt, dis) info."""
        self.edges_from[other_node] = edge
        self.travel_infos_from[other_node.node_index] = edge.get_tt_distance()

    def get_travel_infos_to(self, other_node_index):
        """:return: (tt, dis) of the edge towards the given node index."""
        return self.travel_infos_to[other_node_index]

    def get_travel_infos_from(self, other_node_index):
        """:return: (tt, dis) of the edge coming from the given node index."""
        return self.travel_infos_from[other_node_index]
class Edge():
    """Directed edge of the routing graph with a distance and a
    (mutable) travel time.

    edge_index is the (origin_node, destination_node) tuple the edge
    was created with (Node objects in NetworkBasic.loadNetwork).
    """

    def __init__(self, edge_index, distance, travel_time):
        self.edge_index = edge_index
        self.distance = distance
        self.travel_time = travel_time

    def __str__(self):
        # BUG FIX: edge_index holds Node objects (see loadNetwork), and
        # str.join() only accepts strings; the old
        # "-".join(self.edge_index) raised a TypeError.
        return "-".join(str(part) for part in self.edge_index)

    def set_tt(self, travel_time):
        """Overwrite the current travel time (dynamic networks)."""
        self.travel_time = travel_time

    def get_tt(self):
        """
        :return: (current) travel time on edge
        """
        return self.travel_time

    def get_distance(self):
        """
        :return: distance of edge
        """
        return self.distance

    def get_tt_distance(self):
        """
        :return: (travel time, distance) tuple
        """
        return (self.travel_time, self.distance)
# Position: (start_node_id, end_node_id, relative_pos)
# -> (node_id, None, None) in case vehicle is on a node
# -> relative_pos in [0.0, 1.0]
# A Route is defined as list of node-indices (int)
# while all given start-and end-position nodes are included
class NetworkBasic(NetworkBase):
    def __init__(self, network_name_dir, network_dynamics_file_name=None, scenario_time=None):
        """The network will be initialized.

        This network only uses basic routing algorithms (dijkstra and
        bidirectional dijkstra).

        :param network_name_dir: name of the network_directory to be loaded
        :param scenario_time: applying travel times for a certain scenario at a given time in the scenario
        :param network_dynamics_file_name: file-name of the network dynamics file
        :type network_dynamics_file_name: str
        """
        self.nodes = []     #list of all nodes in network (index == node.node_index)
        self.network_name_dir = network_name_dir
        # scan for time-dependent travel-time folders before loading the network
        self.travel_time_file_folders = self._load_tt_folder_path(network_dynamics_file_name=network_dynamics_file_name)
        self.loadNetwork(network_name_dir, network_dynamics_file_name=network_dynamics_file_name, scenario_time=scenario_time)
        self.current_dijkstra_number = 1    #used in dijkstra-class
        self.sim_time = 0   # TODO #
        self.zones = None   # TODO #
        # coordinate reference system of the node positions (used by pyproj)
        with open(os.sep.join([self.network_name_dir, "base","crs.info"]), "r") as f:
            self.crs = f.read()
def loadNetwork(self, network_name_dir, network_dynamics_file_name=None, scenario_time=None):
nodes_f = os.path.join(network_name_dir, "base", "nodes.csv")
print(f"Loading nodes from {nodes_f} ...")
nodes_df = pd.read_csv(nodes_f)
self.nodes = nodes_df.apply(read_node_line, axis=1)
#
edges_f = os.path.join(network_name_dir, "base", "edges.csv")
print(f"Loading edges from {edges_f} ...")
with open(edges_f) as fhin:
header = fhin.readline()
for line in fhin:
lc = line.strip().split(",")
o_node = self.nodes[int(lc[0])]
d_node = self.nodes[int(lc[1])]
# for the table approach, int values are used (to avoid rounding mistakes!)
tmp_edge = Edge((o_node, d_node), float(lc[2]), float(lc[3]))
o_node.add_next_edge_to(d_node, tmp_edge)
d_node.add_prev_edge_from(o_node, tmp_edge)
print("... {} nodes loaded!".format(len(self.nodes)))
if scenario_time is not None:
latest_tt = None
if len(self.travel_time_file_folders.keys()) > 0:
tts = sorted(list(self.travel_time_file_folders.keys()))
for tt in tts:
if tt > scenario_time:
break
latest_tt = tt
self.load_tt_file(latest_tt)
def _load_tt_folder_path(self, network_dynamics_file_name=None):
""" this method searches in the network-folder for travel_times folder. the name of the folder is defined by the simulation time from which these travel times are valid
stores the corresponding time to trigger loading of new travel times ones the simulation time is reached.
"""
tt_folders = {}
if network_dynamics_file_name is None:
LOG.info("... no network dynamics file given -> read folder structure")
for f in os.listdir(self.network_name_dir):
time = None
try:
time = int(f)
except:
continue
tt_folders[time] = os.path.join(self.network_name_dir, f)
else:
LOG.info("... load network dynamics file: {}".format(os.path.join(self.network_name_dir, network_dynamics_file_name)))
nw_dynamics_df = pd.read_csv(os.path.join(self.network_name_dir, network_dynamics_file_name))
nw_dynamics_df.set_index("simulation_time", inplace=True)
for sim_time, tt_folder_name in nw_dynamics_df["travel_time_folder"].items():
tt_folders[int(sim_time)] = os.path.join(self.network_name_dir, str(tt_folder_name))
return tt_folders
def update_network(self, simulation_time, update_state = True):
"""This method can be called during simulations to update travel times (dynamic networks).
:param simulation_time: time of simulation
:type simulation_time: float
:return: new_tt_flag True, if new travel times found; False if not
:rtype: bool
"""
LOG.debug(f"update network {simulation_time}")
self.sim_time = simulation_time
if update_state:
if self.travel_time_file_folders.get(simulation_time, None) is not None:
self.load_tt_file(simulation_time)
return True
return False
def load_tt_file(self, scenario_time):
"""
loads new travel time files for scenario_time
"""
self._reset_internal_attributes_after_travel_time_update()
f = self.travel_time_file_folders[scenario_time]
tt_file = os.path.join(f, "edges_td_att.csv")
tmp_df = pd.read_csv(tt_file, index_col=[0,1])
for edge_index_tuple, new_tt in tmp_df["edge_tt"].iteritems():
self._set_edge_tt(edge_index_tuple[0], edge_index_tuple[1], new_tt)
def _set_edge_tt(self, o_node_index, d_node_index, new_travel_time):
o_node = self.nodes[o_node_index]
d_node = self.nodes[d_node_index]
edge_obj = o_node.edges_to[d_node]
edge_obj.set_tt(new_travel_time)
new_tt, dis = edge_obj.get_tt_distance()
o_node.travel_infos_to[d_node_index] = (new_tt, dis)
d_node.travel_infos_from[o_node_index] = (new_tt, dis)
    def get_node_list(self):
        """
        :return: list of node objects.
        NOTE(review): after loadNetwork this is a pandas Series of Node
        objects (nodes_df.apply), not a plain list -- confirm callers
        only index/iterate it.
        """
        return self.nodes
    def get_number_network_nodes(self):
        """:return: number of nodes in the network."""
        return len(self.nodes)
def get_must_stop_nodes(self):
""" returns a list of node-indices with all nodes with a stop_only attribute """
return [n.node_index for n in self.nodes if n.must_stop()]
def return_position_from_str(self, position_str):
a, b, c = position_str.split(";")
if b == "-1":
return (int(a), None, None)
else:
return (int(a), int(b), float(c))
    def return_node_coordinates(self, node_index):
        """:return: (x, y) position tuple of the node with the given index."""
        return self.nodes[node_index].get_position()
def return_position_coordinates(self, position_tuple):
"""Returns the spatial coordinates of a position.
:param position_tuple: (o_node, d_node, rel_pos) | (o_node, None, None)
:return: (x,y) for metric systems
"""
if position_tuple[1] is None:
return self.return_node_coordinates(position_tuple[0])
else:
c0 = np.array(self.return_node_coordinates(position_tuple[0]))
c1 = np.array(self.return_node_coordinates(position_tuple[1]))
c_rel = position_tuple[2] * c1 + (1 - position_tuple[2]) * c0
return c_rel[0], c_rel[1]
    def return_network_bounding_box(self):
        """Return the bounding box of the network in WGS84 coordinates.

        :return: list of two (lon, lat) tuples, the transformed
                 (min_x, min_y) and (max_x, max_y) corners.
        """
        min_x = min([node.pos_x for node in self.nodes])
        max_x = max([node.pos_x for node in self.nodes])
        min_y = min([node.pos_y for node in self.nodes])
        max_y = max([node.pos_y for node in self.nodes])
        proj_transformer = Transformer.from_proj(self.crs, 'epsg:4326')
        # only the two corner points are transformed
        lats, lons = proj_transformer.transform([min_x, max_x], [min_y, max_y])
        return list(zip(lons, lats))
    def return_positions_lon_lat(self, position_tuple_list: list) -> list:
        """Transform a list of network positions into (lon, lat) pairs (WGS84)."""
        pos_list = [self.return_position_coordinates(pos) for pos in position_tuple_list]
        x, y = list(zip(*pos_list))
        proj_transformer = Transformer.from_proj(self.crs, 'epsg:4326')
        lats, lons = proj_transformer.transform(x, y)
        return list(zip(lons, lats))
    def get_section_infos(self, start_node_index, end_node_index):
        """
        :param start_node_index: index of start_node of section
        :param end_node_index: index of end_node of section
        :return: (travel time, distance); if no section between nodes (None, None)
        """
        # delegates to the cached travel infos on the start node
        return self.nodes[start_node_index].get_travel_infos_to(end_node_index)
    def return_route_infos(self, route, rel_start_edge_position, start_time):
        """
        This method returns the travel information along a route. The start position is given by a relative
        value on the first edge [0,1], where 0 means that the vehicle is at the first node.
        :param route: list of nodes
        :param rel_start_edge_position: float [0,1] determining the start position
        :param start_time: can be used as an offset in case the route is planned for a future time
        :return: (arrival time, distance to travel)
        """
        arrival_time = start_time
        distance = 0
        # remaining part of the first edge; from_start=False yields the
        # not-yet-traveled fraction (see get_section_overhead)
        _, start_tt, start_dis = self.get_section_overhead( (route[0], route[1], rel_start_edge_position), from_start=False)
        arrival_time += start_tt
        distance += start_dis
        # add the full edges for the rest of the route
        if len(route) > 2:
            for i in range(2, len(route)):
                tt, dis = self.get_section_infos(route[i-1], route[i])
                arrival_time += tt
                distance += dis
        return (arrival_time, distance)
    def assign_route_to_network(self, route, start_time):
        """This method can be used for dynamic network models in which the travel times will be derived from the
        number of vehicles/routes assigned to the network.

        Intentionally a no-op in this basic network model.

        :param route: list of nodes
        :param start_time: can be used as an offset in case the route is planned for a future time
        :return:

        TODO
        """
        pass
def get_section_overhead(self, position, from_start=True, customized_section_cost_function=None):
"""This method computes the section overhead for a certain position.
:param position: (current_edge_origin_node_index, current_edge_destination_node_index, relative_position)
:param from_start: computes already traveled travel_time and distance,
if False: computes rest travel time (relative_position -> 1.0-relative_position)
:param customized_section_cost_function: customized routing objective function
| |
<filename>embiggen/pipelines/compute_node_embedding.py
"""Sub-module with methods to compute node-embedding with a one-liner."""
import inspect
import warnings
from typing import Dict, List, Tuple, Union
import pandas as pd
import tensorflow as tf
from cache_decorator import Cache
from ensmallen import Graph
from ..embedders import (Embedder, GraphCBOW, GraphGloVe, GraphSkipGram,
Siamese, SimplE, TransE, TransH, TransR)
from ..utils import has_gpus, has_nvidia_drivers, has_rocm_drivers
SUPPORTED_NODE_EMBEDDING_METHODS = {
"CBOW": GraphCBOW,
"GloVe": GraphGloVe,
"SkipGram": GraphSkipGram,
"Siamese": Siamese,
"TransE": TransE,
"SimplE": SimplE,
"TransH": TransH,
"TransR": TransR,
}
REQUIRE_ZIPFIAN = [
"CBOW",
"SkipGram"
]
RANDOM_WALK_BASED_MODELS = [
"CBOW",
"GloVe",
"SkipGram"
]
LINK_PREDICTION_BASED_MODELS = [
"Siamese",
"TransR",
"TransE",
"TransH",
"SimplE"
]
assert set(RANDOM_WALK_BASED_MODELS +
LINK_PREDICTION_BASED_MODELS) == set(SUPPORTED_NODE_EMBEDDING_METHODS)
def get_available_node_embedding_methods() -> List[str]:
    """Return the names of all supported node embedding methods."""
    return [method_name for method_name in SUPPORTED_NODE_EMBEDDING_METHODS]
def get_node_embedding_method(node_embedding_method_name: str) -> Embedder:
    """Return the embedder class registered under the given method name.

    Raises a KeyError for unsupported names; use
    is_node_embedding_method_supported to check beforehand.
    """
    embedder_class = SUPPORTED_NODE_EMBEDDING_METHODS[node_embedding_method_name]
    return embedder_class
def is_node_embedding_method_supported(node_embedding_method_name: str) -> bool:
    """Return boolean value representing if given node embedding method is supported.

    Parameters
    --------------------
    node_embedding_method_name: str,
        Name of the node embedding method.

    Returns
    --------------------
    Whether the given node embedding method is supported.
    """
    available_methods = get_available_node_embedding_methods()
    return node_embedding_method_name in available_methods
def _train_model(
    graph: Graph,
    node_embedding_method_name: str,
    fit_kwargs: Dict,
    verbose: bool,
    support_mirrored_strategy: bool,
    **kwargs: Dict
) -> Tuple[Union[pd.DataFrame, Tuple[pd.DataFrame]], pd.DataFrame]:
    """Return embedding computed with required node embedding method.

    Parameters
    --------------------------
    graph: Graph,
        The graph to embed.
    node_embedding_method_name: str,
        The name of the node embedding method to use.
    fit_kwargs: Dict,
        Arguments to pass to the fit call.
    verbose: bool = True,
        Whether to show loading bars.
    support_mirrored_strategy: bool,
        Whether the model should support TensorFlow's mirrored strategy.
    **kwargs: Dict,
        Arguments to pass to the node embedding method constructor.
        Read the documentation of the selected method.

    Returns
    --------------------------
    Tuple with node embedding and training history.
    """
    # Creating the node embedding model
    model = get_node_embedding_method(node_embedding_method_name)(
        graph,
        support_mirrored_strategy=support_mirrored_strategy,
        **kwargs
    )
    # Fitting the node embedding model
    history = model.fit(
        verbose=verbose,
        **fit_kwargs
    )
    # Extracting computed embedding
    node_embedding = model.get_embedding_dataframe()
    return node_embedding, history
@Cache(
    cache_path=[
        "node_embeddings/{node_embedding_method_name}/{graph_name}/{_hash}_embedding.pkl.gz",
        "node_embeddings/{node_embedding_method_name}/{graph_name}/{_hash}_training_history.csv.xz",
    ],
    args_to_ignore=["devices", "use_mirrored_strategy", "verbose"]
)
def _compute_node_embedding(
    graph: Graph,
    graph_name: str,  # pylint: disable=unused-argument
    node_embedding_method_name: str,
    fit_kwargs: Dict,
    verbose: bool = True,
    use_mirrored_strategy: bool = True,
    devices: Union[List[str], str] = None,
    **kwargs: Dict
) -> Tuple[Union[pd.DataFrame, Tuple[pd.DataFrame]], pd.DataFrame]:
    """Return embedding computed with required node embedding method.

    Specifically, this method also caches the embedding automatically
    (graph_name is only consumed by the @Cache decorator path template).

    Parameters
    --------------------------
    graph: Graph,
        The graph to embed.
    graph_name: str,
        The name of the graph.
    node_embedding_method_name: str,
        The name of the node embedding method to use.
    fit_kwargs: Dict,
        Arguments to pass to the fit call.
    verbose: bool = True,
        Whether to show loading bars.
    use_mirrored_strategy: bool = True,
        Whether to use mirrored strategy.
    devices: Union[List[str], str] = None,
        The devices to use.
        If None, all GPU devices available are used.
    **kwargs: Dict,
        Arguments to pass to the node embedding method constructor.
        Read the documentation of the selected method.

    Returns
    --------------------------
    Tuple with node embedding and training history.
    """
    # Since the verbose kwarg may be provided also on the fit_kwargs
    # we normalize the parameter to avoid collisions.
    # NOTE(review): .pop mutates the caller-supplied fit_kwargs dict.
    verbose = fit_kwargs.pop("verbose", verbose)
    kwargs = dict(
        graph=graph,
        node_embedding_method_name=node_embedding_method_name,
        fit_kwargs=fit_kwargs,
        verbose=verbose,
        support_mirrored_strategy=use_mirrored_strategy,
        **kwargs
    )
    if use_mirrored_strategy:
        # Run the training inside the distribution strategy scope.
        strategy = tf.distribute.MirroredStrategy(devices=devices)
        with strategy.scope():
            return _train_model(**kwargs)
    return _train_model(**kwargs)
def compute_node_embedding(
graph: Graph,
node_embedding_method_name: str,
use_mirrored_strategy: bool = True,
devices: Union[List[str], str] = None,
fit_kwargs: Dict = None,
verbose: Union[bool, int] = True,
automatically_drop_unsupported_parameters: bool = False,
automatically_enable_time_memory_tradeoffs: bool = True,
automatically_sort_by_decreasing_outbound_node_degree: bool = True,
**kwargs: Dict
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Return embedding computed using SkipGram on given graph.
Parameters
--------------------------
graph: Graph,
Graph to embed.
node_embedding_method_name: str,
The name of the node embedding method to use.
use_mirrored_strategy: bool = True,
Whether to use mirror strategy to distribute the
computation across multiple devices.
Note that this will be automatically disabled if the
set of devices detected is only composed of one,
since using MirroredStrategy adds a significant overhead
and may endup limiting the device usage.
devices: Union[List[str], str] = None,
The devices to use.
If None, all GPU devices available are used.
fit_kwargs: Dict = None,
Arguments to pass to the fit call.
verbose: bool = True,
Whether to show loading bars.
automatically_drop_unsupported_parameters: bool = False,
If required, we filter out the unsupported parameters.
This may be useful when running a suite of experiments with a set of
parameters and you do not want to bother in dropping the parameters
that are only supported in a subset of methods.
automatically_enable_time_memory_tradeoffs: bool = True,
Whether to activate the time memory tradeoffs automatically.
Often case, this is something you want enabled on your graph object.
Since, generally, it is a good idea to enable these while
computing a node embedding we enable these by default.
automatically_sort_by_decreasing_outbound_node_degree: bool = True,
Whether to automatically sort the nodes by the outbound node degree.
This is necessary in order to run SkipGram efficiently with the NCE loss.
It will ONLY be executed if the requested model is SkipGram.
**kwargs: Dict,
Arguments to pass to the node embedding method constructor.
Read the documentation of the selected method to learn
which methods are supported by the selected constructor.
Returns
--------------------------
Tuple with node embedding and training history.
"""
if not is_node_embedding_method_supported(node_embedding_method_name):
raise ValueError(
(
"The given node embedding method `{}` is not supported. "
"The supported node embedding methods are `{}`."
).format(
node_embedding_method_name,
get_available_node_embedding_methods()
)
)
# To avoid some nighmares we check availability of GPUs.
if not has_gpus():
# If there are no GPUs, mirrored strategy makes no sense.
if use_mirrored_strategy:
use_mirrored_strategy = False
warnings.warn(
"It does not make sense to use mirrored strategy "
"when GPUs are not available.\n"
"The parameter has been disabled."
)
# We check for drivers to try and give a more explainatory
# warning about the absence of GPUs.
if has_nvidia_drivers():
warnings.warn(
"It was not possible to detect GPUs but the system "
"has NVIDIA drivers installed.\n"
"It is very likely there is some mis-configuration "
"with your TensorFlow instance.\n"
"The model will train a LOT faster if you figure "
"out what may be the cause of this issue on your "
"system: sometimes a simple reboot will do a lot of good.\n"
"If you are currently on COLAB, remember to enable require "
"a GPU instance from the menu!"
)
elif has_rocm_drivers():
warnings.warn(
"It was not possible to detect GPUs but the system "
"has ROCM drivers installed.\n"
"It is very likely there is some mis-configuration "
"with your TensorFlow instance.\n"
"The model will train a LOT faster if you figure "
"out what may be the cause of this issue on your "
"system: sometimes a simple reboot will do a lot of good."
)
else:
warnings.warn(
"It was neither possible to detect GPUs nor GPU drivers "
"of any kind on your system (neither CUDA or ROCM).\n"
"The model will proceed with trainining, but it will be "
"significantly slower than what would be possible "
"with GPU acceleration."
)
# If the fit kwargs are not given we normalize them to an empty dictionary.
if fit_kwargs is None:
fit_kwargs = {}
# If the model requested is SkipGram and the given graph does not have sorted
# node IDs according to decreasing outbound node degrees, we create the new graph
# that has the node IDs sorted.
if automatically_sort_by_decreasing_outbound_node_degree and node_embedding_method_name in REQUIRE_ZIPFIAN and not graph.has_nodes_sorted_by_decreasing_outbound_node_degree():
graph = graph.sort_by_decreasing_outbound_node_degree()
# If required, we filter out the unsupported parameters.
# This may be useful when running a suite of experiments with a set of
# parameters and you do not want to bother in dropping the parameters
# that are only supported in a subset of methods.
if automatically_drop_unsupported_parameters and kwargs:
# Get the list of supported parameters
supported_parameter = inspect.signature(
get_node_embedding_method(node_embedding_method_name).__init__
).parameters
# Filter out the unsupported parameters
kwargs = {
key: value
for key, value in kwargs.items()
if key in supported_parameter
}
# If required we enable the time memory tradeoffs.
if automatically_enable_time_memory_tradeoffs:
if node_embedding_method_name in RANDOM_WALK_BASED_MODELS:
graph.enable(
vector_sources=False,
vector_destinations=True,
vector_cumulative_node_degrees=True
)
if node_embedding_method_name in LINK_PREDICTION_BASED_MODELS:
graph.enable(
vector_sources=True,
vector_destinations=True,
vector_cumulative_node_degrees=False
)
# If devices are | |
<reponame>joaopbicalho/CodingInPython
def get_cur_hedons():
    """Return the current hedon total stored in module-level state."""
    global cur_hedons
    return cur_hedons
def get_cur_health():
    """Return the current health total stored in module-level state."""
    global cur_health
    return cur_health
def offer_star(activity):
    # Register a newly offered bonus "star" for *activity* and update the
    # global bookkeeping that limits how many stars can be active at once.
    # NOTE(review): indentation was reconstructed from a flattened source;
    # the nesting of the two if/elif chains under the time_since_star /
    # time_since_curstar checks reflects the apparent intent — confirm
    # against the original file.
    global cur_star
    global star_counter
    global time_since_curstar
    global time_since_star
    global star_break
    global time_since_star1
    global time_since_star2
    global star_span
    # A star is being offered right now
    time_since_curstar = 0
    star_counter += 1
    # Total elapsed time across the two tracked star windows
    time_since_star = time_since_star1 + time_since_star2
    if time_since_star >= 120:
        # Oldest star has expired: forget it and shift the time windows
        star_counter += -1
        time_since_star1 = time_since_star2
        time_since_star2 = 0
        star_span = 1
        if star_counter > 2:
            # Too many stars offered in the window: cancel and lock out
            cur_star = "none"
            star_break = "activated"
        elif activity == "running":
            cur_star = "running"
        elif activity == "textbooks":
            cur_star = "textbooks"
    elif time_since_curstar < 120:
        # NOTE(review): time_since_curstar was set to 0 above, so this
        # condition is always true when reached — possibly time_since_star
        # was intended here; confirm with the author.
        if star_counter > 2:
            cur_star = "none"
            star_break = "activated"
        elif activity == "running":
            cur_star = "running"
            star_span = star_counter
        elif activity == "textbooks":
            cur_star = "textbooks"
            star_span = star_counter
def perform_activity(activity, duration):
global cur_health
global cur_hedons
global running_duration
global running_counter
global resting_duration
global user_state
global textbooks_duration
global textbooks_counter
global running_hed_counter
global star_time
global cur_star
global star_counter
global time_since_star
global time_since_curstar
global star_break
global time_since_star1
global time_since_star2
global star_span
global textbook_hed_counter
if activity == "running":
running_duration += duration
resting_duration = 0
textbook_hed_counter = 0
textbooks_duration = 0
textbooks_counter = 0
if user_state == "tired" and cur_star != "running":
cur_hedons += duration * (-2)
cur_star = "none"
time_since_curstar = "not zero"
if running_duration <= 180:
cur_health += duration*3
user_state = "tired"
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration >180:
running_counter += 1
if running_counter == 1:
cur_health += (running_duration - 180) + 540 - (running_duration - duration) * 3
user_state = "tired"
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
cur_health += (duration)
user_state = "tired"
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif user_state == "not tired" and cur_star != "running":
running_hed_counter += 1
user_state = "tired"
cur_star = "none"
time_since_curstar = "not zero"
if running_duration <= 10:
cur_hedons += running_duration * 2
user_state = "tired"
cur_health += duration * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration > 10 and running_hed_counter == 1:
cur_hedons += (running_duration - 10) * (-2) + 20
if running_duration <= 180:
cur_health += duration * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration > 180 and running_counter == 1:
running_counter += 1
cur_health += (running_duration - 180) + 540 - (running_duration - duration) * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
cur_health += duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
cur_hedons += duration * (-1)
user_state = "tired"
if running_duration <= 180:
cur_health += duration * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration > 180 and running_counter == 1:
running_counter += 1
cur_health += (running_duration - 180) + 540
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_counter != 1:
cur_health += (running_duration - 180) + 540 - (running_duration - duration) * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif cur_star == "running" and user_state == "not tired" and star_break != "activated" and time_since_curstar == 0:
user_state = "tired"
cur_star = "none"
time_since_curstar = "not zero"
if duration <= 10:
cur_hedons += 5 * duration
if running_duration <= 180:
cur_health += duration * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration > 180 and running_counter == 1:
running_counter += 1
cur_health += (running_duration - 180) + 540 - (running_duration - duration) * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
cur_health += duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif duration > 10:
cur_hedons += (duration - 10) * (-2) + 50
if running_duration <= 180:
cur_health += duration * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration > 180 and running_counter == 1:
running_counter += 1
cur_health += (running_duration - 180) + 540 - (running_duration - duration) * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
cur_health += duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif cur_star == "running" and user_state == "tired" and star_break != "activated" and time_since_curstar == 0:
user_state = "tired"
cur_star = "none"
time_since_curstar = "not zero"
if duration <= 10:
cur_hedons += duration
if running_duration <= 180:
cur_health += duration * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration > 180 and running_counter == 1:
running_counter += 1
cur_health += (running_duration - 180) + 540
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
cur_health += duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif duration > 10:
cur_hedons += (duration - 10) * (-2) + 10
if running_duration <= 180:
cur_health += duration * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration > 180 and running_counter == 1:
running_counter += 1
cur_health += (running_duration - 180) + 540
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
cur_health += duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif activity == "textbooks":
resting_duration = 0
cur_health = cur_health + 2 * duration
running_duration = 0
running_counter = 0
textbooks_counter += 1
textbooks_duration += duration
if user_state == "tired" and cur_star != "textbooks":
cur_star = "none"
cur_hedons += duration * (-2)
time_since_curstar = "not zero"
user_state = "tired"
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif user_state == "not tired" and cur_star == "textbooks" and star_break != "activated" and time_since_curstar == 0:
cur_star = "none"
time_since_curstar = "not zero"
user_state = "tired"
if duration <= 10:
cur_hedons += 4 * duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif duration <= 20:
cur_hedons += (duration - 10) + 40
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif duration > 20:
cur_hedons += ((duration - 20) * (-1)) + 50
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif user_state == "tired" and cur_star == "textbooks" and star_break != "activated" and time_since_curstar == 0:
cur_star = "none"
user_state = "tired"
time_since_curstar = "not zero"
if duration <= 10:
cur_hedons += duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif duration > 10:
cur_hedons += (duration - 10) * (-2) + 10
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif user_state == "not tired" and cur_star != "textbooks":
cur_star = "none"
user_state = "tired"
time_since_curstar = "not zero"
if duration <= 20:
cur_hedons += duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif duration > 20 and textbook_hed_counter == 0:
textbook_hed_counter += 1
cur_hedons += (duration - 20) * (-1) + 20
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif duration > 20 and textbook_hed_counter != 0:
textbook_hed_counter += 1
cur_hedons += (duration) * (-1)
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif activity == "resting":
running_duration = 0
textbook_hed_counter = 0
running_counter = 0
textbooks_duration = 0
textbooks_counter | |
memory if it is a valid point
if np.isnan(y0).any():
self.mem_ban.add(x0, y0)
else:
self.mem_med.add(x0, y0)
if self.verbose:
print(" y = %s" % np.array_str(y0))
return y0
def feasible_moves(self, x0, dx):
    """Return candidate moves from x0 within constraints and not tabu.

    Candidates come from the Hooke-Jeeves move generator; any point that
    violates the input constraints, sits in short-term (tabu) memory, or
    is on the permanent ban list is filtered out.
    """
    # Candidate moves, de-duplicated (zero entries in dx repeat points)
    candidates = np.unique(hj_move(x0, dx), axis=0)
    # Keep only points satisfying the input constraints
    candidates = candidates[self.constraint(candidates)]
    # Drop recently-visited (tabu) points
    candidates = candidates[~self.mem_short.contains(candidates)]
    # Drop permanently banned points
    # (CFD results indicated a constraint violation at these)
    candidates = candidates[~self.mem_ban.contains(candidates)]
    return candidates
def evaluate_moves(self, x0, dx):
    """From a given start point, evaluate permissible candidate moves.

    Returns (X, Y): candidate points and their objective values,
    combining cached values from long-term memory with fresh objective
    evaluations for previously unseen points.
    """
    X = self.feasible_moves(x0, dx)
    # Check which points we have seen before
    log_seen = self.mem_long.contains(X)
    X_seen = X[log_seen]
    X_unseen = X[~log_seen]
    # Re-use previous objectives from long-term mem if possible
    Y_seen = self.mem_long.lookup(X_seen)
    # Only go as far as evaluating unseen if there are actually points
    if X_unseen.shape[0] > 0:
        # Shuffle the unseen points to remove selection bias
        np.random.shuffle(X_unseen)
        # Limit the maximum parallel evaluations
        if self.max_parallel:
            # Evaluate in batches of at most self.max_parallel points
            isplit = range(
                self.max_parallel, len(X_unseen), self.max_parallel
            )
            X_batch = np.split(X_unseen, isplit)
            Y_batch = [self.objective(Xi) for Xi in X_batch]
            # Join results
            Y_unseen = np.concatenate(Y_batch)
        else:
            # Evaluate objective for unseen points in a single call
            Y_unseen = self.objective(X_unseen)
        # Increment function evaluation counter
        self.fevals += len(X_unseen)
        # Join the results together (cached first, then new evaluations)
        X = np.vstack((X_seen, X_unseen))
        Y = np.vstack((Y_seen, Y_unseen))
    # If there are no unseen points
    else:
        X = X_seen
        Y = Y_seen
    return X, Y
def select_move(self, x0, y0, X, Y):
    """Choose next move given starting point and list of candidate moves.

    Candidates are classified against the current objective y0 as
    dominating (better on all objectives), non-dominating (worse on all
    objectives) or equivalent (neither). The next move is drawn randomly
    from the best non-empty category, in that order of preference.
    """
    j = self.j_objective
    try:
        # Categorise the candidates for next move with respect to current
        with np.errstate(invalid="ignore"):
            b_dom = (Y[:, j] < y0[:, j]).all(axis=1)
            b_non_dom = (Y[:, j] > y0[:, j]).all(axis=1)
            # BUGFIX: equivalent points are those that neither dominate
            # nor are dominated. The previous ~logical_and(...) was always
            # True (a point cannot be both), so dominated points leaked
            # into the "equivalent" pool and the non-dominating branch
            # below was dead code.
            b_equiv = ~np.logical_or(b_dom, b_non_dom)
    except IndexError:
        print("ERROR! in select_move")
        # BUGFIX: use %-formatting; previously the format string and the
        # value were printed as two separate arguments.
        print("Y=%s" % str(Y))
        print("y0=%s" % str(y0))
        print("shape Y", Y.shape)
        print("shape y0", y0.shape)
        quit()
    # Convert boolean masks to index arrays
    i_dom = np.where(b_dom)[0]
    i_non_dom = np.where(b_non_dom)[0]
    i_equiv = np.where(b_equiv)[0]
    # Choose the next point
    if len(i_dom) > 0:
        # If we have dominating points, randomly choose from them
        np.random.shuffle(i_dom)
        x1, y1 = X[i_dom[0]], Y[i_dom[0]]
    elif len(i_equiv) > 0:
        # Randomly choose from equivalent points
        np.random.shuffle(i_equiv)
        x1, y1 = X[i_equiv[0]], Y[i_equiv[0]]
    elif len(i_non_dom) > 0:
        # Randomly choose from non-dominating points
        np.random.shuffle(i_non_dom)
        x1, y1 = X[i_non_dom[0]], Y[i_non_dom[0]]
    else:
        raise Exception("No valid points to pick next move from")
    # Keep in matrix form
    x1 = np.atleast_2d(x1)
    y1 = np.atleast_2d(y1)
    return x1, y1
def pattern_move(self, x0, y0, x1, y1, dx):
    """If this move is in a good direction, increase move length.

    Extrapolates beyond x1 along the (x1 - x0) direction by a factor of
    self.fac_pattern; the extended point is accepted only if it improves
    on y1 in every objective, otherwise x1 is kept.
    """
    # Extrapolated candidate along the accepted move direction
    extended = x0 + self.fac_pattern * (x1 - x0)
    extended_y = self.objective(extended)
    # Accept the longer step only on strict improvement in all objectives
    if (extended_y < y1).all():
        return extended
    return x1
def update_med(self, X, Y):
    """Update the near-optimal points in medium term memory.

    Returns True when the memory accepted at least one new point.
    """
    # With no candidate points there is nothing to add
    if X.shape[0] == 0:
        return False
    # Scalar objective tracks a single best point; multi-objective
    # maintains a Pareto front instead
    if len(self.j_objective) == 1:
        return self.mem_med.update_best(X, Y)
    return self.mem_med.update_front(X, Y)
def search(self, x0, dx, callback=None):
    """Perform a search with given initial point and step size.

    Runs the main tabu-search loop until either the evaluation budget
    (self.max_fevals) is spent or all step sizes drop below self.tol,
    and returns the final point as (x0, y0). NOTE(review): indentation
    was reconstructed from a flattened source — confirm branch nesting
    against the original file.
    """
    # Evaluate the objective at given initial guess point, update memories
    y0 = self.initial_guess(x0)
    # Upper bound used when growing steps after failing to find any
    # feasible point
    max_step = dx * self.fac_restart ** 2.0
    # Main loop, until max evaluations reached or step size below tolerance
    self.i_search = 0
    while self.fevals < self.max_fevals and np.any(dx > self.tol):
        # Save in case we want to resume later
        self.dx = dx.reshape(-1).tolist()
        self.x0 = x0.reshape(-1).tolist()
        self.y0 = y0.reshape(-1).tolist()
        # Record our progress in a memory file, if specified
        if self.mem_file:
            self.save_memories(self.mem_file)
        # Plot stuff
        if self.verbose:
            self.plot_long("long.pdf")
            self.plot_opt("opt.pdf")
        # If we are given a callback, evaluate it now
        if callback:
            callback(self)
        # Evaluate objective for permissible candidate moves
        X, Y = self.evaluate_moves(x0, dx)
        # If any objectives are NaN, add to permanent ban list
        inan = np.isnan(Y).any(-1)
        Xnan = X[inan]
        self.mem_ban.add(Xnan)
        # Delete NaN from results
        X, Y = X[~inan], Y[~inan]
        # Put new results into long-term memory
        self.mem_long.add(X, Y)
        # Put Pareto-equivalent results into medium-term memory
        # Flag true if we successfully added a point
        flag = self.update_med(X, Y)
        if self.verbose and flag:
            print(
                "NEW OPT\n x = %s\n y = %s"
                % tuple([np.array_str(xy) for xy in self.mem_med.get(0)])
            )
        # Reset counter if we added to medium memory, otherwise increment
        self.i_search = 0 if flag else self.i_search + 1
        # Choose next point based on local search counter
        if self.i_search == self.i_restart:
            if self.verbose:
                print("RESTART")
            # RESTART: reduce step sizes and randomly select from
            # medium-term
            dx *= self.fac_restart
            if len(self.j_objective) == 1:
                # Pick the current optimum if scalar objective
                x1, y1 = self.mem_med.get(0)
            else:
                # Pick from sparse region of Pareto from if multi-objective
                x1, y1 = self.mem_med.sample_sparse(self.x_regions)
            self.i_search = 0
        elif self.i_search in self.i_intensify or X.shape[0] == 0:
            # INTENSIFY: Select a near-optimal point if the medium memory
            # is populated
            if self.mem_med.npts > 0:
                if self.verbose:
                    print("INTENSIFY")
                x1, y1 = self.mem_med.sample_random()
            else:
                # If nothing in the medium-term memory, we have not found
                # any valid points yet, so increase step size and try again
                if np.all(dx <= max_step):
                    if self.verbose:
                        print("INCREASE STEP")
                    dx /= self.fac_restart
                    x1, y1 = x0, y0
                else:
                    # NOTE(review): this path only prints; x1/y1 remain
                    # unbound so mem_short.add(x1) below would raise a
                    # NameError — confirm the intended termination.
                    print(
                        "Could not find a point satisfying constraints near initial guess, quitting."
                    )
        elif self.i_search == self.i_diversify:
            if self.verbose:
                print("DIVERSIFY")
            # DIVERSIFY: Generate a new point in sparse design region
            x1 = self.mem_long.generate_sparse(self.x_regions)
            y1 = self.objective(x1)
        else:
            if self.verbose:
                print("i=%d, fevals=%d" % (self.i_search, self.fevals))
            # Normally, choose the best candidate move
            x1, y1 = self.select_move(x0, y0, X, Y)
            # Check for a pattern move every i_pattern steps
            # NOTE(review): the modulo test below is truthy on every step
            # that is NOT a multiple of i_pattern — possibly `== 0` was
            # intended; confirm. Also pattern_move returns only x1, so y1
            # may no longer correspond to x1 when the longer step is taken.
            if not self.i_pattern is None:
                if np.mod(self.i_search, self.i_pattern):
                    x1 = self.pattern_move(x0, y0, x1, y1, dx)
        if self.verbose:
            print(
                " x = %s\n y = %s"
                % tuple([np.array_str(xy) for xy in (x1, y1)])
            )
        # Add chosen point to short-term list (tabu)
        self.mem_short.add(x1)
        # Update current point before next iteration
        x0, y0 = x1, y1
    # After the loop return current point
    return x0, y0
def resume(self, fname):
    """Resume a previously saved search from memory file *fname*.

    Restores all memories plus the saved (x0, dx) state, then re-enters
    the main search loop, checkpointing into the same file.
    """
    self.load_memories(fname)
    self.mem_file = fname
    self.search(self.x0, self.dx)
def save_memories(self, fname):
    """Dump the memories to a json file."""
    # One entry per memory, keyed consistently with load_memories
    state = {key: mem.to_dict() for key, mem in zip(self.MEM_KEYS, self.mem_all)}
    # Persist the scalar search state too, so a run can be resumed
    for attr in ["i_search", "x0", "y0", "dx"]:
        state[attr] = getattr(self, attr)
    # Write the file
    with open(fname, "w") as f:
        json.dump(state, f)
def load_memories(self, fname):
    """Populate memories from a json file."""
    if self.verbose:
        print("READ memories from %s" % fname)
    # Parse the checkpoint file
    with open(fname, "r") as f:
        state = json.load(f)
    # Restore each memory from its serialised form
    for key, mem in zip(self.MEM_KEYS, self.mem_all):
        if self.verbose:
            print(" %s: %d points" % (key, state[key]["npts"]))
        mem.from_dict(state[key])
    # Older checkpoints may lack the scalar search state
    if "i_search" in state:
        self.i_search = state["i_search"]
        self.x0 = np.atleast_2d(state["x0"])
        self.y0 = np.atleast_2d(state["y0"])
        self.dx = np.atleast_2d(state["dx"])
def plot_long(self, fname):
    """Generate a graph of long-term memory.

    Plots all evaluated objective values (as percentages) in evaluation
    order, marking the points held in medium-term memory, and saves the
    figure to *fname*.
    """
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    # Long-term memory stores newest first; flip to evaluation order
    Yl = np.flip(self.mem_long.Y, axis=0) * 100.0
    pts = np.arange(len(Yl))
    Ym = self.mem_med.Y
    # Indices of medium-term (near-optimal) points within the history
    _, ind = find_rows(Ym, Yl)
    ax.plot(pts, Yl[:, 0], "k-")
    ax.plot(pts[ind], Yl[ind, 0], "r*")
    # Raw strings: "\D" and "\e" are invalid escape sequences otherwise
    ax.set_ylabel(r"Lost Efficiency, $\Delta \eta/\%$")
    ax.set_xlabel("Design Evaluations")
    plt.tight_layout()
    plt.savefig(fname)
    plt.close()
def plot_opt(self, fname):
    """Generate a graph of optimisation progress.

    Plots the running minimum objective (as a percentage, relative to the
    final optimum) against evaluation count and saves to *fname*.
    """
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    # Long-term memory stores newest first; flip to evaluation order
    Yl = np.flip(self.mem_long.Y, axis=0)
    pts = np.arange(len(Yl))
    # BUGFIX: must be a float array. np.empty_like(pts) inherits the
    # integer dtype of arange, so assigning np.nan raised ValueError and
    # the scaled minima would have been truncated to integers.
    Ymin = np.empty(len(pts), dtype=float)
    for i, p in enumerate(pts):
        if np.all(np.isnan(Yl[: (p + 1), 0])):
            # No valid evaluation yet up to this point
            Ymin[i] = np.nan
        else:
            Ymin[i] = np.nanmin(Yl[: (p + 1), 0]) * 100.0
    ax.plot(pts, Ymin - Ymin[-1], "k-")
    # Raw string: "\D" and "\e" are invalid escape sequences otherwise
    ax.set_ylabel(r"Optimum Lost Efficiency Error, $\Delta \eta/\%$")
    ax.set_xlabel("Design Evaluations")
    plt.tight_layout()
    plt.savefig(fname)
    # Free the figure (matches plot_long; avoids accumulating figures)
    plt.close()
| |
__version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyLivePlayAuthKeyRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyLivePlayAuthKey(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeLiveTranscodeTemplate(args, parsed_globals):
    """CLI handler for the Live `DescribeLiveTranscodeTemplate` API action."""
    params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey], params[OptionsDefine.Token]
    )
    timeout = params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = params[OptionsDefine.Version]
    client = CLIENT_MAP[version].LiveClient(cred, params[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeLiveTranscodeTemplateRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeLiveTranscodeTemplate(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters hand back bytes; decode before parsing.
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeScreenShotSheetNumList(args, parsed_globals):
    """CLI handler for the Live `DescribeScreenShotSheetNumList` API action."""
    params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey], params[OptionsDefine.Token]
    )
    timeout = params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = params[OptionsDefine.Version]
    client = CLIENT_MAP[version].LiveClient(cred, params[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeScreenShotSheetNumListRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeScreenShotSheetNumList(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters hand back bytes; decode before parsing.
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doUnBindLiveDomainCert(args, parsed_globals):
    """CLI handler for the Live `UnBindLiveDomainCert` API action."""
    params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey], params[OptionsDefine.Token]
    )
    timeout = params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = params[OptionsDefine.Version]
    client = CLIENT_MAP[version].LiveClient(cred, params[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].UnBindLiveDomainCertRequest()
    model.from_json_string(json.dumps(args))
    result = client.UnBindLiveDomainCert(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters hand back bytes; decode before parsing.
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDeleteRecordTask(args, parsed_globals):
    """CLI handler for the Live `DeleteRecordTask` API action."""
    params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey], params[OptionsDefine.Token]
    )
    timeout = params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = params[OptionsDefine.Version]
    client = CLIENT_MAP[version].LiveClient(cred, params[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DeleteRecordTaskRequest()
    model.from_json_string(json.dumps(args))
    result = client.DeleteRecordTask(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters hand back bytes; decode before parsing.
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeLiveTranscodeDetailInfo(args, parsed_globals):
    """CLI handler for the Live `DescribeLiveTranscodeDetailInfo` API action."""
    params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey], params[OptionsDefine.Token]
    )
    timeout = params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = params[OptionsDefine.Version]
    client = CLIENT_MAP[version].LiveClient(cred, params[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeLiveTranscodeDetailInfoRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeLiveTranscodeDetailInfo(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters hand back bytes; decode before parsing.
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeLogDownloadList(args, parsed_globals):
    """CLI handler for the Live `DescribeLogDownloadList` API action."""
    params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey], params[OptionsDefine.Token]
    )
    timeout = params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = params[OptionsDefine.Version]
    client = CLIENT_MAP[version].LiveClient(cred, params[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeLogDownloadListRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeLogDownloadList(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters hand back bytes; decode before parsing.
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeLiveRecordRules(args, parsed_globals):
    """CLI handler for the Live `DescribeLiveRecordRules` API action."""
    params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey], params[OptionsDefine.Token]
    )
    timeout = params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = params[OptionsDefine.Version]
    client = CLIENT_MAP[version].LiveClient(cred, params[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeLiveRecordRulesRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeLiveRecordRules(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters hand back bytes; decode before parsing.
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeLiveDelayInfoList(args, parsed_globals):
    """CLI handler for the Live `DescribeLiveDelayInfoList` API action."""
    params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey], params[OptionsDefine.Token]
    )
    timeout = params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = params[OptionsDefine.Version]
    client = CLIENT_MAP[version].LiveClient(cred, params[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeLiveDelayInfoListRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeLiveDelayInfoList(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters hand back bytes; decode before parsing.
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeLiveStreamPublishedList(args, parsed_globals):
    """CLI handler for the Live `DescribeLiveStreamPublishedList` API action."""
    params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey], params[OptionsDefine.Token]
    )
    timeout = params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = params[OptionsDefine.Version]
    client = CLIENT_MAP[version].LiveClient(cred, params[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeLiveStreamPublishedListRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeLiveStreamPublishedList(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters hand back bytes; decode before parsing.
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeLiveDomain(args, parsed_globals):
    """CLI handler for the Live `DescribeLiveDomain` API action."""
    params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey], params[OptionsDefine.Token]
    )
    timeout = params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = params[OptionsDefine.Version]
    client = CLIENT_MAP[version].LiveClient(cred, params[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeLiveDomainRequest()
    model.from_json_string(json.dumps(args))
    result = client.DescribeLiveDomain(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters hand back bytes; decode before parsing.
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doCreateLiveCallbackRule(args, parsed_globals):
    """CLI handler for the Live `CreateLiveCallbackRule` API action."""
    params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey], params[OptionsDefine.Token]
    )
    timeout = params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = params[OptionsDefine.Version]
    client = CLIENT_MAP[version].LiveClient(cred, params[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].CreateLiveCallbackRuleRequest()
    model.from_json_string(json.dumps(args))
    result = client.CreateLiveCallbackRule(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters hand back bytes; decode before parsing.
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doBindLiveDomainCert(args, parsed_globals):
    """CLI handler for the Live `BindLiveDomainCert` API action."""
    params = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey], params[OptionsDefine.Token]
    )
    timeout = params[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = params[OptionsDefine.Version]
    client = CLIENT_MAP[version].LiveClient(cred, params[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].BindLiveDomainCertRequest()
    model.from_json_string(json.dumps(args))
    result = client.BindLiveDomainCert(model).to_json_string()
    try:
        json_obj = json.loads(result)
    except TypeError:
        # Some interpreters hand back bytes; decode before parsing.
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeLiveCallbackRules(args, parsed_globals):
    """CLI entry point for the Live DescribeLiveCallbackRules API action.

    Builds a credential and client profile from the parsed global CLI
    options, fills the request model from ``args``, invokes the API and
    prints the JSON response through FormatOutput.
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    client_conf = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    sdk_module = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = sdk_module.LiveClient(cred, g_param[OptionsDefine.Region], client_conf)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[g_param[OptionsDefine.Version]].DescribeLiveCallbackRulesRequest()
    request.from_json_string(json.dumps(args))
    raw = client.DescribeLiveCallbackRules(request).to_json_string()
    try:
        json_obj = json.loads(raw)
    except TypeError:
        json_obj = json.loads(raw.decode('utf-8'))  # python3.3: payload may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribePlayErrorCodeDetailInfoList(args, parsed_globals):
    """CLI entry point for the Live DescribePlayErrorCodeDetailInfoList API action.

    Builds a credential and client profile from the parsed global CLI
    options, fills the request model from ``args``, invokes the API and
    prints the JSON response through FormatOutput.
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    client_conf = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    sdk_module = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = sdk_module.LiveClient(cred, g_param[OptionsDefine.Region], client_conf)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[g_param[OptionsDefine.Version]].DescribePlayErrorCodeDetailInfoListRequest()
    request.from_json_string(json.dumps(args))
    raw = client.DescribePlayErrorCodeDetailInfoList(request).to_json_string()
    try:
        json_obj = json.loads(raw)
    except TypeError:
        json_obj = json.loads(raw.decode('utf-8'))  # python3.3: payload may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteLiveRecordRule(args, parsed_globals):
    """CLI entry point for the Live DeleteLiveRecordRule API action.

    Builds a credential and client profile from the parsed global CLI
    options, fills the request model from ``args``, invokes the API and
    prints the JSON response through FormatOutput.
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    client_conf = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    sdk_module = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = sdk_module.LiveClient(cred, g_param[OptionsDefine.Region], client_conf)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[g_param[OptionsDefine.Version]].DeleteLiveRecordRuleRequest()
    request.from_json_string(json.dumps(args))
    raw = client.DeleteLiveRecordRule(request).to_json_string()
    try:
        json_obj = json.loads(raw)
    except TypeError:
        json_obj = json.loads(raw.decode('utf-8'))  # python3.3: payload may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribePushBandwidthAndFluxList(args, parsed_globals):
    """CLI entry point for the Live DescribePushBandwidthAndFluxList API action.

    Builds a credential and client profile from the parsed global CLI
    options, fills the request model from ``args``, invokes the API and
    prints the JSON response through FormatOutput.
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    client_conf = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    sdk_module = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = sdk_module.LiveClient(cred, g_param[OptionsDefine.Region], client_conf)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[g_param[OptionsDefine.Version]].DescribePushBandwidthAndFluxListRequest()
    request.from_json_string(json.dumps(args))
    raw = client.DescribePushBandwidthAndFluxList(request).to_json_string()
    try:
        json_obj = json.loads(raw)
    except TypeError:
        json_obj = json.loads(raw.decode('utf-8'))  # python3.3: payload may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doForbidLiveStream(args, parsed_globals):
    """CLI entry point for the Live ForbidLiveStream API action.

    Builds a credential and client profile from the parsed global CLI
    options, fills the request model from ``args``, invokes the API and
    prints the JSON response through FormatOutput.
    """
    g_param = parse_global_arg(parsed_globals)
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    timeout = g_param[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    client_conf = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    sdk_module = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = sdk_module.LiveClient(cred, g_param[OptionsDefine.Region], client_conf)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[g_param[OptionsDefine.Version]].ForbidLiveStreamRequest()
    request.from_json_string(json.dumps(args))
    raw = client.ForbidLiveStream(request).to_json_string()
    try:
        json_obj = json.loads(raw)
    except TypeError:
        json_obj = json.loads(raw.decode('utf-8'))  # python3.3: payload may be bytes
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doAddLiveDomain(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
| |
if jBase.createDatabase(val) == 0:
bd = TS.SimboloBase(val, None, nodo.mode)
tablaSimbolos.put(val, bd)
consola += "Base de datos " + val + " creada. \n"
else:
consola += "Error al crear la base de datos \n"
elif nodo.owner != False and nodo.mode == False:
if jBase.createDatabase(val) == 0:
bd = TS.SimboloBase(val, nodo.owner, None)
tablaSimbolos.put(val, bd)
consola += "Base de datos " + val + " creada. \n"
else:
consola += "Error al crear la base de datos \n"
elif nodo.owner != False and nodo.mode != False:
if jBase.createDatabase(val) == 0:
bd = TS.SimboloBase(val, nodo.owner, nodo.mode)
tablaSimbolos.put(val, bd)
consola += "Base de datos " + val + " creada. \n"
else:
consola += "Error al crear la base de datos \n"
elif nodo.replace == False and nodo.exists != False:
if nodo.owner == False and nodo.mode == False:
if jBase.createDatabase(val) == 0:
bd = TS.SimboloBase(val, None, None)
tablaSimbolos.put(val, bd)
consola += "Base de datos " + val + " creada. \n"
elif jBase.createDatabase(val) == 2:
consola += "La base de datos " + val + " ya existe. \n"
else:
consola += "Error al crear la base de datos \n"
elif nodo.owner == False and nodo.mode != False:
if jBase.createDatabase(val) == 0:
bd = TS.SimboloBase(val, None, nodo.mode)
tablaSimbolos.put(val, bd)
consola += "Base de datos " + val + " creada. \n"
elif jBase.createDatabase(val) == 2:
consola += "La base de datos " + val + " ya existe. \n"
else:
consola += "Error al crear la base de datos \n"
elif nodo.owner != False and nodo.mode == False:
if jBase.createDatabase(val) == 0:
bd = TS.SimboloBase(val, nodo.owner, None)
tablaSimbolos.put(val, bd)
consola += "Base de datos " + val + " creada. \n"
elif jBase.createDatabase(val) == 2:
consola += "La base de datos " + val + " ya existe. \n"
else:
consola += "Error al crear la base de datos \n"
elif nodo.owner != False and nodo.mode != False:
if jBase.createDatabase(val) == 0:
bd = TS.SimboloBase(val, nodo.owner, nodo.mode)
tablaSimbolos.put(val, bd)
consola += "Base de datos " + val + " creada. \n"
elif jBase.createDatabase(val) == 2:
consola += "La base de datos " + val + " ya existe. \n"
else:
consola += "Error al crear la base de datos \n"
def crearTabla(nodo, tablaSimbolos):
    """Create a table in the current database from a CREATE TABLE AST node.

    Builds a TS.SimboloTabla from the node's column definitions (plain
    columns plus UNIQUE / CHECK / FOREIGN KEY / PRIMARY KEY clauses),
    registers it in the symbol table and mirrors it in the jBase storage
    engine.  User feedback is appended to the global ``consola`` string;
    semantic errors go to ``listaSemanticos``.  Nothing happens when the
    node declares inheritance (``nodo.herencia`` is not False).
    """
    val = nodo.id
    global useActual
    global consola
    # Indexes of the columns that form the primary key (sent to jBase at the end).
    primarykeys = []
    if nodo.herencia == False:
        contador = 0  # number of plain columns created; passed to jBase.createTable
        nueva = TS.SimboloTabla(val, None)
        for col in nodo.columnas:
            # Per-column constraint flags, reset for each column definition.
            pk = False
            default_ = None
            check = None
            null = True
            unique = False
            if isinstance(col, SColumna):
                # Plain column: scan its optional clauses first.
                if col.opcionales != None:
                    for opc in col.opcionales:
                        if isinstance(opc, SOpcionales):
                            if opc.tipo == TipoOpcionales.PRIMARYKEY:
                                pk = True
                            elif opc.tipo == TipoOpcionales.DEFAULT:
                                default_ = opc.valor
                            elif opc.tipo == TipoOpcionales.CHECK:
                                # Unnamed checks get a synthetic "<col>_check" id.
                                if opc.id == None:
                                    check = {"id": col.id + "_check",
                                             "condicion": opc.valor}
                                    listaConstraint.append(
                                        TS.Constraints(useActual, val, col.id + "_check", col.id, "check"))
                                else:
                                    check = {"id": opc.id,
                                             "condicion": opc.valor}
                                    listaConstraint.append(
                                        TS.Constraints(useActual, val, opc.id, col.id, "check"))
                            elif opc.tipo == TipoOpcionales.NULL:
                                null = True
                            elif opc.tipo == TipoOpcionales.NOTNULL:
                                null = False
                            elif opc.tipo == TipoOpcionales.UNIQUE:
                                # Unnamed uniques get a synthetic "<col>_unique" id.
                                if opc.id == None:
                                    unique = col.id + "_unique"
                                    listaConstraint.append(
                                        TS.Constraints(useActual, val, col.id + "_unique", col.id, "unique"))
                                else:
                                    unique = opc.id
                                    listaConstraint.append(
                                        TS.Constraints(useActual, val, opc.id, col.id, "unique"))
                colnueva = TS.SimboloColumna(col.id, col.tipo, pk, None, unique, default_, null, check,
                                             len(nueva.columnas))
                if pk:
                    primarykeys.append(colnueva.index)
                nueva.crearColumna(col.id, colnueva)
                # NOTE(review): colnueva was constructed just above, so this
                # check can never fire; the intent was probably to test the
                # return value of crearColumna — confirm.  As written, the
                # else branch always runs and inserts a second, identical
                # column object under the same name.
                if colnueva == None:
                    listaSemanticos.append(
                        Error.ErrorS("Error Semantico", "Ya existe una columna con el nombre " + col.id))
                else:
                    auxc = TS.SimboloColumna(col.id, col.tipo, pk, None, unique, default_, null, check,
                                             len(nueva.columnas))
                    nueva.crearColumna(col.id, auxc)
                    contador += 1
            elif isinstance(col, SColumnaUnique):
                # Table-level UNIQUE(...) clause over one or more columns.
                for id in col.id:
                    if nueva.modificarUnique(id.valor, True, id.valor + "_unique") == None:
                        listaSemanticos.append(
                            Error.ErrorS("Error Semantico", "No se encontró la columna con id " + id.valor))
                    else:
                        listaConstraint.append(TS.Constraints(
                            useActual, val, id.valor + "_unique", id.valor, "unique"))
            elif isinstance(col, SColumnaCheck):
                # Table-level CHECK clause; the target column is taken from
                # the left operand of the condition.
                print("Entró al constraint")
                condicion = col.condicion
                opIzq = condicion.opIzq
                idcol = opIzq.valor
                result = False
                if col.id == None:
                    result = nueva.modificarCheck(
                        idcol, col.condicion, idcol + "_check")
                    listaConstraint.append(TS.Constraints(
                        useActual, val, idcol + "_check", idcol, "check"))
                else:
                    result = nueva.modificarCheck(idcol, condicion, col.id)
                    listaConstraint.append(TS.Constraints(
                        useActual, val, col.id, idcol, "check"))
                if result != True:
                    listaSemanticos.append(Error.ErrorS(
                        "Error Semantico", "No se encontró la columna con id " + idcol))
            elif isinstance(col, SColumnaFk):
                # Table-level FOREIGN KEY: pair up local and referenced columns
                # positionally and require matching types.
                for i in range(len(col.idlocal)):
                    idlocal = col.idlocal[i].valor
                    idfk = col.idfk[i].valor
                    columnafk = tablaSimbolos.getColumna(
                        useActual, col.id, idfk)
                    columnalocal = nueva.getColumna(idlocal)
                    if columnafk != None and columnalocal != None:
                        if columnafk.tipo.tipo == columnalocal.tipo.tipo:
                            nueva.modificarFk(idlocal, col.id, idfk)
                            if col.idconstraint != None:
                                listaConstraint.append(
                                    TS.Constraints(useActual, val, col.idconstraint, columnalocal, "FK"))
                            listaFK.append(TS.llaveForanea(
                                useActual, val, col.id, idlocal, idfk))
                        else:
                            listaSemanticos.append(Error.ErrorS("Error Semantico",
                                                                "La columna %s y la columna %s no tienen el mismo tipo" % (
                                                                    idlocal, idfk)))
                    else:
                        listaSemanticos.append(
                            Error.ErrorS("Error Semantico", "No se encontró la columna"))
            elif isinstance(col, SColumnaPk):
                # Table-level PRIMARY KEY(...) clause.
                for id in col.id:
                    if nueva.modificarPk(id.valor) == None:
                        listaSemanticos.append(
                            Error.ErrorS("Error Semantico", "No se encontró la columna " + id.valor))
                    else:
                        primarykeys.append(nueva.getColumna(id.valor).index)
        # Register the finished table, then mirror it in storage.
        base = tablaSimbolos.get(useActual)
        base.crearTabla(val, nueva)
        # jBase.createTable result codes handled below: 0 ok, 1 operation
        # error, 2 database missing, anything else "already exists".
        tt = jBase.createTable(useActual, nodo.id, contador)
        if len(primarykeys) > 0:
            jBase.alterAddPK(useActual, val, primarykeys)
        if tt == 0:
            consola += "La tabla " + nodo.id + " se creó con éxito. \n"
        elif tt == 1:
            consola += "Error en la operación al crear la tabla " + nodo.id + "\n"
        elif tt == 2:
            consola += "La base de datos " + useActual + " no existe. \n"
        else:
            consola += "La tabla " + nodo.id + " ya existe. \n"
def AlterDatabase(nodo, tablaSimbolos):
    """Handle ALTER DATABASE; only the RENAME form is implemented here.

    Renames the database in the jBase storage layer first and, on
    success, in the symbol table, then patches every registered foreign
    key and constraint that still references the old database name.
    Feedback goes to the global ``consola``; failures are appended to
    ``listaSemanticos``.
    """
    global consola
    if nodo.rename:
        # Result codes as handled below: 0 ok, 1 operation error,
        # 2 source database missing, 3 target name already taken.
        b = jBase.alterDatabase(nodo.id.valor, nodo.idnuevo)
        if b == 0:
            base = tablaSimbolos.renameBase(nodo.id.valor, nodo.idnuevo)
            if base:
                # Keep the FK and constraint registries consistent with
                # the new database name.
                for fk in listaFK:
                    if fk.idbase == nodo.id.valor:
                        fk.idbase = nodo.idnuevo
                for cons in listaConstraint:
                    if cons.idbase == nodo.id.valor:
                        cons.idbase = nodo.idnuevo
                consola += "La base se renombró con éxito " + nodo.idnuevo + " \n"
            else:
                # Storage rename succeeded but the symbol table entry was
                # not found/renamed — the two stores are now inconsistent.
                consola += "Error no se pudo renombrar la base " + \
                    nodo.id.valor + " en la tabla de simbolos \n"
        elif b == 2:
            listaSemanticos.append(Error.ErrorS(
                "Error Semantico", "La base de datos " + nodo.id.valor + " no existe"))
        elif b == 3:
            listaSemanticos.append(Error.ErrorS(
                "Error Semantico", "La base de datos ya existe " + nodo.idnuevo))
        elif b == 1:
            listaSemanticos.append(Error.ErrorS(
                "Error Semantico", "Error en la operacion."))
def AlterAddColumn(nodo, tablaSimbolos):
    """Handle ALTER TABLE ... ADD COLUMN.

    For each requested column, adds it to the table's symbol-table entry
    and, when that succeeds, mirrors the change in the jBase storage
    layer.  Feedback goes to the global ``consola``; failures are
    appended to ``listaSemanticos``.
    """
    global consola
    global useActual
    base = tablaSimbolos.get(useActual)
    tabla = base.getTabla(nodo.idtabla)
    for col in nodo.listaColumnas:
        # New columns are appended at the end (index len(tabla.columnas))
        # with no PK/unique/default/check and NULL allowed.
        auxcol = TS.SimboloColumna(
            col.idcolumna, col.tipo, False, None, None, None, True, None, len(tabla.columnas))
        if tabla.crearColumna(col.idcolumna, auxcol):
            # Result codes as handled below: 0 ok, 1 operation error,
            # 2 database missing, 3 table missing.
            b = jBase.alterAddColumn(useActual, nodo.idtabla, col.idcolumna)
            if b == 0:
                consola += "La columna " + col.idcolumna + \
                    " se agregó a la tabla " + nodo.idtabla + " \n"
            elif b == 1:
                listaSemanticos.append(Error.ErrorS(
                    "Error Semantico", "Error en la operacion."))
            elif b == 2:
                # NOTE(review): the message concatenation lacks a space
                # before "no existe" — confirm the intended wording.
                listaSemanticos.append(Error.ErrorS(
                    "Error Semantico", "Error la base " + useActual + "no existe"))
            elif b == 3:
                listaSemanticos.append(Error.ErrorS(
                    "Error Semantico", "Error la tabla " + nodo.idtabla + "no existe"))
        else:
            consola += "Error al crear la columna " + col.idcolumna + " \n"
def AlterRenameColumn(nodo, tablaSimbolos):
    """Handle ALTER TABLE ... RENAME COLUMN.

    Renames the column in the table's symbol-table entry, then patches
    the registered foreign keys (both referencing and local sides) and
    constraints that mention the old column name.  Feedback goes to the
    global ``consola``; failures are appended to ``listaSemanticos``.
    """
    base = tablaSimbolos.get(useActual)
    tabla = base.getTabla(nodo.idtabla)
    global consola
    # renameColumna result codes as handled below:
    # 0 ok, 1 new name already exists, 2 old name not found.
    op = tabla.renameColumna(nodo.idcolumna, nodo.idnuevo)
    if op == 0:
        for fk in listaFK:
            if fk.idcfk == nodo.idcolumna:
                # The renamed column is referenced by a foreign key:
                # update the registry and the referencing column's record.
                fk.idcfk = nodo.idnuevo
                tablaRF = base.getTabla(fk.idtlocal)
                columnaRF = tablaRF.getColumna(fk.idclocal)
                columnaRF.foreign_key["columna"] = nodo.idnuevo
            elif fk.idclocal == nodo.idcolumna:
                fk.idclocal = nodo.idnuevo
        for cons in listaConstraint:
            if cons.idcol == nodo.idcolumna:
                cons.idcol = nodo.idnuevo
        consola += "Se cambio el nombre de la columna " + \
            nodo.idcolumna + " a " + nodo.idnuevo + " con exito \n"
    elif op == 1:
        listaSemanticos.append(Error.ErrorS(
            "Error Semantico", "La columna con nombre " + nodo.idnuevo + " ya existe"))
    elif op == 2:
        # NOTE(review): this branch reads nodo.idactual while the rest of
        # the function uses nodo.idcolumna — confirm the attribute exists
        # on this node type, otherwise this raises AttributeError.
        listaSemanticos.append(Error.ErrorS(
            "Error Semantico", "La columna con nombre " + nodo.idactual + " no existe"))
def AlterRenameTable(nodo, tablaSimbolos):
global useActual
global consola
base = tablaSimbolos.get(useActual)
op = base.renameTable(nodo.idactual, nodo.idnuevo)
if op == 0:
lib = jBase.alterTable(useActual, nodo.idactual, nodo.idnuevo)
if lib == 0:
for fk in listaFK:
if fk.idtfk == nodo.idactual:
fk.idtfk = nodo.idnuevo
tablaRF = base.getTabla(fk.idtlocal)
columnaRF = tablaRF.getColumna(fk.idclocal)
columnaRF.foreign_key["tabla"] = nodo.idnuevo
elif fk.idtlocal == nodo.idactual:
fk.idtlocal = nodo.idnuevo
for cons in listaConstraint:
if cons.idtabla == nodo.idactual:
| |
# Source: FiskFan1999/ergochat_irctest (SCRAM SASL module)
#
# (C) Copyright 2011 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""SCRAM authentication mechanisms for PyXMPP SASL implementation.
Normative reference:
- :RFC:`5802`
"""
from __future__ import absolute_import, division, unicode_literals
__docformat__ = "restructuredtext en"
import sys
import re
import logging
import hashlib
import hmac
from binascii import a2b_base64
from base64 import standard_b64encode
from .core import default_nonce_factory
from .exceptions import BadChallengeException, \
ExtraChallengeException, ServerScramError, BadSuccessException, \
NotAuthorizedException
logger = logging.getLogger("pyxmpp2_scram")
# Map from SCRAM mechanism hash names to hashlib constructors.
HASH_FACTORIES = {
    "SHA-1": hashlib.sha1,  # pylint: disable=E1101
    "SHA-224": hashlib.sha224,  # pylint: disable=E1101
    "SHA-256": hashlib.sha256,  # pylint: disable=E1101
    "SHA-384": hashlib.sha384,  # pylint: disable=E1101
    "SHA-512": hashlib.sha512,  # pylint: disable=E1101
    "MD-5": hashlib.md5,  # pylint: disable=E1101
}
# Printable characters allowed in SCRAM attribute values (no ',' 0x2C).
VALUE_CHARS_RE = re.compile(br"^[\x21-\x2B\x2D-\x7E]+$")
# A value with ',' and '=' escaped as =2C / =3D (RFC 5802 saslname).
_QUOTED_VALUE_RE = br"(?:[\x21-\x2B\x2D-\x7E]|=2C|=3D)+"
# client-first-message: gs2 header (cb flag + optional authzid), then
# the bare part with optional mandatory-extension, username and nonce.
# NOTE(review): "[a-zA-z0-9.-]" uses A-z (not A-Z), which also matches
# the punctuation between 'Z' and 'a' — confirm intended.
CLIENT_FIRST_MESSAGE_RE = re.compile(
    br"^(?P<gs2_header>(?:y|n|p=(?P<cb_name>[a-zA-z0-9.-]+)),"
    br"(?:a=(?P<authzid>" + _QUOTED_VALUE_RE + br"))?,)"
    br"(?P<client_first_bare>(?P<mext>m=[^\000=]+,)?"
    br"n=(?P<username>" + _QUOTED_VALUE_RE + br"),"
    br"r=(?P<nonce>[\x21-\x2B\x2D-\x7E]+)"
    br"(?:,.*)?)$"
)
# server-first-message: combined nonce, base64 salt, iteration count.
SERVER_FIRST_MESSAGE_RE = re.compile(
    br"^(?P<mext>m=[^\000=]+,)?"
    br"r=(?P<nonce>[\x21-\x2B\x2D-\x7E]+),"
    br"s=(?P<salt>[a-zA-Z0-9/+=]+),"
    br"i=(?P<iteration_count>\d+)"
    br"(?:,.*)?$"
)
# client-final-message: channel-binding data, nonce, then the proof.
CLIENT_FINAL_MESSAGE_RE = re.compile(
    br"(?P<without_proof>c=(?P<cb>[a-zA-Z0-9/+=]+),"
    br"(?:r=(?P<nonce>[\x21-\x2B\x2D-\x7E]+))"
    br"(?:,.*)?)"
    br",p=(?P<proof>[a-zA-Z0-9/+=]+)$"
)
# server-final-message: either an error or the server verifier.
SERVER_FINAL_MESSAGE_RE = re.compile(
    br"^(?:e=(?P<error>[^,]+)|v=(?P<verifier>[a-zA-Z0-9/+=]+)(?:,.*)?)$")
class SCRAMOperations(object):
    """Primitive operations used during SCRAM authentication, as defined
    in RFC 5802: Normalize, HMAC, H, XOR, Hi and the saslname
    escape/unescape helpers.
    """
    def __init__(self, hash_function_name):
        self.hash_function_name = hash_function_name
        self.hash_factory = HASH_FACTORIES[hash_function_name]
        self.digest_size = self.hash_factory().digest_size
    @staticmethod
    def Normalize(str_):
        """The Normalize(str) function.
        Accepts both Unicode text and UTF-8 bytes (the RFC only uses
        UTF-8 strings) and always returns UTF-8 bytes.
        """
        # pylint: disable=C0103
        text = str_.decode("utf-8") if isinstance(str_, bytes) else str_
        return text.encode("utf-8")
    def HMAC(self, key, str_):
        """The HMAC(key, str) function."""
        # pylint: disable=C0103
        return hmac.new(key, str_, self.hash_factory).digest()
    def H(self, str_):
        """The H(str) function."""
        # pylint: disable=C0103
        return self.hash_factory(str_).digest()
    if sys.version_info.major >= 3:
        @staticmethod
        # pylint: disable=C0103
        def XOR(str1, str2):
            """The XOR operator for two byte strings."""
            return bytes(left ^ right for left, right in zip(str1, str2))
    else:
        @staticmethod
        # pylint: disable=C0103
        def XOR(str1, str2):
            """The XOR operator for two byte strings."""
            return "".join(chr(ord(left) ^ ord(right))
                           for left, right in zip(str1, str2))
    def Hi(self, str_, salt, i):
        """The Hi(str, salt, i) function (PBKDF2 with a single output
        block): XOR of U1..Ui where U1 = HMAC(str, salt || INT(1))."""
        # pylint: disable=C0103
        u_current = self.HMAC(str_, salt + b"\000\000\000\001")  # U1
        accumulated = u_current
        for _ in range(i - 1):
            u_current = self.HMAC(str_, u_current)  # Uj = HMAC(str, Uj-1)
            accumulated = self.XOR(accumulated, u_current)
        return accumulated
    @staticmethod
    def escape(data):
        """Escape the ',' and '=' characters for 'a=' and 'n=' attributes.
        Replaces '=' with '=3D' and ',' with '=2C'.
        :Parameters:
            - `data`: string to escape
        :Types:
            - `data`: `bytes`
        """
        return data.replace(b'=', b'=3D').replace(b',', b'=2C')
    @staticmethod
    def unescape(data):
        """Reverse of `escape`: '=2C' -> ',' then '=3D' -> '='.
        :Parameters:
            - `data`: string to unescape
        :Types:
            - `data`: `bytes`
        """
        return data.replace(b'=2C', b',').replace(b'=3D', b'=')
class SCRAMClientAuthenticator(SCRAMOperations):
    """Provides SCRAM SASL authentication for a client (RFC 5802).

    :Ivariables:
        - `username`: authentication user name
        - `password`: current authentication password (cleared once the
          salted password has been derived)
        - `authzid`: requested authorization id
    """
    # pylint: disable-msg=R0902
    def __init__(self, hash_name, channel_binding):
        """Initialize a `SCRAMClientAuthenticator` object.
        :Parameters:
            - `hash_name`: hash function name, e.g. ``"SHA-1"``
            - `channel_binding`: `True` to enable channel binding
        :Types:
            - `hash_name`: `unicode`
            - `channel_binding`: `bool`
        """
        SCRAMOperations.__init__(self, hash_name)
        self.name = "SCRAM-{0}".format(hash_name)
        if channel_binding:
            self.name += "-PLUS"
        self.channel_binding = channel_binding
        self.username = None
        # FIX: password and _salted_password previously held the literal
        # redaction placeholder "<PASSWORD>", which is not valid Python;
        # both are simply "not set yet" and must start as None.
        self.password = None
        self.authzid = None
        self._c_nonce = None
        self._server_first_message = False
        self._client_first_message_bare = False
        self._gs2_header = None
        self._finished = False
        self._auth_message = None
        self._salted_password = None
        self._cb_data = None
    @classmethod
    def are_properties_sufficient(cls, properties):
        # Authentication cannot start without a username and a password.
        return "username" in properties and "password" in properties
    def start(self, properties):
        """Build and return the client-first-message.
        :Parameters:
            - `properties`: mapping with at least ``"username"`` and
              ``"password"``; optional ``"authzid"``, ``"nonce_factory"``,
              ``"channel-binding"`` and ``"enabled_mechanisms"``.
        :return: the client-first-message
        :returntype: `bytes`
        """
        self.username = properties["username"]
        self.password = properties["password"]
        self.authzid = properties.get("authzid", "")
        c_nonce = properties.get("nonce_factory", default_nonce_factory)()
        if not VALUE_CHARS_RE.match(c_nonce):
            # Nonce must consist of printable characters without ','.
            c_nonce = standard_b64encode(c_nonce)
        self._c_nonce = c_nonce
        if self.channel_binding:
            cb_data = properties.get("channel-binding")
            if not cb_data:
                raise ValueError("No channel binding data provided")
            if "tls-unique" in cb_data:
                cb_type = "tls-unique"
            elif "tls-server-end-point" in cb_data:
                cb_type = "tls-server-end-point"
            elif cb_data:
                # FIX: dict.keys() is not indexable on Python 3 —
                # take an arbitrary first key via an iterator instead.
                cb_type = next(iter(cb_data))
            self._cb_data = cb_data[cb_type]
            cb_flag = b"p=" + cb_type.encode("utf-8")
        else:
            plus_name = self.name + "-PLUS"
            if plus_name in properties.get("enabled_mechanisms", []):
                # -PLUS is enabled (supported) on our side,
                # but was not selected - that means it was not included
                # in the server features
                cb_flag = b"y"
            else:
                cb_flag = b"n"
        if self.authzid:
            authzid = b"a=" + self.escape(self.authzid.encode("utf-8"))
        else:
            authzid = b""
        gs2_header = cb_flag + b"," + authzid + b","
        self._gs2_header = gs2_header
        nonce = b"r=" + c_nonce
        client_first_message_bare = (b"n=" +
                self.escape(self.username.encode("utf-8")) + b"," + nonce)
        self._client_first_message_bare = client_first_message_bare
        client_first_message = gs2_header + client_first_message_bare
        return client_first_message
    def challenge(self, challenge):
        """Process a challenge and return the response.
        :Parameters:
            - `challenge`: the challenge from server.
        :Types:
            - `challenge`: `bytes`
        :return: the response
        :returntype: bytes
        :raises: `BadChallengeException`
        """
        # pylint: disable=R0911
        if not challenge:
            raise BadChallengeException('Empty challenge')
        if self._server_first_message:
            # Second round: this must be the server-final-message.
            return self._final_challenge(challenge)
        match = SERVER_FIRST_MESSAGE_RE.match(challenge)
        if not match:
            raise BadChallengeException("Bad challenge syntax: {0!r}".format(challenge))
        self._server_first_message = challenge
        mext = match.group("mext")
        if mext:
            raise BadChallengeException("Unsupported extension received: {0!r}".format(mext))
        nonce = match.group("nonce")
        if not nonce.startswith(self._c_nonce):
            # Server must echo our client nonce as a prefix.
            raise BadChallengeException("Nonce does not start with our nonce")
        salt = match.group("salt")
        try:
            salt = a2b_base64(salt)
        except ValueError:
            raise BadChallengeException("Bad base64 encoding for salt: {0!r}".format(salt))
        iteration_count = match.group("iteration_count")
        try:
            iteration_count = int(iteration_count)
        except ValueError:
            raise BadChallengeException("Bad iteration_count: {0!r}".format(iteration_count))
        return self._make_response(nonce, salt, iteration_count)
    def _make_response(self, nonce, salt, iteration_count):
        """Make a response for the first challenge from the server.
        :return: the response
        :returntype: bytes
        """
        self._salted_password = self.Hi(self.Normalize(self.password), salt,
                                                            iteration_count)
        self.password = None # not needed any more
        if self.channel_binding:
            channel_binding = b"c=" + standard_b64encode(self._gs2_header +
                                                                    self._cb_data)
        else:
            channel_binding = b"c=" + standard_b64encode(self._gs2_header)
        # pylint: disable=C0103
        client_final_message_without_proof = (channel_binding + b",r=" + nonce)
        client_key = self.HMAC(self._salted_password, b"Client Key")
        stored_key = self.H(client_key)
        auth_message = ( self._client_first_message_bare + b"," +
                                    self._server_first_message + b"," +
                                        client_final_message_without_proof )
        self._auth_message = auth_message
        client_signature = self.HMAC(stored_key, auth_message)
        client_proof = self.XOR(client_key, client_signature)
        proof = b"p=" + standard_b64encode(client_proof)
        client_final_message = (client_final_message_without_proof + b"," +
                                                                    proof)
        return client_final_message
    def _final_challenge(self, challenge):
        """Process the second challenge from the server and return the
        response.
        :Parameters:
            - `challenge`: the challenge from server.
        :Types:
            - `challenge`: `bytes`
        :raises: `ExtraChallengeException`, `BadChallengeException`, `ServerScramError`, or `BadSuccessException`
        """
        if self._finished:
            # FIX: this exception was previously *returned* instead of
            # raised, silently ignoring an extra challenge; the docstring
            # above documents it as raised.
            raise ExtraChallengeException()
        match = SERVER_FINAL_MESSAGE_RE.match(challenge)
        if not match:
            raise BadChallengeException("Bad final message syntax: {0!r}".format(challenge))
        error = match.group("error")
        if error:
            raise ServerScramError("{0!r}".format(error))
        verifier = match.group("verifier")
        if not verifier:
            raise BadSuccessException("No verifier value in the final message")
        server_key = self.HMAC(self._salted_password, b"Server Key")
        server_signature = self.HMAC(server_key, self._auth_message)
        if server_signature != a2b_base64(verifier):
            raise BadSuccessException("Server verifier does not match")
        self._finished = True
    def finish(self, data):
        """Process success indicator from the server.
        Process any addiitional data passed with the success.
        Fail if the server was not authenticated.
        :Parameters:
            - `data`: an optional additional data with success.
        :Types:
            - `data`: `bytes`
        :return: username and authzid
        :returntype: `dict`
        :raises: `BadSuccessException`"""
        if not self._server_first_message:
            raise BadSuccessException("Got success too early")
        if self._finished:
            return {"username": self.username, "authzid": self.authzid}
        else:
            # Success may carry the server-final-message as payload.
            self._final_challenge(data)
            if self._finished:
                return {"username": self.username,
                        "authzid": self.authzid}
            else:
                raise BadSuccessException("Something went wrong when processing additional"
                                          " data with success?")
class SCRAMServerAuthenticator(SCRAMOperations):
"""Provides SCRAM SASL authentication for a server.
"""
def __init__(self, hash_name, channel_binding, password_database):
"""Initialize a `SCRAMClientAuthenticator` object.
:Parameters:
- `hash_function_name`: hash function name, e.g. ``"SHA-1"``
- `channel_binding`: `True` to enable channel binding
:Types:
- `hash_function_name`: `unicode`
- `channel_binding`: `bool`
"""
SCRAMOperations.__init__(self, hash_name)
self.name = "SCRAM-{0}".format(hash_name)
if channel_binding:
self.name += "-PLUS"
self.channel_binding = channel_binding
self.properties = None
self.out_properties = None
self.password_database = password_database
self._client_first_message_bare = None
self._stored_key = None
self._server_key = None
def start(self, properties, initial_response):
self.properties = properties
self._client_first_message_bare = None
self.out_properties = {}
if not initial_response:
return b""
return self.response(initial_response)
def response(self, response):
if self._client_first_message_bare:
logger.debug("Client final message: {0!r}".format(response))
return self._handle_final_response(response)
else:
logger.debug("Client first message: {0!r}".format(response))
return self._handle_first_response(response)
def _handle_first_response(self, response):
match = CLIENT_FIRST_MESSAGE_RE.match(response)
if not match:
raise NotAuthorizedException("Bad response syntax: {0!r}".format(response))
mext = match.group("mext")
if mext:
raise NotAuthorizedException("Unsupported extension received: {0!r}".format(mext))
gs2_header | |
# gpsTime.py
import numpy as np
from math import modf
import datetime as dt
import calendar
def cal2jd(yr,mn,dy) :
    """Convert a calendar date to a Julian date.

    Uses the algorithm from "Practical Ephemeris Calculations"
    (Springer-Verlag, 1989).  B.C. dates use the astronomical year
    convention (2 BC = -1 yr).

    Input:
        yr : YYYY (int)
        mn : MM 01 to 12 (int)
        dy : DD 01 to 31 (int)
    Output:
        jd : julian date (float), or None for the nonexistent dates
             between 1582-10-05 and 1582-10-14.
    """
    # January/February count as months 13/14 of the previous year.
    if mn > 2:
        adj_year, adj_month = yr, mn
    else:
        adj_year, adj_month = yr - 1, mn + 12
    # Serial day numbers bracketing the Julian -> Gregorian changeover.
    julian_end = 4.5 + 31. * (10. + 12. * 1582.)       # 1582-10-04 noon
    gregorian_start = 15.5 + 31. * (10. + 12. * 1582.)  # 1582-10-15 noon
    serial = dy + 31. * (mn + 12. * yr)
    if serial <= julian_end:
        century_corr = -2
    elif serial >= gregorian_start:
        century_corr = np.fix(adj_year / 400.) - np.fix(adj_year / 100.)
    else:
        # Dates between October 5 & 15, 1582 do not exist.
        return None
    year_days = 365.25 * adj_year if adj_year > 0 else 365.25 * adj_year - 0.75
    return (np.fix(year_days) + np.fix(30.6001 * (adj_month + 1)) +
            century_corr + 1720996.5 + dy)
def yyyydoy2jd(year,doy,hh=0,mm=0,ss=0.0):
    """Convert a year + day-of-year (+ optional time of day) into a
    modified Julian day number.

    Usage: jd = yyyydoy2jd(year,doy,hh,mm,ss)
    Input:  year - 4 digit integer
            doy  - 3 digit, or less integer, (1 <= doy <= 366)
            hh   - 2 digit, or less int, (0 <= hh < 24) (not required)
            mm   - 2 digit, or less int, (0 <= mm < 60) (not required)
            ss   - float (not required)
    Output: Julian date minus 2400000.5 (i.e. MJD) as a float
    """
    # Split seconds into whole seconds and microseconds
    # (10e5 == 1e6, so the fraction becomes microseconds).
    ms,sec = modf(float(ss))
    ms = ms * 10e5
    # FIX: the month/day literals were written as '01', which is
    # Python-2-only octal syntax and a SyntaxError on Python 3.
    dto = dt.datetime(int(year), 1, 1, int(hh), int(mm), int(sec), int(ms))
    # datetime handles leap years when stepping to the requested doy.
    dto = dto + dt.timedelta(days=(int(doy) - 1))
    mn = dto.month
    dy = dto.day
    jd = cal2jd(int(year),int(mn),int(dy))
    # Add the time-of-day fraction.
    jd = jd + float(hh)/24. + float(mm)/60./24. + float(sec)/3600./24.
    return jd - 2400000.5
def jd2gps(jd):
    """Convert a Julian date to GPS week number (since 1980-01-06) and
    seconds of week.

    Usage:   [gpsweek,sow,rollover]=jd2gps(jd)
    Input:   jd - Julian date
    Output:  gpsweek  - GPS week number (int)
             sow      - seconds of week since 0 hr, Sun. (float)
             rollover - number of GPS week rollovers (modulus 1024), as
                        a plain int (previously a numpy scalar; the
                        original comment flagged this).
    """
    jdgps = cal2jd(1980, 1, 6)  # beginning of GPS week numbering
    nweek = int(np.fix((jd - jdgps) / 7.))
    sow = (jd - (jdgps + nweek * 7)) * 3600 * 24
    # int() truncates toward zero, matching the old np.fix behaviour.
    rollover = int(nweek / 1024)  # rollover every 1024 weeks
    gpsweek = int(nweek)
    return gpsweek, sow, rollover
def jd2cal(jd):
    """Convert a Julian date to a calendar date.

    Algorithm from "Practical Ephemeris Calculations"
    (Springer-Verlag, 1989); astronomical years for B.C. dates
    (2 BC = -1 yr).  Non-vectorized.

    Usage: [yr, mn, dy] = jd2cal(jd)
    Input:  jd - Julian date
    Output: yr - year of calendar date
            mn - month of calendar date
            dy - day of calendar date (including decimal)
    """
    day_count = np.fix(jd + 0.5)
    if day_count < 2299161. :
        shifted = day_count + 1524.
    else:
        # Gregorian calendar: apply the century correction.
        centuries = np.fix( (day_count - 1867216.25) / 36524.25 )
        shifted = day_count + centuries - np.fix(centuries / 4.) + 1525.
    year_est = np.fix( (shifted - 122.1) / 365.25 )
    elapsed_days = np.fix(365.25 * year_est)
    month_est = np.fix( (shifted - elapsed_days) / 30.6001 )
    # np.remainder(jd+0.5, day_count) recovers the fractional day.
    dy = (shifted - elapsed_days - np.fix(30.6001 * month_est)
          + np.remainder(jd + 0.5, day_count))
    mn = month_est - 1. - 12. * np.fix(month_est / 14.)
    yr = year_est - 4715. - np.fix( (7. + mn) / 10. )
    return yr, mn, dy
def jd2doy(jd):
    """Convert a Julian date to year and day of year.

    Usage: [yr,doy] = jd2doy(jd)
    Input:  jd  - Julian date
    Output: yr  - year (number)
            doy - day of year, as a zero-padded three-character
                  string (e.g. "007")
    """
    yr, _mn, _dy = jd2cal(jd)
    day_of_year = jd - cal2jd(yr, 1, 0)
    # Zero-pad to three characters for downstream formatting.
    return yr, "%03d" % day_of_year
def yyyy2yy(year):
    """Return the two-digit year for a four-digit year.

    yy = yyyy2yy(YYYY)
    Input:  year - year as an int (or int-convertible)
    Output: yy   - last two digits of YYYY, as an int

    Replaces the old string-slicing hack (str(year)[-2] + str(year)[-1])
    with arithmetic; identical for 2+ digit years and no longer raises
    IndexError for single-digit years.
    """
    return int(year) % 100
def dateTime2gpssow(dt):
    """Convert a datetime object into GPS week and seconds of week.

    Usage: week,sow = dateTime2gpssow(dateTime)
    Input:  dt   - python datetime object (note: the parameter shadows
                   the datetime module alias inside this function)
    Output: week - gps week (int)
            sow  - seconds into gpsweek since 0 hr, Sunday (float)
    """
    fractional_day = (dt.day + dt.hour / 24. + dt.minute / 1440.
                      + dt.second / 86400.)
    week, sow, _rollover = jd2gps(cal2jd(dt.year, dt.month, fractional_day))
    return week, sow
def ydhms2dt(year, doy, hh, mm, ss):
    """Convert year/day-of-year/time-of-day into a datetime object.

    Usage:  dto = ydhms2dt(year, doy, hh, mm, ss)
    Input:  year - 4 digit integer
            doy  - day of year (1 <= doy <= 366)
            hh   - hour (0 <= hh < 24)
            mm   - minute (0 <= mm < 60)
            ss   - seconds (float; the fraction becomes microseconds)
    Output: dto  - a datetime object
    """
    # Split float seconds into whole seconds and microseconds
    # (0 <= sec < 60, 0 <= microseconds < 1,000,000).
    frac, sec = modf(float(ss))
    microseconds = int(frac * 1e6)
    # Py3 fix: the original used the octal-style literals ``01`` for
    # month/day, which is a SyntaxError on Python 3.
    dto = dt.datetime(int(year), 1, 1, int(hh), int(mm), int(sec), microseconds)
    # Shift from Jan 1 to the requested day of year.
    return dto + dt.timedelta(days=int(doy) - 1)
def ymdhms2dt(year, month, day, hh, mm, ss):
    """Convert a calendar date and time of day into a datetime object.

    Usage:  dto = ymdhms2dt(year, month, day, hh, mm, ss)
    Input:  year  - 4 digit integer
            month - integer (1 => January)
            day   - integer
            hh    - hour (0 <= hh < 24)
            mm    - minute (0 <= mm < 60)
            ss    - seconds (float)
    Output: dto   - a datetime object

    NOTE(review): the fractional part of ``ss`` is discarded — the
    microsecond field is deliberately set to 0 (a variant passing the
    fraction through was commented out in the original).
    """
    _, sec = modf(float(ss))
    return dt.datetime(int(year), int(month), int(day),
                       int(hh), int(mm), int(sec), 0)
def jd2mdt(jd):
    """Convert a Julian date into a matplotlib date number.

    Matplotlib date plotting counts days since 0001-01-01 UTC (plus 1).

    Usage:  mp_ts = jd2mdt(jd)
    Input:  jd    - Julian date
    Output: mp_ts - matplotlib time stamp (float days from 0001-01-01)
    """
    SECONDS_PER_DAY = 86400
    year, month, day_frac = jd2cal(jd)
    # Peel the fractional day apart into day/hour/minute/second fields;
    # sub-second residue is discarded (microseconds forced to 0).
    day = int(np.fix(day_frac))
    hours = (day_frac - day) * 24.
    hh = int(np.fix(hours))
    minutes = (hours - hh) * 60.
    mm = int(np.fix(minutes))
    seconds = (minutes - mm) * 60.
    sec = int(np.fix(seconds))
    dto = dt.datetime(int(year), int(month), day, hh, mm, sec, 0)
    delta = dto - dt.datetime(1, 1, 1)
    return delta.days + 1 + (1000000 * delta.seconds + delta.microseconds) / 1e6 / SECONDS_PER_DAY
def ydhms2mdt(year, doy, hh, mm, ss):
    """Convert year/day-of-year/time-of-day into a matplotlib date number.

    Matplotlib date plotting counts days since 0001-01-01 UTC (plus 1).

    Usage:  mp_ts = ydhms2mdt(year, doy, hh, mm, ss)
    Input:  year - 4 digit integer
            doy  - day of year (1 <= doy <= 366)
            hh   - hour (0 <= hh < 24)
            mm   - minute (0 <= mm < 60)
            ss   - seconds (float; the fraction is kept as microseconds)
    Output: mp_ts - matplotlib time stamp (float days from 0001-01-01)
    """
    DAY = 86400
    # Split float seconds into whole seconds and microseconds.
    frac, sec = modf(float(ss))
    # Py3 fix: the original used the octal-style literals ``01`` for
    # month/day, which is a SyntaxError on Python 3.
    dto = dt.datetime(int(year), 1, 1, int(hh), int(mm), int(sec), int(frac * 1e6))
    dto += dt.timedelta(days=int(doy) - 1)
    td = dto - dt.datetime(1, 1, 1)
    return td.days + 1 + (1000000 * td.seconds + td.microseconds) / 1e6 / DAY
def ydhms2decyr(year,doy,hh=0,mm=0,ss=0.0):
"""
ydhms2decyr(year,doy,hh,mm,ss)
Convert | |
import numpy as np
import pandas as pd
import re
import warnings
import scipy.optimize as opt
from scipy.stats import norm, f, chi2, ncf, ncx2, binom
from scipy.special import ncfdtrinc, chndtrinc
import matplotlib.pyplot as plt
import seaborn as sns
from poibin import PoiBin
warnings.filterwarnings("ignore")
def adjust_spines(ax, spines):
    """Show only the named spines on *ax*, offset outward by 10 points.

    Spines not listed in *spines* are hidden, and tick marks are removed
    from any axis whose spine is hidden.
    """
    for name, spine in ax.spines.items():
        if name in spines:
            spine.set_position(('outward', 10))
        else:
            # Hide the spine entirely.
            spine.set_color('none')
    # Only draw ticks on the sides that still have a spine.
    if 'left' in spines:
        ax.yaxis.set_ticks_position('left')
    else:
        ax.yaxis.set_ticks([])
    if 'bottom' in spines:
        ax.xaxis.set_ticks_position('bottom')
    else:
        ax.xaxis.set_ticks([])
    return None
# noinspection PyProtectedMember,PyProtectedMember,PyProtectedMember,PyProtectedMember,PyProtectedMember,PyProtectedMember,PyProtectedMember,PyProtectedMember
class PCurve(object):
__version__ = "0.1.0"
__pcurve_app_version__ = "4.06"
_REGEX_STAT_TEST = re.compile(
"""
^ # Beginning of string
(?P<testtype>chi2|F|t|r|z|p) # Matches the type of test statistic
(\((?P<df1>\d+)(,)?(?P<df2>\d+)?\))? #Matches the degrees of freedom
=(-?) #Matches an equal sign with a potential minus sign
(?P<stat>(\d*)\.(\d+)) # Matches the test statistics
""",
re.IGNORECASE | re.VERBOSE)
_POWER_LEVELS = [0.051, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.2,
0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.3, 0.31, 0.32, 0.33, 0.34, 0.35, 0.36,
0.37, 0.38, 0.39, 0.4, 0.41, 0.42, 0.43, 0.44, 0.45, 0.46, 0.47, 0.48, 0.49, 0.5, 0.51, 0.52, 0.53,
0.54, 0.55, 0.56, 0.57, 0.58, 0.59, 0.6, 0.61, 0.62, 0.63, 0.64, 0.65, 0.66, 0.67, 0.68, 0.69, 0.7,
0.71, 0.72, 0.73, 0.74, 0.75, 0.76, 0.77, 0.78, 0.79, 0.8, 0.81, 0.82, 0.83, 0.84, 0.85, 0.86,
0.87, 0.88, 0.89, 0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99]
@staticmethod
def _bound_pvals(pvals):
"""
Bound a p-value to avoid precision error
:param pvals: The p-values
:return: The bounded p-values 2.2e-16 < p < 1-2.2e-16
"""
return np.where(pvals < 2.2e-16, 2.2e-16, np.where(pvals > 1 - 2.2e-16, 1 - 2.2e-16, pvals))
@staticmethod
def _format_pval(p):
if p < .0001:
return "< .0001"
elif p < .9999:
pstring = f"= {p:.4f}".strip("0")
return pstring
else:
return "> .9999"
@staticmethod
def _compute_prop_lower_33(pcrit, family, df1, df2, p, ncp33):
"""
Compute the proportion of p-values that is expected to be smaller than `pcrit` under 33% power.
:param pcrit: The p-values
:return: The proportion
"""
# Transform the critical p-value `pcrit` into the corresponding critical value for the dist.
critval_f = f.ppf(1 - pcrit, df1, df2)
critval_chi = chi2.ppf(1 - pcrit, df1)
# Compute the proportion of values that are expected to be larger than this under 33% power
exp_smaller_f = ncf._sf(critval_f, df1, df2, ncp33)
exp_smaller_chi = ncx2._sf(critval_chi, df1, ncp33)
# Return the appropriate stats for the family
return np.where(p > .05, np.nan, np.where(family == "F", exp_smaller_f, exp_smaller_chi))
@staticmethod
def _compute_stouffer_z(pp):
isnotnan = ~np.isnan(pp)
pp_notnan = pp[isnotnan]
return np.sum(norm._ppf(pp_notnan)) / np.sqrt(isnotnan.sum())
@staticmethod
def _compute_ncp_f(df1, df2, power=1 / 3):
"""
Uses the inverse function of the non-central F distribution with regard to NCP to recover the NCP corresponding
to a given level of power for a given F test.
:param df1: Numerator degrees of freedom
:param df2: Denominator degrees of freedom
:param power: Desired level of power
:return:
"""
critval = f._ppf(.95, df1, df2)
return ncfdtrinc(df1, df2, 1 - power, critval)
@staticmethod
def _compute_ncp_chi(df, power=1 / 3):
"""
Uses the inverse function of the non-central Chi2 distribution with regard to NCP to recover the NCP
corresponding to a given level of power for a given Chi2 test.
:param df: Degrees of freedom
:param power: Desired level of power
:return:
"""
critval = chi2._ppf(.95, df)
return chndtrinc(critval, df, 1 - power)
def _compute_ncp_all(self, power=1 / 3):
family = self._df_results["family"].values
df1, df2 = self._df_results[["df1", "df2"]].to_numpy().T
return np.where(family == "F", self._compute_ncp_f(df1, df2, power),
self._compute_ncp_chi(df1, power))
    def _parse_result(self, result_str):
        """
        Parse a statistical result, entered as a string.

        :param result_str: A statistical result (e.g., F(1, 234) = 12.32)
        :return: tuple (cleaned input string, family ("F" or "Chi2"),
                 df1, df2, stat) with the statistic converted onto the
                 common F / Chi2 scale (t and r map to F; z and p map
                 to Chi2).
        :raises ValueError: if the string is not recognized or required
                 degrees of freedom are missing.
        """
        # Normalize whitespace and the many Unicode variants of '-' so
        # the class-level regex only has to deal with ASCII.
        result_str_replaced = (
            result_str
            .replace(" ", "")  # Whitespaces
            .replace("\u2009", "")  # Thin whitespaces
            .replace("\u2212", "-")  # All possible symbols for 'minus'
            .replace("\u2013", "-")
            .replace("\uFE63", "-")
            .replace("\u002D", "-")
            .replace("\uFF0D", "-")
        )
        match = self._REGEX_STAT_TEST.match(result_str_replaced)  # See regex above
        if match is None:  # Statistical test not recognized
            raise ValueError(f"The input {result_str} is not recognized. Please correct it")
        test_props = match.groupdict()  # Test recognized, accessing properties
        test_type = test_props["testtype"]
        df1_raw = test_props["df1"]
        df2_raw = test_props["df2"]
        stat_raw = test_props["stat"]
        # Testing that degrees of freedom are correctly entered:
        # F needs both dfs; t/r/chi2 need one; z and p need none.
        if (test_type == "F") and ((test_props["df1"] is None) or (test_props["df2"] is None)):
            raise ValueError(
                f"Error in {result_str}: The test statistics {test_type} requires you to specify the numerator \
and denominator degrees of freedom.")
        if (test_type not in ["z", "p"]) and (test_props["df1"] is None):
            raise ValueError(
                f"Error in {result_str}: The test statistics {test_type} requires you to specify the degrees of \
freedom.")
        stat_raw = float(stat_raw)
        if test_type == "F":
            family = "F"
            df1 = float(df1_raw)
            df2 = float(df2_raw)
            stat = stat_raw
        elif test_type == "t":
            # t(df) squared is distributed F(1, df).
            family = "F"
            df1 = 1
            df2 = float(df1_raw)
            stat = stat_raw ** 2
        elif test_type == "r":
            # Correlation: t = r / sqrt((1 - r^2) / df), then squared.
            family = "F"
            df1 = 1
            df2 = float(df1_raw)
            stat = (stat_raw / (np.sqrt((1 - stat_raw ** 2) / df2))) ** 2
        elif test_type == "chi2":
            family = "Chi2"
            df1 = float(df1_raw)
            df2 = None
            stat = stat_raw
        elif test_type == "z":
            # z^2 follows a Chi2 with 1 df.
            family = "Chi2"
            df1 = 1
            df2 = None
            stat = stat_raw ** 2
        else:
            # A raw two-sided p-value: recover |z|, then square it.
            family = "Chi2"
            df1 = 1
            df2 = None
            stat = norm.ppf(1 - stat_raw / 2) ** 2
        return result_str_replaced, family, df1, df2, stat
def _compute_pvals(self):
family = self._df_results["family"].values
df1, df2, stat = self._df_results[["df1", "df2", "stat"]].to_numpy().T
pval = np.where(family == "F", f._sf(stat, df1, df2), chi2._sf(stat, df1))
return self._bound_pvals(pval)
    def _compute_stouffer_z_at_power(self, power):
        """Stouffer Z of the observed significant tests under a
        hypothetical level of ``power``.

        NOTE(review): relies on ``self._sig_f_tests`` and
        ``self._sig_chi_tests``, which are defined elsewhere in the
        class — presumably (df1, df2, stat) and (df1, stat) arrays
        restricted to significant tests; confirm against the full file.
        """
        # NCP and pp-values of F tests: conditional probability of a
        # statistic at least this large given significance, under `power`.
        df1_f, df2_f, stat_f = self._sig_f_tests
        ncp_f_est = self._compute_ncp_f(df1_f, df2_f, power)
        pp_f_est = (ncf._cdf(stat_f, df1_f, df2_f, ncp_f_est) - (1 - power)) / power
        # NCP and pp-values of Chi2 tests
        df1_chi, stat_chi = self._sig_chi_tests
        ncp_chi_est = self._compute_ncp_chi(df1_chi, power)
        pp_chi_est = (ncx2._cdf(stat_chi, df1_chi, ncp_chi_est) - (1 - power)) / power
        # pp-values for all tests, clamped away from 0/1 before probit.
        pp_est = self._bound_pvals(np.hstack([pp_f_est, pp_chi_est]))
        stouffer_at_power = self._compute_stouffer_z(pp_est)
        return stouffer_at_power
def _solve_power_for_pct(self, pct):
z = norm._ppf(pct)
error = lambda est: self._compute_stouffer_z_at_power(est) - z
return opt.brentq(error, .0501, .9999)
def _compute_ppvals_null(self, pcurvetype="full"):
"""
Compute the pp-value of the p-values under the null. It simply stretches the p-value over the interval [0, 1]
:param pcurvetype: The type of p-curve (full or half)
:return:
"""
p = self._df_results["p"].values
if pcurvetype == "full":
return np.array([self._bound_pvals(x * 20) if x < .05 else np.nan for x in p])
else:
return np.array([self._bound_pvals(x * 40) if x < .025 else np.nan for x in p])
    def _compute_ppvals_33(self, pcurvetype="full"):
        """pp-values of the significant tests under the 33%-power benchmark.

        :param pcurvetype: "full" (keep p < .05) or "half" (keep p < .025)
        :return: array of pp-values (NaN for tests above the threshold)
        """
        family = self._df_results["family"].values
        df1, df2, stat, pvals, ncp33 = self._df_results[["df1", "df2", "stat", "p", "ncp33"]].to_numpy().T
        if pcurvetype == "full":
            pthresh = .05  # Only keep p-values smaller than .05
            propsig = 1 / 3  # Under 33% power, 1/3 of p-values should be lower than .05
        else:
            pthresh = .025  # Only keep p-values smaller than .025
            # We don't know which prop of p-values should be smaller than .025 under 33% power, so compute it
            propsig = 3 * self._compute_prop_lower_33(.025, family, df1, df2, pvals, ncp33)
        # We then stretch the ppval on the [0, 1] interval.
        # Both family formulas are computed; np.where selects per row.
        pp_33_f = (1 / propsig) * (ncf.cdf(stat, df1, df2, ncp33) - (1 - propsig))
        pp_33_chi = (1 / propsig) * (ncx2.cdf(stat, df1, ncp33) - (1 - propsig))
        pp_33 = np.where(family == "F", pp_33_f, pp_33_chi)
        # Clamp kept tests away from 0/1; drop non-significant ones.
        return np.array([self._bound_pvals(pp) if p < pthresh else np.nan for (p, pp) in zip(pvals, pp_33)])
def _get_33_power_curve(self):
family = self._df_results["family"].values
df1, df2, p, ncp33 = self._df_results[["df1", "df2", "p", "ncp33"]].to_numpy().T
cprop = lambda x: self._compute_prop_lower_33(x, family, df1, df2, p, ncp33)
propsig = np.array([cprop(c) for c in [.01, .02, .03, .04, .05]])
diffs = np.diff(propsig, axis=0,
prepend=0) # Difference of CDFs: Likelihood of p-values falling between each value
props = np.nanmean(3 * diffs, axis=1)
return props
def _run_binom_test(self, alternative="null"):
family = self._df_results["family"].values
df1, df2, p, ncp33 = self._df_results[["df1", "df2", "p", "ncp33"]].to_numpy().T
k_below_25 = self._n_tests['p025']
if alternative == "null":
return binom(n=self._n_tests['p05'], p=.5).sf(k_below_25 - 1)
| |
self.cmdForms['loadMacro'].descr.entryByName
ebn['loadMacro']['widget'].configure(state='disabled')
    def __call__(self, macroName, macroFile, menuBar='menuRoot',
                 menuButton='Macros', menuEntry=None, cascade=None, **kw):
        """None<---loadMacro(macroName, macroFile, menuBar='menuRoot',
        menuButton='Macros', menuEntry=None, cascade=None, **kw)

        Load macro ``macroName`` from ``macroFile`` and attach it to the
        GUI under the given menu bar/button (and optional cascade).
        """
        # NOTE(review): **kw is accepted but not forwarded to doitWrapper,
        # so extra keywords are silently dropped — confirm this is intended.
        self.doitWrapper(macroName, macroFile, menuBar=menuBar,
                         menuButton=menuButton, menuEntry=menuEntry,
                         cascade=cascade)
    def doit(self, macroName, macroFile, menuBar='menuRoot',
             menuButton='Macros', menuEntry=None, cascade=None):
        """Load ``macroName`` from ``macroFile`` and register it as a
        viewer command under the requested menu location."""
        # Re-parse the macro file only when it differs from the cached
        # one; otherwise reuse the cached names/functions/docstrings.
        # NOTE(review): self.macroFile is presumably set inside
        # getMacros — confirm against the rest of the file.
        if not hasattr(self, 'macroFile') or macroFile != self.macroFile:
            names, macros, docs = self.getMacros(macroFile)
        else:
            names = self.macNames
            macros = self.macMacros
            docs = self.macDoc
        # Nothing to register if the file yielded no macros.
        if len(names) == 0 or len(macros)==0 or len(docs)==0: return
        macIndex = names.index(macroName)
        macro = macros[macIndex]
        from VFCommand import Command, CommandGUI
        # Wrap the plain function as a viewer Command with a menu entry.
        c = Command(func=macro)
        g = CommandGUI()
        if cascade:
            g.addMenuCommand(menuBar, menuButton, menuEntry,
                             cascadeName=cascade)
        else:
            g.addMenuCommand(menuBar, menuButton, menuEntry)
        self.vf.addCommand(c, macro.__name__, g)
## class loadMacroCommand(Command):
## """
## Command to load dynamically macro commands.
## Using the Gui the user can open a macro file. The macros available in
## that file are then displayed in a list chooser. When a macro is selected
## in the listchooser, its documentation string is deisplayed and a default
## name for the macro in the viewer is suggested. The user can also specify
## a menuBar, a menuButton as well as an optional cascade name.
## """
## active = 0
## def getMacros(self, file):
## """load all macro functions from file"""
## self.file = file
## _dir, file = os.path.split(file)
## if file[-3:]=='.py': file = file[:-3]
## import sys
## sys.path.insert(0, _dir)
## m = __import__(file, globals(), locals(), [])
## sys.path = sys.path[1:]
## m.__dict__['self'] = self.vf
## import types
## names = []
## macros = []
## doc = []
## for key,value in m.__dict__.items():
## if type(value)==types.FunctionType:
## names.append(key)
## macros.append(value)
## doc.append(value.__doc__)
## return names, macros, doc
## def loadMacLib_cb(self, filename):
## """Call back function for 'Open Macro library' button"""
## # self.ifd[0]['widget'] is the 'Open Macro Library' button
## self.ifd[0]['widget'].configure(relief='sunken')
## #file = os.path.split(filename)[1][:-3]
## names, macros, docs = self.getMacros(filename)
## self.macNames = names
## self.macMacros = macros
## self.macDoc = docs
## # Get a handle to the listchooser widget
## lc = self.ifd[1]['widget']
## lc.clear()
## if len(names) == len(docs):
## entries = map(lambda x, y: (x, y), names, docs)
## else:
## entries = map(lambda x: (x, None), names)
## map(lc.add, entries)
## self.ifd[0]['widget'].configure(relief='raised')
## # set cascade name to libary Name - "mac"
## w = self.ifd[5]['widget']
## w.delete(0, 'end')
## w.insert(0, os.path.split(filename)[1][:-3])
## def setDefaultEntryName(self, event=None):
## """Call back function for the listchooser showing macros.
## gets the name of the currently selected macro and puts it in the entry
## type in"""
## # enable add button
## self.ifd.entryByName['Load Macro']['widget'].configure(state='normal')
## # put default name into name entry
## val = self.ifd[1]['widget'].get()
## w = self.ifd[4]['widget']
## w.delete(0, 'end')
## w.insert(0, val[0])
## self.selectedMac = val[0]
## def addMacro(self, macro, menuBar, menuButton, name, cascade=None):
## from VFCommand import Command, CommandGUI
## c = Command(func=macro)
## g = CommandGUI()
## if cascade:
## g.addMenuCommand(menuBar, menuButton, name, cascadeName=cascade)
## else:
## g.addMenuCommand(menuBar, menuButton, name)
## self.vf.addCommand(c, macro.__name__, g)
## ## g.register(self.vf)
## self.log(file=self.file, macroName=macro.__name__, menuBar=menuBar,
## menuButton=menuButton, name=name, cascade=cascade)
## def loadMacro_cb(self, event=None):
## bar = self.ifd[2]['widget'].get()
## menub = self.ifd[3]['widget'].get()
## name = self.ifd[4]['widget'].get()
## cascade = self.ifd[5]['widget'].get()
## if cascade=='': cascade=None
## macIndex = self.macNames.index(self.selectedMac)
## macFunc = self.macMacros[macIndex]
## self.addMacro(macFunc, bar, menub, name, cascade)
## self.ifd[0]['widget'].configure(relief='raised')
## def customizeGUI(self):
## """create the cascade menu for selecting modules to be loaded"""
## self.selectedMac = ''
## # create the for descriptor
## ifd = self.ifd = InputFormDescr(title='Load macro commands')
## if len(self.vf.libraries) is None:
## modu = __import__('ViewerFramework')
## else:
## modu = __import__(self.vf.libraries[0])
## idir = os.path.split(modu.__file__)[0] + '/Macros'
## if not os.path.exists(idir):
## idir = None
## ifd.append( {'widgetType':'OpenButton', 'text':'Open Macro library ...',
## 'types':[('Macro Module Library', '*Mac.py'),
## ('Any Python Function', '*.py')],
## 'idir':idir,
## 'title':'Open Macro File',
## 'callback': self.loadMacLib_cb } )
## ifd.append({'title':'Choose a macro',
## 'widgetType':ListChooser,
## 'wcfg':{
## 'command':self.setDefaultEntryName,
## 'title':'Choose a macro'},
## 'gridcfg':{'sticky':Tkinter.E+Tkinter.W}} )
## ifd.append({'widgetType':Tkinter.Entry,
## 'defaultValue':'menuRoot',
## 'wcfg':{'label':'menu bar'},
## 'gridcfg':{'sticky':Tkinter.E}
## })
## ifd.append({'widgetType':Tkinter.Entry,
## 'defaultValue':'Macros',
## 'wcfg':{'label':'menu button'},
## 'gridcfg':{'sticky':Tkinter.E}
## })
## ifd.append({'widgetType':Tkinter.Entry,
## 'defaultValue':'',
## 'wcfg':{'label':'menu entry'},
## 'gridcfg':{'sticky':Tkinter.E}
## })
## ifd.append({'widgetType':Tkinter.Entry,
## 'defaultValue':'',
## 'wcfg':{'label':'cascade'},
## 'gridcfg':{'sticky':Tkinter.E}
## })
## ifd.append({'name': 'Load Macro',
## 'widgetType':Tkinter.Button,
## 'text':'Load Macro',
## 'wcfg':{'bd':6},
## 'gridcfg':{'sticky':Tkinter.E+Tkinter.W},
## 'command': self.loadMacro_cb})
## ifd.append({'widgetType':Tkinter.Button,
## 'text':'Dismiss',
## 'command': self.Dismiss_cb})
## def Dismiss_cb(self):
## #self.cmdForms['loadMacro'].withdraw()
## self.ifd.form.destroy()
## self.active = 0
## def guiCallback(self, event=None, file=None):
## if self.active: return
## self.active = 1
## self.customizeGUI()
## self.form = self.vf.getUserInput(self.ifd, modal=0, blocking=0)
## self.ifd.entryByName['Load Macro']['widget'].configure(state='disabled')
## if file: self.loadMacLib_cb(file)
## def __call__(self, file=None, macroName=None, menuBar='menuRoot',
## menuButton='Macros', name=None, cascade=None):
## """file=None, macroName=None, menuBar='menuRoot', menuButton='Macros',
## name=None, cascade=None"""
## if not macroName: self.guiCallback(file=file)
## else:
## if file[-3:]=='.py': file = file[:-3]
## names, macros, docs = self.getMacros(file)
## i = names.index(macroName)
## if name==None: name=macroName
## self.addMacro(macros[i], menuBar, menuButton, name, cascade)
class ShellCommand(Command):
    """Command to show/Hide the Python shell.

    Package : ViewerFramework
    Module : basicCommand.py
    Class : ShellCommand
    Command : Shell
    Synopsis: None<---Shell()
    """
    def onAddCmdToViewer(self):
        # Closing the shell window hides it instead of destroying it.
        if self.vf.hasGui:
            self.vf.GUI.pyshell.top.protocol('WM_DELETE_WINDOW',
                                             self.vf.Shell.onDestroy)

    def show(self):
        self.vf.GUI.pyshell.top.deiconify()

    def hide(self):
        self.vf.GUI.pyshell.top.withdraw()

    def __call__(self, *args):
        """None<---Shell()"""
        visible = bool(args[0])
        if visible:
            self.show()
        else:
            self.hide()
        # Keep the toolbar checkbutton in sync with the window state.
        self.vf.GUI.toolbarCheckbuttons['Shell']['Variable'].set(1 if visible else 0)

    def guiCallback(self):
        # The toolbar checkbutton state drives visibility.
        if self.vf.GUI.toolbarCheckbuttons['Shell']['Variable'].get():
            self.show()
        else:
            self.hide()

    def onDestroy(self):
        # Window-manager close: untick the button and just hide.
        self.vf.GUI.toolbarCheckbuttons['Shell']['Variable'].set(0)
        self.hide()
# GUI hook: toolbar checkbutton that toggles the Python shell window.
ShellCommandGUI = CommandGUI()
ShellCommandGUI.addToolBar('Shell', icon1='PyShell.gif',
                           balloonhelp='Python IDLE Shell', index=1)
class SaveSessionCommand(Command):
    """Command to allow the user to save the session as it is in a file.
    It logs all the transformation.
    \nPackage : Pmv
    \nModule : customizationCommands.py
    \nClass : SaveSessionCommand
    """
    def logString(self, *args, **kw):
        """return None as log string as we don't want to log this
        """
        pass

    def guiCallback(self, event=None):
        ### FIXME all the logs should be in a stack and not in a file.
        # Sessions are reconstructed from the command log, so saving is
        # impossible when logging was disabled.
        if self.vf.logMode == 'no':
            self.vf.warningMsg("No log information because logMode was set to no.")
            return
        newfile = self.vf.askFileSave(types = [
            ('Pmv sesion files', '*.psf'),
            ('all files', '*.py')],
                                      defaultextension=".psf",
                                      title = 'Save Session in File:')
        if not newfile is None:
            self.doitWrapper(newfile, redraw=0)

    def doit(self, filename):
        """Save the session to ``filename``.

        ``.psf`` files go through the viewer's full-session writer; any
        other extension gets a copy of the command log with the current
        viewer/object state appended as executable log lines.
        """
        #print "SaveSessionCommand.doit"
        ext = os.path.splitext(filename)[1].lower()
        if ext=='.psf':
            self.vf.saveFullSession(filename)
        else:
            import shutil
            # get the current log.
            if hasattr(self.vf, 'logAllFile'):
                logFileName = self.vf.logAllFile.name
                # Close before copying so the copy is complete, then
                # reopen in append mode so logging continues.
                self.vf.logAllFile.close()
                if filename!=logFileName:
                    shutil.copy(logFileName, filename)
                self.vf.logAllFile = open(logFileName,'a')
            # Add to it the transformation log.
            logFile = open(filename,'a')
            vi = self.vf.GUI.VIEWER
            code = vi.getViewerStateDefinitionCode('self.GUI.VIEWER')
            code.extend( vi.getObjectsStateDefinitionCode('self.GUI.VIEWER') )
            if code:
                for line in code:
                    logFile.write(line)
            # NOTE(review): curve-tool state is only persisted while the
            # contour checkbox is on — confirm that is intended.
            if vi.GUI.contourTk.get():
                controlpoints=vi.GUI.curvetool.getControlPoints()
                sensitivity=vi.GUI.d1scalewheel.get()
                logFile.write("self.GUI.VIEWER.GUI.curvetool.setControlPoints(%s)" %controlpoints)
                logFile.write("\n")
                logFile.write("self.GUI.VIEWER.GUI.curvetool.setSensitivity(%s)" %sensitivity)
            #sceneLog = self.vf.Exit.logScene()
            #for l in sceneLog:
            #    l1 = l+'\n'
            #    logFile.write(l1)
            logFile.close()
        if hasattr(self.vf, 'recentFiles'):
            self.vf.recentFiles.add(filename, 'readSourceMolecule')
# SaveSessionCommand Command GUI: registered both as a File-menu entry
# (File > Save > Current Session) and as a toolbar button.
SaveSessionCommandGUI = CommandGUI()
SaveSessionCommandGUI.addMenuCommand(
    'menuRoot', 'File', 'Current Session', index=2,
    cascadeName='Save', cascadeIndex=2, separatorAboveCascade=1)
SaveSessionCommandGUI.addToolBar('Save', icon1='filesave.gif',
                                 type='ToolBarButton',
                                 balloonhelp='Save Session', index=1)
class ExitCommand(Command):
"""Command to destroy application
\nPackage : ViewerFramework
\nModule : basicCommand.py
\nClass : ExitCommand
\nCommand : Exit
\nSynopsis:\n
None<---Exit(ask)
\nask = Flag when set to 1 a form asking you if you really want to quit
will popup, it will quit directly if set to 0
"""
    def onAddCmdToViewer(self):
        #print "ExitComand.onAddCmdToViewer"
        # Route window-manager close events through askquit so the user
        # can confirm before the application is destroyed.
        import warnings
        if self.vf.hasGui:
            self.vf.GUI.ROOT.protocol('WM_DELETE_WINDOW',self.askquit)
    def logObjectTransformations(self, object):
        """Deprecated: build log lines that restore ``object``'s
        rotation/translation/scale/pivot when replayed."""
        warnings.warn( "logObjectTransformations is deprecated",
                       DeprecationWarning, stacklevel=2)
        log = []
        # FIXME won't work with instance matrices
        # NOTE(review): `mat` is computed but never used below; the log
        # lines are built from the decomposed transform attributes.
        mat = object.GetMatrix(object)
        import numpy.oldnumeric as Numeric
        log.append("self.transformObject('rotation', '%s', matrix=%s,log=0)"%(object.fullName,tuple(object.rotation)))
        log.append("self.transformObject('translation', '%s', matrix=%s, log=0 )"%(object.fullName, tuple(object.translation)))
        log.append("self.transformObject('scale', '%s', matrix=%s, log=0 )"%(object.fullName,tuple(object.scale)))
        log.append("self.transformObject('pivot', '%s', matrix=%s, log=0 )"%(object.fullName,tuple(object.pivot)))
        return log
    def logObjectMaterial(self, object):
        """Deprecated: build log lines restoring ``object``'s OpenGL
        material properties (front and back faces)."""
        warnings.warn("logObjectMaterial is deprecated",
                      DeprecationWarning, stacklevel=2)
        log = []
        from opengltk.OpenGL import GL
        log.append("from opengltk.OpenGL import GL")
        # Front-face material: one setObject line per property
        # (ambient/diffuse/emissive/specular/shininess).
        mat = object.materials[GL.GL_FRONT]
        log.append("self.setObject('%s', materials=%s, propName='ambi', matBind=%d)" % (object.fullName, repr(mat.prop[0])[6:-5],mat.binding[0]))
        log.append("self.setObject('%s', materials=%s, propName='diff', matBind=%d)" % (object.fullName, repr(mat.prop[1])[6:-5],mat.binding[1]))
        log.append("self.setObject('%s', materials=%s, propName='emis', matBind=%d)" % (object.fullName, repr(mat.prop[2])[6:-5],mat.binding[2]))
        log.append("self.setObject('%s', materials=%s, propName='spec', matBind=%d)" % (object.fullName, repr(mat.prop[3])[6:-5],mat.binding[3]))
        log.append("self.setObject('%s', materials=%s, propName='shini', matBind=%d)" % (object.fullName, repr(mat.prop[4])[6:-5],mat.binding[4]))
        # Back-face material.
        # NOTE(review): the back-face prop indices pair 'spec' with
        # prop[2] and 'emis' with prop[3], the reverse of the front
        # face — confirm this ordering is intended.
        mat = object.materials[GL.GL_BACK]
        log.append("self.setObject('%s', materials=%s, polyFace=GL.GL_BACK,propName='ambi', matBind=%d)" % (object.fullName, repr(mat.prop[0])[6:-5],mat.binding[0]))
        log.append("self.setObject('%s', materials=%s, polyFace=GL.GL_BACK,propName='diff', matBind=%d)" % (object.fullName, repr(mat.prop[1])[6:-5],mat.binding[1]))
        log.append("self.setObject('%s', materials=%s, polyFace=GL.GL_BACK,propName='spec', matBind=%d)" % (object.fullName, repr(mat.prop[2])[6:-5],mat.binding[2]))
        log.append("self.setObject('%s', materials=%s, polyFace=GL.GL_BACK,propName='emis', matBind=%d)" % (object.fullName, repr(mat.prop[3])[6:-5],mat.binding[3]))
        log.append("self.setObject('%s', materials=%s, polyFace=GL.GL_BACK,propName='shini', matBind=%d)" % (object.fullName, repr(mat.prop[4])[6:-5],mat.binding[4]))
        return log
    def logCameraTransformations(self, camera):
        """Deprecated: build a setCamera log line restoring the camera's
        transform (rotation matrix, translation, scale, pivot, look
        at/from, direction)."""
        warnings.warn("logCameraTransformations is deprecated",
                      DeprecationWarning, stacklevel=2)
        logStr = "self.setCamera('%s', \n"%camera.name
        # 4x4 rotation matrix flattened to 16 values.
        logStr = logStr + "rotation=(%9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f),\n"%tuple(camera.rotation)
        logStr=logStr + "translation=(%9.3f, %9.3f, %9.3f),\n"%tuple(camera.translation)
        logStr = logStr + "scale=(%9.3f, %9.3f, %9.3f),\n"%tuple(camera.scale)
        logStr = logStr + "pivot=(%9.3f, %9.3f, %9.3f),\n"%tuple(camera.pivot)
        logStr = logStr + "lookAt=(%9.3f, %9.3f, %9.3f),\n"%tuple(camera.lookAt)
        logStr = logStr + "lookFrom=(%9.3f, %9.3f, %9.3f),\n"%tuple(camera.lookFrom)
        logStr = logStr + "direction=(%9.3f, %9.3f, %9.3f))"%tuple(camera.direction)
        return logStr+'\n'
    def logCameraProp(self, camera):
        """Deprecated: build a setCamera log line restoring the camera's
        window geometry, projection parameters and background color."""
        warnings.warn("logCameraProp is deprecated",
                      DeprecationWarning, stacklevel=2)
        logStr = "self.setCamera('%s', \n"%camera.name
        logStr = logStr + "width=%d, height=%d, rootx=%d, rooty=%d,"%\
                 (camera.width, camera.height, camera.rootx, camera.rooty)
        logStr = logStr + "fov=%f, near=%f, far=%f,"%\
                 (camera.fovy, camera.near, camera.far)
        logStr = logStr + "color=(%6.3f,%6.3f,%6.3f,%6.3f))"%\
                 tuple(camera.backgroundColor)
        return logStr+'\n'
def logLightTransformations(self, light):
warnings.warn("logLightTransformations is deprecated",
DeprecationWarning, stacklevel=2)
logStr = "self.setLight('%s', \n"%light.name
logStr = logStr + "rotation=(%9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, | |
# Source: rahul2393/python-spanner — samples/samples/snippets_test.py
# Copyright 2016 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import uuid
from google.api_core import exceptions
from google.cloud import spanner
import pytest
from test_utils.retry import RetryErrors
import snippets
CREATE_TABLE_SINGERS = """\
CREATE TABLE Singers (
SingerId INT64 NOT NULL,
FirstName STRING(1024),
LastName STRING(1024),
SingerInfo BYTES(MAX)
) PRIMARY KEY (SingerId)
"""
CREATE_TABLE_ALBUMS = """\
CREATE TABLE Albums (
SingerId INT64 NOT NULL,
AlbumId INT64 NOT NULL,
AlbumTitle STRING(MAX)
) PRIMARY KEY (SingerId, AlbumId),
INTERLEAVE IN PARENT Singers ON DELETE CASCADE
"""
retry_429 = RetryErrors(exceptions.ResourceExhausted, delay=15)
@pytest.fixture(scope="module")
def sample_name():
    """Name of this sample set, consumed by shared conftest fixtures."""
    return "snippets"

@pytest.fixture(scope="module")
def create_instance_id():
    """Id for the instance created (and deleted) by the create_instance test."""
    return f"create-instance-{uuid.uuid4().hex[:10]}"

@pytest.fixture(scope="module")
def lci_instance_id():
    """Id for the low-cost instance."""
    return f"lci-instance-{uuid.uuid4().hex[:10]}"

@pytest.fixture(scope="module")
def database_id():
    """Id of the shared test database."""
    return f"test-db-{uuid.uuid4().hex[:10]}"

@pytest.fixture(scope="module")
def create_database_id():
    """Id for the database created by the create_database test."""
    return f"create-db-{uuid.uuid4().hex[:10]}"

@pytest.fixture(scope="module")
def cmek_database_id():
    """Id for the customer-managed-encryption-key (CMEK) database."""
    return f"cmek-db-{uuid.uuid4().hex[:10]}"

@pytest.fixture(scope="module")
def default_leader_database_id():
    """Id for the database used by the default-leader tests."""
    return f"leader_db_{uuid.uuid4().hex[:10]}"

@pytest.fixture(scope="module")
def database_ddl():
    """Sequence of DDL statements used to set up the database.

    Sample testcase modules can override as needed.
    """
    return [CREATE_TABLE_SINGERS, CREATE_TABLE_ALBUMS]

@pytest.fixture(scope="module")
def default_leader():
    """Default leader for multi-region instances."""
    return "us-east4"
def test_create_instance_explicit(spanner_client, create_instance_id):
    """create_instance snippet: make a brand-new instance, then clean up."""
    # Rather than re-use 'sample_instance', we create a new instance, to
    # ensure that the 'create_instance' snippet is tested.
    retry_429(snippets.create_instance)(create_instance_id)
    instance = spanner_client.instance(create_instance_id)
    retry_429(instance.delete)()

def test_create_database_explicit(sample_instance, create_database_id):
    """create_database snippet: make a brand-new database, then drop it."""
    # Rather than re-use 'sample_database', we create a new database, to
    # ensure that the 'create_database' snippet is tested.
    snippets.create_database(sample_instance.instance_id, create_database_id)
    database = sample_instance.database(create_database_id)
    database.drop()
def test_create_instance_with_processing_units(capsys, lci_instance_id):
    """Verify the processing-units snippet output, then delete the instance."""
    processing_units = 500
    retry_429(snippets.create_instance_with_processing_units)(
        lci_instance_id, processing_units,
    )
    out, _ = capsys.readouterr()
    assert lci_instance_id in out
    assert "{} processing units".format(processing_units) in out
    # Clean up the instance created above.
    spanner_client = spanner.Client()
    instance = spanner_client.instance(lci_instance_id)
    retry_429(instance.delete)()

def test_create_database_with_encryption_config(capsys, instance_id, cmek_database_id, kms_key_name):
    """CMEK snippet output should mention both the database and the key."""
    snippets.create_database_with_encryption_key(instance_id, cmek_database_id, kms_key_name)
    out, _ = capsys.readouterr()
    assert cmek_database_id in out
    assert kms_key_name in out
def test_get_instance_config(capsys):
    """get_instance_config snippet echoes the requested config name."""
    # NOTE(review): hard-coded multi-region config name.
    instance_config = "nam6"
    snippets.get_instance_config(instance_config)
    out, _ = capsys.readouterr()
    assert instance_config in out

def test_list_instance_config(capsys):
    """list_instance_config snippet lists a well-known config."""
    snippets.list_instance_config()
    out, _ = capsys.readouterr()
    assert "regional-us-central1" in out

def test_list_databases(capsys, instance_id):
    """list_databases snippet prints leader info for each database."""
    snippets.list_databases(instance_id)
    out, _ = capsys.readouterr()
    assert "has default leader" in out
def test_create_database_with_default_leader(capsys, multi_region_instance, multi_region_instance_id, default_leader_database_id, default_leader):
retry_429 = RetryErrors(exceptions.ResourceExhausted, delay=15)
retry_429(snippets.create_database_with_default_leader)(
multi_region_instance_id, default_leader_database_id, default_leader
)
out, _ = capsys.readouterr()
assert default_leader_database_id in out
assert default_leader in out
def test_update_database_with_default_leader(capsys, multi_region_instance, multi_region_instance_id, default_leader_database_id, default_leader):
retry_429 = RetryErrors(exceptions.ResourceExhausted, delay=15)
retry_429(snippets.update_database_with_default_leader)(
multi_region_instance_id, default_leader_database_id, default_leader
)
out, _ = capsys.readouterr()
assert default_leader_database_id in out
assert default_leader in out
def test_get_database_ddl(capsys, instance_id, sample_database):
snippets.get_database_ddl(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert sample_database.database_id in out
def test_query_information_schema_database_options(capsys, multi_region_instance, multi_region_instance_id, default_leader_database_id, default_leader):
snippets.query_information_schema_database_options(
multi_region_instance_id, default_leader_database_id
)
out, _ = capsys.readouterr()
assert default_leader in out
@pytest.mark.dependency(name="insert_data")
def test_insert_data(capsys, instance_id, sample_database):
snippets.insert_data(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "Inserted data" in out
@pytest.mark.dependency(depends=["insert_data"])
def test_delete_data(capsys, instance_id, sample_database):
snippets.delete_data(instance_id, sample_database.database_id)
# put it back for other tests
snippets.insert_data(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "Deleted data" in out
@pytest.mark.dependency(depends=["insert_data"])
def test_query_data(capsys, instance_id, sample_database):
snippets.query_data(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "SingerId: 1, AlbumId: 1, AlbumTitle: Total Junk" in out
@pytest.mark.dependency(name="add_column", depends=["insert_data"])
def test_add_column(capsys, instance_id, sample_database):
    # Schema change other tests build on (MarketingBudget column).
    snippets.add_column(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Added the MarketingBudget column." in out
@pytest.mark.dependency(depends=["insert_data"])
def test_read_data(capsys, instance_id, sample_database):
    snippets.read_data(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "SingerId: 1, AlbumId: 1, AlbumTitle: Total Junk" in out
@pytest.mark.dependency(name="update_data", depends=["add_column"])
def test_update_data(capsys, instance_id, sample_database):
    # Sleep for 15 seconds to ensure previous inserts will be
    # 'stale' by the time test_read_stale_data is run.
    time.sleep(15)
    snippets.update_data(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Updated data." in out
@pytest.mark.dependency(depends=["update_data"])
def test_read_stale_data(capsys, instance_id, sample_database):
    # This snippet relies on test_update_data inserting data
    # at least 15 seconds after the previous insert
    snippets.read_stale_data(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    # Stale read predates the update, so MarketingBudget is still unset.
    assert "SingerId: 1, AlbumId: 1, MarketingBudget: None" in out
@pytest.mark.dependency(depends=["add_column"])
def test_read_write_transaction(capsys, instance_id, sample_database):
    snippets.read_write_transaction(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Transaction complete" in out
@pytest.mark.dependency(depends=["add_column"])
def test_query_data_with_new_column(capsys, instance_id, sample_database):
    snippets.query_data_with_new_column(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "SingerId: 1, AlbumId: 1, MarketingBudget: 300000" in out
    assert "SingerId: 2, AlbumId: 2, MarketingBudget: 300000" in out
@pytest.mark.dependency(name="add_index", depends=["insert_data"])
def test_add_index(capsys, instance_id, sample_database):
    snippets.add_index(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Added the AlbumsByAlbumTitle index" in out
@pytest.mark.dependency(depends=["add_index"])
def test_query_data_with_index(capsys, instance_id, sample_database):
    snippets.query_data_with_index(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Go, Go, Go" in out
    assert "Forever Hold Your Peace" in out
    # The index query filters this title out; the index read below includes it.
    assert "Green" not in out
@pytest.mark.dependency(depends=["add_index"])
def test_read_data_with_index(capsys, instance_id, sample_database):
    snippets.read_data_with_index(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Go, Go, Go" in out
    assert "Forever Hold Your Peace" in out
    assert "Green" in out
@pytest.mark.dependency(name="add_storing_index", depends=["insert_data"])
def test_add_storing_index(capsys, instance_id, sample_database):
    snippets.add_storing_index(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Added the AlbumsByAlbumTitle2 index." in out
@pytest.mark.dependency(depends=["add_storing_index"])
def test_read_data_with_storing_index(capsys, instance_id, sample_database):
    snippets.read_data_with_storing_index(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "300000" in out
@pytest.mark.dependency(depends=["insert_data"])
def test_read_only_transaction(capsys, instance_id, sample_database):
    snippets.read_only_transaction(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    # Snippet does two reads, so entry should be listed twice
    assert out.count("SingerId: 1, AlbumId: 1, AlbumTitle: Total Junk") == 2
@pytest.mark.dependency(name="add_timestamp_column", depends=["insert_data"])
def test_add_timestamp_column(capsys, instance_id, sample_database):
    snippets.add_timestamp_column(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert 'Altered table "Albums" on database ' in out
@pytest.mark.dependency(depends=["add_timestamp_column"])
def test_update_data_with_timestamp(capsys, instance_id, sample_database):
    snippets.update_data_with_timestamp(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Updated data" in out
@pytest.mark.dependency(depends=["add_timestamp_column"])
def test_query_data_with_timestamp(capsys, instance_id, sample_database):
    snippets.query_data_with_timestamp(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "SingerId: 1, AlbumId: 1, MarketingBudget: 1000000" in out
    assert "SingerId: 2, AlbumId: 2, MarketingBudget: 750000" in out
@pytest.mark.dependency(name="create_table_with_timestamp")
def test_create_table_with_timestamp(capsys, instance_id, sample_database):
    # Creates the Performances table used by the timestamp-insert test.
    snippets.create_table_with_timestamp(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Created Performances table on database" in out
@pytest.mark.dependency(depends=["create_table_with_timestamp"])
def test_insert_data_with_timestamp(capsys, instance_id, sample_database):
    """Verify the commit-timestamp insert snippet reports success.

    Bug fix: the marker previously depended on "create_table_with_datatypes",
    but this snippet writes to the Performances table, which is created by
    test_create_table_with_timestamp (marker "create_table_with_timestamp"
    directly above). The wrong dependency could let this test run before its
    table exists, or be skipped for an unrelated failure.
    """
    snippets.insert_data_with_timestamp(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Inserted data." in out
@pytest.mark.dependency(name="write_struct_data")
def test_write_struct_data(capsys, instance_id, sample_database):
    # Seeds the rows the STRUCT query tests below depend on.
    snippets.write_struct_data(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Inserted sample data for STRUCT queries" in out
@pytest.mark.dependency(depends=["write_struct_data"])
def test_query_with_struct(capsys, instance_id, sample_database):
    snippets.query_with_struct(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "SingerId: 6" in out
@pytest.mark.dependency(depends=["write_struct_data"])
def test_query_with_array_of_struct(capsys, instance_id, sample_database):
    snippets.query_with_array_of_struct(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "SingerId: 8" in out
    assert "SingerId: 7" in out
    assert "SingerId: 6" in out
@pytest.mark.dependency(depends=["write_struct_data"])
def test_query_struct_field(capsys, instance_id, sample_database):
    snippets.query_struct_field(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "SingerId: 6" in out
@pytest.mark.dependency(depends=["write_struct_data"])
def test_query_nested_struct_field(capsys, instance_id, sample_database):
    snippets.query_nested_struct_field(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "SingerId: 6 SongName: Imagination" in out
    assert "SingerId: 9 SongName: Imagination" in out
@pytest.mark.dependency(name="insert_data_with_dml")
def test_insert_data_with_dml(capsys, instance_id, sample_database):
    snippets.insert_data_with_dml(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "1 record(s) inserted." in out
@pytest.mark.dependency(name="log_commit_stats")
def test_log_commit_stats(capsys, instance_id, sample_database):
    # The snippet should report both the DML result and the commit stats.
    snippets.log_commit_stats(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "1 record(s) inserted." in out
    assert "3 mutation(s) in transaction." in out
@pytest.mark.dependency(depends=["insert_data"])
def test_update_data_with_dml(capsys, instance_id, sample_database):
    snippets.update_data_with_dml(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "1 record(s) updated." in out
@pytest.mark.dependency(depends=["insert_data"])
def test_delete_data_with_dml(capsys, instance_id, sample_database):
    snippets.delete_data_with_dml(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "1 record(s) deleted." in out
@pytest.mark.dependency(depends=["add_timestamp_column"])
def test_update_data_with_dml_timestamp(capsys, instance_id, sample_database):
    snippets.update_data_with_dml_timestamp(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "2 record(s) updated." in out
@pytest.mark.dependency(name="dml_write_read_transaction")
def test_dml_write_read_transaction(capsys, instance_id, sample_database):
    # Insert and read back in one transaction; both effects must be echoed.
    snippets.dml_write_read_transaction(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "1 record(s) inserted." in out
    assert "FirstName: Timothy, LastName: Campbell" in out
@pytest.mark.dependency(depends=["dml_write_read_transaction"])
def test_update_data_with_dml_struct(capsys, instance_id, sample_database):
    snippets.update_data_with_dml_struct(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "1 record(s) updated" in out
@pytest.mark.dependency(name="insert_with_dml")
def test_insert_with_dml(capsys, instance_id, sample_database):
    snippets.insert_with_dml(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "4 record(s) inserted" in out
@pytest.mark.dependency(depends=["insert_with_dml"])
def test_query_data_with_parameter(capsys, instance_id, sample_database):
    snippets.query_data_with_parameter(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "SingerId: 12, FirstName: Melissa, LastName: Garcia" in out
@pytest.mark.dependency(depends=["add_column"])
def test_write_with_dml_transaction(capsys, instance_id, sample_database):
    snippets.write_with_dml_transaction(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Transferred 200000 from Album2's budget to Album1's" in out
@pytest.mark.dependency(depends=["add_column"])
def test_update_data_with_partitioned_dml(capsys, instance_id, sample_database):
    """Verify the partitioned-DML update snippet reports 3 updated records.

    Bug fix: the function was named ``update_data_with_partitioned_dml``
    (missing the ``test_`` prefix), so pytest never collected it and the
    snippet was silently untested despite carrying a dependency marker.
    """
    snippets.update_data_with_partitioned_dml(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "3 record(s) updated" in out
@pytest.mark.dependency(depends=["insert_with_dml"])
def test_delete_data_with_partitioned_dml(capsys, instance_id, sample_database):
    snippets.delete_data_with_partitioned_dml(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "6 record(s) deleted" in out
@pytest.mark.dependency(depends=["add_column"])
def test_update_with_batch_dml(capsys, instance_id, sample_database):
    snippets.update_with_batch_dml(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Executed 2 SQL statements using Batch DML" in out
@pytest.mark.dependency(name="create_table_with_datatypes")
def test_create_table_with_datatypes(capsys, instance_id, sample_database):
    # Creates the Venues table used by the datatype query tests below.
    snippets.create_table_with_datatypes(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Created Venues table on database" in out
@pytest.mark.dependency(
    name="insert_datatypes_data", depends=["create_table_with_datatypes"],
)
def test_insert_datatypes_data(capsys, instance_id, sample_database):
    snippets.insert_datatypes_data(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "Inserted data." in out
@pytest.mark.dependency(depends=["insert_datatypes_data"])
def test_query_data_with_array(capsys, instance_id, sample_database):
    snippets.query_data_with_array(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "VenueId: 19, VenueName: Venue 19, AvailableDate: 2020-11-01" in out
    assert "VenueId: 42, VenueName: Venue 42, AvailableDate: 2020-10-01" in out
@pytest.mark.dependency(depends=["insert_datatypes_data"])
def test_query_data_with_bool(capsys, instance_id, sample_database):
    snippets.query_data_with_bool(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "VenueId: 19, VenueName: Venue 19, OutdoorVenue: True" in out
@pytest.mark.dependency(depends=["insert_datatypes_data"])
def test_query_data_with_bytes(capsys, instance_id, sample_database):
    snippets.query_data_with_bytes(instance_id, sample_database.database_id)
    out, _ = capsys.readouterr()
    assert "VenueId: 4, VenueName: Venue 4" in out
@pytest.mark.dependency(depends=["insert_datatypes_data"])
def test_query_data_with_date(capsys, instance_id, sample_database):
snippets.query_data_with_date(instance_id, sample_database.database_id)
out, _ = capsys.readouterr()
assert "VenueId: 4, VenueName: Venue 4, LastContactDate: 2018-09-02" in out
assert "VenueId: 42, | |
from dg_db.db_write import write_countries, write_skews, write_platforms
def populate_accounts():
    """Seed the countries table with the supported markets.

    Rows are (name, ISO code, sales region, sub-region) tuples; "None"
    (the string) marks countries without a sub-region grouping.
    """
    central = [
        ("Germany", "DE", "Central", "Gat"),
        ("Austria", "AT", "Central", "Gat"),
        ("Switzerland", "CH", "Central", "None"),
    ]
    south = [
        ("France", "FR", "South", "None"),
        ("Italy", "IT", "South", "None"),
        ("Spain", "ES", "South", "Iberia"),
        ("Portugal", "PT", "South", "Iberia"),
    ]
    north = [
        ("Denmark", "DK", "North", "None"),
        ("Norway", "NO", "North", "None"),
        ("Sweden", "SE", "North", "None"),
        ("Finland", "FI", "North", "None"),
        ("Netherlands", "NL", "North", "Benelux"),
        ("Belgium", "BE", "North", "Benelux"),
    ]
    uki = [
        ("UK", "UK", "UKI", "None"),
        ("Ireland", "IE", "UKI", "None"),
    ]
    # Concatenation preserves the original insertion order.
    write_countries(central + south + north + uki)
def populate_platforms():
    """Seed the platforms table with per-country ad-platform account ids.

    Rows are (country index, platform name, account id, secondary id)
    tuples; Google rows carry two distinct ids, Microsoft rows repeat one.
    """
    google = [
        (1, "Google", "8005495881", "2059894093"),
        (2, "Google", "4825989316", "4302291843"),
        (3, "Google", "1345515457", "5635690780"),
        (4, "Google", "3136320411", "7447885588"),
        (5, "Google", "3456991375", "2037891425"),
        (6, "Google", "6033808683", "9021596480"),
        (7, "Google", "1795086707", "4753585796"),
        (8, "Google", "9503587615", "7310537721"),
        (9, "Google", "1739364726", "8212508088"),
        (10, "Google", "7245755087", "7575293197"),
        (11, "Google", "1332379677", "1904670595"),
        (12, "Google", "7966843210", "9271529992"),
        (13, "Google", "3704484034", "6338903272"),
        (14, "Google", "8457701491", "1150412027"),
        (15, "Google", "3377816067", "6186544301"),
    ]
    microsoft = [
        (1, "Microsoft", "X7356537", "X7356537"),
        (2, "Microsoft", "X7004130", "X7004130"),
        (3, "Microsoft", "F1071RW6", "F1071RW6"),
        (4, "Microsoft", "X8349003", "X8349003"),
        (5, "Microsoft", "B004ETLE", "B004ETLE"),
        (6, "Microsoft", "B004V7BM", "B004V7BM"),
        (7, "Microsoft", "NOTAVAIL", "NOTAVAIL"),
        (8, "Microsoft", "X000XGAU", "X000XGAU"),
        (9, "Microsoft", "B004SUHL", "B004SUHL"),
        (10, "Microsoft", "F107DTZT", "F107DTZT"),
        (11, "Microsoft", "F1074ELB", "F1074ELB"),
        (12, "Microsoft", "X0006AXW", "X0006AXW"),
        (13, "Microsoft", "B004295N", "B004295N"),
        (14, "Microsoft", "F107P4UT", "F107P4UT"),
        (15, "Microsoft", "F107MVC9", "F107MVC9"),
    ]
    # Concatenation preserves the original insertion order.
    write_platforms(google + microsoft)
def populate_skews():
# Q2 22
records_to_insert = [
("UK", 910534.84, 4660585.24, 0.1954, 0.0173, 0.0419, 0.0440, 0.0692, 0.0533, 0.0533, 0.0551, 0.0589, 0.2653, 0.0677, 0.0799, 0.0810, 0.0810, 0.0320),
("IE", 102951.89, 727779.08, 0.1415, 0.0180, 0.0437, 0.0459, 0.0722, 0.0556, 0.0556, 0.0574, 0.0614, 0.2339, 0.0706, 0.0833, 0.0844, 0.0844, 0.0334),
("DE", 1109019.05, 5427544.86, 0.2043, 0.0153, 0.0400, 0.0438, 0.0762, 0.0514, 0.0533, 0.0552, 0.0590, 0.2649, 0.0670, 0.0789, 0.0812, 0.0812, 0.0325),
("AT", 105074.22, 745541.88, 0.1409, 0.0160, 0.0417, 0.0457, 0.0794, 0.0536, 0.0556, 0.0576, 0.0616, 0.2335, 0.0700, 0.0826, 0.0850, 0.0842, 0.0335),
("CH", 240387.78, 1437894.37, 0.1672, 0.0218, 0.0496, 0.0595, 0.0811, 0.0811, 0.0811, 0.0991, 0.0631, 0.1295, 0.0673, 0.0737, 0.0737, 0.0804, 0.0389),
("FR", 529057.49, 2634799.26, 0.2008, 0.0170, 0.0348, 0.0382, 0.0531, 0.0448, 0.0481, 0.0498, 0.0514, 0.2308, 0.0638, 0.0705, 0.1072, 0.1440, 0.0466),
("ES", 153548.47, 807181.61, 0.1902, 0.0170, 0.0348, 0.0382, 0.0531, 0.0448, 0.0481, 0.0498, 0.0514, 0.2308, 0.0638, 0.0705, 0.1072, 0.1440, 0.0466),
("IT", 165266.60, 898683.98, 0.1839, 0.0270, 0.0505, 0.0586, 0.0631, 0.0622, 0.0712, 0.0667, 0.0703, 0.1558, 0.0930, 0.0842, 0.0842, 0.0797, 0.0337),
("PT", 39063.01, 148691.35, 0.2627, 0.0270, 0.0631, 0.0631, 0.0721, 0.0721, 0.0721, 0.0811, 0.0631, 0.0945, 0.0821, 0.0885, 0.0885, 0.0885, 0.0442),
("NO", 49170.23, 213901.64, 0.2299, 0.0270, 0.0505, 0.0586, 0.0631, 0.0622, 0.0712, 0.0667, 0.0703, 0.1558, 0.0930, 0.0842, 0.0842, 0.0797, 0.0337),
("SE", 68291.98, 315653.46, 0.2164, 0.0361, 0.0541, 0.0586, 0.0811, 0.0631, 0.0856, 0.0631, 0.0631, 0.1170, 0.0748, 0.0807, 0.0807, 0.0807, 0.0612),
("FI", 32780.15, 151513.66, 0.2164, 0.0361, 0.0541, 0.0586, 0.0811, 0.0631, 0.0856, 0.0631, 0.0631, 0.1170, 0.0748, 0.0807, 0.0807, 0.0807, 0.0613),
("DK", 122925.57, 635020.50, 0.1936, 0.0361, 0.0541, 0.0586, 0.0811, 0.0631, 0.0856, 0.0631, 0.0631, 0.1170, 0.0748, 0.0807, 0.0807, 0.0807, 0.0612),
("NL", 170456.79, 834216.40, 0.2043, 0.0406, 0.0628, 0.0628, 0.0679, 0.0679, 0.0787, 0.0763, 0.0652, 0.1213, 0.0732, 0.0848, 0.0821, 0.0795, 0.0369),
("BE", 113637.86, 617938.08, 0.1839, 0.0406, 0.0628, 0.0628, 0.0679, 0.0679, 0.0787, 0.0763, 0.0652, 0.1213, 0.0732, 0.0848, 0.0821, 0.0795, 0.0369)
]
# Q1 Final (Reduced Budget)
# records_to_insert = [
# ("UK", 850737, 3511354, 0.24, 0.02, 0.04, 0.04, 0.07, 0.05, 0.05, 0.06, 0.06, 0.27, 0.07, 0.08, 0.08, 0.08, 0.03),
# ("IE", 92370, 564691, 0.16, 0.02, 0.04, 0.05, 0.07, 0.06, 0.06, 0.06, 0.06, 0.23, 0.07, 0.08, 0.08, 0.08, 0.03),
# ("DE", 1085957, 3956971, 0.27, 0.02, 0.04, 0.04, 0.08, 0.05, 0.05, 0.06, 0.06, 0.26, 0.07, 0.08, 0.08, 0.08, 0.03),
# ("AT", 98265, 508753, 0.19, 0.02, 0.04, 0.05, 0.08, 0.05, 0.06, 0.06, 0.06, 0.23, 0.07, 0.08, 0.09, 0.08, 0.03),
# ("CH", 195306, 996198, 0.20, 0.02, 0.05, 0.06, 0.08, 0.08, 0.08, 0.10, 0.06, 0.13, 0.07, 0.07, 0.07, 0.08, 0.04),
# ("FR", 531644, 1648781, 0.32, 0.02, 0.03, 0.04, 0.05, 0.04, 0.05, 0.05, 0.05, 0.23, 0.06, 0.07, 0.11, 0.14, 0.05),
# ("ES", 126968, 590136, 0.22, 0.02, 0.03, 0.04, 0.05, 0.04, 0.05, 0.05, 0.05, 0.23, 0.06, 0.07, 0.11, 0.14, 0.05),
# ("IT", 137325, 567718, 0.24, 0.03, 0.05, 0.06, 0.06, 0.06, 0.07, 0.07, 0.07, 0.16, 0.09, 0.08, 0.08, 0.08, 0.03),
# ("PT", 32134, 101955, 0.32, 0.03, 0.06, 0.06, 0.07, 0.07, 0.07, 0.08, 0.06, 0.09, 0.08, 0.09, 0.09, 0.09, 0.04),
# ("NO", 39934, 168357, 0.24, 0.03, 0.05, 0.06, 0.06, 0.06, 0.07, 0.07, 0.07, 0.16, 0.09, 0.08, 0.08, 0.08, 0.03),
# ("SE", 55464, 167020, 0.33, 0.04, 0.05, 0.06, 0.08, 0.06, 0.09, 0.06, 0.06, 0.12, 0.07, 0.08, 0.08, 0.08, 0.06),
# ("FI", 26623, 101548, 0.26, 0.04, 0.05, 0.06, 0.08, 0.06, 0.09, 0.06, 0.06, 0.12, 0.07, 0.08, 0.08, 0.08, 0.06),
# ("DK", 99835, 440934, 0.23, 0.04, 0.05, 0.06, 0.08, 0.06, 0.09, 0.06, 0.06, 0.12, 0.07, 0.08, 0.08, 0.08, 0.06),
# ("NL", 136775, 542948, 0.25, 0.04, 0.06, 0.06, 0.07, 0.07, 0.08, 0.08, 0.07, 0.12, 0.07, 0.08, 0.08, 0.08, 0.04),
# ("BE", 91184, 398162, 0.23, 0.04, 0.06, 0.06, 0.07, 0.07, 0.08, 0.08, 0.07, 0.12, 0.07, 0.08, 0.08, 0.08, 0.04)
# ]
# Q1 22
# records_to_insert = [
# ("UK", 659636, 3970904, 0.1661, 0.0227, 0.0550, 0.0577, 0.0909, 0.0700, 0.0700, 0.0723, 0.0773, 0.1104, 0.0784, 0.0861, 0.0873, 0.0873, 0.0345),
# ("IE", 73293, 638596, 0.1148, 0.0227, 0.0550, 0.0577, 0.0909, 0.0700, 0.0700, 0.0723, 0.0773, 0.1104, 0.0784, 0.0861, 0.0873, 0.0873, 0.0345),
# ("DE", 826140, 4474841, 0.1846, 0.0201, 0.0525, 0.0575, 0.1000, 0.0675, 0.0700, 0.0725, 0.0775, 0.1100, 0.0775, 0.0850, 0.0875, 0.0875, 0.0350),
# ("AT", 81706, 575337, 0.1420, 0.0201, 0.0525, 0.0575, 0.1000, 0.0675, 0.0700, 0.0725, 0.0775, 0.1100, 0.0775, 0.0850, 0.0875, 0.0875, 0.0350),
# ("CH", 175908, 1126576, 0.1561, 0.0201, 0.0525, 0.0575, 0.1000, 0.0675, 0.0700, 0.0725, 0.0775, 0.1100, 0.0775, 0.0850, 0.0875, 0.0875, 0.0350),
# ("FR", 359010, 1864565, 0.1925, 0.0255, 0.0525, 0.0575, 0.0800, 0.0675, 0.0725, 0.0750, 0.0775, 0.1099, 0.0850, 0.0875, 0.0875, 0.0875, 0.0350),
# ("ES", 116539, 667371, 0.1746, 0.0255, 0.0525, 0.0575, 0.0800, 0.0675, 0.0725, 0.0750, 0.0775, 0.1099, 0.0850, 0.0875, 0.0875, 0.0875, 0.0350),
# ("IT", 123686, 642018, 0.1927, 0.0255, 0.0525, 0.0575, 0.0800, 0.0675, 0.0725, 0.0750, 0.0775, 0.1099, 0.0850, 0.0875, 0.0875, 0.0875, 0.0350),
# ("PT", 28942, 115298, 0.2510, 0.0255, 0.0525, 0.0575, 0.0800, 0.0675, 0.0725, 0.0750, 0.0775, 0.1099, 0.0850, 0.0875, 0.0875, 0.0875, 0.0350),
# ("NO", 35968, 190390, 0.1889, 0.0400, 0.0600, 0.0650, 0.0900, 0.0700, 0.0950, 0.0700, 0.0700, 0.0950, 0.0725, 0.0725, 0.0725, 0.0725, 0.0550),
# ("SE", 49955, 188879, 0.2645, 0.0400, 0.0600, 0.0650, 0.0900, 0.0700, 0.0950, 0.0700, 0.0700, 0.0950, 0.0725, 0.0725, 0.0725, 0.0725, 0.0550),
# ("FI", 23979, 114839, 0.2088, 0.0400, 0.0600, 0.0650, 0.0900, 0.0700, 0.0950, 0.0700, 0.0700, 0.0950, 0.0725, 0.0725, 0.0725, 0.0725, 0.0550),
# ("DK", 89919, 498641, 0.1803, 0.0400, 0.0600, 0.0650, 0.0900, 0.0700, 0.0950, 0.0700, 0.0700, 0.0950, 0.0725, 0.0725, 0.0725, 0.0725, 0.0550),
# ("NL", 123191, 614006, 0.2006, 0.0450, 0.0697, 0.0697, 0.0753, 0.0753, 0.0873, 0.0847, 0.0723, 0.0997, 0.0703, 0.0750, 0.0727, 0.0703, 0.0327),
# ("BE", 82127, 450271, 0.1824, 0.0450, 0.0697, 0.0697, 0.0753, 0.0753, 0.0873, 0.0847, 0.0723, 0.0997, 0.0703, 0.0750, 0.0727, 0.0703, 0.0327)
# ]
# // Q4
# records_to_insert = [
# ("UK", 492693, 2458665, 0.20, 0.02, 0.1, 0.08, 0.08, 0.08, 0.07, 0.07, 0.07, 0.07, 0.07, 0.1, 0.08, 0.08, 0.03),
# ("IE", 51965, 409777, 0.13, 0.02, 0.1, 0.08, 0.08, 0.08, 0.07, 0.07, 0.07, 0.07, 0.07, 0.1, 0.08, 0.08, 0.03),
# ("DE", 729221, 3698601, 0.20, 0.03, 0.07, 0.07, 0.07, 0.08, 0.08, 0.07, 0.08, 0.08, 0.12, 0.08, 0.08, 0.08, 0.04),
# ("AT", 111815, 821338, 0.14, 0.03, 0.07, 0.07, 0.07, 0.08, 0.08, 0.07, 0.08, 0.08, 0.12, 0.08, 0.08, 0.08, 0.04),
# ("CH", 157896, 1044068, 0.15, 0.03, 0.07, 0.07, 0.07, 0.08, 0.08, 0.07, 0.08, 0.08, 0.12, 0.08, 0.08, 0.08, 0.04),
# ("FR", 230684, 1120651, 0.20, 0.02, 0.06, 0.07, 0.09, 0.09, 0.09, 0.11, 0.07, 0.11, 0.06, 0.06, 0.06, 0.07, 0.03),
# ("ES", 95035, 545993, 0.17, 0.06, 0.06, 0.06, 0.07, 0.08, 0.08, 0.08, 0.08, 0.08, 0.09, 0.08, 0.08, 0.08, 0.04),
# ("IT", 127896, 816425, 0.16, 0.03, 0.07, 0.07, 0.08, 0.08, 0.08, 0.09, 0.07, 0.07, 0.08, 0.08, 0.08, 0.08, 0.04),
# ("PT", 16885, 60524, 0.27, 0.05, 0.06, 0.06, 0.07, 0.08, 0.08, 0.09, 0.07, 0.09, 0.08, 0.08, 0.08, 0.08, 0.03),
# ("NO", 26584, 150431, 0.18, 0.02, 0.05, 0.07, 0.08, 0.07, 0.08, 0.13, 0.08, 0.1, 0.08, 0.08, 0.08, 0.08, 0.04),
# ("SE", 44339, 243477, 0.18, 0.02, 0.05, 0.07, 0.08, 0.07, 0.08, 0.13, 0.08, 0.1, 0.08, 0.08, 0.08, | |
<reponame>mydevice/python-openstackclient
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from unittest.mock import call
from cinderclient import api_versions
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
from openstackclient.volume.v2 import volume_backup
class TestBackup(volume_fakes.TestVolume):
    """Base class that wires fresh volume-service mocks for backup tests."""

    def setUp(self):
        super(TestBackup, self).setUp()
        manager = self.app.client_manager.volume
        # Bind each mocked sub-manager, then clear any recorded state so
        # every test starts from a clean slate.
        self.backups_mock = manager.backups
        self.volumes_mock = manager.volumes
        self.snapshots_mock = manager.volume_snapshots
        self.restores_mock = manager.restores
        for mocked in (self.backups_mock, self.volumes_mock,
                       self.snapshots_mock, self.restores_mock):
            mocked.reset_mock()
class TestBackupCreate(TestBackup):
    """Tests for the ``volume backup create`` command."""

    # Fixture objects shared by every test in this class.
    volume = volume_fakes.FakeVolume.create_one_volume()
    snapshot = volume_fakes.FakeSnapshot.create_one_snapshot()
    new_backup = volume_fakes.FakeBackup.create_one_backup(
        attrs={'volume_id': volume.id, 'snapshot_id': snapshot.id})
    # Expected display columns and their values for the fake backup.
    columns = (
        'availability_zone',
        'container',
        'description',
        'id',
        'name',
        'object_count',
        'size',
        'snapshot_id',
        'status',
        'volume_id',
    )
    data = (
        new_backup.availability_zone,
        new_backup.container,
        new_backup.description,
        new_backup.id,
        new_backup.name,
        new_backup.object_count,
        new_backup.size,
        new_backup.snapshot_id,
        new_backup.status,
        new_backup.volume_id,
    )
    def setUp(self):
        super(TestBackupCreate, self).setUp()
        self.volumes_mock.get.return_value = self.volume
        self.snapshots_mock.get.return_value = self.snapshot
        self.backups_mock.create.return_value = self.new_backup
        # Get the command object to test
        self.cmd = volume_backup.CreateVolumeBackup(self.app, None)
    def test_backup_create(self):
        # All options supplied: every flag must be forwarded to create().
        arglist = [
            "--name", self.new_backup.name,
            "--description", self.new_backup.description,
            "--container", self.new_backup.container,
            "--force",
            "--incremental",
            "--snapshot", self.new_backup.snapshot_id,
            self.new_backup.volume_id,
        ]
        verifylist = [
            ("name", self.new_backup.name),
            ("description", self.new_backup.description),
            ("container", self.new_backup.container),
            ("force", True),
            ("incremental", True),
            ("snapshot", self.new_backup.snapshot_id),
            ("volume", self.new_backup.volume_id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.backups_mock.create.assert_called_with(
            self.new_backup.volume_id,
            container=self.new_backup.container,
            name=self.new_backup.name,
            description=self.new_backup.description,
            force=True,
            incremental=True,
            snapshot_id=self.new_backup.snapshot_id,
        )
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.data, data)
    def test_backup_create_with_properties(self):
        # --property requires microversion 3.43; properties map to metadata=.
        self.app.client_manager.volume.api_version = \
            api_versions.APIVersion('3.43')
        arglist = [
            "--property", "foo=bar",
            "--property", "wow=much-cool",
            self.new_backup.volume_id,
        ]
        verifylist = [
            ("properties", {"foo": "bar", "wow": "much-cool"}),
            ("volume", self.new_backup.volume_id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.backups_mock.create.assert_called_with(
            self.new_backup.volume_id,
            container=None,
            name=None,
            description=None,
            force=False,
            incremental=False,
            metadata={"foo": "bar", "wow": "much-cool"},
        )
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.data, data)
    def test_backup_create_with_properties_pre_v343(self):
        # Below 3.43 the command must refuse --property with a clear error.
        self.app.client_manager.volume.api_version = \
            api_versions.APIVersion('3.42')
        arglist = [
            "--property", "foo=bar",
            "--property", "wow=much-cool",
            self.new_backup.volume_id,
        ]
        verifylist = [
            ("properties", {"foo": "bar", "wow": "much-cool"}),
            ("volume", self.new_backup.volume_id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        exc = self.assertRaises(
            exceptions.CommandError,
            self.cmd.take_action,
            parsed_args)
        self.assertIn("--os-volume-api-version 3.43 or greater", str(exc))
    def test_backup_create_with_availability_zone(self):
        # --availability-zone requires microversion 3.51.
        self.app.client_manager.volume.api_version = \
            api_versions.APIVersion('3.51')
        arglist = [
            "--availability-zone", "my-az",
            self.new_backup.volume_id,
        ]
        verifylist = [
            ("availability_zone", "my-az"),
            ("volume", self.new_backup.volume_id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.backups_mock.create.assert_called_with(
            self.new_backup.volume_id,
            container=None,
            name=None,
            description=None,
            force=False,
            incremental=False,
            availability_zone="my-az",
        )
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.data, data)
    def test_backup_create_with_availability_zone_pre_v351(self):
        # Below 3.51 the command must refuse --availability-zone.
        self.app.client_manager.volume.api_version = \
            api_versions.APIVersion('3.50')
        arglist = [
            "--availability-zone", "my-az",
            self.new_backup.volume_id,
        ]
        verifylist = [
            ("availability_zone", "my-az"),
            ("volume", self.new_backup.volume_id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        exc = self.assertRaises(
            exceptions.CommandError,
            self.cmd.take_action,
            parsed_args)
        self.assertIn("--os-volume-api-version 3.51 or greater", str(exc))
    def test_backup_create_without_name(self):
        # Omitting --name must pass name=None through to create().
        arglist = [
            "--description", self.new_backup.description,
            "--container", self.new_backup.container,
            self.new_backup.volume_id,
        ]
        verifylist = [
            ("description", self.new_backup.description),
            ("container", self.new_backup.container),
            ("volume", self.new_backup.volume_id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.backups_mock.create.assert_called_with(
            self.new_backup.volume_id,
            container=self.new_backup.container,
            name=None,
            description=self.new_backup.description,
            force=False,
            incremental=False,
        )
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.data, data)
class TestBackupDelete(TestBackup):
    """Tests for the ``volume backup delete`` command."""

    # Two fake backups so multi-delete behaviour can be exercised.
    backups = volume_fakes.FakeBackup.create_backups(count=2)
    def setUp(self):
        super(TestBackupDelete, self).setUp()
        self.backups_mock.get = (
            volume_fakes.FakeBackup.get_backups(self.backups))
        self.backups_mock.delete.return_value = None
        # Get the command object to mock
        self.cmd = volume_backup.DeleteVolumeBackup(self.app, None)
    def test_backup_delete(self):
        arglist = [
            self.backups[0].id
        ]
        verifylist = [
            ("backups", [self.backups[0].id])
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        # Second positional argument is the force flag (False by default).
        self.backups_mock.delete.assert_called_with(
            self.backups[0].id, False)
        self.assertIsNone(result)
    def test_backup_delete_with_force(self):
        arglist = [
            '--force',
            self.backups[0].id,
        ]
        verifylist = [
            ('force', True),
            ("backups", [self.backups[0].id])
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.backups_mock.delete.assert_called_with(self.backups[0].id, True)
        self.assertIsNone(result)
    def test_delete_multiple_backups(self):
        # Every backup id passed on the command line must be deleted.
        arglist = []
        for b in self.backups:
            arglist.append(b.id)
        verifylist = [
            ('backups', arglist),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        calls = []
        for b in self.backups:
            calls.append(call(b.id, False))
        self.backups_mock.delete.assert_has_calls(calls)
        self.assertIsNone(result)
    def test_delete_multiple_backups_with_exception(self):
        # One valid id and one unknown id: the command should attempt both
        # lookups, delete only the found backup, and report the failure count.
        arglist = [
            self.backups[0].id,
            'unexist_backup',
        ]
        verifylist = [
            ('backups', arglist),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        find_mock_result = [self.backups[0], exceptions.CommandError]
        with mock.patch.object(utils, 'find_resource',
                               side_effect=find_mock_result) as find_mock:
            try:
                self.cmd.take_action(parsed_args)
                self.fail('CommandError should be raised.')
            except exceptions.CommandError as e:
                self.assertEqual('1 of 2 backups failed to delete.',
                                 str(e))
            find_mock.assert_any_call(self.backups_mock, self.backups[0].id)
            find_mock.assert_any_call(self.backups_mock, 'unexist_backup')
            self.assertEqual(2, find_mock.call_count)
            self.backups_mock.delete.assert_called_once_with(
                self.backups[0].id, False
            )
class TestBackupList(TestBackup):
    """Tests for the ``volume backup list`` command."""

    volume = volume_fakes.FakeVolume.create_one_volume()
    # Three fake backups, all referencing the fake volume (by name).
    backups = volume_fakes.FakeBackup.create_backups(
        attrs={'volume_id': volume.name}, count=3)

    # Columns shown by default.
    columns = (
        'ID',
        'Name',
        'Description',
        'Status',
        'Size',
    )
    # Extra columns shown with --long.
    columns_long = columns + (
        'Availability Zone',
        'Volume',
        'Container',
    )

    # Expected row data for the default column set.
    data = []
    for b in backups:
        data.append((
            b.id,
            b.name,
            b.description,
            b.status,
            b.size,
        ))
    # Expected row data for the --long column set.
    data_long = []
    for b in backups:
        data_long.append((
            b.id,
            b.name,
            b.description,
            b.status,
            b.size,
            b.availability_zone,
            volume_backup.VolumeIdColumn(b.volume_id),
            b.container,
        ))

    def setUp(self):
        """Point the volume/backup mocks at the fakes and build the command."""
        super(TestBackupList, self).setUp()

        self.volumes_mock.list.return_value = [self.volume]
        self.backups_mock.list.return_value = self.backups
        self.volumes_mock.get.return_value = self.volume
        self.backups_mock.get.return_value = self.backups[0]
        # Get the command to test
        self.cmd = volume_backup.ListVolumeBackup(self.app, None)

    def test_backup_list_without_options(self):
        """No options: empty search opts, no paging, no resource resolution."""
        arglist = []
        verifylist = [
            ("long", False),
            ("name", None),
            ("status", None),
            ("volume", None),
            ("marker", None),
            ("limit", None),
            ('all_projects', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        columns, data = self.cmd.take_action(parsed_args)

        search_opts = {
            "name": None,
            "status": None,
            "volume_id": None,
            'all_tenants': False,
        }
        # Without --volume/--marker nothing should be looked up individually.
        self.volumes_mock.get.assert_not_called()
        self.backups_mock.get.assert_not_called()
        self.backups_mock.list.assert_called_with(
            search_opts=search_opts,
            marker=None,
            limit=None,
        )
        self.assertEqual(self.columns, columns)
        self.assertItemsEqual(self.data, list(data))

    def test_backup_list_with_options(self):
        """All filters are translated into search opts / paging arguments."""
        arglist = [
            "--long",
            "--name", self.backups[0].name,
            "--status", "error",
            "--volume", self.volume.id,
            "--marker", self.backups[0].id,
            "--all-projects",
            "--limit", "3",
        ]
        verifylist = [
            ("long", True),
            ("name", self.backups[0].name),
            ("status", "error"),
            ("volume", self.volume.id),
            ("marker", self.backups[0].id),
            ('all_projects', True),
            ("limit", 3),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        columns, data = self.cmd.take_action(parsed_args)

        search_opts = {
            "name": self.backups[0].name,
            "status": "error",
            "volume_id": self.volume.id,
            'all_tenants': True,
        }
        # --volume and --marker are each resolved to their object first.
        self.volumes_mock.get.assert_called_once_with(self.volume.id)
        self.backups_mock.get.assert_called_once_with(self.backups[0].id)
        self.backups_mock.list.assert_called_with(
            search_opts=search_opts,
            marker=self.backups[0].id,
            limit=3,
        )
        self.assertEqual(self.columns_long, columns)
        self.assertItemsEqual(self.data_long, list(data))
class TestBackupRestore(TestBackup):
    """Tests for the ``volume backup restore`` command."""

    volume = volume_fakes.FakeVolume.create_one_volume()
    backup = volume_fakes.FakeBackup.create_one_backup(
        attrs={'volume_id': volume.id})

    def setUp(self):
        super(TestBackupRestore, self).setUp()

        self.backups_mock.get.return_value = self.backup
        self.volumes_mock.get.return_value = self.volume
        # Restoring yields a fresh fake volume that reuses the original id.
        self.restores_mock.restore.return_value = (
            volume_fakes.FakeVolume.create_one_volume(
                {'id': self.volume['id']}))

        # Command under test.
        self.cmd = volume_backup.RestoreVolumeBackup(self.app, None)

    def test_backup_restore(self):
        """Backup id and target volume id are forwarded to the restore API."""
        arglist = [self.backup.id, self.backup.volume_id]
        verifylist = [
            ("backup", self.backup.id),
            ("volume", self.backup.volume_id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        result = self.cmd.take_action(parsed_args)

        self.restores_mock.restore.assert_called_with(self.backup.id,
                                                      self.backup.volume_id)
        self.assertIsNotNone(result)
class TestBackupSet(TestBackup):
backup = volume_fakes.FakeBackup.create_one_backup(
attrs={'metadata': {'wow': 'cool'}},
)
    def setUp(self):
        """Wire the backup lookup to the fixed fake and build the command."""
        super(TestBackupSet, self).setUp()

        self.backups_mock.get.return_value = self.backup
        # Get the command object to test
        self.cmd = volume_backup.SetVolumeBackup(self.app, None)
def test_backup_set_name(self):
self.app.client_manager.volume.api_version = \
api_versions.APIVersion('3.9')
arglist = [
'--name', 'new_name',
self.backup.id,
]
verifylist = [
('name', 'new_name'),
('backup', self.backup.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns nothing
result = self.cmd.take_action(parsed_args)
self.backups_mock.update.assert_called_once_with(
self.backup.id, **{'name': 'new_name'})
self.assertIsNone(result)
    def test_backup_set_name_pre_v39(self):
        """--name is rejected when the volume API is older than 3.9."""
        self.app.client_manager.volume.api_version = \
            api_versions.APIVersion('3.8')

        arglist = [
            '--name', 'new_name',
            self.backup.id,
        ]
        verifylist = [
            ('name', 'new_name'),
            ('backup', self.backup.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        exc = self.assertRaises(
            exceptions.CommandError,
            self.cmd.take_action,
            parsed_args)
        self.assertIn("--os-volume-api-version 3.9 or greater", str(exc))
def test_backup_set_description(self):
self.app.client_manager.volume.api_version = \
api_versions.APIVersion('3.9')
arglist = [
'--description', 'new_description',
self.backup.id,
]
verifylist = [
('name', None),
('description', 'new_description'),
('backup', self.backup.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'description': 'new_description'
}
self.backups_mock.update.assert_called_once_with(
self.backup.id,
**kwargs
)
self.assertIsNone(result)
    def test_backup_set_description_pre_v39(self):
        """--description is rejected when the volume API is older than 3.9."""
        self.app.client_manager.volume.api_version = \
            api_versions.APIVersion('3.8')

        arglist = [
            '--description', 'new_description',
            self.backup.id,
        ]
        verifylist = [
            ('name', None),
            ('description', 'new_description'),
            ('backup', self.backup.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        exc = self.assertRaises(
            exceptions.CommandError,
            self.cmd.take_action,
            parsed_args)
        self.assertIn("--os-volume-api-version 3.9 or greater", str(exc))
def test_backup_set_state(self):
arglist = [
'--state', 'error',
self.backup.id
]
verifylist = [
('state', 'error'),
('backup', self.backup.id)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.backups_mock.reset_state.assert_called_once_with(
self.backup.id, 'error')
self.assertIsNone(result)
    def test_backup_set_state_failed(self):
        """A reset_state failure surfaces as a generic CommandError."""
        self.backups_mock.reset_state.side_effect = exceptions.CommandError()
        arglist = [
            '--state', 'error',
            self.backup.id
        ]
        verifylist = [
            ('state', 'error'),
            ('backup', self.backup.id)
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        try:
            self.cmd.take_action(parsed_args)
            self.fail('CommandError should be raised.')
        except exceptions.CommandError as e:
            self.assertEqual('One or more of the set operations failed',
                             str(e))
        # The failing call must still have been attempted.
        self.backups_mock.reset_state.assert_called_with(
            self.backup.id, 'error')
def test_backup_set_no_property(self):
self.app.client_manager.volume.api_version = \
api_versions.APIVersion('3.43')
arglist = [
'--no-property',
self.backup.id,
]
verifylist = [
('no_property', True),
('backup', self.backup.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'metadata': {},
}
self.backups_mock.update.assert_called_once_with(
self.backup.id,
**kwargs
)
self.assertIsNone(result)
    def test_backup_set_no_property_pre_v343(self):
        """--no-property is rejected when the volume API is older than 3.43."""
        self.app.client_manager.volume.api_version = \
            api_versions.APIVersion('3.42')

        arglist = [
            '--no-property',
            self.backup.id,
        ]
        verifylist = [
            ('no_property', True),
            ('backup', self.backup.id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        exc = self.assertRaises(
            exceptions.CommandError,
            self.cmd.take_action,
            parsed_args)
        self.assertIn("--os-volume-api-version 3.43 or greater", str(exc))
def test_backup_set_property(self):
self.app.client_manager.volume.api_version = \
api_versions.APIVersion('3.43')
arglist = [
'--property', 'foo=bar',
self.backup.id,
]
verifylist = [
('properties', {'foo': 'bar'}),
('backup', self.backup.id),
]
parsed_args = self.check_parser(self.cmd, arglist, | |
in range(maxmajorticks):
if axis == xaxis:
self.majorticks[axis][axissign].append(label(display=self.display, yoffset=-tmajor,
font=graphfont, height=fontheight, border=0,
linecolor=grey, visible=False, box=False, opacity=0))
else:
self.majorticks[axis][axissign].append(label(display=self.display, xoffset=-tmajor,
font=graphfont, height=fontheight, border=2,
linecolor=grey, visible=False, box=False, opacity=0))
currentdisplay.select()
def __del__(self):
self.visible = False
try:
gdisplays.remove(self)
except:
pass
    def mouse(self, evt):
        """Handle graph mouse events (bound for mousedown/mousemove/mouseup).

        While the left button is held, show crosshair lines plus an (x, y)
        text readout that track the pointer; hide them again on release.
        """
        m = evt
        if m.press == 'left':
            # Seed with an offset so the first mousemove sees mousepos != newpos.
            self.mousepos = self.display.mouse.pos+vector(10,20,30) # so mousepos != newpos
            self.horline.visible = True
            self.vertline.visible = True
            self.showxy.visible = True
            self.display.cursor.visible = False
        elif m.release == 'left':
            self.mousepos = None
            self.showxy.visible = False
            self.horline.visible = False
            self.vertline.visible = False
            self.display.cursor.visible = True
        newpos = self.display.mouse.pos
        if newpos != self.mousepos:
            self.mousepos = newpos
            xmax = self.display.range.x
            ymax = self.display.range.y
            xcenter = self.display.center.x
            ycenter = self.display.center.y
            # Crosshairs span the full visible extent; z=.01 keeps them in
            # front of the plotted curves.
            self.horline.pos = [(xcenter-xmax,self.mousepos.y,.01),
                                (xcenter+xmax,self.mousepos.y,.01)]
            self.vertline.pos = [(self.mousepos.x,ycenter-ymax,.01),
                                 (self.mousepos.x,ycenter+ymax,.01)]
            v = self.showxy.pos = self.mousepos
            if self.logx: x = 10**v.x
            if self.logy: y = 10**v.y
            # NOTE(review): x and y above are assigned but never used -- the
            # readout below always shows the raw v.x/v.y. For log axes this
            # presumably should display 10**v.x / 10**v.y; confirm intent
            # before changing behavior.
            # Flip the label offset so it stays inside the plot window.
            if v.x > xcenter:
                self.showxy.xoffset = -10
            else:
                self.showxy.xoffset = 10
            self.showxy.text = '({0:0.4g}, {1:0.4g})'.format(v.x,v.y)
    def setcenter(self):
        """Recompute display.range and display.center so the data region plus
        axis tick labels and titles fit inside the window.

        Pixel-based margins (tick lengths, font heights, title widths) are
        converted to data units via the current pixels-per-unit scale.
        """
        x0, y0 = self.getorigin()
        xright = self.minmax[xaxis][posaxis]
        xleft = self.minmax[xaxis][negaxis]
        ytop = self.minmax[yaxis][posaxis]
        ybottom = self.minmax[yaxis][negaxis]
        rightpixels = self.xtitlewidth
        leftpixels = 0
        if xleft == x0:
            # Axis starts at the origin: leave room for y-axis tick labels.
            leftpixels = 3*tmajor
        toppixels = bottompixels = 0
        if self.ytitlewidth:
            toppixels = 2*fontheight
        if ybottom == y0:
            bottompixels = tmajor+fontheight
        # 0.55 (not 0.5) pads the half-ranges slightly beyond the data extent.
        xrange = 0.55*(xright-xleft)*self.width/(self.width-(rightpixels+leftpixels))
        yrange = 0.55*(ytop-ybottom)*self.height/(self.height-(toppixels+bottompixels))
        xscale = xrange/(.5*self.width)
        yscale = yrange/(.5*self.height)
        x1 = xright+rightpixels*xscale
        x2 = x0+self.ytitlewidth*xscale
        if x2 > x1: # ytitle extends farther to the right than xaxis + xtitle
            xrange = 0.55*(x2-xleft)*self.width/(self.width-leftpixels)
            xscale = xrange/(.5*self.width)
            xright = x2
            rightpixels = 0
        # Guard against a zero range, which the display cannot handle.
        if xrange == 0: xrange = 1e-300
        if yrange == 0: yrange = 1e-300
        self.display.range = (xrange,yrange,0.1)
        self.display.center = ((xright+xleft+(rightpixels-leftpixels)*xscale)/2.0,
                               (ytop+ybottom+(toppixels-bottompixels)*yscale)/2.0,0)
def getorigin(self):
return (self.zero[0].pos[0], self.zero[1].pos[1])
    def setminorticks(self, axis, axissign, loglabel, dmajor, dminor):
        """Position and show minor tick marks along one half-axis.

        axis: xaxis (0) or yaxis (1); axissign: posaxis/negaxis half-axis.
        loglabel: True when this axis is logarithmic.
        dmajor/dminor: major/minor tick spacing in data units.
        Returns the number of minor ticks made visible.
        """
        ## For log axis labels, show the standard uneven log tick marks if dmajor == 1,
        ## but for dmajor > 1, show minor tick marks at the decade locations.
        ## Since we have only minorticks-1 = 4 minor tick marks between major tick marks,
        ## if dmajor > minorticks (=5), don't show any minor tick marks.
        if loglabel and (dmajor > minorticks): return 0
        x0,y0 = self.getorigin()
        limit = self.minmax[axis][axissign]
        # Work in offsets relative to the origin.
        if axis == xaxis: limit -= x0
        else: limit -= y0
        if axissign == negaxis:
            dminor = -dminor
        ntick = nmajor = nminor = 0
        # Every `exclude`-th tick coincides with a major tick and is skipped.
        exclude = minorticks
        if loglabel and (dmajor > 1):
            exclude = dmajor
            # Decade-spaced minor ticks: step one decade at a time.
            if dminor > 0:
                dminor = 1
            else:
                dminor = -1
        while True:
            ntick += 1
            tickpos = ntick*dminor
            if (ntick % exclude) == 0:
                nmajor += 1
                continue # no minor tick where there is a major one
            if loglabel: # have already excluded dmajor > minorticks (=5)
                if dmajor == 1:
                    # Uneven log spacing within a decade (logticks table).
                    if dminor > 0:
                        tickpos = dmajor*(nmajor+logticks[(ntick-1)%exclude])
                    else:
                        tickpos = dmajor*(-(nmajor+1)+logticks[3-((ntick-1)%exclude)])
            # Stop once past the end of the half-axis.
            if dminor > 0:
                if tickpos > limit: break
            else:
                if tickpos < limit: break
            obj = self.minorticks[axis][axissign][nminor]
            if axis == xaxis:
                obj.pos = (x0+tickpos,y0,0)
            else:
                obj.pos = (x0,y0+tickpos,0)
            obj.visible = True
            nminor += 1
        return nminor
    def axisdisplay(self, axis, axissign):
        """(Re)draw one half-axis: axis line, title, and major/minor ticks.

        First call for a half-axis creates the axis curve, title, and tick
        labels; later calls (half-axis has grown) extend the axis line and
        update only the labels/ticks that changed.
        """
        # axis = 0 for x axis, 1 for y axis
        # axissign = 0 for negative half-axis, 1 for positive half-axis
        if not self.makeaxis[axis][axissign]: return
        sign = 1
        if axissign == negaxis: sign = -1
        x0,y0 = self.getorigin()
        if axis == xaxis:
            loglabel = self.logx
        else:
            loglabel = self.logy
        if axis == xaxis: origin = x0
        else: origin = y0
        if self.axis[axis][axissign] is None: # new; no axis displayed up till now
            if self.minmax[axis][axissign] == origin: return # zero-length axis
            # Display axis and axis title
            if axis == xaxis:
                axispos = ([(x0,y0,0), (self.minmax[axis][axissign],y0,0)])
                titlepos = ([self.minmax[axis][posaxis],y0,0])
            else:
                axispos = ([(x0,y0,0), (x0,self.minmax[axis][axissign],0)])
                titlepos = ([x0,self.minmax[axis][posaxis]+2*fontheight*self.display.range.y/(.5*self.width),0])
            self.axis[axis][axissign] = curve(pos=axispos, color=grey, display=self.display)
            if axis == xaxis and self.Lxtitle.text != "":
                self.Lxtitle.pos = titlepos
                self.Lxtitle.visible = True
            if axis == yaxis and self.Lytitle.text != "":
                self.Lytitle.pos = titlepos
                self.Lytitle.visible = True
            # Determine major tick marks and labels
            if origin != 0:
                # Non-zero origin: compute marks over the full span, then
                # shift them to be relative to the half-axis endpoints.
                newmajormarks, form = labelnum(self.minmax[axis][posaxis] -
                                               self.minmax[axis][negaxis], loglabel)
                dmajor = newmajormarks[0]
                for n, mark in enumerate(newmajormarks):
                    if origin > 0:
                        newmajormarks[n] += self.minmax[axis][negaxis]
                    else:
                        newmajormarks[n] -= self.minmax[axis][posaxis]
                    # NOTE(review): self-assignment below is a no-op; looks
                    # vestigial -- kept byte-identical.
                    newmajormarks[n] = newmajormarks[n]
            else:
                # Origin at zero: size marks to the longer half-axis.
                if self.minmax[axis][posaxis] >= -self.minmax[axis][negaxis]:
                    newmajormarks, form = labelnum(self.minmax[axis][posaxis], loglabel)
                else:
                    if loglabel:
                        newmajormarks, form = labelnum(self.minmax[axis][negaxis], loglabel)
                    else:
                        newmajormarks, form = labelnum(-self.minmax[axis][negaxis], loglabel)
                dmajor = newmajormarks[0]
            self.format[axis] = form
            # Display major tick marks and labels
            nmajor = 0
            marks = []
            for x1 in newmajormarks:
                if x1 > abs(self.minmax[axis][axissign]): break # newmajormarks can refer to opposite half-axis
                if axissign == posaxis and x1 < origin: continue
                elif axissign == negaxis and x1 < abs(origin): continue
                marks.append(x1)
                obj = self.majorticks[axis][axissign][nmajor]
                if loglabel:
                    obj.text = self.format[axis].format(int(sign*x1))
                else:
                    obj.text = cleaneformat(self.format[axis].format(sign*x1))
                obj.color = self.foreground
                obj.visible = True
                if axis == xaxis:
                    obj.pos = [sign*x1,y0,0]
                    obj.yoffset = -tmajor
                else:
                    obj.pos = [x0,sign*x1,0]
                    obj.xoffset = -tmajor
                nmajor = nmajor+1
            # Display minor tick marks
            self.setminorticks(axis, axissign, loglabel, dmajor, dmajor/minorticks)
            if marks != []:
                self.majormarks[axis][axissign] = marks
                self.lastlabel[axis][axissign] = self.majormarks[axis][axissign][-1]
            else:
                self.lastlabel[axis][axissign] = 0
        else:
            # Extend axis, which has grown
            if axis == xaxis:
                self.axis[axis][axissign].pos = [[x0,y0,0],[self.minmax[axis][axissign], y0, 0]]
            else:
                self.axis[axis][axissign].pos = [[x0,y0,0],[x0,self.minmax[axis][axissign],0]]
            # Reposition xtitle (at right) or ytitle (at top)
            if axis == xaxis and axissign == posaxis:
                self.Lxtitle.pos = (self.minmax[axis][posaxis],y0,0)
            if axis == yaxis and axissign == posaxis:
                self.Lytitle.pos = ([x0,self.minmax[axis][posaxis]+2*fontheight*self.display.range.y/(.5*self.width),0])
            # See how many majormarks are now needed, and in what format
            if self.minmax[axis][posaxis] >= -self.minmax[axis][negaxis]:
                newmajormarks, form = labelnum(self.minmax[axis][posaxis], loglabel)
            else:
                newmajormarks, form = labelnum(-self.minmax[axis][negaxis], loglabel)
            if (self.majormarks[axis][axissign] is not None) and (len(self.majormarks[axis][axissign]) > 0):
                # this axis already has major tick marks/labels
                olddmajor = self.majormarks[axis][axissign][0]
            else:
                olddmajor = 0.
            olddminor = olddmajor/minorticks
            dmajor = newmajormarks[0]
            dminor = dmajor/minorticks
            newformat = (form != self.format[axis])
            self.format[axis] = form
            # Rebuild ticks only when the axis outgrew the last label or the
            # tick spacing / label format changed.
            check = (self.minmax[axis][axissign] >= self.lastlabel[axis][axissign]+dminor)
            if axissign == negaxis:
                check = (self.minmax[axis][axissign] <= self.lastlabel[axis][axissign]-dminor)
            needminor = check or (dminor != olddminor)
            needmajor = ((self.majormarks[axis][axissign] is None)
                         or (newmajormarks[-1] != self.majormarks[axis][axissign][-1]) or newformat)
            if needmajor: # need new labels
                start = 0
                if (self.majormarks[axis][axissign] is None) or newformat or (dmajor != olddmajor):
                    marks = []
                else:
                    # Keep existing labels; only add those beyond the last one.
                    for num in newmajormarks:
                        if num > self.majormarks[axis][axissign][-1]:
                            start = num
                            break
                    marks = self.majormarks[axis][axissign]
                for nmajor in range(maxmajorticks):
                    obj = self.majorticks[axis][axissign][nmajor]
                    if nmajor < len(newmajormarks):
                        x1 = newmajormarks[nmajor]
                        if abs(self.minmax[axis][axissign]) >= x1:
                            if x1 < start:
                                continue
                        else:
                            obj.visible = False
                            continue
                    else:
                        obj.visible = False
                        continue
                    marks.append(x1)
                    if loglabel:
                        obj.text = self.format[axis].format(int(sign*x1))
                    else:
                        obj.text = cleaneformat(self.format[axis].format(sign*x1))
                    obj.color = self.foreground
                    obj.visible = True
                    if axis == xaxis:
                        obj.pos = [sign*x1,y0,0]
                    else:
                        obj.pos = [x0,sign*x1,0]
                if marks != []:
                    self.majormarks[axis][axissign] = marks
            if needminor: # adjust minor tick marks
                nminor = self.setminorticks(axis, axissign, loglabel, dmajor, dminor)
                # Hide any leftover minor ticks from the previous layout.
                while nminor < maxminorticks:
                    self.minorticks[axis][axissign][nminor].visible = False
                    nminor = nminor+1
            self.lastlabel[axis][axissign] = dminor*int(self.minmax[axis][axissign]/dminor)
    def resize(self, x, y):
        """Grow the plot to include data point (x, y), redrawing as needed.

        Expands minmax with a `frac` margin when the point falls outside the
        current extents, then redraws the affected axes and recenters. On the
        first call (self.active False) the graph is activated: mouse handling
        is bound and the origin markers are shown.
        """
        redox = redoy = False
        if self.autoscale[xaxis]:
            if x > self.lastminmax[xaxis][posaxis]:
                self.minmax[xaxis][posaxis] = x+frac*self.display.range[0]
                if (self.lastminmax[xaxis][posaxis] == 0 or
                        (self.minmax[xaxis][posaxis] >= self.lastminmax[xaxis][posaxis])):
                    redox = True
            elif x < self.lastminmax[xaxis][negaxis]:
                self.minmax[xaxis][negaxis] = x-frac*self.display.range[0]
                if (self.lastminmax[xaxis][negaxis] == 0 or
                        (self.minmax[xaxis][negaxis] <= self.lastminmax[xaxis][negaxis])):
                    redox = True
        elif not self.active:
            redox = redoy = True
        if self.autoscale[yaxis]:
            if y > self.lastminmax[yaxis][posaxis]:
                self.minmax[yaxis][posaxis] = y+frac*self.display.range[1]
                if (self.lastminmax[yaxis][posaxis] == 0 or
                        (self.minmax[yaxis][posaxis] >= self.lastminmax[yaxis][posaxis])):
                    redoy = True
            elif y < self.lastminmax[yaxis][negaxis]:
                self.minmax[yaxis][negaxis] = y-frac*self.display.range[1]
                if (self.lastminmax[yaxis][negaxis] == 0 or
                        (self.minmax[yaxis][negaxis] <= self.lastminmax[yaxis][negaxis])):
                    redoy = True
        elif not self.active:
            redox = redoy = True
        if (redox or redoy ):
            self.setcenter() # approximate
        if redox:
            self.axisdisplay(xaxis,posaxis)
            self.lastminmax[xaxis][posaxis] = self.minmax[xaxis][posaxis]
            self.axisdisplay(xaxis,negaxis)
            self.lastminmax[xaxis][negaxis] = self.minmax[xaxis][negaxis]
        if redoy:
            self.axisdisplay(yaxis,posaxis)
            self.lastminmax[yaxis][posaxis] = self.minmax[yaxis][posaxis]
            self.axisdisplay(yaxis,negaxis)
            self.lastminmax[yaxis][negaxis] = self.minmax[yaxis][negaxis]
        self.setcenter() # revised
        if not self.active:
            self.active = True
            gdisplays.append(self)
            self.display.bind("mousedown mousemove mouseup", checkGraphMouse, self)
            self.zero[xaxis].visible = True
            self.zero[yaxis].visible = True
def getgdisplay():
    """Create and return a fresh gdisplay (factory used when no display
    has been supplied or created yet)."""
    new_display = gdisplay()
    return new_display
def constructorargs(obj,arguments):
if 'gdisplay' in arguments:
obj.gdisplay = arguments['gdisplay']
else:
if lastgdisplay is None:
obj.gdisplay = getgdisplay()
else:
obj.gdisplay = lastgdisplay
if 'color' in | |
from pypipe import formats
class Bcftools:
    """Declarative command specs for bcftools subcommands.

    Each spec dict declares the shell command ('cmd'), logging target ('log'),
    how output is produced ('out'; 'redirect' True means stdout is redirected
    to the file named by the 'out' argument), and the accepted named/unnamed
    arguments with their expected value types ('args').
    """

    @staticmethod
    def view():
        """Spec for `bcftools view` (BCF/VCF conversion and calling).

        Output type depends on the -b flag: BCF with -b, otherwise VCF;
        similarly the input type flips on -S (VCF input vs BCF).
        """
        return {
            'cmd': 'bcftools view',
            'type': None,
            'log': 'log',
            'out': {
                'redirect': True,
                'return': [
                    {'arg': 'out', 'type': {'b': formats.Bcf, '': formats.Vcf}, 'suffix': ''},
                ]
            },
            'args': {
                'named': {
                    '-A': bool,
                    '-b': bool,
                    '-D': formats.TextFile,
                    '-F': bool,
                    '-G': bool,
                    '-l': formats.TextFile,
                    '-N': bool,
                    '-Q': bool,
                    '-s': formats.TextFile,
                    '-S': bool,
                    '-u': bool,
                    '-c': bool,
                    '-d': float,
                    '-e': bool,
                    '-g': bool,
                    '-i': float,
                    '-p': float,
                    '-P': str,
                    '-t': float,
                    '-T': str,
                    '-v': bool,
                    '-1': int,
                    '-U': int,
                    '-X': float,
                },
                'unnamed': [
                    ('in_*', {'-S': formats.Vcf, '': formats.Bcf}),
                    ('out*', str),
                ],
            },
        }

    @staticmethod
    def cat():
        """Spec for `bcftools cat` (concatenate BCF files to stdout)."""
        return {
            'cmd': 'bcftools cat',
            'type': None,
            'log': 'log',
            'out': {
                'redirect': True,
                'return': [
                    {'arg': 'out', 'type': formats.Bcf, 'suffix': ''},
                ],
            },
            'args': {
                'named': {
                },
                'unnamed': [
                    ('in_*', formats.Bcf),
                    ('out*', str),
                ],
            }
        }
class Bowtie2:
    """Declarative command spec for the bowtie2 read aligner."""

    @staticmethod
    def bowtie2():
        """Spec for `bowtie2`.

        SAM output goes to the file named by -S (no stdout redirection).
        The read-input options -U/-1/-2 accept a type that depends on the
        format flag in effect (--qseq/-f/-r/-c, default FASTQ); list-valued
        entries are [type, min_count, separator].
        """
        return {
            'cmd': 'bowtie2',
            'type': None,
            'log': 'log',
            'out': {
                'redirect': False,
                'return': [
                    {'arg': '-S', 'type': formats.Sam, 'suffix': ''},
                ]
            },
            'args': {
                'named': {
                    # Required arguments are marked with a trailing '*'.
                    '-x*': formats.Bowtie2Index,
                    '-S*': str,
                    '-U': {
                        '--qseq': [formats.Qseq, 1, ','],
                        '-f': [formats.Fasta, 1, ','],
                        '-r': [formats.TextFile, 1, ','],
                        '-c': str,
                        '': [formats.Fastq, 1, ',']
                    },
                    '-1': {
                        '--qseq': [formats.Qseq, 1, ','],
                        '-f': [formats.Fasta, 1, ','],
                        '-r': [formats.TextFile, 1, ','],
                        '-c': str,
                        '': [formats.Fastq, 1, ',']
                    },
                    '-2': {
                        '--qseq': [formats.Qseq, 1, ','],
                        '-f': [formats.Fasta, 1, ','],
                        '-r': [formats.TextFile, 1, ','],
                        '-c': str,
                        '': [formats.Fastq, 1, ',']
                    },
                    '-q': bool,
                    '--qseq': bool,
                    '-f': bool,
                    '-r': bool,
                    '-c': bool,
                    '-s': int,
                    '-u': int,
                    '-5': int,
                    '-3': int,
                    '--phred33': bool,
                    '--phred64': bool,
                    '--solexa-quals': bool,
                    '--int-quals': bool,
                    '--very-fast': bool,
                    '--fast': bool,
                    '--sensitive': bool,
                    '--very-sensitive': bool,
                    '--very-fast-local': bool,
                    '--fast-local': bool,
                    '--sensitive-local': bool,
                    '--very-sensitive-local': bool,
                    '-N': int,
                    '-L': int,
                    '-i': str,
                    '--n-ceil': str,
                    '--dpad': int,
                    '--gbar': int,
                    '--ignore-quals': bool,
                    '--nofw': bool,
                    '--norc': bool,
                    '--no-1mm-upfront': bool,
                    '--end-to-end': bool,
                    '--local': bool,
                    '-k': int,
                    '-a': bool,
                    '-D': int,
                    '-R': int,
                    '--ma': int,
                    '--mp': [int, 2, ','],
                    '--np': int,
                    '--rdg': [int, 2, ','],
                    '--rfg': [int, 2, ','],
                    '--score-min': str,
                    '-I': int,
                    '-X': int,
                    '--fr': bool,
                    '--rf': bool,
                    '--ff': bool,
                    '--no-mixed': bool,
                    '--no-discordant': bool,
                    '--dovetail': bool,
                    '--no-contain': bool,
                    '--no-overlap': bool,
                    '--no-unal': bool,
                    '--no-hd': bool,
                    '--no-sq': bool,
                    '--rg-id': str,
                    '--rg': str,
                    '--omit-sec-seq': bool,
                    '-o': int,
                    '-p': int,
                    '--reorder': bool,
                    '--mm': bool,
                    '--qc-filter': bool,
                    '--seed': int,
                    '--non-deterministic': bool,
                    '-t': bool,
                    '--un': str,
                    '--un-gz': str,
                    '--un-bz2': str,
                    '--al': str,
                    '--al-gz': str,
                    '--al-bz2': str,
                    '--un-conc': str,
                    '--un-conc-gz': str,
                    '--un-conc-bz2': str,
                    '--al-conc': str,
                    '--al-conc-gz': str,
                    '--al-conc-bz2': str,
                    '--quiet': bool,
                    '--met-file': str,
                    '--met-stderr': str,
                    '--met': int,
                },
                'unnamed': [
                ],
            },
        }
class Bwa:
    """Declarative command specs for the bwa aligner subcommands."""

    @staticmethod
    def mem():
        """Spec for `bwa mem` (SAM written via stdout redirection to `out`)."""
        return {
            'cmd': 'bwa mem',
            'type': None,
            'log': 'log',
            'out': {
                'redirect': True,
                'return': [
                    {'arg': 'out', 'type': formats.Sam, 'suffix': ''},
                ],
            },
            'args': {
                'named': {
                    '-t': int,
                    '-k': int,
                    '-w': int,
                    '-d': int,
                    '-r': float,
                    '-c': int,
                    '-P': bool,
                    '-a': int,
                    '-B': int,
                    '-O': int,
                    '-E': int,
                    '-L': int,
                    '-U': int,
                    '-p': bool,
                    '-R': str,
                    '-T': int,
                    '-C': bool,
                    '-H': bool,
                    '-M': bool,
                    '-v': int,
                },
                'unnamed': [
                    # in2 (mate reads) is optional: no trailing '*'.
                    ('ref*', formats.BwaIndex),
                    ('in1*', formats.Fastq),
                    ('in2', formats.Fastq),
                    ('out*', str),
                ],
            },
        }

    @staticmethod
    def aln():
        """Spec for `bwa aln` (produces a .sai alignment via redirection)."""
        return {
            'cmd': 'bwa aln',
            'type': None,
            'log': 'log',
            'out': {
                'redirect': True,
                'return': [
                    {'arg': 'out', 'type': formats.Sai, 'suffix': ''},
                ],
            },
            'args': {
                'named': {
                    '-n': int,
                    '-o': int,
                    '-e': int,
                    '-d': int,
                    '-i': int,
                    '-l': int,
                    '-k': int,
                    '-t': int,
                    '-M': int,
                    '-O': int,
                    '-E': int,
                    '-R': int,
                    '-c': bool,
                    '-N': bool,
                    '-q': int,
                    '-I': bool,
                    '-B': int,
                    '-b': bool,
                    '-0': bool,
                    '-1': bool,
                    '-2': bool,
                },
                'unnamed': [
                    ('ref*', formats.BwaIndex),
                    ('in_*', formats.Fastq),
                    ('out*', str),
                ],
            },
        }

    @staticmethod
    def samse():
        """Spec for `bwa samse` (single-end SAM from one .sai + reads)."""
        return {
            'cmd': 'bwa samse',
            'type': None,
            'log': 'log',
            'out': {
                'redirect': True,
                'return': [
                    {'arg': 'out', 'type': formats.Sam, 'suffix': ''},
                ],
            },
            'args': {
                'named': {
                    '-n': int,
                    '-r': str,
                },
                'unnamed': [
                    ('ref*', formats.BwaIndex),
                    ('sai*', formats.Sai),
                    ('in_*', formats.Fastq),
                    ('out*', str),
                ],
            },
        }

    @staticmethod
    def sampe():
        """Spec for `bwa sampe` (paired-end SAM from two .sai + read pairs)."""
        return {
            'cmd': 'bwa sampe',
            'type': None,
            'log': 'log',
            'out': {
                'redirect': True,
                'return': [
                    {'arg': 'out', 'type': formats.Sam, 'suffix': ''},
                ],
            },
            'args': {
                'named': {
                    '-a': int,
                    '-o': int,
                    '-P': bool,
                    '-n': int,
                    '-N': int,
                    '-r': str,
                },
                'unnamed': [
                    ('ref*', formats.BwaIndex),
                    ('sai1*', formats.Sai),
                    ('sai2*', formats.Sai),
                    ('in1*', formats.Fastq),
                    ('in2*', formats.Fastq),
                    ('out*', str),
                ],
            },
        }

    @staticmethod
    def bwasw():
        """Spec for `bwa bwasw` (long-read Smith-Waterman alignment)."""
        return {
            'cmd': 'bwa bwasw',
            'type': None,
            'log': 'log',
            'out': {
                'redirect': True,
                'return': [
                    {'arg': 'out', 'type': formats.Sam, 'suffix': ''},
                ]
            },
            'args': {
                'named': {
                    '-a': int,
                    '-b': int,
                    '-q': int,
                    '-r': int,
                    '-t': int,
                    '-w': int,
                    '-T': int,
                    '-c': float,
                    '-z': int,
                    '-s': int,
                    '-N': int,
                },
                'unnamed': [
                    ('ref*', formats.BwaIndex),
                    ('in1*', formats.Fastq),
                    ('in2', formats.Fastq),
                    ('out*', str),
                ],
            },
        }
class Freebayes:
    """Declarative command spec for the freebayes variant caller."""

    @staticmethod
    def freebayes():
        """Spec for `freebayes` (VCF written to the file named by -v)."""
        return {
            'cmd': 'freebayes',
            'type': None,
            'log': 'log',
            'out': {
                'redirect': False,
                'return': [
                    {'arg': 'v', 'type': formats.Vcf, 'suffix': ''},
                ]
            },
            'args': {
                'named': {
                    # Required: output VCF path and reference FASTA.
                    '-v*': str,
                    '-f*': formats.Fasta,
                    '-b': formats.Bam,
                    '-t': formats.Bed,
                    '-r': str,
                    '-s': formats.TextFile,
                    '--populations': formats.TextFile,
                    '-A': formats.Bed,
                    '--trace': str,
                    '--failed-alleles': formats.Bed,
                    '--variant-input': formats.Vcf,
                    '-l': bool,
                    '--haplotype-basis-alleles': bool,
                    '--report-all-haplotype-alleles': bool,
                    '--report-monorphic': bool,
                    '-P': float,
                    '-T': float,
                    '-p': int,
                    '-J': bool,
                    '-Z': bool,
                    '--reference_quality': [int, 2, ','],
                    '-I': bool,
                    '-i': bool,
                    '-X': bool,
                    '-u': bool,
                    '-n': int,
                    '-E': int,
                    '--max-complex-gap': int,
                    '--haplotype-length': int,
                    '--min-repeat-length': int,
                    '--min-repeat-entropy': int,
                    '--no-partial-observation': bool,
                    '-O': bool,
                    '-4': bool,
                    '-m': int,
                    '-q': int,
                    '-R': int,
                    '-Y': int,
                    '-Q': int,
                    '-U': int,
                    '-z': int,
                    '--read-snp-limit': int,
                    '-e': int,
                    '-0': int,
                    '-F': int,
                    '-C': int,
                    '-3': int,
                    '-G': int,
                    '--min-coverage': int,
                    '-w': bool,
                    '-V': bool,
                    '-a': bool,
                    '--observation-bias': formats.TextFile,
                    '--base-quality-cap': int,
                    '--experimental-gls': bool,
                    '--prob-contamination': float,
                    '--contamination-estimates': formats.TextFile,
                    '--report-genotype-likelihood-max': bool,
                    '-B': int,
                    '--genotype-max-iterations': int,
                    '-W': [int, 2, ','],
                    '-S': int,
                    '-j': bool,
                    '-H': bool,
                    '-D': int,
                    '--genotype-qualities': bool,
                },
                'unnamed': [
                    # One or more input BAMs, space-separated.
                    ('in_*', [formats.Bam, 1, ' ']),
                ]
            }
        }
class Samtools:
    @staticmethod
    def view():
        """Spec for `samtools view`.

        Output goes to -o; its type flips on -b (BAM vs SAM), and the input
        type flips on -S (SAM input vs BAM).
        """
        return {
            'cmd': 'samtools view',
            'type': None,
            'log': 'log',
            'out': {
                'redirect': False,
                'return': [
                    {'arg': '-o', 'type': {'-b': formats.Bam, '': formats.Sam}, 'suffix': ''},
                ]
            },
            'args': {
                'named': {
                    '-o*': str,
                    '-b': bool,
                    '-f': int,
                    '-F': int,
                    '-h': bool,
                    '-H': bool,
                    '-l': str,
                    '-q': int,
                    '-r': str,
                    '-R': formats.Bed,
                    '-S': bool,
                    '-c': bool,
                    '-t': formats.TextFile,
                    '-u': bool,
                },
                'unnamed': [
                    ('in_*', {'-S': formats.Sam, '': formats.Bam}),
                ],
            },
        }
    @staticmethod
    def mpileup():
        """Spec for `samtools mpileup`.

        Output is stdout-redirected to `out`; its type is BCF with -u or -g,
        otherwise textual pileup.
        """
        return {
            'cmd': 'samtools mpileup',
            'type': None,
            'log': 'log',
            'out': {
                'redirect': True,
                'return': [
                    {'arg': 'out', 'type': {'-u': formats.Bcf, '-g': formats.Bcf, '': formats.Pileup}, 'suffix': ''},
                ],
            },
            'args': {
                'named': {
                    '-6': bool,
                    '-A': bool,
                    '-B': bool,
                    '-b': formats.TextFile,
                    '-C': int,
                    '-E': bool,
                    '-f': formats.Fasta,
                    '-l': formats.Bed,
                    '-q': int,
                    '-Q': int,
                    '-r': str,
                    '-D': bool,
                    '-g': bool,
                    '-S': bool,
                    '-u': bool,
                    '-e': int,
                    '-h': int,
                    '-I': bool,
                    '-L': bool,
                    '-o': int,
                    '-P': str,
                },
                'unnamed': [
                    # One or more input BAMs, space-separated, then the output name.
                    ('in_*', [formats.Bam, 1, ' ']),
                    ('out*', str),
                ]
            }
        }
@staticmethod
def cat():
return {
'cmd': 'samtools cat',
'type': None,
'log': 'log',
'out': {
'redirect': False,
'return': [
{'arg': 'o', 'type': formats.Bam, 'suffix': ''},
],
},
'args': {
'named': {
'-h': formats.Sam,
},
'unnamed': [
('in_*', formats.Bam),
('o*', str)
],
},
}
@staticmethod
def sort():
return {
'cmd': 'samtools sort',
'type': None,
'log': 'log',
'out': {
'redirect': False,
'return': [
{'arg': 'out', 'type': formats.Bam, 'suffix': '.bam'},
],
},
'args': {
'named': {
'-n': bool,
'-m': int,
},
'unnamed': [
('in_*', formats.Bam),
('out*', str)
],
},
}
    @staticmethod
    def merge():
        """Spec for `samtools merge` (output name first, then 2+ input BAMs)."""
        return {
            'cmd': 'samtools merge',
            'type': None,
            'log': 'log',
            'out': {
                'redirect': False,
                'return': [
                    {'arg': 'out', 'type': formats.Bam, 'suffix': ''},
                ],
            },
            'args': {
                'named': {
                    '-1': bool,
                    '-f': bool,
                    '-h': formats.Sam,
                    '-n': bool,
                    '-R': str,
                    '-r': bool,
                    '-u': bool,
                },
                'unnamed': [
                    # At least two input BAMs, space-separated.
                    ('out*', str),
                    ('in_*', [formats.Bam, 2, ' ']),
                ],
            },
        }
@staticmethod
def rmdup():
return {
'cmd': 'samtools rmdup',
'type': None,
'log': 'log',
'out': {
'redirect': False,
'return': [
{'arg': 'out', 'type': formats.Bam, 'suffix': ''},
],
},
'args': {
'named': {
'-s': bool,
'-S': bool,
},
'unnamed': [
('in_*', formats.Bam),
('out*', | |
hmn_dhcp_bootstrap = self.sls_networks["HMN"].subnets()["bootstrap_dhcp"]
for name, reservation in hmn_dhcp_bootstrap.reservations().items():
if str(bmc_ip) == str(reservation.ipv4_address()):
reservation_found = True
action_log(action, f'Removing existing IP Reservation for {self.bmc_alias} in the bootstrap_dhcp subnet of the HMN network: {reservation.name()} {reservation.ipv4_address()} {reservation.aliases()} {reservation.comment()}')
del hmn_dhcp_bootstrap.reservations()[name]
break
if not reservation_found:
action_log(action, f"Error BMC IP Reservation for {self.bmc_alias} missing from the HMN bootstrap_dhcp subnet")
failed_to_find_ip = True
if failed_to_find_ip:
print_action(action)
sys.exit(1)
# Update State
self.ncn_ips = ncn_ips
self.bmc_ip = bmc_ip
def allocate_ncn_ips(self, action: dict,):
#
# Allocate new NCN BMC
#
action_log(action, "Allocating NCN BMC IP address")
bmc_ip = allocate_ip_address_in_subnet(action, self.sls_networks, "HMN", "bootstrap_dhcp")
# Add BMC IP reservation to the HMN network.
# Example: {"Aliases":["ncn-s001-mgmt"],"Comment":"x3000c0s13b0","IPAddress":"10.254.1.31","Name":"x3000c0s13b0"}
bmc_ip_reservation = IPReservation(self.bmc_xname, bmc_ip, comment=self.bmc_xname, aliases=[self.bmc_alias])
action_log(action, f"Temporally adding NCN BMC IP reservation to bootstrap_dhcp subnet in the HMN network: {bmc_ip_reservation.to_sls()}")
self.sls_networks["HMN"].subnets()["bootstrap_dhcp"].reservations().update(
{
bmc_ip_reservation.name(): bmc_ip_reservation
}
)
#
# Allocate new NCN IPs in SLS
#
action_log(action, "")
action_log(action, "Allocating NCN IP addresses")
ncn_ips = {}
for network_name in ["CAN", "CHN", "CMN", "HMN", "MTL", "NMN"]:
if network_name not in self.sls_networks:
continue
ncn_ips[network_name] = allocate_ip_address_in_subnet(action, self.sls_networks, network_name, "bootstrap_dhcp", self.networks_allowed_in_dhcp_range)
action_log(action, "Removing temporary NCN BMC IP reservation in the bootstrap_dhcp subnet for the HMN network")
del self.sls_networks["HMN"].subnets()["bootstrap_dhcp"].reservations()[bmc_ip_reservation.name()]
# Update State
self.ncn_ips = ncn_ips
self.bmc_ip = bmc_ip
self.action_log_ncn_ips(action)
# Only for new IP addresses that have been allocated:
# Validate the NCN and its BMC to be added does not have an IP reservation already defined for it
# Also validate that none of the IP addresses we have allocated are currently in use in SLS.
fail_sls_network_check = False
for network_name, sls_network in self.sls_networks.items():
for subnet in sls_network.subnets().values():
for ip_reservation in subnet.reservations().values():
# Verify no IP Reservations exist for the NCN
if ip_reservation.name() == self.ncn_alias:
fail_sls_network_check = True
action_log(action, f'Error found existing NCN IP Reservation in subnet {subnet.name()} network {network_name} in SLS: {ip_reservation.to_sls()}')
# Verify no IP Reservations exist for the NCN BMC
if ip_reservation.name() == self.bmc_xname:
fail_sls_network_check = True
action_log(action, f'Error found existing NCN BMC IP Reservation in subnet {subnet.name()} network {network_name} in SLS: {ip_reservation.to_sls()}')
# Verify no IP Reservations exist with any NCN IP
if sls_network.name() in ncn_ips:
allocated_ip = ncn_ips[network_name]
if ip_reservation.ipv4_address() == allocated_ip:
fail_sls_network_check = True
action_log(action, f'Error found allocated NCN IP {allocated_ip} in subnet {subnet.name()} network {network_name} in SLS: {ip_reservation.to_sls()}')
# Verify no IP Reservations exist with the NCN BMC IP
if sls_network.name() == "HMN" and ip_reservation.ipv4_address() == bmc_ip:
fail_sls_network_check = True
action_log(action, f'Error found allocated NCN BMC IP {allocated_ip} in subnet {subnet.name()} network {network_name} in SLS: {ip_reservation.to_sls()}')
if fail_sls_network_check:
print_action(action)
sys.exit(1)
action_log(action, f'Pass {self.ncn_xname} ({self.ncn_alias}) does not currently exist in SLS Networks')
action_log(action, f'Pass {self.bmc_xname} ({self.bmc_alias}) does not currently exist in SLS Networks')
action_log(action, f'Pass allocated IPs for NCN {self.ncn_xname} ({self.ncn_alias}) are not currently in use in SLS Networks')
action_log(action, f'Pass allocated IP for NCN BMC {self.bmc_xname} ({self.bmc_alias}) are not currently in use in SLS Networks')
def action_log_ncn_ips(self, action: dict):
    """Append human-readable tables of the allocated NCN and NCN BMC IP
    addresses to the given action's log.

    Args:
        action: The action record that action_log() appends log lines to.
    """
    rule = "================================="
    # NCN IP table: header first, then one row per network in sorted order.
    for text in ("", rule, "Management NCN IP Allocation", rule,
                 "Network | IP Address", "--------|-----------"):
        action_log(action, text)
    for network in sorted(self.ncn_ips):
        action_log(action, f'{network:<8}| {self.ncn_ips[network]}')
    # BMC IP table: the BMC only ever gets an address on the HMN.
    for text in ("", rule, "Management NCN BMC IP Allocation", rule,
                 "Network | IP Address", "--------|-----------",
                 f'HMN | {self.bmc_ip}', ""):
        action_log(action, text)
def print_ncn_ips(self):
    """Print human-readable tables of the allocated NCN and NCN BMC IP
    addresses to stdout (same content as action_log_ncn_ips, indented
    by one space for console display)."""
    rule = " ================================="
    lines = [
        "",
        rule,
        " Management NCN IP Allocation",
        rule,
        " Network | IP Address",
        " --------|-----------",
    ]
    # One row per network, sorted by network name.
    lines.extend(f' {network:<8}| {self.ncn_ips[network]}'
                 for network in sorted(self.ncn_ips))
    # The BMC only ever gets an address on the HMN.
    lines += [
        "",
        rule,
        " Management NCN BMC IP Allocation",
        rule,
        " Network | IP Address",
        " --------|-----------",
        f' HMN | {self.bmc_ip}',
        "",
    ]
    # Joining and printing once is byte-identical to printing line-by-line.
    print("\n".join(lines))
def validate_global_bss_bootparameters(self, action: dict):
    """Validate the BSS Global boot parameters against the NCN being added.

    When allocating fresh IPs (self.use_existing_ip_addresses is False), the
    NCN and its BMC must not appear anywhere in the Global host_records.
    When reusing existing IPs, the NCN and BMC must appear with exactly the
    expected alias/IP pairings. In the reuse case the NCN additionally must
    not be configured as the 'first-master-hostname'.

    Args:
        action: The action record used for logging via action_log()/print_action().

    Exits the process with status 1 (after printing the action log) on any failure.
    """
    host_records = self.global_bootparameters["cloud-init"]["meta-data"]["host_records"]
    if not self.use_existing_ip_addresses:
        # Validate the NCN is not referenced in the Global boot parameters.
        fail_host_records = False
        for host_record in host_records:
            # Check for NCN and NCN BMC aliases.
            for alias in host_record["aliases"]:
                if alias.startswith(self.ncn_alias):
                    action_log(action, f'Error found NCN alias in Global host_records in BSS: {host_record}')
                    fail_host_records = True
            # Check if this record holds one of our freshly allocated NCN IPs.
            for network, ip in self.ncn_ips.items():
                if host_record["ip"] == ip:
                    action_log(action, f'Error found {network} IP Address {ip} in Global host_records in BSS: {host_record}')
                    fail_host_records = True
            # Check if this record holds the freshly allocated NCN BMC IP.
            if host_record["ip"] == self.bmc_ip:
                action_log(action, f'Error found NCN BMC IP Address {self.bmc_ip} in Global host_records in BSS: {host_record}')
                fail_host_records = True
        if fail_host_records:
            print_action(action)
            sys.exit(1)
        action_log(action, f"Pass {self.ncn_xname} does not currently exist in BSS Global host_records")
        print_action(action)
    else:
        # Validate the NCN has the expected data in the BSS Global boot parameters.
        fail_host_records = False
        for host_record in host_records:
            for network_name, ip in self.ncn_ips.items():
                expected_alias = f'{self.ncn_alias}.{network_name.lower()}'
                # Verify each NCN IP is associated with the correct NCN.
                if str(ip) == host_record["ip"]:
                    expected_aliases = [expected_alias]
                    alternate_aliases = []  # ncn-m001 on the NMN can have an alternate host record for the PIT
                    if network_name == "NMN":
                        expected_aliases.append(self.ncn_alias)
                        alternate_aliases = ["pit", "pit.nmn"]
                    # Bug fix: only compare against alternate_aliases when it is non-empty.
                    # Previously a host record with an empty alias list matched
                    # alternate_aliases == [] for every non-NMN network and wrongly passed.
                    if expected_aliases == host_record["aliases"] or \
                            (alternate_aliases and alternate_aliases == host_record["aliases"]):
                        action_log(action, f"Pass found existing host_record with the IP address {ip} which contains the expected aliases of {expected_aliases}")
                    else:
                        fail_host_records = True
                        action_log(action, f'Error existing host_record with IP address {ip} with aliases {host_record["aliases"]}, instead of {expected_aliases}')
                # Verify each NCN alias is associated with the correct IP.
                if expected_alias in host_record["aliases"]:
                    if str(ip) == host_record["ip"]:
                        action_log(action, f"Pass found existing host_record for alias {expected_alias} which has the expected IP address of {ip}")
                    else:
                        fail_host_records = True
                        action_log(action, f'Error existing host_record for alias {expected_alias} has the IP address of {host_record["ip"]}, instead of the expected {ip}')
            # Verify the NCN BMC IP is associated with the correct BMC.
            if str(self.bmc_ip) == host_record["ip"]:
                expected_aliases = [self.bmc_alias]
                if expected_aliases == host_record["aliases"]:
                    action_log(action, f"Pass found existing BMC host_record with the IP address {self.bmc_ip} which contains the expected aliases of {expected_aliases}")
                else:
                    fail_host_records = True
                    action_log(action, f'Error existing BMC host_record with IP address {self.bmc_ip} with aliases {host_record["aliases"]}, instead of {expected_aliases}')
            # Verify the BMC alias is associated with the correct IP.
            if self.bmc_alias in host_record["aliases"]:
                if str(self.bmc_ip) == host_record["ip"]:
                    action_log(action, f"Pass found existing BMC host_record for alias {self.bmc_alias} which has the expected IP address of {self.bmc_ip}")
                else:
                    fail_host_records = True
                    # Bug fix: this message previously interpolated `expected_alias` — the NCN
                    # network alias left over from the loop above (and unbound when ncn_ips is
                    # empty) — instead of the BMC alias this branch actually checks.
                    action_log(action, f'Error existing BMC host_record for alias {self.bmc_alias} has the IP address of {host_record["ip"]}, instead of the expected {self.bmc_ip}')
        if fail_host_records:
            print_action(action)
            sys.exit(1)
        # Validate the NCN being added is not configured as the 'first-master-hostname'.
        first_master_hostname = self.global_bootparameters["cloud-init"]["meta-data"]["first-master-hostname"]
        if first_master_hostname == self.ncn_alias:
            action_log(action, f'Error the NCN being added {self.ncn_alias} is currently configured as the "first-master-hostname" in the Global BSS Bootparameters')
            print_action(action)
            sys.exit(1)
        else:
            action_log(action, f'Pass the NCN being added {self.ncn_alias} is not configured as the "first-master-hostname", currently {first_master_hostname} is in the Global BSS Bootparameters.')
        print_action(action)
def update_sls_networking(self, session: requests.Session):
# Add IP Reservations for all of the networks that make sense
for network_name, ip in self.ncn_ips.items():
sls_network = self.sls_networks[network_name]
# CAN
# Master: {"Aliases":["ncn-m002-can","time-can","time-can.local"],"Comment":"x3000c0s3b0n0","IPAddress":"10.101.5.134","Name":"ncn-m002"}
# Worker: {"Aliases":["ncn-w001-can","time-can","time-can.local"],"Comment":"x3000c0s7b0n0","IPAddress":"10.101.5.136","Name":"ncn-w001"}
# Storage: {"Aliases":["ncn-s001-can","time-can","time-can.local"],"Comment":"x3000c0s13b0n0","IPAddress":"10.101.5.147","Name":"ncn-s001"}
# CHN
# Master: {"Aliases":["ncn-m002-chn","time-chn","time-chn.local"],"Comment":"x3000c0s3b0n0","IPAddress":"10.101.5.198","Name":"ncn-m002"}
# Worker: {"Aliases":["ncn-w001-chn","time-chn","time-chn.local"],"Comment":"x3000c0s7b0n0","IPAddress":"10.101.5.200","Name":"ncn-w001"}
# Storage: {"Aliases":["ncn-s001-chn","time-chn","time-chn.local"],"Comment":"x3000c0s13b0n0","IPAddress":"10.101.5.211","Name":"ncn-s001"}
# CMN
# Master: {"Aliases":["ncn-m002-cmn","time-cmn","time-cmn.local"],"Comment":"x3000c0s3b0n0","IPAddress":"10.101.5.20","Name":"ncn-m002"}
# Worker: {"Aliases":["ncn-w001-cmn","time-cmn","time-cmn.local"],"Comment":"x3000c0s7b0n0","IPAddress":"10.101.5.22","Name":"ncn-w001"}
# Storage: {"Aliases":["ncn-s001-cmn","time-cmn","time-cmn.local"],"Comment":"x3000c0s13b0n0","IPAddress":"10.101.5.33","Name":"ncn-s001"}
# HMN
# Master: {"Aliases":["ncn-m002-hmn","time-hmn","time-hmn.local"],"Comment":"x3000c0s3b0n0","IPAddress":"10.254.1.6","Name":"ncn-m002"}
# Worker: {"Aliases":["ncn-w001-hmn","time-hmn","time-hmn.local"],"Comment":"x3000c0s7b0n0","IPAddress":"10.254.1.10","Name":"ncn-w001"}
# Storage: {"Aliases":["ncn-s001-hmn","time-hmn","time-hmn.local","rgw-vip.hmn"],"Comment":"x3000c0s13b0n0","IPAddress":"10.254.1.32","Name":"ncn-s001"}
# MTL
# Master: {"Aliases":["ncn-m002-mtl","time-mtl","time-mtl.local"],"Comment":"x3000c0s3b0n0","IPAddress":"10.1.1.3","Name":"ncn-m002"}
# Worker: {"Aliases":["ncn-w001-mtl","time-mtl","time-mtl.local"],"Comment":"x3000c0s7b0n0","IPAddress":"10.1.1.5","Name":"ncn-w001"}
# Storage: {"Aliases":["ncn-s001-mtl","time-mtl","time-mtl.local"],"Comment":"x3000c0s13b0n0","IPAddress":"10.1.1.16","Name":"ncn-s001"}
# NMN
# Master: {"Aliases":["ncn-m002-nmn","time-nmn","time-nmn.local","x3000c0s3b0n0","ncn-m002.local"],"Comment":"x3000c0s3b0n0","IPAddress":"10.252.1.5","Name":"ncn-m002"}
# Worker: {"Aliases":["ncn-w001-nmn","time-nmn","time-nmn.local","x3000c0s7b0n0","ncn-w001.local"],"Comment":"x3000c0s7b0n0","IPAddress":"10.252.1.7","Name":"ncn-w001"}
# Storage: {"Aliases":["ncn-s001-nmn","time-nmn","time-nmn.local","x3000c0s13b0n0","ncn-s001.local"],"Comment":"x3000c0s13b0n0","IPAddress":"10.252.1.18","Name":"ncn-s001"}
# Generalizations
# - All IP reservations have the NCN xname for the comment
# - Following rules apply to all but CHN
# - NCN Alias is the IP reservation name
# - Each master/worker/storage have the following aliases
# - ncn-{*}-{network}
# - time-{network}
# - time-{network}.local
# - Storage nodes on the HMN have additional alias rgw-vip.hmn
# - All NCNs on the NMN have the additional aliases:
# - xname
# - ncn-{*}.local
# - The CHN
# - No reservations
# - have IP reservations with the node xname for the reservation name
# All networks except for the CHN have the NCNs alias as the name for the reservation. The CHN has the node xname.
name = self.ncn_alias
# All NCN types have their xname as the comment for their IP reservation
comment = self.ncn_xname
# For all | |
2;
# break;
# BIT instructions
if instruction == 0x24: # $24/36 BIT zp
self.BIT(OperandRef(LOC_VAL, self.zeropage()))
self.pc += 1
return 1
if instruction == 0x2c: # $2C/44 BIT abs
self.BIT(OperandRef(LOC_VAL, self.absolute()))
self.pc += 2
return 1
# case 0x30:
# if (flags & FN) BRANCH()
# else pc++;
# break;
# BMI instruction
if instruction == 0x30: # $30/48 BMI rel
if (self.flags & FN):
self.branch()
else:
self.pc += 1
return 1
# case 0xd0:
# if (!(flags & FZ)) BRANCH()
# else pc++;
# break;
# BNE instruction
if instruction == 0xd0: # $D0/208 BNE rel
if not (self.flags & FZ):
self.branch()
else:
self.pc += 1
return 1
# case 0x10:
# if (!(flags & FN)) BRANCH()
# else pc++;
# break;
# BPL instruction
if instruction == 0x10: # $10/16 BPL rel
if not (self.flags & FN):
self.branch()
else:
self.pc += 1
return 1
# case 0x50:
# if (!(flags & FV)) BRANCH()
# else pc++;
# break;
# BVC instruction
if instruction == 0x50: # $50/80 BVC rel
if not (self.flags & FV):
self.branch()
else:
self.pc += 1
return 1
# case 0x70:
# if (flags & FV) BRANCH()
# else pc++;
# break;
# BVS instruction
if instruction == 0x70: # $70/112 BVS rel
if (self.flags & FV):
self.branch()
else:
self.pc += 1
return 1
# case 0x18:
# flags &= ~FC;
# break;
# CLC instruction
if instruction == 0x18: # $18/24 CLC
self.flags &= (~FC & 0xff)
return 1
# case 0xd8:
# flags &= ~FD;
# break;
# CLD instruction
if instruction == 0xd8: # $D8/216 CLD
self.flags &= (~FD & 0xff)
return 1
# case 0x58:
# flags &= ~FI;
# break;
# CLI instruction
if instruction == 0x58: # $58/88 CLI
self.flags &= (~FI & 0xff)
return 1
# case 0xb8:
# flags &= ~FV;
# break;
# CLV instruction
if instruction == 0xb8: # $B8/184 CLV
self.flags &= (~FV & 0xff)
return 1
# case 0xc9:
# CMP(a, IMMEDIATE());
# pc++;
# break;
#
# case 0xc5:
# CMP(a, MEM(ZEROPAGE()));
# pc++;
# break;
#
# case 0xd5:
# CMP(a, MEM(ZEROPAGEX()));
# pc++;
# break;
#
# case 0xcd:
# CMP(a, MEM(ABSOLUTE()));
# pc += 2;
# break;
#
# case 0xdd:
# cpucycles += EVALPAGECROSSING_ABSOLUTEX();
# CMP(a, MEM(ABSOLUTEX()));
# pc += 2;
# break;
#
# case 0xd9:
# cpucycles += EVALPAGECROSSING_ABSOLUTEY();
# CMP(a, MEM(ABSOLUTEY()));
# pc += 2;
# break;
#
# case 0xc1:
# CMP(a, MEM(INDIRECTX()));
# pc++;
# break;
#
# case 0xd1:
# cpucycles += EVALPAGECROSSING_INDIRECTY();
# CMP(a, MEM(INDIRECTY()));
# pc++;
# break;
# CMP instructions
if instruction == 0xc9: # $C9/201 CMP #n
self.CMP(A_OPREF, OperandRef(BYTE_VAL, self.immediate()))
self.pc += 1
return 1
if instruction == 0xc5: # $C5/197 CMP zp
self.CMP(A_OPREF, OperandRef(LOC_VAL, self.zeropage()))
self.pc += 1
return 1
if instruction == 0xd5: # $D5/213 CMP zp,X
self.CMP(A_OPREF, OperandRef(LOC_VAL, self.zeropage_x()))
self.pc += 1
return 1
if instruction == 0xcd: # $CD/205 CMP abs
self.CMP(A_OPREF, OperandRef(LOC_VAL, self.absolute()))
self.pc += 2
return 1
if instruction == 0xdd: # $DD/221 CMP abs,X
self.cpucycles += self.eval_page_crossing_absolute_x()
self.CMP(A_OPREF, OperandRef(LOC_VAL, self.absolute_x()))
self.pc += 2
return 1
if instruction == 0xd9: # $D9/217 CMP abs,Y
self.cpucycles += self.eval_page_crossing_absolute_y()
self.CMP(A_OPREF, OperandRef(LOC_VAL, self.absolute_y()))
self.pc += 2
return 1
if instruction == 0xc1: # $C1/193 CMP (zp,X)
self.CMP(A_OPREF, OperandRef(LOC_VAL, self.indirect_x()))
self.pc += 1
return 1
if instruction == 0xd1: # $D1/209 CMP (zp),Y
self.cpucycles += self.eval_page_crossing_indirect_y()
self.CMP(A_OPREF, OperandRef(LOC_VAL, self.indirect_y()))
self.pc += 1
return 1
# case 0xe0:
# CMP(x, IMMEDIATE());
# pc++;
# break;
#
# case 0xe4:
# CMP(x, MEM(ZEROPAGE()));
# pc++;
# break;
#
# case 0xec:
# CMP(x, MEM(ABSOLUTE()));
# pc += 2;
# break;
# CPX instructions
if instruction == 0xe0: # $E0/224 CPX #n
self.CMP(X_OPREF, OperandRef(BYTE_VAL, self.immediate()))
self.pc += 1
return 1
if instruction == 0xe4: # $E4/228 CPX zp
self.CMP(X_OPREF, OperandRef(LOC_VAL, self.zeropage()))
self.pc += 1
return 1
if instruction == 0xec: # $EC/236 CPX abs
self.CMP(X_OPREF, OperandRef(LOC_VAL, self.absolute()))
self.pc += 2
return 1
# case 0xc0:
# CMP(y, IMMEDIATE());
# pc++;
# break;
#
# case 0xc4:
# CMP(y, MEM(ZEROPAGE()));
# pc++;
# break;
#
# case 0xcc:
# CMP(y, MEM(ABSOLUTE()));
# pc += 2;
# break;
# CPY instructions
if instruction == 0xc0: # $C0/192 CPY #n
self.CMP(Y_OPREF, OperandRef(BYTE_VAL, self.immediate()))
self.pc += 1
return 1
if instruction == 0xc4: # $C4/196 CPY zp
self.CMP(Y_OPREF, OperandRef(LOC_VAL, self.zeropage()))
self.pc += 1
return 1
if instruction == 0xcc: # $CC/204 CPY abs
self.CMP(Y_OPREF, OperandRef(LOC_VAL, self.absolute()))
self.pc += 2
return 1
# case 0xc6:
# DEC(MEM(ZEROPAGE()));
# WRITE(ZEROPAGE());
# pc++;
# break;
#
# case 0xd6:
# DEC(MEM(ZEROPAGEX()));
# WRITE(ZEROPAGEX());
# pc++;
# break;
#
# case 0xce:
# DEC(MEM(ABSOLUTE()));
# WRITE(ABSOLUTE());
# pc += 2;
# break;
#
# case 0xde:
# DEC(MEM(ABSOLUTEX()));
# WRITE(ABSOLUTEX());
# pc += 2;
# break;
# DEC instructions
if instruction == 0xc6: # $C6/198 DEC zp
self.DEC(OperandRef(LOC_VAL, self.zeropage()))
self.pc += 1
return 1
if instruction == 0xd6: # $D6/214 DEC zp,X
self.DEC(OperandRef(LOC_VAL, self.zeropage_x()))
self.pc += 1
return 1
if instruction == 0xce: # $CE/206 DEC abs
self.DEC(OperandRef(LOC_VAL, self.absolute()))
self.pc += 2
return 1
if instruction == 0xde: # $DE/222 DEC abs,X
self.DEC(OperandRef(LOC_VAL, self.absolute_x()))
self.pc += 2
return 1
# case 0xca:
# x--;
# SETFLAGS(x);
# break;
# DEX instruction
if instruction == 0xca: # $CA/202 DEX
self.x -= 1
self.x &= 0xff
self.set_flags(self.x)
return 1
# case 0x88:
# y--;
# SETFLAGS(y);
# break;
# DEY instruction
if instruction == 0x88: # $88/136 DEY
self.y -= 1
self.y &= 0xff
self.set_flags(self.y)
return 1
# case 0x49:
# EOR(IMMEDIATE());
# pc++;
# break;
#
# case 0x45:
# EOR(MEM(ZEROPAGE()));
# pc++;
# break;
#
# case 0x55:
# EOR(MEM(ZEROPAGEX()));
# pc++;
# break;
#
# case 0x4d:
# EOR(MEM(ABSOLUTE()));
# pc += 2;
# break;
#
# case 0x5d:
# cpucycles += EVALPAGECROSSING_ABSOLUTEX();
# EOR(MEM(ABSOLUTEX()));
# pc += 2;
# break;
#
# case 0x59:
# cpucycles += EVALPAGECROSSING_ABSOLUTEY();
# EOR(MEM(ABSOLUTEY()));
# pc += 2;
# break;
#
# case 0x41:
# EOR(MEM(INDIRECTX()));
# pc++;
# break;
#
# case 0x51:
# cpucycles += EVALPAGECROSSING_INDIRECTY();
# EOR(MEM(INDIRECTY()));
# pc++;
# break;
# EOR instructions
if instruction == 0x49: # $49/73 EOR #n
self.EOR(OperandRef(BYTE_VAL, self.immediate()))
self.pc += 1
return 1
if instruction == 0x45: # $45/69 EOR zp
self.EOR(OperandRef(LOC_VAL, self.zeropage()))
self.pc += 1
return 1
if instruction == 0x55: # $55/85 EOR zp,X
self.EOR(OperandRef(LOC_VAL, self.zeropage_x()))
self.pc += 1
return 1
if instruction == 0x4d: # $4D/77 EOR abs
self.EOR(OperandRef(LOC_VAL, self.absolute()))
self.pc += 2
return 1
if instruction == 0x5d: # $5D/93 EOR abs,X
self.cpucycles += self.eval_page_crossing_absolute_x()
self.EOR(OperandRef(LOC_VAL, self.absolute_x()))
self.pc += 2
return 1
if instruction == 0x59: # $59/89 EOR abs,Y
self.cpucycles += self.eval_page_crossing_absolute_y()
self.EOR(OperandRef(LOC_VAL, self.absolute_y()))
self.pc += 2
return 1
if instruction == 0x41: # $41/65 EOR (zp,X)
self.EOR(OperandRef(LOC_VAL, self.indirect_x()))
self.pc += 1
return 1
if instruction == 0x51: # $51/81 EOR (zp),Y
self.cpucycles += self.eval_page_crossing_indirect_y()
self.EOR(OperandRef(LOC_VAL, self.indirect_y()))
self.pc += 1
return 1
# case 0xe6:
# INC(MEM(ZEROPAGE()));
# WRITE(ZEROPAGE());
# pc++;
# break;
#
# case 0xf6:
# INC(MEM(ZEROPAGEX()));
# WRITE(ZEROPAGEX());
# pc++;
# break;
#
# case 0xee:
# INC(MEM(ABSOLUTE()));
# WRITE(ABSOLUTE());
# pc += 2;
# break;
#
# case 0xfe:
# INC(MEM(ABSOLUTEX()));
# WRITE(ABSOLUTEX());
# pc += 2;
# break;
# INC instructions
if instruction == 0xe6: # $E6/230 INC zp
self.INC(OperandRef(LOC_VAL, self.zeropage()))
self.pc += 1
return 1
if instruction == 0xf6: # $F6/246 INC zp,X
self.INC(OperandRef(LOC_VAL, self.zeropage_x()))
self.pc += 1
return 1
if instruction == 0xee: # $EE/238 INC abs
self.INC(OperandRef(LOC_VAL, self.absolute()))
self.pc += 2
return 1
if instruction == 0xfe: # $FE/254 INC abs,X
self.INC(OperandRef(LOC_VAL, self.absolute_x()))
self.pc += 2
return 1
# case 0xe8:
# x++;
# SETFLAGS(x);
# break;
# INX instruction
if instruction == 0xe8: # $E8/232 INX
self.x += 1
self.x &= 0xff
self.set_flags(self.x)
return 1
# case 0xc8:
# y++;
# SETFLAGS(y);
# break;
# INY instruction
if instruction == 0xc8: # $C8/200 INY
self.y += 1
self.y &= 0xff
self.set_flags(self.y)
return 1
# case 0x20:
# PUSH((pc+1) >> 8);
# PUSH((pc+1) & | |
#If gate.qubits is None, gate is assumed to be single-qubit gate
#acting in parallel on all qubits. If the gate is a global idle, then
#Pragma blocks are inserted (for tests like idle tomography) even
#if block_between_layers==False. Set block_idles=False to disable this as well.
if gate.qubits is None:
if quil_for_gate == 'I':
if block_idles:
quil += 'PRAGMA PRESERVE_BLOCK\n'
for q in gate_qubits:
quil += quil_for_gate + ' ' + str(qubit_conversion[q]) + '\n'
if block_idles:
quil += 'PRAGMA END_PRESERVE_BLOCK\n'
else:
for q in gate_qubits:
quil += quil_for_gate + ' ' + str(qubit_conversion[q]) + '\n'
#If gate.qubits is not None, then apply the one- or multi-qubit gate to
#the explicitly specified qubits.
else:
for q in gate_qubits: quil_for_gate += ' ' + str(qubit_conversion[q])
quil_for_gate += '\n'
# Add the quil for the gate to the quil string.
quil += quil_for_gate
# Keeps track of the qubits that have been accounted for, and checks that hadn't been used
# although that should already be checked in the .get_layer_label(), which checks for its a valid
# circuit layer.
assert(not set(gate_qubits).issubset(set(qubits_used)))
qubits_used.extend(gate_qubits)
# All gates that don't have a non-idle gate acting on them get an idle in the layer.
for q in self.line_labels:
if q not in qubits_used:
quil += 'I' + ' ' + str(qubit_conversion[q]) + '\n'
# Add in a barrier after every circuit layer if block_between_layers==True.
# Including pragma blocks are critical for QCVV testing, as circuits should usually
# experience minimal "behind-the-scenes" compilation (beyond necessary
# conversion to native instructions)
# To do: Add "barrier" as native pygsti circuit instruction, and use for indicating
# where pragma blocks should be.
if block_between_layers:
quil += 'PRAGMA PRESERVE_BLOCK\nPRAGMA END_PRESERVE_BLOCK\n'
# Add in a measurement at the end.
if readout_conversion is None:
for q in self.line_labels:
# quil += "MEASURE {0} [{1}]\n".format(str(qubit_conversion[q]),str(qubit_conversion[q]))
quil += "MEASURE {0} ro[{1}]\n".format(str(qubit_conversion[q]), str(qubit_conversion[q]))
else:
for q in self.line_labels:
quil += "MEASURE {0} ro[{1}]\n".format(str(qubit_conversion[q]), str(readout_conversion[q]))
return quil
def convert_to_openqasm(self, num_qubits=None,
                        gatename_conversion=None, qubit_conversion=None,
                        block_between_layers=True):  # TODO
    """
    Converts this circuit to an openqasm string.

    Parameters
    ----------
    num_qubits : int, optional
        Size of the openqasm `qreg` to declare. If None, the number of
        line labels (qubits) of this circuit is used.

    gatename_conversion : dict, optional
        If not None, a dictionary that converts the gatenames in the circuit to the
        gatenames that will appear in the openqasm output. If only standard pyGSTi names
        are used (e.g., 'Gh', 'Gp', 'Gcnot', 'Gcphase', etc) this dictionary need not
        be specified, and an automatic conversion to the standard openqasm names will be
        implemented.

    qubit_conversion : dict, optional
        If not None, a dictionary converting the qubit labels in the circuit to the
        desired qubit labels in the openqasm output. Can be left as None if the qubit
        labels are either (1) integers, or (2) of the form 'Qi' for integer i. In
        this case they are converted to integers (i.e., for (1) the mapping is trivial,
        for (2) the mapping strips the 'Q').

    block_between_layers : bool, optional
        When True, an openqasm `barrier` statement over all qubits is emitted after
        every circuit layer, discouraging downstream compilers from merging gates
        across layer boundaries.

    Returns
    -------
    str
        An openqasm string.
    """
    # Create the standard conversions when the caller did not supply any.
    if gatename_conversion is None:
        gatename_conversion = _itgs.get_standard_gatenames_openqasm_conversions()
    if qubit_conversion is None:
        # To tell us whether we have found a standard qubit labelling type.
        standardtype = False
        # Must first check they are strings, because cannot query q[0] for int q.
        if all([isinstance(q, str) for q in self.line_labels]):
            if all([q[0] == 'Q' for q in self.line_labels]):
                standardtype = True
                qubit_conversion = {llabel: int(llabel[1:]) for llabel in self.line_labels}
        if all([isinstance(q, int) for q in self.line_labels]):
            qubit_conversion = {q: q for q in self.line_labels}
            standardtype = True
        if not standardtype:
            raise ValueError(
                "No standard qubit labelling conversion is available! Please provide `qubit_conversion`.")
    if num_qubits is None:
        num_qubits = len(self.line_labels)

    # Currently only using 'Iz' as valid intermediate measurement ('IM') label.
    # Each 'Iz' consumes one extra classical bit, so the classical register is
    # sized to hold the final per-qubit measurements plus all intermediate ones.
    # Todo: Expand to all intermediate measurements.
    if 'Iz' in self.str:
        num_IMs = self.str.count('Iz')
    else:
        num_IMs = 0
    num_IMs_used = 0

    # Init the openqasm string: header, quantum register, classical register.
    openqasm = 'OPENQASM 2.0;\ninclude "qelib1.inc";\n\n'
    openqasm += 'qreg q[{0}];\n'.format(str(num_qubits))
    openqasm += 'creg cr[{0}];\n'.format(str(num_qubits + num_IMs))
    openqasm += '\n'

    depth = self.num_layers()

    # Go through the layers, and add the openqasm for each layer in turn.
    for l in range(depth):

        # Get the layer, without identity gates and containing each gate only once.
        layer = self.get_layer_label(l)
        # For keeping track of which qubits have a gate on them in the layer.
        qubits_used = []

        # Go through the (non-identity) gates in the layer and convert them to openqasm.
        for gate in layer.components:
            gate_qubits = gate.qubits if (gate.qubits is not None) else self.line_labels
            assert(len(gate_qubits) <= 2), 'Gates on more than 2 qubits given; this is currently not supported!'

            # Find the openqasm for the gate.
            if gate.name.__str__() != 'Iz':
                openqasm_for_gate = gatename_conversion[gate.name]
                # If gate.qubits is None, gate is assumed to be a single-qubit gate
                # acting in parallel on all qubits.
                # NOTE(review): in this branch the per-qubit statements are appended to
                # `openqasm` directly, yet `openqasm_for_gate` (still just the bare gate
                # name) is appended again after the if/else below — looks unintended;
                # confirm against upstream before relying on gate.qubits=None here.
                if gate.qubits is None:
                    for q in gate_qubits:
                        openqasm += openqasm_for_gate + ' q[' + str(qubit_conversion[q]) + '];\n'
                else:
                    # Build "name q[i]" or "name q[i], q[j]" (comma between operands).
                    for q in gate_qubits:
                        openqasm_for_gate += ' q[' + str(qubit_conversion[q]) + ']'
                        if q != gate_qubits[-1]:
                            openqasm_for_gate += ', '
                    openqasm_for_gate += ';\n'
            else:
                # 'Iz' is an intermediate measurement: it consumes the next spare
                # classical bit (index num_IMs_used) rather than the qubit's own bit.
                assert len(gate.qubits) == 1
                q = gate.qubits[0]
                openqasm_for_gate = "measure q[{0}] -> cr[{1}];\n".format(str(qubit_conversion[q]), num_IMs_used)
                num_IMs_used += 1

            # Add the openqasm for the gate to the openqasm string.
            openqasm += openqasm_for_gate

            # Keeps track of the qubits that have been accounted for, and checks they had
            # not already been used in this layer — although that should already be
            # guaranteed by .get_layer_label(), which checks that it is a valid circuit layer.
            assert(not set(gate_qubits).issubset(set(qubits_used)))
            qubits_used.extend(gate_qubits)

        # All qubits that don't have a non-idle gate acting on them get an idle in the layer.
        for q in self.line_labels:
            if q not in qubits_used:
                openqasm += 'id' + ' q[' + str(qubit_conversion[q]) + '];\n'

        # Add in a barrier after every circuit layer if block_between_layers==True.
        # Including barriers is critical for QCVV testing, as circuits should usually
        # experience minimal "behind-the-scenes" compilation (beyond necessary
        # conversion to native instructions).
        # To do: Add "barrier" as native pygsti circuit instruction, and use for indicating
        # where pragma blocks should be.
        if block_between_layers:
            openqasm += 'barrier '
            for q in self.line_labels[:-1]:
                openqasm += 'q[{0}], '.format(str(qubit_conversion[q]))
            openqasm += 'q[{0}];\n'.format(str(qubit_conversion[self.line_labels[-1]]))

    # Add in a measurement at the end. Classical bits 0..num_IMs_used-1 hold the
    # intermediate measurements, so the final per-qubit results start at that offset.
    for q in self.line_labels:
        openqasm += "measure q[{0}] -> cr[{1}];\n".format(str(qubit_conversion[q]),
                                                          str(num_IMs_used + qubit_conversion[q]))

    return openqasm
def simulate(self, model, return_all_outcomes=False):
"""
Compute the outcome probabilities of this Circuit using `model` as a
model for the gates. The order of the outcome strings (e.g., '0100') is
w.r.t. to the ordering of the qubits in the circuit. That is, the ith
element of the outcome string corresponds to the qubit with label
`self.qubit_labels[i]`.
Parameters
----------
model : Model
A description of the gate and SPAM operations corresponding to the
labels stored in this Circuit. If this model is over more qubits
than the circuit, the output will be the probabilities for the qubits
in the circuit marginalized over the other qubits. But, the simulation
is over the full set of qubits in the model, and so the time taken for
the simulation scales with the number of qubits in the model. For
models whereby "spectator" qubits do not affect the qubits in this
circuit (such as with perfect gates), more efficient simulations will
be obtained by first creating a model only over the qubits in this
circuit.
return_all_outcomes: bool, optional
Whether to include outcomes in the returned dictionary that have zero
probability. When False, the threshold for discarding an outcome as zero
probability is 10^-12.
Returns
-------
probs : dictionary
A dictionary with keys equal to all (`return_all_outcomes` is True) or
possibly only some (`return_all_outcomes` is False) of the possible
outcomes, and values that are float probabilities.
"""
# These results is a dict with strings of outcomes (normally bits) ordered according | |
:type DnsQueryType: str
:param UserName: 登录服务器的账号
:type UserName: str
:param PassWord: 登录服务器的密码
:type PassWord: str
:param UseSecConn: 是否使用安全链接SSL, 0 不使用,1 使用
:type UseSecConn: int
:param NeedAuth: FTP登录验证方式 0 不验证 1 匿名登录 2 需要身份验证
:type NeedAuth: int
:param ReqDataType: 请求数据类型。0 表示请求为字符串类型。1表示为二进制类型
:type ReqDataType: int
:param ReqData: 发起TCP, UDP请求的协议请求数据
:type ReqData: str
:param RespDataType: 响应数据类型。0 表示响应为字符串类型。1表示为二进制类型
:type RespDataType: int
:param RespData: 预期的UDP请求的回应数据
:type RespData: str
:param RedirectFollowNum: 跟随跳转次数
:type RedirectFollowNum: int
"""
self.TaskId = None
self.TaskName = None
self.Period = None
self.CatTypeName = None
self.CgiUrl = None
self.AgentGroupId = None
self.PolicyGroupId = None
self.Status = None
self.AddTime = None
self.Type = None
self.TopicId = None
self.AlarmStatus = None
self.Host = None
self.Port = None
self.CheckStr = None
self.CheckType = None
self.UserAgent = None
self.Cookie = None
self.PostData = None
self.SslVer = None
self.IsHeader = None
self.DnsSvr = None
self.DnsCheckIp = None
self.DnsQueryType = None
self.UserName = None
self.PassWord = None
self.UseSecConn = None
self.NeedAuth = None
self.ReqDataType = None
self.ReqData = None
self.RespDataType = None
self.RespData = None
self.RedirectFollowNum = None
def _deserialize(self, params):
    """Populate this object's attributes from the *params* dict, then warn
    about any keys in *params* that do not correspond to an attribute."""
    # Assign every known field from params (missing keys become None),
    # in the same order as the original field declarations.
    for field in ("TaskId", "TaskName", "Period", "CatTypeName", "CgiUrl",
                  "AgentGroupId", "PolicyGroupId", "Status", "AddTime", "Type",
                  "TopicId", "AlarmStatus", "Host", "Port", "CheckStr",
                  "CheckType", "UserAgent", "Cookie", "PostData", "SslVer",
                  "IsHeader", "DnsSvr", "DnsCheckIp", "DnsQueryType",
                  "UserName", "PassWord", "UseSecConn", "NeedAuth",
                  "ReqDataType", "ReqData", "RespDataType", "RespData",
                  "RedirectFollowNum"):
        setattr(self, field, params.get(field))
    # Any params key that is not an attribute of this object is unknown.
    unknown = set(params.keys())
    for attr_name in vars(self):
        unknown.discard(attr_name)
    if unknown:
        warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateAgentGroupRequest(AbstractModel):
    """Request parameters for the CreateAgentGroup API."""

    def __init__(self):
        r"""
        :param GroupName: Name of the probe agent group (at most 32 characters).
        :type GroupName: str
        :param IsDefault: Whether this is the default group; 0 or 1 (1 = default).
        :type IsDefault: int
        :param Agents: Province/Isp pairs to include; valid values come from the
        DescribeAgents response.
        :type Agents: list of CatAgent
        """
        self.GroupName = None
        self.IsDefault = None
        self.Agents = None

    def _deserialize(self, params):
        """Populate attributes from *params*, warning on unrecognized keys."""
        self.GroupName = params.get("GroupName")
        self.IsDefault = params.get("IsDefault")
        raw_agents = params.get("Agents")
        if raw_agents is not None:
            self.Agents = []
            for entry in raw_agents:
                agent = CatAgent()
                agent._deserialize(entry)
                self.Agents.append(agent)
        unknown = set(params.keys())
        for attr_name in vars(self):
            unknown.discard(attr_name)
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateAgentGroupResponse(AbstractModel):
    """Response parameters for the CreateAgentGroup API."""

    def __init__(self):
        r"""
        :param GroupId: Id of the newly created probe agent group.
        :type GroupId: int
        :param RequestId: Unique request ID, returned with every response;
        required when reporting an issue with this request.
        :type RequestId: str
        """
        self.GroupId = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*."""
        self.GroupId = params.get("GroupId")
        self.RequestId = params.get("RequestId")
class CreateProbeTasksRequest(AbstractModel):
    """Request parameters for the CreateProbeTasks API."""

    def __init__(self):
        r"""
        :param BatchTasks: Batch of task name / target address pairs.
        :type BatchTasks: list of ProbeTaskBasicConfiguration
        :param TaskType: Task type.
        :type TaskType: int
        :param Nodes: Probe nodes to run the tasks from.
        :type Nodes: list of str
        :param Interval: Probe interval.
        :type Interval: int
        :param Parameters: Probe parameters.
        :type Parameters: str
        :param TaskCategory: Task category:
        <li>1 = PC</li>
        <li> 2 = Mobile </li>
        :type TaskCategory: int
        :param Cron: Cron expression for scheduled tasks.
        :type Cron: str
        :param Tag: Resource tag values.
        :type Tag: list of Tag
        """
        self.BatchTasks = None
        self.TaskType = None
        self.Nodes = None
        self.Interval = None
        self.Parameters = None
        self.TaskCategory = None
        self.Cron = None
        self.Tag = None

    def _deserialize(self, params):
        """Populate attributes from *params*, warning on unrecognized keys."""
        raw_batch = params.get("BatchTasks")
        if raw_batch is not None:
            self.BatchTasks = []
            for entry in raw_batch:
                config = ProbeTaskBasicConfiguration()
                config._deserialize(entry)
                self.BatchTasks.append(config)
        # Scalar fields copy straight across (missing keys become None).
        for field in ("TaskType", "Nodes", "Interval", "Parameters",
                      "TaskCategory", "Cron"):
            setattr(self, field, params.get(field))
        raw_tags = params.get("Tag")
        if raw_tags is not None:
            self.Tag = []
            for entry in raw_tags:
                tag = Tag()
                tag._deserialize(entry)
                self.Tag.append(tag)
        unknown = set(params.keys())
        for attr_name in vars(self):
            unknown.discard(attr_name)
        if unknown:
            warnings.warn("%s fileds are useless." % ",".join(unknown))
class CreateProbeTasksResponse(AbstractModel):
    """CreateProbeTasks response structure."""

    def __init__(self):
        r"""
        :param RequestId: Unique request ID returned with every request; supply it when reporting issues.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        # Only the request id comes back; no unknown-key check on responses.
        self.RequestId = params.get("RequestId")
class CreateTaskExRequest(AbstractModel):
    """CreateTaskEx request structure.

    Bug fix: ``self.PassWord`` was assigned the literal ``<PASSWORD>`` — an
    anonymization placeholder that is a syntax error — instead of ``None``
    like every other field.
    """

    def __init__(self):
        r"""
        :param CatTypeName: One of http, https, ping, tcp, ftp, smtp, udp, dns.
        :type CatTypeName: str
        :param Url: URL to probe, e.g. www.qq.com (the domain must resolve to an IP).
        :type Url: str
        :param Period: Probe period in minutes; one of 1, 5, 15, 30. Must not be finer than the user level allows.
        :type Period: int
        :param TaskName: Task name, at most 32 characters; must be unique per user.
        :type TaskName: str
        :param AgentGroupId: Probe group ID selecting which carriers act as probe sources; usually the user's default group (GroupId from DescribeAgentGroups). Required when Type is 0.
        :type AgentGroupId: int
        :param Host: Host to use, if needed.
        :type Host: str
        :param IsHeader: Non-zero issues a header request; 0 with non-empty PostData issues POST; 0 with empty PostData issues GET.
        :type IsHeader: int
        :param SslVer: Used when the URL contains "https". Default SSLv23; one of TLSv1_2, TLSv1_1, TLSv1, SSLv2, SSLv23, SSLv3.
        :type SslVer: str
        :param PostData: POST body; an empty string means the request is not a POST.
        :type PostData: str
        :param UserAgent: User agent string.
        :type UserAgent: str
        :param CheckStr: String to match against the result.
        :type CheckStr: str
        :param CheckType: 1 means verify that the result contains CheckStr.
        :type CheckType: int
        :param Cookie: Cookies to set.
        :type Cookie: str
        :param TaskId: Task ID; pass the original task ID when verifying and modifying a task.
        :type TaskId: int
        :param UserName: Login account. Empty string skips credential checks and only probes connectivity.
        :type UserName: str
        :param PassWord: Login password.
        :type PassWord: str
        :param ReqDataType: Default 0. 0 = string request payload, 1 = binary.
        :type ReqDataType: int
        :param ReqData: Protocol payload for TCP/UDP requests.
        :type ReqData: str
        :param RespDataType: Default 0. 0 = string response payload, 1 = binary.
        :type RespDataType: int
        :param RespData: Expected UDP response. For strings a substring match passes; for binary the response must be exactly equal.
        :type RespData: str
        :param DnsSvr: Target DNS server; may be an empty string.
        :type DnsSvr: str
        :param DnsCheckIp: IP whose membership in the DNS IP list is checked; empty string disables the check.
        :type DnsCheckIp: str
        :param DnsQueryType: One of A (default), MX, NS, CNAME, TXT, ANY.
        :type DnsQueryType: str
        :param UseSecConn: Use a secure SSL connection: 0 = no, 1 = yes.
        :type UseSecConn: int
        :param NeedAuth: FTP login mode: 0 = no auth, 1 = anonymous, 2 = credentials required.
        :type NeedAuth: int
        :param Port: Target port.
        :type Port: int
        :param Type: Type=0 default (site monitoring); Type=2 availability monitoring.
        :type Type: int
        :param IsVerify: 0 = normal task, 1 = verification task; defaults to 0 when omitted.
        :type IsVerify: int
        :param RedirectFollowNum: Number of redirects to follow, 0-5; omit to not follow.
        :type RedirectFollowNum: int
        """
        self.CatTypeName = None
        self.Url = None
        self.Period = None
        self.TaskName = None
        self.AgentGroupId = None
        self.Host = None
        self.IsHeader = None
        self.SslVer = None
        self.PostData = None
        self.UserAgent = None
        self.CheckStr = None
        self.CheckType = None
        self.Cookie = None
        self.TaskId = None
        self.UserName = None
        self.PassWord = None  # fixed: was the invalid placeholder `<PASSWORD>`
        self.ReqDataType = None
        self.ReqData = None
        self.RespDataType = None
        self.RespData = None
        self.DnsSvr = None
        self.DnsCheckIp = None
        self.DnsQueryType = None
        self.UseSecConn = None
        self.NeedAuth = None
        self.Port = None
        self.Type = None
        self.IsVerify = None
        self.RedirectFollowNum = None

    def _deserialize(self, params):
        # All fields are scalars; copy them straight out of the payload.
        for field in (
            "CatTypeName", "Url", "Period", "TaskName", "AgentGroupId",
            "Host", "IsHeader", "SslVer", "PostData", "UserAgent",
            "CheckStr", "CheckType", "Cookie", "TaskId", "UserName",
            "PassWord", "ReqDataType", "ReqData", "RespDataType", "RespData",
            "DnsSvr", "DnsCheckIp", "DnsQueryType", "UseSecConn", "NeedAuth",
            "Port", "Type", "IsVerify", "RedirectFollowNum",
        ):
            setattr(self, field, params.get(field))
        # Warn about any payload keys this model does not know about.
        # (The "fileds" typo is kept byte-for-byte for consistency with the
        # rest of the generated SDK.)
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class CreateTaskExResponse(AbstractModel):
    """CreateTaskEx response structure."""

    def __init__(self):
        r"""
        :param ResultId: Probe result query ID; use it to poll whether the probe succeeded and verification passed.
        :type ResultId: int
        :param TaskId: Probe task ID; after verification passes, pass it to the CreateTask API.
        :type TaskId: int
        :param RequestId: Unique request ID returned with every request; supply it when reporting issues.
        :type RequestId: str
        """
        self.ResultId = None
        self.TaskId = None
        self.RequestId = None

    def _deserialize(self, params):
        # Plain field copy; responses do not run the unknown-key check.
        for field in ("ResultId", "TaskId", "RequestId"):
            setattr(self, field, params.get(field))
class DataPoint(AbstractModel):
    """A single data point (e.g. latency) in a time series."""

    def __init__(self):
        r"""
        :param LogTime: Timestamp of the data point.
        :type LogTime: str
        :param MetricValue: Measured value.
        :type MetricValue: float
        """
        self.LogTime = None
        self.MetricValue = None

    def _deserialize(self, params):
        for field in ("LogTime", "MetricValue"):
            setattr(self, field, params.get(field))
        # Warn about any payload keys this model does not know about.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class DataPointMetric(AbstractModel):
    """DataPoint series tagged with its MetricName."""

    def __init__(self):
        r"""
        :param MetricName: Name of the metric.
        :type MetricName: str
        :param Points: Timestamps and values of the metric.
        :type Points: list of DataPoint
        """
        self.MetricName = None
        self.Points = None

    def _deserialize(self, params):
        self.MetricName = params.get("MetricName")
        # Rebuild the nested DataPoint list element by element.
        raw_points = params.get("Points")
        if raw_points is not None:
            self.Points = []
            for raw in raw_points:
                point = DataPoint()
                point._deserialize(raw)
                self.Points.append(point)
        # Warn about any payload keys this model does not know about.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class DeleteAgentGroupRequest(AbstractModel):
    """DeleteAgentGroup request structure."""

    def __init__(self):
        r"""
        :param GroupId: Id of the probe group to delete.
        :type GroupId: int
        """
        self.GroupId = None

    def _deserialize(self, params):
        self.GroupId = params.get("GroupId")
        # Warn about any payload keys this model does not know about.
        leftover = set(params.keys()) - set(vars(self))
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover))
class | |
# likeyoubot_kaiser.py
import likeyoubot_game as lybgame
import likeyoubot_kaiser_scene as lybscene
from likeyoubot_configure import LYBConstant as lybconstant
import time
import sys
import tkinter
from tkinter import ttk
from tkinter import font
import copy
class LYBKaiser(lybgame.LYBGame):
    """Game driver for "Kaiser": scene detection and per-frame processing
    built on the generic LYBGame screen-matching machinery."""

    # Work items offered in the UI work list. The labels are user-facing
    # Korean strings and must stay as-is; in order they read: game start,
    # login, auto hunt, main quest, local quest, quick-slot registration,
    # quest, mail, bulk dismantle, notification, [repeat start],
    # [repeat end], [wait], [schedule], and a trailing blank entry.
    work_list = [
        '게임 시작',
        '로그인',
        '자동 사냥',
        '메인 퀘스트',
        '지역 퀘스트',
        '퀵슬롯 등록',
        '퀘스트',
        '우편',
        '일괄 분해',
        '알림',
        '[반복 시작]',
        '[반복 종료]',
        '[작업 대기]',
        '[작업 예약]',
        '' ]

    # Pixel-box resource names used to locate the game icon, one list per
    # supported emulator ("nox" / "momo").
    nox_kaiser_icon_list = [
        'nox_kaiser_icon'
    ]

    momo_kaiser_icon_list = [
        'momo_kaiser_icon'
    ]

    # Eight-direction movement glyphs, clockwise starting from "up".
    character_move_list = [
        "↑",
        "↗",
        "→",
        "↘",
        "↓",
        "↙",
        "←",
        "↖"
    ]

    # Quick-slot item labels (Korean): none, small HP potion, medium HP
    # potion, speed potion, battle potion, amplifying magic stone,
    # pet summon scroll.
    slot_item_list = [
        '없음',
        '소형 체력 물약',
        '중형 체력 물약',
        '속도의 물약',
        '전투의 물약',
        '증폭 마법석',
        '펫 소환 주문서',
    ]

    def __init__(self, game_name, game_data_name, window):
        # NOTE(review): the game_name/game_data_name parameters are ignored;
        # the Kaiser constants are always forwarded to the base class —
        # confirm callers expect that.
        lybgame.LYBGame.__init__(self, lybconstant.LYB_GAME_KAISER, lybconstant.LYB_GAME_DATA_KAISER, window)

    def process(self, window_image):
        """Process one captured frame via the base class; negative return
        codes are propagated unchanged."""
        rc = super(LYBKaiser, self).process(window_image)

        if rc < 0:
            return rc

        return rc

    def custom_check(self, window_image, window_pixel):
        """Per-frame hook: click the "skip" button whenever it appears.

        Always returns an empty string (no scene override).
        """
        # NOTE(review): the window_image/window_pixel parameters are unused;
        # matching runs against self.window_image instead — confirm intended.
        pb_name = 'skip'
        (loc_x, loc_y), match_rate = self.locationOnWindowPart(
            self.window_image,
            self.resource_manager.pixel_box_dic[pb_name],
            custom_below_level=(130, 130, 130),
            custom_top_level=(255, 255, 255),
            custom_threshold=0.9,
            custom_flag=1,
            custom_rect=(560, 240, 600, 280)
        )

        if loc_x != -1:
            # '건너뛰기' = "skip" — log the match confidence, then click it.
            self.logger.warn('건너뛰기: ' + str(match_rate))
            self.mouse_click(pb_name)

        # Defeat! (disabled: "press key on defeat" handling kept for reference)
        # (loc_x, loc_y), match_rate = self.locationResourceOnWindowPart(
        # 	self.window_image,
        # 	'defeat_press_key_loc',
        # 	custom_below_level=(250, 250, 250),
        # 	custom_top_level=(255, 255, 255),
        # 	custom_threshold=0.7,
        # 	custom_flag=1,
        # 	custom_rect=(280, 190, 360, 230)
        # )

        # if loc_x != -1:
        # 	self.logger.warn('전투 패배: ' + str(match_rate))
        # 	self.mouse_click('defeat_press_key_0')

        return ''

    def get_screen_by_location(self, window_image):
        """Identify the current scene by trying each detector in turn;
        returns the scene name or '' when nothing matches."""
        scene_name = self.scene_init_screen(window_image)
        if len(scene_name) > 0:
            return scene_name

        scene_name = self.popup_scene(window_image)
        if len(scene_name) > 0:
            return scene_name

        # scene_name = self.jeontoo_scene(window_image)
        # if len(scene_name) > 0:
        # 	return scene_name

        # scene_name = self.scene_google_play_account_select(window_image)
        # if len(scene_name) > 0:
        # 	return scene_name

        return ''

    def popup_scene(self, window_image):
        """Detect a generic popup dialog from cached window pixels."""
        loc_name = 'popup_scene_loc'
        match_rate = self.rateMatchedResource(self.window_pixels, loc_name, custom_below_level=100, custom_top_level=255)

        self.logger.debug(loc_name + ' ' + str(match_rate))

        # 0.7 is the empirical confidence threshold for this resource.
        if match_rate > 0.7:
            return 'popup_scene'

        return ''

    # Battle ("전투") scene detector, currently disabled.
    # def jeontoo_scene(self, window_image):
    # 	(loc_x, loc_y), match_rate = self.locationResourceOnWindowPart(
    # 		self.window_image,
    # 		'jeontoo_scene_loc',
    # 		custom_below_level=(100, 100, 100),
    # 		custom_top_level=(255, 255, 255),
    # 		custom_threshold=0.7,
    # 		custom_flag=1,
    # 		custom_rect=(5, 90, 80, 130)
    # 	)

    # 	if match_rate > 0.7:
    # 		return 'jeontoo_scene'

    # 	return ''

    def scene_init_screen(self, window_image):
        """Detect the emulator home screen by finding the Kaiser game icon.

        The icon resource and search rectangle depend on the emulator type.
        """
        loc_x = -1
        loc_y = -1
        if self.player_type == 'nox':
            for each_icon in LYBKaiser.nox_kaiser_icon_list:
                (loc_x, loc_y), match_rate = self.locationOnWindowPart(
                    window_image,
                    self.resource_manager.pixel_box_dic[each_icon],
                    custom_threshold=0.8,
                    custom_flag=1,
                    custom_rect=(80, 110, 570, 300)
                )
                # print('[DEBUG] nox yh icon:', (loc_x, loc_y), match_rate)
                if loc_x != -1:
                    break
        elif self.player_type == 'momo':
            for each_icon in LYBKaiser.momo_kaiser_icon_list:
                (loc_x, loc_y), match_rate = self.locationOnWindowPart(
                    window_image,
                    self.resource_manager.pixel_box_dic[each_icon],
                    custom_threshold=0.8,
                    custom_flag=1,
                    custom_rect=(30, 10, 610, 300)
                )
                # print('[DEBUG] momo yh icon:', (loc_x, loc_y), match_rate)
                if loc_x != -1:
                    break

        if loc_x == -1:
            return ''

        return 'init_screen_scene'

    def scene_google_play_account_select(self, window_image):
        """Detect the Google Play account-selection dialog: every expected
        letter resource must be found, otherwise return ''."""
        loc_x_list = []
        loc_y_list = []
        # NOTE(review): locationOnWindow is called unbound through the class
        # with window_image as the first argument — confirm its signature.
        (loc_x, loc_y) = lybgame.LYBGame.locationOnWindow(
            window_image,
            self.resource_manager.pixel_box_dic['google_play_letter']
        )
        loc_x_list.append(loc_x)
        loc_y_list.append(loc_y)

        for i in range(6):
            (loc_x, loc_y) = lybgame.LYBGame.locationOnWindow(
                window_image,
                self.resource_manager.pixel_box_dic['google_play_letter_' + str(i)]
            )
            loc_x_list.append(loc_x)
            loc_y_list.append(loc_y)

        # All letters must be located; any miss (-1) means not this scene.
        for each_loc in loc_x_list:
            if each_loc == -1:
                return ''
            else:
                continue

        return 'google_play_account_select_scene'

    def clear_scene(self):
        """Reset the scene registry, keeping only the login-related scenes
        that must survive a restart."""
        last_scene = self.scene_dic
        self.scene_dic = {}
        for scene_name, scene in last_scene.items():
            if ( 'google_play_account_select_scene' in scene_name or
                'logo_screen_scene' in scene_name or
                'connect_account_scene' in scene_name
            ):
                self.scene_dic[scene_name] = last_scene[scene_name]

    def add_scene(self, scene_name):
        """Register a new Kaiser scene and wire it to this game's logging
        queue and back-reference."""
        self.scene_dic[scene_name] = lybscene.LYBKaiserScene(scene_name)
        self.scene_dic[scene_name].setLoggingQueue(self.logging_queue)
        self.scene_dic[scene_name].setGameObject(self)
class LYBKaiserTab(lybgame.LYBGameTab):
def __init__(self, root_frame, configure, game_options, inner_frame_dics, width, height, game_name=lybconstant.LYB_GAME_KAISER):
    """Build the Kaiser option tab on top of the generic game tab."""
    lybgame.LYBGameTab.__init__(
        self, root_frame, configure, game_options,
        inner_frame_dics, width, height, game_name
    )
def set_work_list(self):
    """Populate the work-list listbox with Kaiser's work items and mirror
    each entry into the shared per-game configuration."""
    lybgame.LYBGameTab.set_work_list(self)
    listbox = self.option_dic['work_list_listbox']
    config_work_list = self.configure.common_config[self.game_name]['work_list']
    for work in LYBKaiser.work_list:
        listbox.insert('end', work)
        config_work_list.append(work)
def set_option(self):
###############################################
# 메인 퀘스트 진행 #
###############################################
# frame = ttk.Frame(self.inner_frame_dic['frame_top'], relief=self.frame_relief)
# label = tkinter.Label(
# master = frame,
# text = "메인 퀘스트를 ",
# anchor = tkinter.W,
# justify = tkinter.LEFT,
# font = lybconstant.LYB_FONT
# # fg='White' if brightness < 120 else 'Black',
# # bg=bg_colour
# )
# # countif.place(
# # x=lybconstant.LYB_PADDING,
# # y=lybconstant.LYB_PADDING,
# # width=lybconstant.LYB_LABEL_WIDTH, height=lybconstant.LYB_LABEL_HEIGHT
# # )
# label.pack(side=tkinter.LEFT)
# option_name_mq = lybconstant.LYB_DO_STRING_DURATION_MAIN_QUEST
# self.option_dic[option_name_mq] = tkinter.StringVar(frame)
# self.option_dic[option_name_mq].trace('w', lambda *args: self.callback_main_quest_stringvar(args, option_name=option_name_mq))
# if not option_name_mq in self.configure.common_config[self.game_name]:
# self.configure.common_config[self.game_name][option_name_mq] = 20
# entry = tkinter.Entry(
# master = frame,
# relief = 'sunken',
# textvariable = self.option_dic[option_name_mq],
# justify = tkinter.RIGHT,
# width = 5,
# font = lybconstant.LYB_FONT
# )
# entry.pack(side=tkinter.LEFT)
# label = tkinter.Label(
# master = frame,
# text = "분 동안 진행합니다.",
# justify = tkinter.LEFT,
# font = lybconstant.LYB_FONT
# # fg='White' if brightness < 120 else 'Black',
# # bg=bg_colour
# )
# label.pack(side=tkinter.LEFT)
# frame.pack(anchor=tkinter.W)
# PADDING
frame = ttk.Frame(
master = self.master,
relief = self.frame_relief
)
frame.pack(pady=5)
self.inner_frame_dic['options'] = ttk.Frame(
master = self.master,
relief = self.frame_relief
)
self.option_dic['option_note'] = ttk.Notebook(
master = self.inner_frame_dic['options']
)
self.inner_frame_dic['common_tab_frame'] = ttk.Frame(
master = self.option_dic['option_note'],
relief = self.frame_relief
)
self.inner_frame_dic['common_tab_frame'].pack(anchor=tkinter.NW, fill=tkinter.BOTH, expand=True)
self.option_dic['option_note'].add(self.inner_frame_dic['common_tab_frame'], text='일반')
self.inner_frame_dic['work_tab_frame'] = ttk.Frame(
master = self.option_dic['option_note'],
relief = self.frame_relief
)
self.inner_frame_dic['work_tab_frame'].pack(anchor=tkinter.NW, fill=tkinter.BOTH, expand=True)
self.option_dic['option_note'].add(self.inner_frame_dic['work_tab_frame'], text='작업')
self.inner_frame_dic['notify_tab_frame'] = ttk.Frame(
master = self.option_dic['option_note'],
relief = self.frame_relief
)
self.inner_frame_dic['notify_tab_frame'].pack(anchor=tkinter.NW, fill=tkinter.BOTH, expand=True)
self.option_dic['option_note'].add(self.inner_frame_dic['notify_tab_frame'], text='알림')
# ------
# 일반 탭 좌측
frame_l = ttk.Frame(self.inner_frame_dic['common_tab_frame'])
frame_label = ttk.LabelFrame(frame_l, text='설정')
frame_label_inner = ttk.LabelFrame(frame_label, text='소형 체력 물약')
frame = ttk.Frame(frame_label_inner)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_set'] = tkinter.BooleanVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_set'].trace(
'w', lambda *args: self.callback_auto_potion_set(args, lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_set')
)
if not lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_set' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_set'] = False
check_box = ttk.Checkbutton(
master = frame,
text = '물약 소진시 현재 작업 종료',
variable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_set'],
onvalue = True,
offvalue = False
)
check_box.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label_inner)
label = ttk.Label(
master = frame,
text = self.get_option_text("물약 슬롯 번호")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_number'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_number'].trace(
'w', lambda *args: self.callback_auto_potion_number(args, lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_number')
)
combobox_list = []
for i in range(1, 5):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_number' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_number'] = 1
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_number'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_number'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame_label_inner.pack(anchor=tkinter.NW, padx=5, pady=5)
frame_label_inner = ttk.LabelFrame(frame_label, text='수동 체력 물약')
frame = ttk.Frame(frame_label_inner)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_set'] = tkinter.BooleanVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_set'].trace(
'w', lambda *args: self.callback_potion_set(args, lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_set')
)
if not lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_set' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_set'] = False
check_box = ttk.Checkbutton(
master = frame,
text = '물약 소진시 현재 작업 종료',
variable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_set'],
onvalue = True,
offvalue = False
)
check_box.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label_inner)
label = ttk.Label(
master = frame,
text = self.get_option_text("수동 회복 물약 사용(HP %)")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_hp'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_hp'].trace(
'w', lambda *args: self.callback_potion_hp(args, lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_hp')
)
combobox_list = []
for i in range(50, 91):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_hp' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_hp'] = 70
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_hp'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_hp'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label_inner)
label = ttk.Label(
master = frame,
text = self.get_option_text("수동 회복 물약 슬롯 번호")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_number'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_number'].trace(
'w', lambda *args: self.callback_potion_number(args, lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_number')
)
combobox_list = []
for i in range(1, 5):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_number' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_number'] = 2
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_number'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_number'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame_label_inner.pack(anchor=tkinter.NW, padx=5, pady=5)
frame_label.pack(anchor=tkinter.NW, padx=5, pady=5)
frame_l.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# 일반 탭 중간
frame_m = ttk.Frame(self.inner_frame_dic['common_tab_frame'])
frame_m.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# 일반 탭 우측
frame_r = ttk.Frame(self.inner_frame_dic['common_tab_frame'])
frame_r.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# 작업 탭 좌측
frame_l = ttk.Frame(self.inner_frame_dic['work_tab_frame'])
frame_label = ttk.LabelFrame(frame_l, text='자동 사냥')
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("진행 시간(초)")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_play_duration'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_play_duration'].trace(
'w', lambda *args: self.callback_auto_play_duration(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_play_duration')
)
combobox_list = []
for i in range(0, 86401, 60):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_play_duration' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_play_duration'] = 1800
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_play_duration'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_play_duration'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("자동 전환 감지 횟수")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_limit_count'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_limit_count'].trace(
'w', lambda *args: self.callback_auto_limit_count(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_limit_count')
)
combobox_list = []
for i in range(2, 101):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_limit_count' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_limit_count'] = 5
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_limit_count'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_limit_count'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame_label.pack(anchor=tkinter.NW, padx=5, pady=5)
frame_label = ttk.LabelFrame(frame_l, text='메인 퀘스트')
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("진행 시간(초)")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_duration'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_duration'].trace(
'w', lambda *args: self.callback_main_quest_duration(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_duration')
)
combobox_list = []
for i in range(0, 86401, 60):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_duration' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_duration'] = 1800
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_duration'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_duration'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("퀘스트 지역 이탈 판정 횟수")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_distance'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_distance'].trace(
'w', lambda *args: self.callback_main_quest_distance(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_distance')
)
combobox_list = []
for i in range(1, 101):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_distance' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_distance'] = 3
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_distance'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_distance'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("자동 전환 감지 횟수")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_auto'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_auto'].trace(
'w', lambda *args: self.callback_main_quest_auto(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_auto')
)
combobox_list = []
for i in range(2, 101):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_auto' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_auto'] = 5
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_auto'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_auto'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame_label.pack(anchor=tkinter.NW, padx=5, pady=5)
frame_label = ttk.LabelFrame(frame_l, text='지역 퀘스트')
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("진행 시간(초)")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_duration'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_duration'].trace(
'w', lambda *args: self.callback_local_quest_duration(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_duration')
)
combobox_list = []
for i in range(0, 86401, 60):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_duration' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_duration'] = 1800
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_duration'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_duration'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("퀘스트 지역 이탈 판정 거리(m)")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance_limit'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance_limit'].trace(
'w', lambda *args: self.callback_local_quest_distance_limit(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance_limit')
)
combobox_list = []
for i in range(1, 11):
combobox_list.append(str(i * 10))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance_limit' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance_limit'] = 40
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance_limit'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance_limit'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
label = ttk.Label(
master | |
# Bucket 1.0/Bucket Interpreter.py  (gh_stars: 0)
#_Bucket Compiler by Pixet Bits {Version : 1.0.0.0}_---------------------------#
import random as rand
# Libraries: which optional Bucket library headers ("[B]", "[F]", "[C]")
# have been enabled for the current program.
LibCtrls = {"[B]" : False, "[F]" : False, "[C]" : False}
# Interpreter state ("Objects").
# Internal counters; "FLCnt" presumably a for-loop counter — TODO confirm.
SfSystVr = {"FLCnt" : 0}
# Variable tables, one per Bucket type (int/str/bool/float): name -> value.
ActivInt = {}
ActivStr = {}
ActivBol = {}
ActivFlt = {}
# If/else bookkeeping: the last condition Result, whether we are inside a
# conditional (inCent), which branch was seen (isIf/isEl), and whether the
# pending "do :" line is allowed to execute (Enable).
IfElseSy = {"Result" : "", "inCent" : "", "isIf" : "", "isEl" : False, "Enable" : False}
# Method state: inside "bucket open" (main), inside a task, pending call flag.
ActivMth = {"inMain" : False, "inTask" : False, "CallIt" : False}
# Declared tasks — presumably task name -> location/body; populated by
# NewTask(), which is defined elsewhere in the file — TODO confirm.
ActivTsk = {}
#_Principal_-------------------------------------------------------------------#
def Compiler(FileName):
    """Load, pre-process and execute a Bucket (.bk) source file.

    Steps: read the file, strip whitespace and comments, require the
    "[to Basic]" library header, validate the class line against the file
    name, register variable declarations and tasks, then hand the cleaned
    line list to Runner().

    Fixes: the file handle is now closed via a context manager, and the
    library error message no longer misspells "Basic" as "Baisc" (the
    actual header check already compared against "[to Basic]").
    """
    Name = FileName.replace(".bk", "")
    try:
        with open(FileName) as File:
            Lines = File.readlines()
    except FileNotFoundError:
        # Error() presumably aborts execution — TODO confirm; otherwise
        # Lines would be unbound below.
        Error("File [" + Name + "] not found", "")

    # Strip indentation, newlines and tabs so lines compare cleanly.
    Lines = [l.replace(" ", "") for l in Lines]
    Lines = [l.replace("\n", "") for l in Lines]
    Lines = [l.replace("\t", "") for l in Lines]
    # Drop blank lines and // comments.
    Lines = [l for l in Lines if l != ""]
    Lines = [l for l in Lines if not l.startswith("//")]

    # Every program must begin with the basic library header.
    if Lines[0] != "[to Basic]":
        Error("All Bucket classes need [to Basic] library", Lines[0])

    for y, line in enumerate(Lines):
        # Tab error. NOTE(review): spaces are replaced away above, so this
        # guard looks unreachable — confirm the intended replace pattern.
        if line.startswith(" "):
            Error("Tab error", line)
        # Class line: the namespace must match the file name.
        if line.endswith("in Bucket :"):
            if line.startswith(Name):
                continue
            else:
                Error("Namespace of class is wrong", line)
        # Variable declarations, dispatched by type keyword.
        if line.startswith("int "):
            Int(line)
        if line.startswith("str "):
            Str(line)
        if line.startswith("bol "):
            Bol(line)
        if line.startswith("flt "):
            Flt(line)
        # Task (method) declarations need the full line list and the index.
        if "task :" in line:
            NewTask(Lines, y)

    Runner(Lines)
#
def Runner(lines) : #
    """Interpret the preprocessed program lines one by one.

    Dispatches each recognised keyword to its handler while tracking, via
    the global ActivMth flags, whether execution is inside the
    "bucket open" main block or inside a task body (task-body lines are
    skipped here; tasks only run when invoked through TaskCtrl), and, via
    the global IfElseSy state, which branch of an if/else "do :"
    statement is currently enabled.  Always returns 0; errors are
    reported through Error() and do not stop the walk.
    """
    for x in range(0, len(lines)) : #
        #bucket open
        if lines[x] == "bucket open :" : ActivMth["inMain"] = True
        #any task
        if "task :" in lines[x] : ActivMth["inTask"] = True
        #Output
        if lines[x].startswith("show ") :
            if ActivMth["inMain"] == True : Show(lines[x])
            elif ActivMth["inTask"] == True : continue
            else : Error("Funtion out of an method", lines[x])
        #Call an Task
        if lines[x].startswith("call ") :
            if ActivMth["inMain"] == True : TaskCtrl(lines, lines[x])
            else : Error("Only 'bucket open' can call tasks", lines[x])
        #Set
        if lines[x].startswith("set ") :
            if ActivMth["inMain"] == True : Set(lines[x])
            elif ActivMth["inTask"] == True : continue
            else : Error("Funtion out of an method", lines[x])
        #Make
        if lines[x].startswith("make ") :
            if ActivMth["inMain"] == True : Make(lines[x])
            elif ActivMth["inTask"] == True : continue
            else : Error("Funtion out of an method", lines[x])
        #Up
        if lines[x].startswith("up ") :
            if ActivMth["inMain"] == True : Up(lines[x])
            elif ActivMth["inTask"] == True : continue
            else : Error("Funtion out of an method", lines[x])
        #Convert
        if lines[x].startswith("convert ") :
            if ActivMth["inMain"] == True : Convert(lines[x])
            elif ActivMth["inTask"] == True : continue
            else : Error("Funtion out of an method", lines[x])
        #If-Else system
        #If
        if lines[x].startswith("if ") :
            if ActivMth["inMain"] == True :
                IfElseSy["inCent"] = True
                IfElseSy["isIf"] = True
                IfElseSy["Result"] = If(lines[x])
            elif ActivMth["inTask"] == True : continue
            else : Error("Funtion out of an method", lines[x])
        #Else
        elif lines[x].startswith("else ") and IfElseSy["Result"] == False :
            if ActivMth["inMain"] == True :
                IfElseSy["inCent"] = True
                IfElseSy["isEl"] = True
            elif ActivMth["inTask"] == True : continue
            else : Error("Funtion out of an method", lines[x])
        #End of statlement
        #Do System
        if lines[x].startswith("do : ") and IfElseSy["inCent"] == True : #
            line = lines[x].replace("do : ", "")
            #Is if?
            if IfElseSy["isIf"] == True and IfElseSy["Result"] == True : IfElseSy["Enable"] = True
            #Is Else?
            if IfElseSy["isEl"] == True and IfElseSy["Result"] == False : IfElseSy["Enable"] = True
            #Then -- Enable stays set for every following "do :" line until "finish".
            if IfElseSy["Enable"] == True :
                #Functions
                if line.startswith("show ") : Show(line)
                # NOTE(review): only flags the call; no TaskCtrl dispatch here -- confirm intended.
                if line.startswith("call ") : ActivMth["CallIt"] = True
                if line.startswith("set ") : Set(line)
                if line.startswith("up ") : Up(line)
                #Errors
                if line.startswith("int ") : Error("If-else systems cannot declare vars", line)
                if line.startswith("str ") : Error("If-else systems cannot declare vars", line)
                if line.startswith("bol ") : Error("If-else systems cannot declare vars", line)
                if line.startswith("flt ") : Error("If-else systems cannot declare vars", line)
            #
        #Compiler errors
        # BUG FIX: the original condition `not IfElseSy["inCent"] == False` could
        # never be true in this elif (the if-branch above already handles
        # inCent == True), so a "do :" outside an if-statement went unreported.
        elif lines[x].startswith("do : ") and IfElseSy["inCent"] == False : Error("Do out o if statlement", lines[x])
        #For loop
        if lines[x].startswith("for ") :
            lLine = lines[x].replace("for ", "")
            limit = lLine.find("times ")
            count = lLine[:limit]
            #N° Times -- literal int, or the value of a declared int variable.
            try : count = int(count)
            except ValueError :
                if count in ActivInt : count = ActivInt[count]
                else : Error("For loop needs an int", lines[x])
            #Loop -- pass the body text after "<count>times " to the loop handler.
            ForLoop(lines, x, count, lLine.replace(lLine[:limit + 6], ""))
        #Enders
        if lines[x] == "finish" :
            IfElseSy["inCent"] = False
            IfElseSy["Enable"] = False
            IfElseSy["isIf"] = False
            IfElseSy["isEl"] = False
        if lines[x] == "end l." : ActivMth["inMain"] = False
        if lines[x] == "end t." : ActivMth["inTask"] = False
    #
    return 0
#
def Error(ErrorType, Line) : #
    """Print a compiler-error report for the offending line and return 0."""
    message = "\nCompiler error {" + ErrorType + "}\nin line : \"" + str(Line) + "\""
    print(message)
    return 0
#
#_If-Else System_--------------------------------------------------------------#
def If(string) : #
String = (string.replace("if ", "")).replace("then :", "")
String = String.replace(" ", "")
limit = ""
isG = False
isL = False
isE = False
isGoE = False
isLoE = False
#Common
if ">" in String :
isG = True
limit = String.find(">")
if "<" in string :
isL = True
limit = String.find("<")
#Equal
if "==" in String :
isE = True
limit = String.find("==")
#Or Equal's
if "<=" in String :
isGoE = True
limit = String.find("<=")
if ">=" in String :
isLoE = True
limit = String.find(">=")
#------------------------------------------------------------------------------#
#This is greather than?
if isG == True :
FstArg = String[:limit]
SstArg = String[limit + 1:]
#1st Arg
#Declareted Vars
if FstArg in ActivInt : FstArg = ActivInt[FstArg]
elif FstArg in ActivFlt : FstArg = ActivFlt[FstArg]
#Int
elif not FstArg.endswith(".f") :
try : FstArg = int(FstArg)
except ValueError : Error("Unknow value", string)
#Float
else :
FstArg = FstArg.replace(".f", "")
try : FstArg = float(FstArg)
except ValueError : Error("Unknow value", string)
#------------------------------------------------------------------------------#
#2st Arg
#Declareted Vars
if SstArg in ActivInt : SstArg = ActivInt[SstArg]
elif SstArg in ActivFlt : SstArg = ActivFlt[SstArg]
#Int
elif not SstArg.endswith(".f") :
try : SstArg = int(SstArg)
except ValueError : Error("Unknow value", string)
#Float
else :
SstArg = SstArg.replace(".f", "")
try : SstArg = float(SstArg)
except ValueError : Error("Unknow value", string)
#Finally
if FstArg > SstArg : return True
else :return False
#------------------------------------------------------------------------------#
#This is less than?
elif isL == True :
FstArg = String[:limit]
SstArg = String[limit + 1:]
#1st Arg
#Declareted Vars
if FstArg in ActivInt : FstArg = ActivInt[FstArg]
elif FstArg in ActivFlt : FstArg = ActivFlt[FstArg]
#Int
elif not FstArg.endswith(".f") :
try : FstArg = int(FstArg)
except ValueError : Error("Unknow value", string)
#Float
else :
FstArg = FstArg.replace(".f", "")
try : FstArg = float(FstArg)
except ValueError : Error("Unknow value", string)
#------------------------------------------------------------------------------#
#2st Arg
#Declareted Vars
if SstArg in ActivInt : SstArg = ActivInt[SstArg]
elif SstArg in ActivFlt : SstArg = ActivFlt[SstArg]
#Int
elif not SstArg.endswith(".f") :
try : SstArg = int(SstArg)
except ValueError : Error("Unknow value", string)
#Float
else :
SstArg = SstArg.replace(".f", "")
try : SstArg = float(SstArg)
except ValueError : Error("Unknow value", string)
#Finally
if FstArg < SstArg : return True
else : return False
#------------------------------------------------------------------------------#
elif isE == True :
FstArg = String[:limit]
SstArg = String[limit + 2:]
#1st Arg
#Declareted Vars
if FstArg in ActivInt : FstArg = ActivInt[FstArg]
elif FstArg in ActivStr : FstArg = ActivStr[FstArg]
elif FstArg in ActivFlt : FstArg = ActivFlt[FstArg]
#String
elif FstArg.startswith("'") : FstArg = FstArg.replace("'", "")
elif FstArg.startswith("\"") : Error("Strings must be in ''", string)
#Int
elif not FstArg.endswith(".f") :
try : FstArg = int(FstArg)
except ValueError : Error("Unknow value", string)
#Float
else :
FstArg = FstArg.replace(".f", "")
try : FstArg = float(FstArg)
except ValueError : Error("Unknow value", string)
#------------------------------------------------------------------------------#
#2st Arg
#Declareted Vars
if SstArg in ActivInt : SstArg = ActivInt[SstArg]
elif SstArg in ActivStr : SstArg = ActivStr[SstArg]
elif SstArg in ActivFlt : SstArg = ActivFlt[SstArg]
#String
elif SstArg.startswith("'") : SstArg = SstArg.replace("'", "")
elif SstArg.startswith("\"") : Error("Strings must be in ''", string)
#Int
elif not SstArg.endswith(".f") :
try : SstArg = int(SstArg)
except ValueError : Error("Unknow value", string)
#Float
else :
SstArg = SstArg.replace(".f", "")
try : SstArg = float(SstArg)
except ValueError : Error("Unknow | |
in self.interface_stp_cfg:
self.cur_cfg["bpdu_filter"] = "enable"
self.existing["bpdu_filter"] = "enable"
else:
self.cur_cfg["bpdu_filter"] = "disable"
self.existing["bpdu_filter"] = "disable"
if self.bpdu_protection:
if "stp bpdu-protection" in self.stp_cfg:
self.cur_cfg["bpdu_protection"] = "enable"
self.existing["bpdu_protection"] = "enable"
else:
self.cur_cfg["bpdu_protection"] = "disable"
self.existing["bpdu_protection"] = "disable"
if self.tc_protection:
if "stp tc-protection" in self.stp_cfg:
self.cur_cfg["tc_protection"] = "enable"
self.existing["tc_protection"] = "enable"
else:
self.cur_cfg["tc_protection"] = "disable"
self.existing["tc_protection"] = "disable"
if self.tc_protection_interval:
if "stp tc-protection interval" in self.stp_cfg:
tmp_value = re.findall(r'stp tc-protection interval (.*)', self.stp_cfg)
if not tmp_value:
self.module.fail_json(
msg='Error: Can not find tc-protection interval on the device.')
self.cur_cfg["tc_protection_interval"] = tmp_value[0]
self.existing["tc_protection_interval"] = tmp_value[0]
else:
self.cur_cfg["tc_protection_interval"] = "null"
self.existing["tc_protection_interval"] = "null"
if self.tc_protection_threshold:
if "stp tc-protection threshold" in self.stp_cfg:
tmp_value = re.findall(r'stp tc-protection threshold (.*)', self.stp_cfg)
if not tmp_value:
self.module.fail_json(
msg='Error: Can not find tc-protection threshold on the device.')
self.cur_cfg["tc_protection_threshold"] = tmp_value[0]
self.existing["tc_protection_threshold"] = tmp_value[0]
else:
self.cur_cfg["tc_protection_threshold"] = "1"
self.existing["tc_protection_threshold"] = "1"
if self.cost:
tmp_value = re.findall(r'stp instance (.*) cost (.*)', self.interface_stp_cfg)
if not tmp_value:
self.cur_cfg["cost"] = "null"
self.existing["cost"] = "null"
else:
self.cur_cfg["cost"] = tmp_value[0][1]
self.existing["cost"] = tmp_value[0][1]
# root_protection and loop_protection should get configuration at the same time
if self.root_protection or self.loop_protection:
if "stp root-protection" in self.interface_stp_cfg:
self.cur_cfg["root_protection"] = "enable"
self.existing["root_protection"] = "enable"
else:
self.cur_cfg["root_protection"] = "disable"
self.existing["root_protection"] = "disable"
if "stp loop-protection" in self.interface_stp_cfg:
self.cur_cfg["loop_protection"] = "enable"
self.existing["loop_protection"] = "enable"
else:
self.cur_cfg["loop_protection"] = "disable"
self.existing["loop_protection"] = "disable"
    def get_end_state(self):
        """Collect the device's STP configuration after changes were applied.

        Re-reads the global (and, for a specific interface, per-interface)
        STP config from the CLI and records in self.end_state only the
        keys that correspond to parameters the user supplied.
        """
        self.cli_get_stp_config()
        # Per-interface config is only relevant when a concrete interface is set.
        if self.interface and self.interface != "all":
            self.cli_get_interface_stp_config()
        if self.stp_mode:
            # Device default is mstp, so absence of an explicit mode line means mstp.
            if "stp mode stp" in self.stp_cfg:
                self.end_state["stp_mode"] = "stp"
            elif "stp mode rstp" in self.stp_cfg:
                self.end_state["stp_mode"] = "rstp"
            else:
                self.end_state["stp_mode"] = "mstp"
        if self.stp_enable:
            # STP is enabled unless an explicit "stp disable" line is present.
            if "stp disable" in self.stp_cfg:
                self.end_state["stp_enable"] = "disable"
            else:
                self.end_state["stp_enable"] = "enable"
        if self.stp_converge:
            if "stp converge fast" in self.stp_cfg:
                self.end_state["stp_converge"] = "fast"
            else:
                self.end_state["stp_converge"] = "normal"
        if self.edged_port:
            # "all" checks the global default; otherwise the interface section.
            if self.interface == "all":
                if "stp edged-port default" in self.stp_cfg:
                    self.end_state["edged_port"] = "enable"
                else:
                    self.end_state["edged_port"] = "disable"
            else:
                if "stp edged-port enable" in self.interface_stp_cfg:
                    self.end_state["edged_port"] = "enable"
                else:
                    self.end_state["edged_port"] = "disable"
        if self.bpdu_filter:
            if self.interface == "all":
                if "stp bpdu-filter default" in self.stp_cfg:
                    self.end_state["bpdu_filter"] = "enable"
                else:
                    self.end_state["bpdu_filter"] = "disable"
            else:
                if "stp bpdu-filter enable" in self.interface_stp_cfg:
                    self.end_state["bpdu_filter"] = "enable"
                else:
                    self.end_state["bpdu_filter"] = "disable"
        if self.bpdu_protection:
            if "stp bpdu-protection" in self.stp_cfg:
                self.end_state["bpdu_protection"] = "enable"
            else:
                self.end_state["bpdu_protection"] = "disable"
        if self.tc_protection:
            if "stp tc-protection" in self.stp_cfg:
                self.end_state["tc_protection"] = "enable"
            else:
                self.end_state["tc_protection"] = "disable"
        if self.tc_protection_interval:
            if "stp tc-protection interval" in self.stp_cfg:
                tmp_value = re.findall(r'stp tc-protection interval (.*)', self.stp_cfg)
                if not tmp_value:
                    self.module.fail_json(
                        msg='Error: Can not find tc-protection interval on the device.')
                self.end_state["tc_protection_interval"] = tmp_value[0]
            else:
                # No explicit interval configured.
                self.end_state["tc_protection_interval"] = "null"
        if self.tc_protection_threshold:
            if "stp tc-protection threshold" in self.stp_cfg:
                tmp_value = re.findall(r'stp tc-protection threshold (.*)', self.stp_cfg)
                if not tmp_value:
                    self.module.fail_json(
                        msg='Error: Can not find tc-protection threshold on the device.')
                self.end_state["tc_protection_threshold"] = tmp_value[0]
            else:
                # Threshold defaults to "1" when unset (unlike interval's "null").
                self.end_state["tc_protection_threshold"] = "1"
        if self.cost:
            # Second capture group is the cost value of "stp instance <n> cost <v>".
            tmp_value = re.findall(r'stp instance (.*) cost (.*)', self.interface_stp_cfg)
            if not tmp_value:
                self.end_state["cost"] = "null"
            else:
                self.end_state["cost"] = tmp_value[0][1]
        if self.root_protection:
            if "stp root-protection" in self.interface_stp_cfg:
                self.end_state["root_protection"] = "enable"
            else:
                self.end_state["root_protection"] = "disable"
        if self.loop_protection:
            if "stp loop-protection" in self.interface_stp_cfg:
                self.end_state["loop_protection"] = "enable"
            else:
                self.end_state["loop_protection"] = "disable"
    def present_stp(self):
        """Bring STP configuration to the desired (present) state.

        Compares every user-supplied parameter against self.cur_cfg, builds
        the list of CLI commands needed to close the gap (mirrored into
        self.updates_cmd for reporting), and loads them onto the device only
        if anything actually changed.
        """
        cmds = list()
        # config stp global
        if self.stp_mode:
            if self.stp_mode != self.cur_cfg["stp_mode"]:
                cmd = "stp mode %s" % self.stp_mode
                cmds.append(cmd)
                self.updates_cmd.append(cmd)
        if self.stp_enable:
            if self.stp_enable != self.cur_cfg["stp_enable"]:
                cmd = "stp %s" % self.stp_enable
                cmds.append(cmd)
                self.updates_cmd.append(cmd)
        if self.stp_converge:
            if self.stp_converge != self.cur_cfg["stp_converge"]:
                cmd = "stp converge %s" % self.stp_converge
                cmds.append(cmd)
                self.updates_cmd.append(cmd)
        # edged_port/bpdu_filter on "all" use the global default commands;
        # the per-interface variants are handled further below.
        if self.edged_port:
            if self.interface == "all":
                if self.edged_port != self.cur_cfg["edged_port"]:
                    if self.edged_port == "enable":
                        cmd = "stp edged-port default"
                        cmds.append(cmd)
                        self.updates_cmd.append(cmd)
                    else:
                        cmd = "undo stp edged-port default"
                        cmds.append(cmd)
                        self.updates_cmd.append(cmd)
        if self.bpdu_filter:
            if self.interface == "all":
                if self.bpdu_filter != self.cur_cfg["bpdu_filter"]:
                    if self.bpdu_filter == "enable":
                        cmd = "stp bpdu-filter default"
                        cmds.append(cmd)
                        self.updates_cmd.append(cmd)
                    else:
                        cmd = "undo stp bpdu-filter default"
                        cmds.append(cmd)
                        self.updates_cmd.append(cmd)
        if self.bpdu_protection:
            if self.bpdu_protection != self.cur_cfg["bpdu_protection"]:
                if self.bpdu_protection == "enable":
                    cmd = "stp bpdu-protection"
                    cmds.append(cmd)
                    self.updates_cmd.append(cmd)
                else:
                    cmd = "undo stp bpdu-protection"
                    cmds.append(cmd)
                    self.updates_cmd.append(cmd)
        if self.tc_protection:
            if self.tc_protection != self.cur_cfg["tc_protection"]:
                if self.tc_protection == "enable":
                    cmd = "stp tc-protection"
                    cmds.append(cmd)
                    self.updates_cmd.append(cmd)
                else:
                    cmd = "undo stp tc-protection"
                    cmds.append(cmd)
                    self.updates_cmd.append(cmd)
        if self.tc_protection_interval:
            if self.tc_protection_interval != self.cur_cfg["tc_protection_interval"]:
                cmd = "stp tc-protection interval %s" % self.tc_protection_interval
                cmds.append(cmd)
                self.updates_cmd.append(cmd)
        if self.tc_protection_threshold:
            if self.tc_protection_threshold != self.cur_cfg["tc_protection_threshold"]:
                cmd = "stp tc-protection threshold %s" % self.tc_protection_threshold
                cmds.append(cmd)
                self.updates_cmd.append(cmd)
        # config interface stp
        if self.interface and self.interface != "all":
            # tmp_changed tracks whether any per-interface command was added,
            # so a bare "interface <x>" entry can be removed again below.
            tmp_changed = False
            cmd = "interface %s" % self.interface
            cmds.append(cmd)
            self.updates_cmd.append(cmd)
            if self.edged_port:
                if self.edged_port != self.cur_cfg["edged_port"]:
                    if self.edged_port == "enable":
                        cmd = "stp edged-port enable"
                        cmds.append(cmd)
                        self.updates_cmd.append(cmd)
                        tmp_changed = True
                    else:
                        cmd = "undo stp edged-port"
                        cmds.append(cmd)
                        self.updates_cmd.append(cmd)
                        tmp_changed = True
            if self.bpdu_filter:
                if self.bpdu_filter != self.cur_cfg["bpdu_filter"]:
                    if self.bpdu_filter == "enable":
                        cmd = "stp bpdu-filter enable"
                        cmds.append(cmd)
                        self.updates_cmd.append(cmd)
                        tmp_changed = True
                    else:
                        cmd = "undo stp bpdu-filter"
                        cmds.append(cmd)
                        self.updates_cmd.append(cmd)
                        tmp_changed = True
            if self.root_protection:
                # root-protection and loop-protection are mutually exclusive.
                if self.root_protection == "enable" and self.cur_cfg["loop_protection"] == "enable":
                    self.module.fail_json(
                        msg='Error: The interface has enable loop_protection, can not enable root_protection.')
                if self.root_protection != self.cur_cfg["root_protection"]:
                    if self.root_protection == "enable":
                        cmd = "stp root-protection"
                        cmds.append(cmd)
                        self.updates_cmd.append(cmd)
                        tmp_changed = True
                    else:
                        cmd = "undo stp root-protection"
                        cmds.append(cmd)
                        self.updates_cmd.append(cmd)
                        tmp_changed = True
            if self.loop_protection:
                if self.loop_protection == "enable" and self.cur_cfg["root_protection"] == "enable":
                    self.module.fail_json(
                        msg='Error: The interface has enable root_protection, can not enable loop_protection.')
                if self.loop_protection != self.cur_cfg["loop_protection"]:
                    if self.loop_protection == "enable":
                        cmd = "stp loop-protection"
                        cmds.append(cmd)
                        self.updates_cmd.append(cmd)
                        tmp_changed = True
                    else:
                        cmd = "undo stp loop-protection"
                        cmds.append(cmd)
                        self.updates_cmd.append(cmd)
                        tmp_changed = True
            if self.cost:
                if self.cost != self.cur_cfg["cost"]:
                    cmd = "stp cost %s" % self.cost
                    cmds.append(cmd)
                    self.updates_cmd.append(cmd)
                    tmp_changed = True
            # Drop the dangling "interface <x>" line if nothing under it changed.
            if not tmp_changed:
                cmd = "interface %s" % self.interface
                self.updates_cmd.remove(cmd)
                cmds.remove(cmd)
        # Only touch the device (and report changed=True) when needed.
        if cmds:
            self.cli_load_config(cmds)
            self.changed = True
def absent_stp(self):
""" Absent stp configuration """
cmds = list()
if self.stp_mode:
if self.stp_mode == self.cur_cfg["stp_mode"]:
if self.stp_mode != "mstp":
cmd = "undo stp mode"
cmds.append(cmd)
self.updates_cmd.append(cmd)
self.changed = True
if self.stp_enable:
if self.stp_enable != self.cur_cfg["stp_enable"]:
cmd = "stp %s" % self.stp_enable
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.stp_converge:
if self.stp_converge == self.cur_cfg["stp_converge"]:
cmd = "undo stp converge"
cmds.append(cmd)
self.updates_cmd.append(cmd)
self.changed = True
if self.edged_port:
if self.interface == "all":
if self.edged_port != self.cur_cfg["edged_port"]:
if self.edged_port == "enable":
cmd = "stp edged-port default"
cmds.append(cmd)
self.updates_cmd.append(cmd)
else:
cmd = "undo stp edged-port default"
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.bpdu_filter:
if self.interface == "all":
if self.bpdu_filter != self.cur_cfg["bpdu_filter"]:
if self.bpdu_filter == "enable":
cmd = "stp bpdu-filter default"
cmds.append(cmd)
self.updates_cmd.append(cmd)
else:
cmd = "undo stp bpdu-filter default"
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.bpdu_protection:
if self.bpdu_protection != self.cur_cfg["bpdu_protection"]:
if self.bpdu_protection == "enable":
cmd = "stp bpdu-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
else:
cmd = "undo stp bpdu-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.tc_protection:
if self.tc_protection != self.cur_cfg["tc_protection"]:
if self.tc_protection == "enable":
cmd = "stp tc-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
else:
cmd = "undo stp tc-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.tc_protection_interval:
if self.tc_protection_interval == self.cur_cfg["tc_protection_interval"]:
cmd = "undo stp tc-protection interval"
cmds.append(cmd)
self.updates_cmd.append(cmd)
self.changed = True
if self.tc_protection_threshold:
if self.tc_protection_threshold == self.cur_cfg["tc_protection_threshold"]:
if self.tc_protection_threshold != "1":
cmd = "undo stp tc-protection threshold"
cmds.append(cmd)
self.updates_cmd.append(cmd)
self.changed = True
# undo interface stp
if self.interface and self.interface != "all":
tmp_changed = False
cmd = "interface %s" % self.interface
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.edged_port:
if self.edged_port != self.cur_cfg["edged_port"]:
if self.edged_port == "enable":
cmd = "stp edged-port enable"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
else:
cmd = "undo stp edged-port"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
if self.bpdu_filter:
if self.bpdu_filter != self.cur_cfg["bpdu_filter"]:
if self.bpdu_filter == "enable":
cmd = "stp bpdu-filter enable"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
else:
cmd = "undo stp bpdu-filter"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
if self.root_protection:
if self.root_protection == "enable" and self.cur_cfg["loop_protection"] == "enable":
self.module.fail_json(
msg='Error: The interface has enable loop_protection, can not enable root_protection.')
if self.root_protection != self.cur_cfg["root_protection"]:
if self.root_protection == "enable":
cmd = "stp root-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
else:
cmd = "undo stp root-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
if self.loop_protection:
if self.loop_protection == "enable" and self.cur_cfg["root_protection"] == "enable":
self.module.fail_json(
msg='Error: The interface has enable root_protection, can not enable loop_protection.')
if self.loop_protection != self.cur_cfg["loop_protection"]:
if self.loop_protection == "enable":
cmd = "stp loop-protection"
cmds.append(cmd)
| |
inverse=False, init=init, hparams=self._fparams,
disable_dropout=disable_dropout, **kwargs)
if self.is_evaluating and check_invertibility:
z_inv_inv, _, _, _ = glow.glow(
"glow", z_inv, targets_mask, decoder_self_attention_bias,
inverse=True, split_zs=zs, init=False, hparams=self._fparams,
disable_dropout=True, **kwargs)
z_diff = z_q - z_inv_inv
tf.summary.scalar("flow_recon_forward", tf.reduce_max(tf.abs(z_diff)))
return log_p_z_base, log_abs_det
def sample_p(
self, targets_length, temp, check_invertibility=False, targets_mask=None,
**kwargs):
hparams = self._hparams
if targets_mask is None:
targets_mask = ops.sequence_mask(targets_length, hparams)
decoder_self_attention_bias = (
common_attention.attention_bias_ignore_padding(1.0 - targets_mask))
batch_size, targets_max_length = (
common_layers.shape_list(targets_mask)[:2])
prior_shape = [batch_size, targets_max_length, hparams.latent_size]
noise = tf.random.normal(prior_shape, stddev=temp)
p_dist = None
if hparams.prior_type == "standard_normal":
z_p = noise
elif hparams.prior_type == "diagonal_normal":
diag_prior_params = ops.cond_prior(
"diag_prior", hparams, tf.zeros(prior_shape), targets_mask,
hparams.latent_size*2, decoder_self_attention_bias, **kwargs)
p_dist = gops.diagonal_normal(diag_prior_params, "diag_prior")
z_p = p_dist.loc + p_dist.scale * noise
elif hparams.prior_type in ["affine", "additive", "rq"]:
n_levels = len(hparams.depths.split("/"))
divi = max(1, hparams.factor**(n_levels-1))
flow_prior_shape = [
batch_size, targets_max_length//divi, hparams.latent_size]
noise = tf.random_normal(flow_prior_shape, stddev=temp)
z_p, _, _, _ = glow.glow(
"glow", noise, targets_mask, decoder_self_attention_bias,
inverse=True, init=False, hparams=self._fparams,
disable_dropout=True, temp=temp, **kwargs)
if self.is_evaluating and check_invertibility:
noise_inv, _, _, _ = glow.glow(
"glow", z_p, targets_mask, decoder_self_attention_bias,
inverse=False, init=False, hparams=self._fparams,
disable_dropout=True, **kwargs)
z_diff = noise - noise_inv
tf.summary.scalar("flow_recon_inverse", tf.reduce_max(tf.abs(z_diff)))
return z_p, p_dist
def optimize(self, loss, num_async_replicas=1, use_tpu=False, variables=None):
"""Return a training op minimizing loss."""
lr = ops.learning_rate_schedule(self.hparams)
if num_async_replicas > 1:
t2t_model.log_info("Dividing learning rate by num_async_replicas: %d",
num_async_replicas)
lr /= math.sqrt(float(num_async_replicas))
train_op = optimize.optimize(
loss, lr, self.hparams, use_tpu=use_tpu, variables=variables)
return train_op
def body(self, features, real_features):
return self.internal(features, real_features)
  def infer(self,
            features,
            *args,
            **kwargs):
    """Produce predictions from the model.

    Runs the model body directly (no beam search) and takes the argmax
    over the output vocabulary; padded target positions are replaced
    with 1s via `targets_mask`.
    """
    del args, kwargs
    inputs_old = None
    # Expand 3-D inputs to the 4-D layout the bottom transforms expect.
    if "inputs" in features and len(features["inputs"].shape) < 4:
      inputs_old = features["inputs"]
      features["inputs"] = tf.expand_dims(features["inputs"], 2)
    # Targets are seeded from the inputs; the body predicts them non-autoregressively.
    features["targets"] = tf.identity(features["inputs"])
    # logits, _ = self(features)
    t2t_model.set_custom_getter_compose(self._custom_getter)
    tf.get_variable_scope().set_initializer(
        optimize.get_variable_initializer(self.hparams))
    with self._eager_var_store.as_default():
      self._fill_problem_hparams_features(features)
      # intentionally disable sharding during inference (in multi GPU)
      with tf.variable_scope(self.name):
        logits, _, _, targets_mask = self.model_fn(features)
    samples = tf.argmax(logits, axis=-1)
    # Keep real positions; force padding positions to token id 1.
    samples = tf.where(
        tf.cast(targets_mask[..., tf.newaxis, tf.newaxis], tf.bool),
        samples, tf.ones_like(samples))
    if inputs_old is not None:  # Restore to not confuse Estimator.
      features["inputs"] = inputs_old
    return samples
  def model_fn(self, features):
    """Run bottom, body, and (optionally) top + loss for one datashard.

    Returns (logits, losses_dict, monitor_dict, targets_mask).  If the body
    already returned a "training" loss, top and loss computation are skipped
    and the body output is used as logits directly.
    """
    with tf.variable_scope(
        tf.get_variable_scope(), use_resource=True, reuse=tf.AUTO_REUSE):
      transformed_features = self.bottom(features)
      # Optionally cast activations down to bfloat16.
      if self.hparams.activation_dtype == "bfloat16":
        for k, v in sorted(six.iteritems(transformed_features)):
          if v.dtype == tf.float32:
            transformed_features[k] = tf.cast(v, tf.bfloat16)
      t2t_model.log_info("Building model body")
      output, losses, monitor, targets_mask = self.body(
          transformed_features, features)
      output, losses = self._normalize_body_output((output, losses))
      if "training" in losses:
        t2t_model.log_info(
            "Skipping T2TModel top and loss because training loss "
            "returned from body")
        logits = output
      else:
        logits = self.top(output, features)
        losses["training"] = 0.0
        # No training loss in predict/attack modes.
        if (self._hparams.mode != tf.estimator.ModeKeys.PREDICT and
            self._hparams.mode != "attack"):
          losses["training"] = self.loss(logits, features)
      return logits, losses, monitor, targets_mask
  def model_fn_sharded(self, sharded_features):
    """Estimator model_fn sharded along batch dimension.

    Args:
      sharded_features: {str: [Tensor]}. Features sharded along batch dimension.
        Each list is the same length (== number of shards).

    Returns:
      sharded_logits: [Tensor]. Logits for each shard of examples.
      losses: {str: 0-D Tensor}. Loss averaged across shards.
    """
    dp = self._data_parallelism
    # [{str: Tensor}]. Transpose of 'sharded_features'.
    datashard_to_features = self._to_features_per_datashard(sharded_features)
    sharded_logits, sharded_losses, sharded_monitors, _ = (
        dp(self.model_fn, datashard_to_features))
    sharded_logits, sharded_losses = dp(
        self.maybe_scheduled_sampling,
        datashard_to_features, sharded_logits, sharded_losses)
    # Dict-valued logits: regroup per-key lists across shards.
    if isinstance(sharded_logits[0], dict):
      temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])}
      for k, _ in six.iteritems(sharded_logits[0]):
        for l in sharded_logits:
          temp_dict[k].append(l[k])
      sharded_logits = temp_dict
    losses = t2t_model.average_sharded_losses(sharded_losses)
    # Average each monitored scalar over the shards and write summaries.
    monitor = {}
    for key in list(sharded_monitors[0].keys()):
      monitor[key] = (
          tf.add_n([m[key] for m in sharded_monitors]) / len(sharded_monitors))
    ops.save_summary(monitor, "monitor")
    return sharded_logits, losses
@registry.register_hparams
def wmt_enro_tpu():
  """Transformer-base hparams for WMT En-Ro on TPU (batch size 512)."""
  hp = transformer.transformer_base()
  hp = transformer.update_hparams_for_tpu(hp)
  hp.batch_size = 512
  return hp
@registry.register_hparams
def iwslt_baseline_gpu():
  """HParams for the IWSLT baseline Transformer model on GPU.

  Shrinks transformer_base (IWSLT is a small corpus) and uses a uniform
  0.1 dropout throughout.  Docstring fixed: this is the GPU config, not TPU.
  """
  hparams = transformer.transformer_base()
  # Smaller network than transformer_base.
  hparams.hidden_size = 256
  hparams.filter_size = 1024
  hparams.num_hidden_layers = 5
  hparams.num_heads = 2
  # Uniform dropout.
  hparams.layer_prepostprocess_dropout = 0.1
  hparams.attention_dropout = 0.1
  hparams.relu_dropout = 0.1
  hparams.dropout = 0.1
  return hparams
@registry.register_hparams
def iwslt_baseline_single_gpu():
  """HParams for the IWSLT baseline Transformer model on a single GPU.

  Smaller batch plus a constant-scaled schedule with a long warmup to
  compensate for the reduced effective batch size.  Docstring fixed:
  this is the single-GPU config, not TPU.
  """
  hparams = iwslt_baseline_gpu()
  hparams.batch_size = 1024
  hparams.learning_rate_schedule = "constant*linear_warmup*rsqrt_decay"
  hparams.learning_rate_constant = 0.1
  hparams.learning_rate_warmup_steps = 16000
  return hparams
@registry.register_hparams
def iwslt_baseline_tpu():
  """IWSLT baseline Transformer hparams adapted for TPU."""
  hp = transformer.transformer_base()
  transformer.update_hparams_for_tpu(hp)
  # Reduced model size for the small IWSLT corpus.
  hp.num_hidden_layers = 5
  hp.hidden_size = 256
  hp.filter_size = 1024
  hp.num_heads = 2
  # Uniform dropout of 0.1 everywhere.
  hp.dropout = 0.1
  hp.layer_prepostprocess_dropout = 0.1
  hp.attention_dropout = 0.1
  hp.relu_dropout = 0.1
  hp.add_hparam("pos_attn", False)
  return hp
@registry.register_hparams
def iwslt_base():
  """Base IWSLT hparams: slimmed transformer_base plus length prediction."""
  # Model architecture.
  hp = transformer.transformer_base()
  hp.num_hidden_layers = 5
  hp.hidden_size = 256
  hp.filter_size = 1024
  hp.num_heads = 4
  # Keep summaries light.
  hp.summarize_grads = False
  hp.summarize_vars = False
  # Optimization.
  hp.clip_grad_norm = 1.0
  hp.learning_rate_decay_scheme = "noam"
  hp.learning_rate_warmup_steps = 8000
  hp.learning_rate = 0.2
  hp.learning_rate_schedule = (
      "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size")
  hp.learning_rate_constant = 2.0
  # Target-length prediction.
  hp.add_hparam("predict_target_length", True)
  hp.add_hparam("lendiff_bound", 30)
  hp = update_hparams_for_tpu(hp)
  hp.add_hparam("pos_attn", False)
  return hp
@registry.register_hparams
def iwslt_diag():
  """IWSLT latent-variable model with diagonal-normal posterior and prior."""
  hp = iwslt_base()
  hp.batch_size = 4096
  # Non-autoregressive decoding.
  hp.force_full_predict = True
  hp.causal_decoder_self_attention = False
  # Latent-variable (VAE) flags.
  hp.add_hparam("latent_size", 256)
  hp.add_hparam("anneal_min_value", 0.0)
  hp.add_hparam("kl_startup_steps", 5000)
  hp.add_hparam("kl_anneal_steps", 20000)
  hp.add_hparam("n_posterior_layers", 3)
  hp.add_hparam("n_decoder_layers", 3)
  hp.add_hparam("posterior_2d_dropout", 0.20)
  # posterior_type: diagonal_normal / affine / additive / rq
  hp.add_hparam("posterior_type", "diagonal_normal")
  # prior_type: standard_normal / diagonal_normal
  hp.add_hparam("prior_type", "diagonal_normal")
  hp.add_hparam("decoder_2d_dropout", 0.00)
  # Optimization.
  hp.learning_rate_warmup_steps = 8000
  hp.learning_rate_constant = 2.0
  # Uniform dropout of 0.2.
  hp.layer_prepostprocess_dropout = 0.2
  hp.attention_dropout = 0.2
  hp.relu_dropout = 0.2
  hp.dropout = 0.2
  # KL regularization and evaluation flags.
  hp.add_hparam("kl_reg", 0.0)
  hp.add_hparam("n_gibbs_steps", 0)
  hp.add_hparam("compute_kl_refinement", False)
  hp.add_hparam("compute_iw_marginal", False)
  hp.add_hparam("n_samples", 1)
  return hp
@registry.register_hparams
def wmt_diag_base():
  """iwslt_diag scaled up to transformer-base size for WMT."""
  hp = iwslt_diag()
  hp.batch_size = 4096
  # Transformer-base sized network.
  hp.num_hidden_layers = 6
  hp.hidden_size = 512
  hp.filter_size = 2048
  hp.num_heads = 8
  # Latent/posterior scaled to match the hidden size.
  hp.latent_size = 512
  hp.n_posterior_layers = 4
  hp.n_decoder_layers = 6
  # Lighter regularization than the IWSLT config.
  hp.dropout = 0.1
  hp.layer_prepostprocess_dropout = 0.1
  hp.attention_dropout = 0.1
  hp.relu_dropout = 0.1
  return hp
@registry.register_hparams
def wmt_diag_small():
  """wmt_diag_base with 3 posterior/decoder layers and KL reg 1e-4."""
  hp = wmt_diag_base()
  hp.kl_reg = 1e-4
  hp.n_posterior_layers = 3
  hp.n_decoder_layers = 3
  return hp
@registry.register_hparams
def wmt_diag_small_trueadam():
  """wmt_diag_small trained with the true_adam optimizer."""
  hp = wmt_diag_small()
  hp.optimizer = "true_adam"
  return hp
@registry.register_hparams
def wmt_diag_small_trueadam_longer():
  """wmt_diag_small_trueadam with longer warmup (20k) and LR constant 4.0."""
  hp = wmt_diag_small_trueadam()
  hp.learning_rate_warmup_steps = 20000
  hp.learning_rate_constant = 4.0
  return hp
@registry.register_hparams
def wmt_diag_small_trueadam_shorter():
  """wmt_diag_small_trueadam with shorter warmup (4k) and LR constant 2.0."""
  hp = wmt_diag_small_trueadam()
  hp.learning_rate_warmup_steps = 4000
  hp.learning_rate_constant = 2.0
  return hp
@registry.register_hparams
def wmt_diag_base_trueadam_1e4():
  """wmt_diag_base: true_adam, KL reg 1e-4, 8k warmup, LR constant 2.0."""
  hp = wmt_diag_base()
  hp.optimizer = "true_adam"
  hp.kl_reg = 1e-4
  hp.learning_rate_warmup_steps = 8000
  hp.learning_rate_constant = 2.0
  return hp
@registry.register_hparams
def wmt_diag_base_trueadam_longer_1e4():
  """wmt_diag_base_trueadam_1e4 with longer warmup (20k), LR constant 4.0."""
  hp = wmt_diag_base_trueadam_1e4()
  hp.learning_rate_warmup_steps = 20000
  hp.learning_rate_constant = 4.0
  return hp
@registry.register_hparams
def wmt_diag_base_trueadam_shorter_1e4():
  """wmt_diag_base_trueadam_1e4 with shorter warmup (4k), LR constant 2.0."""
  hp = wmt_diag_base_trueadam_1e4()
  hp.learning_rate_warmup_steps = 4000
  hp.learning_rate_constant = 2.0
  return hp
@registry.register_hparams
def wmt_diag_base_1e4_trueadam():
  """wmt_diag_base with KL reg 1e-4 and the true_adam optimizer."""
  hp = wmt_diag_base()
  hp.optimizer = "true_adam"
  hp.kl_reg = 1e-4
  return hp
@registry.register_hparams
def wmt_diag_base_1e4_trueadam_longer():
  """wmt_diag_base_1e4_trueadam with longer warmup (20k), LR constant 4.0."""
  hp = wmt_diag_base_1e4_trueadam()
  hp.learning_rate_warmup_steps = 20000
  hp.learning_rate_constant = 4.0
  return hp
@registry.register_hparams
def wmt_diag_base_1e4_trueadam_shorter():
  """wmt_diag_base_1e4_trueadam with shorter warmup (4k), LR constant 2.0."""
  hp = wmt_diag_base_1e4_trueadam()
  hp.learning_rate_warmup_steps = 4000
  hp.learning_rate_constant = 2.0
  return hp
@registry.register_hparams
def wmt_diag_base_1e4():
  """wmt_diag_base with KL regularization weight 1e-4."""
  hp = wmt_diag_base()
  hp.kl_reg = 1e-4
  return hp
@registry.register_hparams
def wmt_diag_base_longer_1e4():
  """wmt_diag_base_1e4 with a longer, higher-LR warmup."""
  hp = wmt_diag_base_1e4()
  hp.learning_rate_constant = 4.0
  hp.learning_rate_warmup_steps = 20000
  return hp
@registry.register_hparams
def wmt_diag_base_shorter_1e4():
  """wmt_diag_base_1e4 with a shorter warmup schedule."""
  hp = wmt_diag_base_1e4()
  hp.learning_rate_constant = 2.0
  hp.learning_rate_warmup_steps = 4000
  return hp
@registry.register_hparams
def iwslt_diag_1e5():
  """iwslt_diag with KL regularization weight 1e-5."""
  hp = iwslt_diag()
  hp.kl_reg = 1e-5
  return hp
@registry.register_hparams
def iwslt_diag_1e4():
  """iwslt_diag with KL regularization weight 1e-4."""
  hp = iwslt_diag()
  hp.kl_reg = 1e-4
  return hp
@registry.register_hparams
def iwslt_affine():
  """iwslt_diag switched to an affine (Glow-style) prior."""
  hp = iwslt_diag()
  hp.prior_type = "affine"
  hp.batch_size = 2048
  hp.latent_size = 256
  # Glow flow configuration.
  hp.add_hparam("depths", "4/8/8")  # n_levels is inferred from depths
  hp.add_hparam("step_fn", "glow")  # glow / chunting
  hp.add_hparam("affine_scale", "glow")  # glow / jason
  hp.add_hparam("conv_fn", "np")  # np / tf
  hp.add_hparam("split_plans", "cat/cat/ca")
  hp.add_hparam("factor", 2)  # squeezing factor
  hp.add_hparam("n_layers_transform_params", 1)
  hp.add_hparam("n_1x1_heads", 4)
  hp.add_hparam("flow_num_heads", 4)
  hp.add_hparam("flow_hidden_size", 256)
  hp.add_hparam("flow_filter_size", 512)
  # Cap on per-step scale change.
  hp.add_hparam("scale_width", 0.999)
  # Flow-internal dropout rates (all disabled).
  hp.add_hparam("flow_layer_prepostprocess_dropout", 0.0)
  hp.add_hparam("flow_attention_dropout", 0.0)
  hp.add_hparam("flow_relu_dropout", 0.0)
  # Keep all computation in float32.
  hp.activation_dtype = "float32"
  hp.weight_dtype = "float32"
  return hp
@registry.register_hparams
def wmt_affine():
  """WMT-scale variant of iwslt_affine."""
  hp = iwslt_affine()
  hp.batch_size = 2048  # TODO(jason) : address this later.
  hp.num_hidden_layers = 6
  hp.hidden_size = 256
  hp.filter_size = 1024
  hp.num_heads = 8
  # Posterior / decoder (VAE) settings.
  hp.latent_size = 256
  hp.n_posterior_layers = 4
  hp.n_decoder_layers = 4
  hp.layer_prepostprocess_dropout = 0.1
  hp.attention_dropout = 0.1
  hp.relu_dropout = 0.1
  # Flow settings.
  hp.flow_num_heads = 8
  hp.flow_filter_size = 512
  return hp
@registry.register_hparams
def wmt_affine_base():
  """wmt_affine scaled up to 320-dim hidden/latent sizes."""
  hp = wmt_affine()
  hp.batch_size = 2048
  hp.hidden_size = 320
  hp.latent_size = 320
  hp.flow_filter_size = 640
  return hp
@registry.register_hparams
def wmt_affine_base_small():
  """wmt_affine_base with shallower flows and kl_reg=1e-4."""
  hp = wmt_affine_base()
  hp.depths = "4/4/4"
  hp.kl_reg = 1e-4
  hp.learning_rate_constant = 2.0
  hp.learning_rate_warmup_steps = 8000
  return hp
@registry.register_hparams
def wmt_affine_base_trueadam_small():
"""Set of hyperparameters."""
hparams | |
<gh_stars>0
import os
import re
import ast
import sys
import json
import uuid
import MySQLdb
import functools
import threading
import subprocess
import unicodedata
import flask, flask.views
app = flask.Flask(__name__)
# Don't do this!
# NOTE(security): hard-coded secret key -- use a random per-deployment value.
app.secret_key = "bacon"
# Application directory; all config/data paths below are relative to it.
loc = os.getcwd()+"/"
# Registry credentials, filled from config.txt.
registry = ""
regmail = ""
reguser = ""
regpas = ""
# Database credentials, filled from config.txt.
dbhost = ""
dbuser = ""
dbpasswd = ""
# Parse static/configs/config.txt. Expected line format: "<key> <sep> <value>"
# (the value is the third whitespace-separated token).
with open(loc+"static/configs/config.txt") as f:
    details = f.read()
for line in details.splitlines():
    parts = line.split()
    if not parts:
        continue
    key = parts[0]
    if key == "registry":
        registry = parts[2]
    elif key == "regmail":
        regmail = parts[2]
    elif key == "reguser":
        reguser = parts[2]
    elif key == "regpas":
        regpas = parts[2]
    elif key == "dbhost":
        dbhost = parts[2]
    elif key == "dbuser":
        dbuser = parts[2]
    elif key == "dbpasswd":
        dbpasswd = parts[2]
# Mutable module-level state shared across views and worker threads.
grps = []
radio1 = ''
radio2 = ''
search1 = ''
search2 = ''
dld = []      # component images currently downloading
dld_lsn = []  # lessons currently downloading
output = []
def executer(cmd):
    """Run *cmd* (an argv list) and return its (stdout, stderr) pair."""
    proc = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    return proc.communicate()
def thread_executer(cmd):
    # Run *cmd* to completion and, if it reports success, drop cmd[-1]
    # from the global download list. Success is detected by the
    # second-to-last stdout line starting with 'Digest:' -- presumably
    # cmd is a 'docker pull' whose last token is the image name; confirm.
    global dld
    print "in thread",cmd
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    out, err = p.communicate()
    # NOTE(review): raises IndexError if the command printed fewer than
    # two lines of output.
    temp=out.splitlines()[-2].split()[0]
    if temp=='Digest:':
        dld.remove(cmd[-1])
def downloader(cmd,image,info):
    # Worker-thread body: pull *image* via *cmd* (a 'docker pull' argv,
    # cmd[2] is the registry-qualified name), re-tag it to the local
    # *image* name, and write its [title, description] pair *info* to
    # static/lessons/. The global list `dld` tracks in-flight downloads.
    global dld,loc
    dld.append(image)
    print "Downloading ",image
    out,err = executer(cmd)
    print "finished dld ",image
    # 'Digest' in the pull output indicates the pull succeeded.
    if 'Digest' in out:
        try:
            # Re-tag the registry image under its plain local name.
            cmd = ['docker','tag',cmd[2],image]
            out,err = executer(cmd)
            try:
                print loc+"static/lessons/"+image.replace('/','-')
                with open(loc+"static/lessons/"+image.replace('/','-'),'w') as f:
                    f.write(info[0]+"\n"+info[1])
                f.close()
            except:
                print "error writing file -",image
        except:
            print "error renaming ",image
    else:
        print "failed downloading",image
    # Remove every occurrence of the image from the download list so
    # waiters in add_lesson() can proceed.
    while image in dld : dld.remove(image)
    print "exiting ",image
def add_lesson(old_lesn,lesn,index,line,info):
    # Worker-thread body: wait until every component of the lesson has
    # finished downloading, then record the lesson *line* in
    # static/configs/lesson.txt and its [title, description] *info* in
    # static/lessons/<lesn>.
    global dld,loc
    global dld_lsn
    flag = 1
    print "enter loop - add_lesson"
    # NOTE(review): busy-wait (no sleep) until no component in *index*
    # is still present in the global download list `dld`.
    while flag:
        flag = 0
        for item in index:
            if item in dld:
                flag = 1
    print "exit loop - add_lesson"
    dld_lsn.remove(old_lesn)
    target = loc+'static/configs/lesson.txt'
    try:
        # Does the lesson already have a line in lesson.txt?
        cmd=['grep','^'+lesn+' ',target]
        val,err = executer(cmd)
        #add or replace line in the configs/lesson.txt file
        if val:
            print "Replacing line"
            # In-place replace via sed's change (c) command.
            cmd=['sed','-i','/^'+lesn+' /c '+line,target]
            val = executer(cmd)
        else:
            print "Adding line"
            with open(target, 'a') as f:
                f.write(line)
            f.close()
        #add description about lesson in the static/lessons/ folder
        with open(loc+'static/lessons/'+lesn,'w') as f:
            f.write(info[0]+'\n'+info[1])
        f.close()
    except:
        print "error writing file",lesn
def thread_executer_2(cmd,args):
    # Worker-thread body: for a 'play' request, first write the two
    # playbook files named by cmd[2]/cmd[3] with contents args[1]/args[2],
    # then launch *cmd* detached (no pipes, close_fds) and return without
    # waiting for it.
    global dld
    print "in thread",cmd
    if args[0] == 'play':
        try:
            f = open(cmd[2],'w')
            f.write(args[1])
            f.close()
            f = open(cmd[3],'w')
            f.write(args[2])
            f.close()
        except:
            print "Error creating playbook ",cmd
    p = subprocess.Popen(cmd,shell=False,stdin=None,stdout=None,stderr=None,close_fds=True)
    print "out of process",cmd
def reader(fname):
    """Return the lines of *fname* as a list, or [] if it cannot be read.

    Only I/O errors (missing file, permissions, ...) are swallowed; any
    other exception propagates instead of being hidden by a bare except.
    """
    try:
        with open(fname) as f:
            return f.read().splitlines()
    except (IOError, OSError):
        return []
def db_ops(cmds,arg):
    """Execute *cmds* (a list of SQL strings) against the 'lense' database.

    Returns the fetched rows of the last statement. When *arg* == 1 the
    transaction is committed before returning.

    Fixes over the original: the connection is closed in a finally block
    (the old ``db.close()`` sat after ``return`` and was unreachable),
    and the password placeholder is replaced by the module-level
    ``dbpasswd`` read from config.txt.
    """
    global dbuser, dbpasswd, dbhost
    db = MySQLdb.connect(host=dbhost,
                         user=dbuser,
                         passwd=dbpasswd,
                         db="lense")
    try:
        cur = db.cursor()
        for cmd in cmds:
            # NOTE(security): callers build these SQL strings by
            # concatenation; prefer parameterized queries upstream.
            cur.execute(cmd)
        result = cur.fetchall()
        #commit if arg = 1
        if arg == 1:
            db.commit()
        #return the results
        return result
    finally:
        db.close()
def filechecker():
    """Create static/configs/lesson.txt (placeholder content) if missing.

    The path is relative to the process working directory, matching the
    rest of the app. A leftover debug print was removed and the file
    handle is now managed by a with-statement.
    """
    path = "static/configs/lesson.txt"
    if not os.path.exists(path):
        with open(path, "w") as fh:
            fh.write(' ')
class Main(flask.views.MethodView):
    """Login page: GET renders it, POST authenticates or logs out."""
    def get(self):
        return flask.render_template('index.html')
    def post(self):
        flag = []
        # Logout request: drop the session user and return to the login page.
        if 'logout' in flask.request.form:
            flask.session.pop('username', None)
            return flask.redirect(flask.url_for('index'))
        required = ['username', 'passwd']
        for r in required:
            if r not in flask.request.form:
                flask.flash("Error: {0} is required.".format(r))
                return flask.redirect(flask.url_for('index'))
        username = flask.request.form['username']
        passwd = flask.request.form['passwd']
        # NOTE(security): SQL built by string concatenation from form
        # input -- classic SQL injection; should use a parameterized query.
        cmd = "SELECT * FROM users WHERE passwd='"+passwd+"' AND uname='"+username+"'"
        flag=db_ops([cmd],0)
        #flag = 1
        #check if all files are available
        filechecker()
        #if username in users and users[username] == passwd:
        # A non-empty result set means the credentials matched.
        if flag:
            flask.session['username'] = username
            # Persist the current user for other processes to read.
            with open('/tmp/.esnel','w') as f:
                f.write(username)
            f.close()
        else:
            flask.flash("Username doesn't exist or incorrect password")
        return flask.redirect(flask.url_for('home'))
def login_required(method):
    """View decorator: bounce to the login page unless a user is in session."""
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        if 'username' not in flask.session:
            flask.flash("A login is required to proceed!")
            return flask.redirect(flask.url_for('index'))
        return method(*args, **kwargs)
    return wrapper
class Repo(flask.views.MethodView):
    """Registry browser: GET lists registry contents, POST starts
    downloads of a single component or of a whole lesson."""
    @login_required
    def get(self):
        # Build a summary dict of the registry and flash it to repo.html:
        #   index: lesson -> space-separated component list
        #   lesns: lesson -> [title, description]
        #   comps: image  -> [title, description]
        #   dld / dld_lsn: downloads currently in flight
        global dld_lsn
        global dld
        global registry,regmail,reguser,regpas
        #dld=['lesson3']
        # Docker Registry v2 catalog of repositories.
        cmd=['curl','https://'+reguser+':'+regpas+'@'+registry+'/v2/_catalog']
        out1, out2 = executer(cmd)
        temp = {'index':{},'lesns':{},'comps':{},'dld':dld,'dld_lsn':dld_lsn}
        try:
            images= ast.literal_eval(out1.splitlines()[0])['repositories']
            for image in images:
                #check if description for component exist and add it to temp
                #cmd=['curl','http://test:user@registry.cs.uno.edu/'+image.replace('/','-')]
                cmd=['curl','http://'+reguser+':'+regpas+'@'+registry+'/'+image.replace('/','-')]
                out1, out2 = executer(cmd)
                desc=out1.splitlines()
                # A leading '<html>' (or empty) line means no description file.
                if desc[0]!='<html>' and desc[0]!='':
                    temp['comps'][image]=[desc[0],'\n'.join(desc[1:])]
                    #check if description for lesson exist and add it to temp, if absent
                    # Lesson name is the part before the first '/'.
                    image=image.split('/')[0]
                    try:
                        # KeyError below doubles as "not seen yet".
                        if temp['lesns'][image]:
                            pass
                    except:
                        #cmd=['curl','http://test:<EMAIL>@<EMAIL>/'+image]
                        cmd=['curl','http://'+reguser+':'+regpas+'@'+registry+'/'+image]
                        out1, out2 = executer(cmd)
                        desc=out1.splitlines()
                        if desc[0]!='<html>' and desc[0]!='':
                            temp['lesns'][image]=[desc[0],'\n'.join(desc[1:])]
                            #check if index for lesson exist and add to temp, if absent
                            try:
                                if temp['index'][image]:
                                    pass
                            except:
                                #cmd=['curl','http://test:user@registry.cs.uno.edu/'+image+'_index']
                                cmd=['curl','http://'+reguser+':'+regpas+'@'+registry+'/'+image+'_index']
                                out1, out2 = executer(cmd)
                                desc=out1.splitlines()[0]
                                if desc!='<html>' and desc!='':
                                    temp['index'][image]=desc
                        else:
                            temp['lesns'][image]=['n/a','n/a']
                else:
                    temp['comps'][image]=['n/a','n/a']
        except:
            print "some error in getting repo data"
        result = temp
        print result
        flask.flash(result)
        return flask.render_template('repo.html')
    @login_required
    def post(self):
        global dld_lsn
        global loc
        global registry,regmail,reguser,regpas
        flag = 0
        #login to the registry server
        #cmd = ['docker','login','-u','test','-p','user','--email="<EMAIL>"','https://registry.cs.uno.edu']
        cmd = ['docker','login','-u',reguser,'-p',regpas,'--email="'+regmail+'"','https://'+registry]
        out1,out2=executer(cmd)
        # The form carries either a 'lesn' (whole lesson) or a 'comp'
        # (single component) field; the KeyError on 'lesn' selects the
        # component branch.
        try:
            request = flask.request.form['lesn']
            request = ast.literal_eval(request)
            lesn = request[0]
            cont = request[1]
            #info = cont['comps'][image]
            flag = 1
        except:
            request = flask.request.form['comp']
            request = ast.literal_eval(request)
            image = request[0]
            cont = request[1]
            info = cont['comps'][image]
            #download just the component image from the registry server in a thread
            cmd = ['docker','pull',registry+'/'+image]
            t = threading.Thread(name='child procs', target=downloader, args=[cmd,image,info])
            t.daemon = True
            t.start()
            #return to back to web page
            return flask.redirect(flask.url_for('repo'))
        #add code if lesson is to be saved under a new name
        new_lsn = lesn
        #add lesson to the download list for lessons
        dld_lsn.append(lesn)
        #print lesn,'\n', cont
        new_cont = []
        # Spawn one downloader thread per component of the lesson.
        for comp in cont['index'][lesn].split()[1:]:
            print "loop main",comp
            image1 = comp.replace(lesn,new_lsn)
            image = image1.replace('-','/')
            new_cont.append(image1)
            #download image from the registry server in a thread
            cmd = ['docker','pull',registry+'/'+image]
            info = cont['comps'][image]
            t = threading.Thread(name='child procs', target=downloader, args=[cmd,image,info])
            t.daemon = True
            t.start()
        #get description from POST and other attributes required for the lesson
        desc = cont['lesns'][lesn]
        line = new_lsn+' '+' '.join(new_cont)
        index = new_cont
        # add_lesson waits for the component downloads, then records the lesson.
        t = threading.Thread(name='child procs', target=add_lesson, args=[lesn,new_lsn,index,line,desc])
        t.daemon = True
        t.start()
        return flask.redirect(flask.url_for('repo'))
class Home(flask.views.MethodView):
    @login_required
    def get(self):
        # Build index2, a per-lesson summary of local docker state, and
        # flash it to home.html. Shape (example in the comment below):
        # lesson -> {'status': Y/N/S, 'desc': [title, text],
        #            'comps': comp -> {'index', 'desc', 'status', 'network'}}
        global loc
        #index2 {'lesson1': {'status': 'Y', 'comps': {'lesson1/comp1': {'status': ['Y'], 'index': ['lesson1/comp1', 'latest', '252f198a8beb', 'ago 380MB', 'Y', []], 'desc': ['Web Server', 'LAMP server hosting a PHP webpage.']}}, 'desc': ['SQL Injection to Shell II', 'This exercise explains how you can, from a blind SQL injection, gain access to the administration console. Then once in the administration console, how you can run commands on the system. ']}}
        #check if all files are available
        filechecker()
        #---------------------------------
        #check for status of containers
        # index3: container name -> [image id prefix, 'Y' (up) / 'S' (stopped)]
        cmd = ['docker', 'ps', '-a']
        out1, out2 = executer(cmd)
        index3={}
        index4=[]
        tag = ""
        if out1:
            temp2=[]
            temp3=[]
            flag=0
            for line in out1.splitlines():
                if 'lesson' in line:
                    var1=line.split()
                    # The token after 'ago' is the container state column.
                    if var1[var1.index('ago')+1] == 'Up':
                        index3[var1[-1]]=[var1[1],'Y']
                    else:
                        index3[var1[-1]]=[var1[1],'S']
                    index4.append(var1[-1])
        print "Home",index3,index4
        # index1: image name -> [name, tag, id, size, status, build steps]
        index1={}
        temp2=[]
        #check downloaded images
        cmd = ['docker', 'images']
        out1, out2 = executer(cmd)
        for line in out1.splitlines():
            temp3 = []
            flags = []
            temp = line.split()
            if line.startswith('lesson'):
                status=''
                #555 history command no longer gives you image id of intermediate containers
                cmd = ["docker","history","--no-trunc",temp[0]]
                temp2=executer(cmd)
                image = []
                flags = 0
                # Collect build steps tagged with the '"@STEP@' marker.
                for step in temp2[0].splitlines():
                    if '"@STEP@' in step:
                        step = step.split()
                        image = step[0][0:3]
                        temp1=[]
                        try:
                            temp1=index3[temp[0].replace('/','-')]
                            if image == temp1[0] :
                                #print temp1
                                flags=temp1[1]
                            else:
                                temp1=['','']
                        except:
                            temp1=['','']
                        temp3.append([image,temp1[1],' '.join(step[step.index('"@STEP@')+1:-2])[:-1]])
                if image:
                    temp[2]=image
                # Fall back to the container table for a status, else 'N'.
                if not flags:
                    try:
                        flags=index3[temp[0].replace('/','-')][1]
                    except:
                        flags='N'
                index1[temp[0]]=[temp[0],temp[1],temp[2],' '.join(temp[-2:]),flags,temp3[::-1]]
        print "index",index1
        temp=[]
        index2={}
        # lesson.txt lines: "<lesson> <comp1> <comp2> ...".
        fname=loc+'static/configs/lesson.txt'
        with open(fname) as f:
            temp=f.read().splitlines()
        for item in temp:
            count1 = count2 = 0
            item = item.split()
            index2[item[0]]={}
            if True:
                #check files and add the lesson title and description
                try:
                    fbuf=[]
                    fname=loc+'static/lessons/'+item[0]
                    with open(fname) as f:
                        fbuf=f.read().splitlines()
                    index2[item[0]]['desc']=[fbuf[0],''.join(fbuf[1:])]
                except:
                    index2[item[0]]['desc']=['','']
                index2[item[0]]['comps']={}
                index2[item[0]]['status']=''
                #print item,index2
                for key in item[1:]:
                    #check files and add the component title and description
                    print "--",key
                    try:
                        fbuf=[]
                        fname='static/lessons/'+key
                        with open(fname) as f:
                            fbuf=f.read().splitlines()
                        comp_desc = [fbuf[0],''.join(fbuf[1:])]
                    except:
                        comp_desc = ['','']
                    ip = 'n/a'
                    # count1/count2 tally running / absent components.
                    try:
                        temp3=index1[key.replace('-','/')]
                        if temp3[4]=='Y':
                            cmd = ['docker','inspect','--format','{{ .NetworkSettings.IPAddress}}',key]
                            ip,err = executer(cmd)
                            ip = ip.rstrip()
                            count1+=1
                        elif temp3[4]=='N':
                            count2+=1
                    except:
                        temp3=[]
                    # NOTE(review): if the image was missing, temp3 is []
                    # and temp3[4] below raises IndexError outside any try.
                    index2[item[0]]['comps'][key.replace('-','/')]={'index':temp3,'desc':comp_desc,'status':[temp3[4]],'network':[ip]}
                    #print key,comp_desc,temp3
                #print index2
                print item[1:],count1,count2
                # Lesson status: Y = all running, N = none present, S = mixed.
                if count1 == len(item[1:]):
                    index2[item[0]]['status']='Y'
                elif count2 == len(item[1:]) :
                    index2[item[0]]['status']='N'
                else:
                    index2[item[0]]['status']='S'
        #print "new"
        #print index3
        #print index1
        print "index2",index2
        flask.flash(index2,'lesson')
        return flask.render_template('home.html')
@login_required
def post(self):
request = flask.request.form
result = {}
temp1 = []
temp2 = []
print request
try:
if request['start-all']:
print request['start-all']
try:
temp=ast.literal_eval(request['start-all'])
targets=temp.keys()
except:
pass
print targets
for cont in targets:
image = temp[cont]['index'][2]
print "starting container ",cont,image
cmd = ['docker', 'run', '-Pitd', '--name='+cont.replace('/','-'), image]
out1, out2 = executer(cmd)
print "out-",cont,out2
except:
try:
if request['stop-all']:
try:
temp=ast.literal_eval(request['stop-all'])
request=temp.keys()
except:
request=[request['stop-all']]
print "stop all containers ",request
for cont in request:
cont = cont.replace('/','-')
print "stopping container "+cont
cmd = ['docker', 'stop', cont]
out1, out2 = executer(cmd)
except:
try:
if request['reset-all']:
try:
conts = ast.literal_eval(request['reset-all'])
targets = conts.keys()
except:
targets = [request['reset-all']]
for cont in targets:
print | |
<gh_stars>0
import sys
import time
import stat
from typing import Any
import random
import subprocess
import glob
import os
import pandas as pd # type: ignore
from pathlib import Path
from typing import List
from sys import platform
import pathlib
import shutil
import traceback
from pylpg.lpgdata import *
from pylpg.lpgpythonbindings import *
def execute_lpg_tsib(
    year: int,
    number_of_households: int,
    number_of_people_per_household: int,
    startdate: Optional[str] = None,
    enddate: Optional[str] = None,
    transportation: bool = False,
    energy_intensity: str = "Random",
) -> pd.DataFrame:
    """Run the LPG for randomly generated households and return the results.

    Each of the *number_of_households* households gets a randomly composed
    family of *number_of_people_per_household* persons (see
    make_reasonable_family) plus fixed charging/travel/transport sets.
    The simulation request is written to calcspec.json, the LPG binary is
    executed, and all JSON result files are collected into one DataFrame.

    Raises an Exception if fewer than one household is requested or the
    calculation spec could not be initialized.
    """
    lpe: LPGExecutor = LPGExecutor(1, False)
    if number_of_households < 1:
        print("too few households")
        raise Exception("Need at least one household")
    # basic default spec
    request = lpe.make_default_lpg_settings(year)
    # create random households
    for idx in range(number_of_households):
        hhd: HouseholdData = HouseholdData()
        hhd.HouseholdDataSpecification = HouseholdDataSpecificationType.ByPersons
        hhd.HouseholdDataPersonSpec = HouseholdDataPersonSpecification()
        hhd.HouseholdDataPersonSpec.Persons = []
        # Fixed transportation configuration for every generated household.
        hhd.ChargingStationSet = (
            ChargingStationSets.Charging_At_Home_with_03_7_kW_output_results_to_Car_Electricity
        )
        hhd.TravelRouteSet = (
            TravelRouteSets.Travel_Route_Set_for_30km_Commuting_Distance
        )
        hhd.TransportationDeviceSet = TransportationDeviceSets.Bus_and_two_30_km_h_Cars
        hhd.HouseholdDataPersonSpec.Persons = make_reasonable_family(
            number_of_people_per_household
        )
        request.House.Households.append(hhd)
    # set more parameters
    if request.CalcSpec is None:
        raise Exception("Failed to initialize the calculation spec")
    if startdate is not None:
        request.CalcSpec.set_StartDate(startdate)
    if enddate is not None:
        request.CalcSpec.set_EndDate(enddate)
    request.CalcSpec.EnergyIntensityType = energy_intensity
    calcspecfilename = Path(lpe.calculation_directory, "calcspec.json")
    if transportation:
        request.CalcSpec.EnableTransportation = True
        request.CalcSpec.CalcOptions.append(CalcOption.TansportationDeviceJsons)
    # write to json
    with open(calcspecfilename, "w") as calcspecfile:
        jsonrequest = request.to_json(indent=4)  # type: ignore
        calcspecfile.write(jsonrequest)
    # execute
    lpe.execute_lpg_binaries()
    # read the results and return as dataframe
    return lpe.read_all_json_results_in_directory()
def make_reasonable_family(person_count: int):
    """Create a plausible random family of *person_count* PersonData entries.

    The first person is an adult (18-100), the second an adult of roughly
    similar age (within +/-5 years), and all further persons are children
    (0-20). Genders alternate starting from a random choice for the first
    adult.

    The original local variable ``pd`` shadowed the module's
    ``pandas as pd`` alias; it is renamed to ``person`` here.
    """
    persons = []
    previousage = 0
    g = 0
    for person_idx in range(person_count):
        if person_idx == 0:  # first is an adult
            age = random.randint(18, 100)
            previousage = age
            g = random.randint(0, 1)
        elif person_idx == 1:  # 2nd adult should be roughly similar age
            diffage = random.randint(0, 10)
            age = previousage - 5 + diffage
            g = 1 - g
        else:  # other people are children
            age = random.randint(0, 20)
            g = 1 - g
        gender = Gender.Male if g == 0 else Gender.Female
        person = PersonData(age, gender)
        persons.append(person)
    return persons
def execute_lpg_single_household(
    year: int,
    householdref: JsonReference,
    housetype: str,
    startdate: Optional[str] = None,
    enddate: Optional[str] = None,
    geographic_location: Optional[JsonReference] = None,
    simulate_transportation: bool = False,
    chargingset: Optional[JsonReference] = None,
    transportation_device_set: Optional[JsonReference] = None,
    travel_route_set: Optional[JsonReference] = None,
    random_seed: Optional[int] = None,
    energy_intensity: str = "Random",
) -> pd.DataFrame:
    """Run the LPG for one predefined household (by name reference).

    Builds a calculation request for *householdref* in a house of
    *housetype*, writes it to calcspec.json, runs the LPG binary and
    returns all JSON results as a DataFrame.

    Raises an Exception if the calculation spec cannot be initialized.
    """
    lpe: LPGExecutor = LPGExecutor(1, False)
    # basic request
    request = lpe.make_default_lpg_settings(year)
    if random_seed is not None:
        request.CalcSpec.RandomSeed = random_seed
    request.House.HouseTypeCode = housetype
    # Reference the household by its database name.
    hhnamespec = HouseholdNameSpecification(householdref)
    hhn = HouseholdData(
        None,
        None,
        hhnamespec,
        "hhid",
        "hhname",
        chargingset,
        transportation_device_set,
        travel_route_set,
        None,
        HouseholdDataSpecificationType.ByHouseholdName,
    )
    request.House.Households.append(hhn)
    if request.CalcSpec is None:
        raise Exception("Failed to initialize the calculation spec")
    if startdate is not None:
        request.CalcSpec.set_StartDate(startdate)
    if enddate is not None:
        request.CalcSpec.set_EndDate(enddate)
    request.CalcSpec.EnergyIntensityType = energy_intensity
    calcspecfilename = Path(lpe.calculation_directory, "calcspec.json")
    request.CalcSpec.GeographicLocation = geographic_location
    if simulate_transportation:
        request.CalcSpec.EnableTransportation = True
        request.CalcSpec.CalcOptions.append(CalcOption.TansportationDeviceJsons)
    with open(calcspecfilename, "w") as calcspecfile:
        jsonrequest = request.to_json(indent=4)  # type: ignore
        calcspecfile.write(jsonrequest)
    lpe.execute_lpg_binaries()
    return lpe.read_all_json_results_in_directory()
def execute_lpg_with_householdata(
    year: int,
    householddata: HouseholdData,
    housetype: str,
    startdate: Optional[str] = None,
    enddate: Optional[str] = None,
    simulate_transportation: bool = False,
    target_heating_demand: Optional[float] = None,
    target_cooling_demand: Optional[float] = None,
    calculation_index: int = 1,
    clear_previous_calc: bool = False,
    random_seed: Optional[int] = None,
    energy_intensity: str = "Random",
):
    """Run the LPG for a single fully specified HouseholdData object.

    *calculation_index* selects the working directory (C<idx>), allowing
    parallel runs; *clear_previous_calc* wipes it first. Returns the
    combined JSON results as a DataFrame. Exceptions are logged with a
    stack trace and re-raised.
    """
    try:
        print(
            "Starting calc with "
            + str(calculation_index)
            + " for "
            + householddata.Name
        )
        lpe: LPGExecutor = LPGExecutor(calculation_index, clear_previous_calc)
        # basic request
        request = lpe.make_default_lpg_settings(year)
        request.House.HouseTypeCode = housetype
        if random_seed is not None:
            request.CalcSpec.RandomSeed = random_seed
        if target_heating_demand is not None:
            request.House.TargetHeatDemand = target_heating_demand
        if target_cooling_demand is not None:
            request.House.TargetCoolingDemand = target_cooling_demand
        request.House.Households.append(householddata)
        if request.CalcSpec is None:
            raise Exception("Failed to initialize the calculation spec")
        if startdate is not None:
            request.CalcSpec.set_StartDate(startdate)
        if enddate is not None:
            request.CalcSpec.set_EndDate(enddate)
        request.CalcSpec.EnergyIntensityType = energy_intensity
        calcspecfilename = Path(lpe.calculation_directory, "calcspec.json")
        if simulate_transportation:
            request.CalcSpec.EnableTransportation = True
            request.CalcSpec.CalcOptions.append(CalcOption.TansportationDeviceJsons)
        with open(calcspecfilename, "w") as calcspecfile:
            jsonrequest = request.to_json(indent=4)  # type: ignore
            calcspecfile.write(jsonrequest)
        lpe.execute_lpg_binaries()
        df = lpe.read_all_json_results_in_directory()
        return df
    except OSError as why:
        print("Exception: " + str(why))
        traceback.print_stack()
        raise
    except:  # catch *all* exceptions
        # Log-and-reraise so background workers surface failures.
        e = sys.exc_info()[0]
        print("Exception: " + str(e))
        traceback.print_stack()
        raise
def execute_lpg_with_many_householdata(
    year: int,
    householddata: List[HouseholdData],
    housetype: str,
    startdate: Optional[str] = None,
    enddate: Optional[str] = None,
    simulate_transportation: bool = False,
    target_heating_demand: Optional[float] = None,
    target_cooling_demand: Optional[float] = None,
    calculation_index: int = 1,
    clear_previous_calc: bool = False,
    random_seed: Optional[int] = None,
    energy_intensity: str = "Random",
):
    """Run the LPG for a list of HouseholdData objects in one house.

    Same behavior as execute_lpg_with_householdata, except all households
    in *householddata* are simulated together. Returns the combined JSON
    results as a DataFrame; exceptions are logged and re-raised.
    """
    try:
        print(
            "Starting calc with "
            + str(calculation_index)
            + " for "
            + str(len(householddata))
            + " households"
        )
        lpe: LPGExecutor = LPGExecutor(calculation_index, clear_previous_calc)
        # basic request
        request = lpe.make_default_lpg_settings(year)
        request.House.HouseTypeCode = housetype
        if random_seed is not None:
            request.CalcSpec.RandomSeed = random_seed
        if target_heating_demand is not None:
            request.House.TargetHeatDemand = target_heating_demand
        if target_cooling_demand is not None:
            request.House.TargetCoolingDemand = target_cooling_demand
        request.House.Households = request.House.Households + householddata
        if request.CalcSpec is None:
            raise Exception("Failed to initialize the calculation spec")
        if startdate is not None:
            request.CalcSpec.set_StartDate(startdate)
        if enddate is not None:
            request.CalcSpec.set_EndDate(enddate)
        request.CalcSpec.EnergyIntensityType = energy_intensity
        calcspecfilename = Path(lpe.calculation_directory, "calcspec.json")
        if simulate_transportation:
            request.CalcSpec.EnableTransportation = True
            request.CalcSpec.CalcOptions.append(CalcOption.TansportationDeviceJsons)
        with open(calcspecfilename, "w") as calcspecfile:
            jsonrequest = request.to_json(indent=4)  # type: ignore
            calcspecfile.write(jsonrequest)
        lpe.execute_lpg_binaries()
        df = lpe.read_all_json_results_in_directory()
        return df
    except OSError as why:
        print("Exception: " + str(why))
        traceback.print_stack()
        raise
    except:  # catch *all* exceptions
        # Log-and-reraise so background workers surface failures.
        e = sys.exc_info()[0]
        print("Exception: " + str(e))
        traceback.print_stack()
        raise
def execute_lpg_with_householdata_with_csv_save(
    year: int,
    householddata: HouseholdData,
    housetype: str,
    startdate: Optional[str] = None,
    enddate: Optional[str] = None,
    simulate_transportation: bool = False,
    target_heating_demand: Optional[float] = None,
    target_cooling_demand: Optional[float] = None,
    calculation_index: int = 1,
):
    """Run one household simulation, save electricity results, clean up.

    Delegates to execute_lpg_with_householdata (always clearing the
    previous calculation), writes the "Electricity_HH1" column to
    R<calculation_index>.csv and removes the C<calculation_index>
    working directory afterwards. Returns None.
    """
    try:
        df = execute_lpg_with_householdata(
            year,
            householddata,
            housetype,
            startdate,
            enddate,
            simulate_transportation,
            target_heating_demand,
            target_cooling_demand,
            calculation_index,
            True,
        )
        df_electricity = df["Electricity_HH1"]
        df_electricity.to_csv("R" + str(calculation_index) + ".csv")
        # Remove the per-run working directory to reclaim disk space.
        calcdir = "C" + str(calculation_index)
        if os.path.exists(calcdir):
            print("cleaning up " + calcdir)
            shutil.rmtree(calcdir)
            time.sleep(1)
    except OSError as why:
        print("Exception: " + str(why))
        traceback.print_stack()
        raise
    except:  # catch *all* exceptions
        # Log-and-reraise so background workers surface failures.
        e = sys.exc_info()[0]
        print("Exception: " + str(e))
        traceback.print_stack()
        raise
def execute_grid_calc(
    year: int,
    household_size_list: List[int],
    housetype: str,
    startdate: Optional[str] = None,
    enddate: Optional[str] = None,
    simulate_transportation: bool = False,
    chargingset: Optional[JsonReference] = None,
    transportation_device_set: Optional[JsonReference] = None,
    travel_route_set: Optional[JsonReference] = None,
) -> pd.DataFrame:
    """Run the LPG for one house holding several random households.

    One household is generated per entry of *household_size_list*
    (entry = number of persons; see make_reasonable_family). Output is
    restricted to the JSON house-sum files; results are returned as a
    DataFrame.

    Raises an Exception if the list is empty or the calculation spec
    cannot be initialized.
    """
    lpe: LPGExecutor = LPGExecutor(1, True)
    # basic request
    request = lpe.make_default_lpg_settings(year)
    # Only the aggregated house-sum output is needed for grid studies.
    request.CalcSpec.CalcOptions.clear()
    request.CalcSpec.CalcOptions.append(CalcOption.JsonHouseSumFiles)
    if len(household_size_list) < 1:
        raise Exception("need at least one household.")
    request.House.HouseTypeCode = housetype
    count = 1
    for hs in household_size_list:
        hhdps: HouseholdDataPersonSpecification = HouseholdDataPersonSpecification(
            make_reasonable_family(hs)
        )
        hhn = HouseholdData(
            hhdps,
            None,
            None,
            "hhid",
            "hhname" + str(count),
            chargingset,
            transportation_device_set,
            travel_route_set,
            None,
            HouseholdDataSpecificationType.ByPersons,
        )
        request.House.Households.append(hhn)
        count = count + 1
    if request.CalcSpec is None:
        raise Exception("Failed to initialize the calculation spec")
    if startdate is not None:
        request.CalcSpec.set_StartDate(startdate)
    if enddate is not None:
        request.CalcSpec.set_EndDate(enddate)
    calcspecfilename = Path(lpe.calculation_directory, "calcspec.json")
    if simulate_transportation:
        request.CalcSpec.EnableTransportation = True
        request.CalcSpec.CalcOptions.append(CalcOption.TansportationDeviceJsons)
    with open(calcspecfilename, "w") as calcspecfile:
        jsonrequest = request.to_json(indent=4)  # type: ignore
        calcspecfile.write(jsonrequest)
    lpe.execute_lpg_binaries()
    return lpe.read_all_json_results_in_directory()
class LPGExecutor:
    def __init__(self, calcidx: int, clear_previous_calc: bool):
        """Prepare a per-run working copy of the LPG binaries.

        Selects the platform-specific binary directory (LPG_linux or
        LPG_win next to this module), optionally clears the previous
        working directory C<calcidx>, and copies the binaries there.

        Raises an Exception on unsupported platforms.
        """
        version = "_"
        self.working_directory = pathlib.Path(__file__).parent.absolute()
        if platform == "linux" or platform == "linux2":
            self.calculation_src_directory = Path(
                self.working_directory, "LPG" + version + "linux"
            )
            self.simengine_src_filename = "simengine2"
            fullname = Path(self.calculation_src_directory, self.simengine_src_filename)
            print("starting to execute " + str(fullname))
            # Ensure the shipped binary is executable after checkout/unpack.
            os.chmod(str(fullname), stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
            print("Permissions:" + str(oct(os.stat(str(fullname))[stat.ST_MODE])[-3:]))
        elif platform == "win32":
            self.calculation_src_directory = Path(
                self.working_directory, "LPG" + version + "win"
            )
            self.simengine_src_filename = "simulationengine.exe"
        else:
            raise Exception("unknown operating system detected: " + platform)
        self.calculation_directory = Path(self.working_directory, "C" + str(calcidx))
        print("Working in directory: " + str(self.calculation_directory))
        if clear_previous_calc and os.path.exists(self.calculation_directory):
            # Delete files first (tolerating errors), then the tree itself.
            self.error_tolerating_directory_clean(self.calculation_directory)
            print("Removing " + str(self.calculation_directory))
            shutil.rmtree(self.calculation_directory)
            time.sleep(1)
        if not os.path.exists(self.calculation_directory):
            print(
                "copying from "
                + str(self.calculation_src_directory)
                + " to "
                + str(self.calculation_directory)
            )
            shutil.copytree(self.calculation_src_directory, self.calculation_directory)
            print("copied to: " + str(self.calculation_directory))
def error_tolerating_directory_clean(self, path: str):
mypath = str(path)
if len(str(mypath)) < 10:
raise Exception(
"Path too short. This is suspicious. Trying to delete more than you meant to?"
)
print("cleaning " + mypath)
files = glob.glob(mypath + "/*", recursive=True)
for file in files:
if os.path.isfile(file):
print("Removing " + file)
os.remove(file)
def execute_lpg_binaries(self) -> Any:
# execute LPG
pathname = Path(self.calculation_directory, self.simengine_src_filename)
print("executing in " + str(self.calculation_directory))
subprocess.run(
[str(pathname), "processhousejob", "-j", "calcspec.json"],
cwd=str(self.calculation_directory),
)
def make_default_lpg_settings(self, year: int) -> HouseCreationAndCalculationJob:
print("Creating")
hj = HouseCreationAndCalculationJob()
hj.set_Scenario("S1").set_Year(str(year)).set_DistrictName("district")
hd = HouseData()
hj.House = hd
hd.Name = "House"
hd.HouseGuid = StrGuid("houseguid")
hd.HouseTypeCode = (
HouseTypes.HT01_House_with_a_10kWh_Battery_and_a_fuel_cell_battery_charger_5_MWh_yearly_space_heating_gas_heating
)
hd.TargetCoolingDemand = 10000
hd.TargetHeatDemand = 0
hd.Households = []
cs: JsonCalcSpecification = JsonCalcSpecification()
hj.CalcSpec = cs
cs.IgnorePreviousActivitiesWhenNeeded = True
cs.LoadTypePriority = LoadTypePriority.All
cs.DefaultForOutputFiles | |
# 1972 article
n = int(n)
d = len(A)
if len(set(a%d for a in A)) == d:
return [i*d for i in range(n//d)]
# next, we consider an exhaustive search
from sage.combinat.dlx import DLXMatrix
rows = []
for i in range(n):
rows.append([i+1, [(i+a)%n+1 for a in A]])
M = DLXMatrix(rows)
for c in M:
return [i-1 for i in c]
def one_radical_difference_family(K, k):
    r"""
    Search for a radical difference family on ``K`` using dancing links
    algorithm.
    For the definition of radical difference family, see
    :func:`radical_difference_family`. Here, we consider only radical difference
    family with `\lambda = 1`.
    INPUT:
    - ``K`` -- a finite field of cardinality `q`.
    - ``k`` -- a positive integer so that `k(k-1)` divides `q-1`.
    OUTPUT:
    Either a difference family or ``None`` if it does not exist.
    ALGORITHM:
    The existence of a radical difference family is equivalent to a one
    dimensional tiling (or packing) problem in a cyclic group. This subsequent
    problem is solved by a call to the function :func:`one_cyclic_tiling`.
    Let `K^*` be the multiplicative group of the finite field `K`. A radical
    family has the form `\mathcal B = \{x_1 B, \ldots, x_k B\}`, where
    `B=\{x:x^{k}=1\}` (for `k` odd) or `B=\{x:x^{k-1}=1\}\cup \{0\}` (for
    `k` even). Equivalently, `K^*` decomposes as:
    .. MATH::
    K^* = \Delta (x_1 B) \cup ... \cup \Delta (x_k B) = x_1 \Delta B \cup ... \cup x_k \Delta B
    We observe that `C=B\backslash 0` is a subgroup of the (cyclic) group
    `K^*`, that can thus be generated by some element `r`. Furthermore, we
    observe that `\Delta B` is always a union of cosets of `\pm C` (which is
    twice larger than `C`).
    .. MATH::
    \begin{array}{llll}
    (k\text{ odd} ) & \Delta B &= \{r^i-r^j:r^i\neq r^j\} &= \pm C\cdot \{r^i-1: 0 < i \leq m\}\\
    (k\text{ even}) & \Delta B &= \{r^i-r^j:r^i\neq r^j\}\cup C &= \pm C\cdot \{r^i-1: 0 < i < m\}\cup \pm C
    \end{array}
    where
    .. MATH::
    (k\text{ odd})\ m = (k-1)/2 \quad \text{and} \quad (k\text{ even})\ m = k/2.
    Consequently, `\mathcal B = \{x_1 B, \ldots, x_k B\}` is a radical
    difference family if and only if `\{x_1 (\Delta B/(\pm C)), \ldots, x_k
    (\Delta B/(\pm C))\}` is a partition of the cyclic group `K^*/(\pm C)`.
    EXAMPLES::
    sage: from sage.combinat.designs.difference_family import (
    ....:    one_radical_difference_family,
    ....:    is_difference_family)
    sage: one_radical_difference_family(GF(13),4)
    [[0, 1, 3, 9]]
    The parameters that appear in [Bu95]_::
    sage: df = one_radical_difference_family(GF(449), 8); df
    [[0, 1, 18, 25, 176, 324, 359, 444],
     [0, 9, 88, 162, 222, 225, 237, 404],
     [0, 11, 140, 198, 275, 357, 394, 421],
     [0, 40, 102, 249, 271, 305, 388, 441],
     [0, 49, 80, 93, 161, 204, 327, 433],
     [0, 70, 99, 197, 230, 362, 403, 435],
     [0, 121, 141, 193, 293, 331, 335, 382],
     [0, 191, 285, 295, 321, 371, 390, 392]]
    sage: is_difference_family(GF(449), df, 449, 8, 1)
    True
    """
    q = K.cardinality()
    x = K.multiplicative_generator()
    e = k*(k-1)
    if q%e != 1:
        raise ValueError("q%e is not 1")
    # We define A by (see the function's documentation):
    # ΔB = C.A
    if k%2 == 1:
        m = (k-1) // 2
        r = x ** ((q-1) // k)      # k-th root of unity
        A = [r**i - 1 for i in range(1,m+1)]
    else:
        m = k // 2
        r = x ** ((q-1) // (k-1))  # (k-1)-th root of unity
        A = [r**i - 1 for i in range(1,m)]
        A.append(K.one())
    # instead of the complicated multiplicative group K^*/(±C) we use the
    # discrete logarithm to convert everything into the additive group Z/cZ
    c = m * (q-1) // e  # cardinal of ±C
    from sage.groups.generic import discrete_log
    logA = [discrete_log(a,x)%c for a in A]
    # if two elements of A are equal modulo c then no tiling is possible
    if len(set(logA)) != m:
        return None
    # brute force
    tiling = one_cyclic_tiling(logA, c)
    if tiling is None:
        return None
    D = K.cyclotomic_cosets(r, [x**i for i in tiling])
    if k%2 == 0:
        # For even k each block must also contain 0. The original call
        # had list.insert's (index, object) arguments reversed
        # (`d.insert(K.zero(), 0)`); insert the field zero at the front.
        for d in D:
            d.insert(0, K.zero())
    return D
def radical_difference_family(K, k, l=1, existence=False, check=True):
    r"""
    Return a ``(v,k,l)``-radical difference family.

    Fix an integer `k` and a prime power `q = t k(k-1) + 1`. Let `K` be a
    field of cardinality `q`. A `(q,k,1)`-difference family is *radical* if
    its base blocks are either: a coset of the `k`-th roots of unity for `k`
    odd or a coset of the `k-1`-th roots of unity and `0` if `k` is even (the
    number `t` is the number of blocks of that difference family).

    The terminology comes from M. Buratti's article [Bu95]_ but the first
    constructions go back to R. M. Wilson [Wi72]_.

    INPUT:

    - ``K`` -- a finite field

    - ``k`` -- positive integer, the size of the blocks

    - ``l`` -- the `\lambda` parameter (default: `1`)

    - ``existence`` -- if ``True``, then return either ``True`` if Sage knows
      how to build such design, ``Unknown`` if it does not and ``False`` if it
      knows that the design does not exist.

    - ``check`` -- boolean (default: ``True``). If ``True`` then the result of
      the computation is checked before being returned. This should not be
      needed but ensures that the output is correct.

    EXAMPLES::

        sage: from sage.combinat.designs.difference_family import radical_difference_family

        sage: radical_difference_family(GF(73),9)
        [[1, 2, 4, 8, 16, 32, 37, 55, 64]]

        sage: radical_difference_family(GF(281),5)
        [[1, 86, 90, 153, 232],
         [4, 50, 63, 79, 85],
         [5, 36, 149, 169, 203],
         [7, 40, 68, 219, 228],
         [9, 121, 212, 248, 253],
         [29, 81, 222, 246, 265],
         [31, 137, 167, 247, 261],
         [32, 70, 118, 119, 223],
         [39, 56, 66, 138, 263],
         [43, 45, 116, 141, 217],
         [98, 101, 109, 256, 279],
         [106, 124, 145, 201, 267],
         [111, 123, 155, 181, 273],
         [156, 209, 224, 264, 271]]

        sage: for k in range(5,10):
        ....:     print("k = {}".format(k))
        ....:     list_q = []
        ....:     for q in range(k*(k-1)+1, 2000, k*(k-1)):
        ....:         if is_prime_power(q):
        ....:             K = GF(q,'a')
        ....:             if radical_difference_family(K, k, existence=True):
        ....:                 list_q.append(q)
        ....:                 _ = radical_difference_family(K,k)
        ....:     print(" ".join([str(p) for p in list_q]))
        k = 5
        41 61 81 241 281 401 421 601 641 661 701 761 821 881 1181 1201 1301 1321
        1361 1381 1481 1601 1681 1801 1901
        k = 6
        181 211 241 631 691 1531 1831 1861
        k = 7
        337 421 463 883 1723
        k = 8
        449 1009
        k = 9
        73 1153 1873
    """
    v = K.cardinality()
    x = K.multiplicative_generator()
    e = k*(k-1)

    # A (v,k,l)-difference family needs k(k-1) to divide l(v-1): each of the
    # t blocks contributes k(k-1) differences and each of the v-1 nonzero
    # elements must be covered exactly l times.
    if (l*(v-1)) % e:
        raise ValueError("k(k-1) = {} should divide l(v-1) = {}".format(
            k*(k-1), l*(v-1)))

    t = l*(v-1) // e  # number of blocks

    if t == 1:
        # a single base block: this is exactly a difference set
        return radical_difference_set(K, k, l, existence=existence, check=check)

    elif l == (k-1):
        if existence:
            return True
        else:
            # the nontrivial cosets of the k-th roots of unity form the family
            return K.cyclotomic_cosets(x**((v-1)//k))[1:]

    # all the other cases below concern the case l == 1
    elif l != 1:
        if existence:
            return Unknown
        raise NotImplementedError("no radical difference family is "
                                  "implemented for l not in {1, k-1}")

    else:
        D = one_radical_difference_family(K, k)
        if D is None:
            if existence:
                return False
            raise EmptySetError("No such difference family")
        elif existence:
            return True

    if check and not is_difference_family(K, D, v, k, l):
        raise RuntimeError("radical_difference_family produced a wrong "
                           "difference family with parameters v={}, "
                           "k={}, l={}. Please contact "
                           "sage-devel@googlegroups.com".format(v, k, l))

    return D
def twin_prime_powers_difference_set(p, check=True):
r"""
Return a difference set on `GF(p) \times GF(p+2)`.
The difference set is built from the following element of the Cartesian
product of finite fields `GF(p) \times GF(p+2)`:
- `(x,0)` with any `x`
- `(x,y)` with `x` and `y` squares
- `(x,y)` with `x` and `y` non-squares
For more information see :wikipedia:`Difference_set`.
INPUT:
- ``check`` -- boolean (default: ``True``). If ``True`` then the result of
the computation is checked before being returned. This should not be
needed but ensures that the output is correct.
EXAMPLES::
sage: from sage.combinat.designs.difference_family import twin_prime_powers_difference_set
sage: G,D = twin_prime_powers_difference_set(3)
sage: G
The Cartesian product of (Finite Field of size 3, Finite Field of | |
isinstance(o.rvalue, CallExpr):
call_expr = o.rvalue
if self._IsInstantiation(call_expr):
temp_name = 'gobj%d' % self.unique_id
self.unique_id += 1
self.log('INSTANCE lval %s rval %s', lval, call_expr)
self.write('%s %s', call_expr.callee.name, temp_name)
# C c;, not C c(); which is most vexing parse
if call_expr.args:
self._WriteArgList(call_expr)
self.write(';\n')
self.write('%s %s = &%s;', get_c_type(lval_type), lval.name,
temp_name)
return
# src = cast(source__SourcedFile, src)
# -> source__SourcedFile* src = static_cast<source__SourcedFile>(src)
if isinstance(o.rvalue, CallExpr) and o.rvalue.callee.name == 'cast':
assert isinstance(lval, NameExpr)
call = o.rvalue
type_expr = call.args[0]
subtype_name = _GetCTypeForCast(type_expr)
cast_kind = _GetCastKind(self.module_path, subtype_name)
# HACK: Distinguish between UP cast and DOWN cast.
# osh/cmd_parse.py _MakeAssignPair does an UP cast within branches.
# _t is the base type, so that means it's an upcast.
if isinstance(type_expr, NameExpr) and type_expr.name.endswith('_t'):
if self.decl:
self.local_var_list.append((lval.name, subtype_name))
self.write_ind(
'%s = %s<%s>(', lval.name, cast_kind, subtype_name)
else:
self.write_ind(
'%s %s = %s<%s>(', subtype_name, lval.name, cast_kind,
subtype_name)
self.accept(call.args[1]) # variable being casted
self.write(');\n')
return
if isinstance(lval, NameExpr):
if lval.name == '_': # Skip _ = log
return
lval_type = self.types[lval]
#c_type = get_c_type(lval_type, local=self.indent != 0)
c_type = get_c_type(lval_type)
# for "hoisting" to the top of the function
if self.in_func_body:
self.write_ind('%s = ', lval.name)
if self.decl:
self.local_var_list.append((lval.name, c_type))
else:
# globals always get a type -- they're not mutated
self.write_ind('%s %s = ', c_type, lval.name)
# Special case for list comprehensions. Note that a variable has to
# be on the LHS, so we can append to it.
#
# y = [i+1 for i in x[1:] if i]
# =>
# y = []
# for i in x[1:]:
# if i:
# y.append(i+1)
# (but in C++)
if isinstance(o.rvalue, ListComprehension):
gen = o.rvalue.generator # GeneratorExpr
left_expr = gen.left_expr
index_expr = gen.indices[0]
seq = gen.sequences[0]
cond = gen.condlists[0] # TODO: not used!
# Write empty container as initialization.
assert c_type.endswith('*'), c_type # Hack
self.write('Alloc<%s>();\n' % c_type[:-1])
over_type = self.types[seq]
#self.log(' iterating over type %s', over_type)
if over_type.type.fullname() == 'builtins.list':
c_type = get_c_type(over_type)
assert c_type.endswith('*'), c_type
c_iter_type = c_type.replace('List', 'ListIter', 1)[:-1] # remove *
else:
# Example: assoc == Optional[Dict[str, str]]
c_iter_type = 'TODO_ASSOC'
self.write_ind('for (%s it(', c_iter_type)
self.accept(seq)
self.write('); !it.Done(); it.Next()) {\n')
seq_type = self.types[seq]
item_type = seq_type.args[0] # get 'int' from 'List<int>'
if isinstance(item_type, Instance):
self.write_ind(' %s ', get_c_type(item_type))
self.accept(index_expr)
self.write(' = it.Value();\n')
elif isinstance(item_type, TupleType): # for x, y in pairs
c_item_type = get_c_type(item_type)
if isinstance(index_expr, TupleExpr):
temp_name = 'tup%d' % self.unique_id
self.unique_id += 1
self.write_ind(' %s %s = it.Value();\n', c_item_type, temp_name)
self.indent += 1
self._write_tuple_unpacking(
temp_name, index_expr.items, item_type.items)
self.indent -= 1
else:
raise AssertionError()
else:
raise AssertionError('Unexpected type %s' % item_type)
if cond:
self.indent += 1
self.write_ind('if (')
self.accept(cond[0]) # Just the first one
self.write(') {\n')
self.write_ind(' %s->append(', lval.name)
self.accept(left_expr)
self.write(');\n')
if cond:
self.write_ind('}\n')
self.indent -= 1
self.write_ind('}\n')
return
self.accept(o.rvalue)
self.write(';\n')
elif isinstance(lval, MemberExpr):
self.write_ind('')
self.accept(lval)
self.write(' = ')
self.accept(o.rvalue)
self.write(';\n')
# Collect statements that look like self.foo = 1
if isinstance(lval.expr, NameExpr) and lval.expr.name == 'self':
#log(' lval.name %s', lval.name)
lval_type = self.types[lval]
self.member_vars[lval.name] = lval_type
elif isinstance(lval, IndexExpr): # a[x] = 1
# d->set(x, 1) for both List and Dict
self.write_ind('')
self.accept(lval.base)
self.write('->set(')
self.accept(lval.index)
self.write(', ')
self.accept(o.rvalue)
self.write(');\n')
elif isinstance(lval, TupleExpr):
# An assignment to an n-tuple turns into n+1 statements. Example:
#
# x, y = mytuple
#
# Tuple2<int, Str*> tup1 = mytuple
# int x = tup1->at0()
# Str* y = tup1->at1()
rvalue_type = self.types[o.rvalue]
c_type = get_c_type(rvalue_type)
is_return = isinstance(o.rvalue, CallExpr)
if is_return:
assert c_type.endswith('*')
c_type = c_type[:-1]
temp_name = 'tup%d' % self.unique_id
self.unique_id += 1
self.write_ind('%s %s = ', c_type, temp_name)
self.accept(o.rvalue)
self.write(';\n')
self._write_tuple_unpacking(temp_name, lval.items, rvalue_type.items,
is_return=is_return)
else:
raise AssertionError(lval)
def _write_body(self, body):
"""Write a block without the { }."""
for stmt in body:
# Ignore things that look like docstrings
if isinstance(stmt, ExpressionStmt) and isinstance(stmt.expr, StrExpr):
continue
#log('-- %d', self.indent)
self.accept(stmt)
def visit_for_stmt(self, o: 'mypy.nodes.ForStmt') -> T:
if 0:
self.log('ForStmt')
self.log(' index_type %s', o.index_type)
self.log(' inferred_item_type %s', o.inferred_item_type)
self.log(' inferred_iterator_type %s', o.inferred_iterator_type)
func_name = None # does the loop look like 'for x in func():' ?
if isinstance(o.expr, CallExpr) and isinstance(o.expr.callee, NameExpr):
func_name = o.expr.callee.name
# special case: 'for i in xrange(3)'
if func_name == 'xrange':
index_name = o.index.name
args = o.expr.args
num_args = len(args)
if num_args == 1: # xrange(end)
self.write_ind('for (int %s = 0; %s < ', index_name, index_name)
self.accept(args[0])
self.write('; ++%s) ', index_name)
elif num_args == 2: # xrange(being, end)
self.write_ind('for (int %s = ', index_name)
self.accept(args[0])
self.write('; %s < ', index_name)
self.accept(args[1])
self.write('; ++%s) ', index_name)
elif num_args == 3: # xrange(being, end, step)
# Special case to detect a constant -1. This is a static
# heuristic, because it could be negative dynamically. TODO:
# mylib.reverse_xrange() or something?
step = args[2]
if isinstance(step, UnaryExpr) and step.op == '-':
comparison_op = '>'
else:
comparison_op = '<'
self.write_ind('for (int %s = ', index_name)
self.accept(args[0])
self.write('; %s %s ', index_name, comparison_op)
self.accept(args[1])
self.write('; %s += ', index_name)
self.accept(step)
self.write(') ')
else:
raise AssertionError()
self.accept(o.body)
return
reverse = False
# for i, x in enumerate(...):
index0_name = None
if func_name == 'enumerate':
assert isinstance(o.index, TupleExpr), o.index
index0 = o.index.items[0]
assert isinstance(index0, NameExpr), index0
index0_name = index0.name # generate int i = 0; ; ++i
# type of 'x' in 'for i, x in enumerate(...)'
item_type = o.inferred_item_type.items[1]
index_expr = o.index.items[1]
# enumerate(mylist) turns into iteration over mylist with variable i
assert len(o.expr.args) == 1, o.expr.args
iterated_over = o.expr.args[0]
elif func_name == 'reversed':
# NOTE: enumerate() and reversed() can't be mixed yet. But you CAN
# reverse iter over tuples.
item_type = o.inferred_item_type
index_expr = o.index
args = o.expr.args
assert len(args) == 1, args
iterated_over = args[0]
reverse = True # use different iterate
elif func_name == 'iteritems':
item_type = o.inferred_item_type
index_expr = o.index
args = o.expr.args
assert len(args) == 1, args
# This should be a dict
iterated_over = args[0]
log('------------ ITERITEMS OVER %s', iterated_over)
else:
item_type = o.inferred_item_type
index_expr = o.index
iterated_over = o.expr
over_type = self.types[iterated_over]
#self.log(' iterating over type %s', over_type)
#self.log(' iterating over type %s', over_type.type.fullname())
over_dict = False
if over_type.type.fullname() == 'builtins.list':
c_type = get_c_type(over_type)
assert c_type.endswith('*'), c_type
c_iter_type = c_type.replace('List', 'ListIter', 1)[:-1] # remove *
# ReverseListIter!
if reverse:
c_iter_type = 'Reverse' + c_iter_type
elif over_type.type.fullname() == 'builtins.dict':
# Iterator
c_type = get_c_type(over_type)
assert c_type.endswith('*'), c_type
c_iter_type = c_type.replace('Dict', 'DictIter', 1)[:-1] # remove *
over_dict = True
assert not reverse
elif over_type.type.fullname() == 'builtins.str':
c_iter_type = 'StrIter'
assert not reverse # can't reverse iterate over string yet
else: # assume it's like d.iteritems()? Iterator type
assert False, over_type
if index0_name:
# can't initialize two things in a for loop, so do it on a separate line
if self.decl:
self.local_var_list.append((index0_name, 'int'))
self.write_ind('%s = 0;\n', index0_name)
index_update = ', ++%s' % index0_name
else:
index_update = ''
self.write_ind('for (%s it(', c_iter_type)
self.accept(iterated_over) # the thing being iterated over
self.write('); !it.Done(); it.Next()%s) {\n', index_update)
# for x in it: ...
# for i, x in enumerate(pairs): ...
if isinstance(item_type, Instance) or index0_name:
c_item_type = get_c_type(item_type)
self.write_ind(' %s ', c_item_type)
self.accept(index_expr)
if over_dict:
self.write(' = it.Key();\n')
else:
self.write(' = it.Value();\n')
elif isinstance(item_type, TupleType): # for x, y in pairs
if over_dict:
assert isinstance(o.index, TupleExpr), o.index
index_items = o.index.items
assert len(index_items) == 2, index_items
assert len(item_type.items) == 2, item_type.items
key_type = get_c_type(item_type.items[0])
val_type = get_c_type(item_type.items[1])
self.write_ind(' %s %s = it.Key();\n', key_type, index_items[0].name)
self.write_ind(' %s %s = it.Value();\n', val_type, index_items[1].name)
else:
# Example:
# for (ListIter it(mylist); !it.Done(); it.Next()) {
# Tuple2<int, Str*> tup1 = it.Value();
# int i = tup1->at0();
# Str* s = tup1->at1();
# log("%d %s", i, s);
# }
c_item_type = get_c_type(item_type)
if isinstance(o.index, TupleExpr):
temp_name = 'tup%d' % self.unique_id
self.unique_id += 1
self.write_ind(' %s %s = it.Value();\n', c_item_type, temp_name)
self.indent += 1
self._write_tuple_unpacking(
temp_name, o.index.items, item_type.items)
self.indent -= 1
else:
self.write_ind(' %s %s = it.Value();\n', c_item_type, o.index.name)
else:
raise AssertionError('Unexpected type %s' % item_type)
# Copy | |
= Var(within=Reals,bounds=(0,1),initialize=0)
# Declare decision variables x598 .. x958 on the model: each one is a
# continuous variable relaxed to the unit interval [0, 1], initialized at 0.
# (Equivalent to the generated one-assignment-per-variable form.)
for _idx in range(598, 959):
    setattr(m, 'x%d' % _idx, Var(within=Reals, bounds=(0,1), initialize=0))
m.x959 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x960 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x961 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x962 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x963 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x964 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x965 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x966 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x967 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x968 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x969 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x970 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x971 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x972 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x973 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x974 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x975 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x976 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x977 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x978 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x979 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x980 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x981 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x982 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x983 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x984 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x985 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x986 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x987 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x988 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x989 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x990 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x991 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x992 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x993 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x994 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x995 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x996 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x997 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x998 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x999 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1000 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1001 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1002 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1003 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1004 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1005 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1006 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1007 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1008 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1009 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1010 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1011 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1012 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1013 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1014 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1015 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1016 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1017 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1018 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1019 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1020 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1021 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1022 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1023 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1024 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1025 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1026 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1027 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1028 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1029 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1030 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1031 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1032 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1033 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1034 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1035 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1036 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1037 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1038 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1039 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1040 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1041 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1042 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1043 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1044 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1045 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1046 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1047 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1048 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1049 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1050 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1051 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1052 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1053 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1054 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1055 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1056 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1057 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1058 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1059 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1060 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1061 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1062 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1063 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1064 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1065 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1066 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1067 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1068 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1069 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1070 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1071 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1072 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1073 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1074 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1075 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1076 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1077 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1078 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1079 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1080 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1081 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1082 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1083 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1084 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1085 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1086 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1087 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1088 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1089 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1090 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1091 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1092 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1093 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1094 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1095 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1096 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1097 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1098 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1099 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1100 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1101 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1102 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1103 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1104 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1105 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1106 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1107 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1108 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1109 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1110 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1111 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1112 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1113 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1114 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1115 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1116 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1117 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1118 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1119 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1120 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1121 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1122 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1123 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1124 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1125 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1126 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1127 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1128 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1129 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1130 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1131 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1132 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1133 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1134 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1135 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1136 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1137 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1138 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1139 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1140 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1141 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1142 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1143 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1144 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1145 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1146 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1147 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1148 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1149 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1150 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1151 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1152 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1153 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1154 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1155 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1156 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1157 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1158 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1159 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1160 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1161 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1162 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1163 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1164 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1165 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1166 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1167 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1168 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1169 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1170 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1171 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1172 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1173 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1174 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1175 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1176 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1177 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1178 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1179 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1180 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1181 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1182 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1183 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1184 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1185 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1186 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1187 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1188 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1189 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1190 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1191 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1192 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1193 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1194 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1195 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1196 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1197 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1198 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1199 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1200 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1201 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1202 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1203 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1204 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1205 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1206 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1207 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1208 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1209 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1210 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1211 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1212 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1213 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1214 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1215 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1216 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1217 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1218 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1219 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1220 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1221 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1222 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1223 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1224 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1225 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1226 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1227 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1228 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1229 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1230 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1231 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1232 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1233 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1234 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1235 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1236 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x1237 | |
from django.contrib import admin
from parler.admin import TranslatableAdmin
from django.utils.html import format_html
from django.forms import BaseInlineFormSet
from django.shortcuts import redirect
from django import forms
import data_wizard # Solution to data import madness that had refused to go
from django.conf import settings # allow import of projects settings at the root
from django.forms import TextInput,Textarea #customize textarea row and column size
from import_export.formats import base_formats
from .models import (StgFacilityType,StgFacilityServiceMeasureUnits,
StgFacilityOwnership,StgHealthFacility,StgServiceDomain,StgLocationCodes,
FacilityServiceAvailability,FacilityServiceAvailabilityProxy,
FacilityServiceProvision,StgFacilityServiceIntervention,
FacilityServiceReadiness,StgFacilityServiceAreas,
FacilityServiceProvisionProxy,FacilityServiceReadinesProxy)
from commoninfo.admin import OverideImportExport,OverideExport,OverideImport
# from publications.serializers import StgKnowledgeProductSerializer
from .resources import (StgFacilityResourceExport,FacilityTypeResourceExport,
FacilityServiceDomainResourceExport,StgFacilityServiceAvailabilityExport,
StgFacilityServiceCapacityExport,StgFacilityServiceReadinessExport,)
from regions.models import StgLocation,StgLocationCodes
from django_admin_listfilter_dropdown.filters import (
DropdownFilter, RelatedDropdownFilter, ChoiceDropdownFilter,
RelatedOnlyDropdownFilter) #custom
from import_export.admin import (ImportExportModelAdmin, ExportMixin,
ImportExportActionModelAdmin,ExportActionModelAdmin,)
from authentication.models import CustomUser, CustomGroup
from bootstrap_datepicker_plus import DatePickerInput # Nice date picker 06/03
from .filters import TranslatedFieldFilter #Danile solution to duplicate filters
#Methods used to register global actions performed on data. See actions listbox
def transition_to_pending(modeladmin, request, queryset):
    """Admin bulk action: flag every selected record's comment as 'pending'."""
    queryset.update(comment='pending')


# Label shown in the admin "Actions" dropdown.
transition_to_pending.short_description = "Mark selected as Pending"
def transition_to_approved(modeladmin, request, queryset):
    """Admin bulk action: flag every selected record's comment as 'approved'."""
    queryset.update(comment='approved')


# Label shown in the admin "Actions" dropdown.
transition_to_approved.short_description = "Mark selected as Approved"
def transition_to_rejected(modeladmin, request, queryset):
    """Admin bulk action: flag every selected record's comment as 'rejected'."""
    queryset.update(comment='rejected')


# Label shown in the admin "Actions" dropdown.
transition_to_rejected.short_description = "Mark selected as Rejected"
@admin.register(StgFacilityType)
class FacilityTypeAdmin(TranslatableAdmin,OverideExport):
    # Admin for translated health-facility types (name/shortname/description).
    from django.db import models
    # Widen text widgets so long names and descriptions are editable in one view.
    formfield_overrides = {
        models.CharField: {'widget': TextInput(attrs={'size':'100'})},
        models.TextField: {'widget': Textarea(attrs={'rows':3, 'cols':100})},
    }
    """
    Serge requested that a user does not see other users or groups data.
    This method filters logged in users depending on group roles and permissions.
    Only the superuser can see all users and locations data while a users
    can only see data from registered location within his/her group/system role.
    If a user is not assigned to a group, he/she can only own data - 01/02/2021
    """
    def get_queryset(self, request):
        # Restrict the changelist to the request's active language, ordered by
        # translated name; distinct() guards against join duplicates.
        language = request.LANGUAGE_CODE
        qs = super().get_queryset(request).filter(
            translations__language_code=language).order_by(
            'translations__name').distinct()
        # Get a query of groups the user belongs and flatten it to list object
        groups = list(request.user.groups.values_list('user', flat=True))
        user = request.user.id
        user_location = request.user.location.location_id
        db_locations = StgLocation.objects.all().order_by('location_id')
        # Returns data for all the locations to the lowest location level
        if request.user.is_superuser:
            qs  # NOTE(review): bare expression is a no-op; superusers simply fall through to the final return
        # returns data for AFRO and member countries
        elif user in groups and user_location==1:
            # NOTE(review): qs_admin is computed but never used — the intended
            # location-based filtering appears unfinished; confirm intent.
            qs_admin=db_locations.filter(
                locationlevel__locationlevel_id__gte=1,
                locationlevel__locationlevel_id__lte=2)
        return qs  # every branch returns the language-filtered queryset unchanged
    def get_export_resource_class(self):
        # Resource class used by django-import-export for the export action.
        return FacilityTypeResourceExport
    fieldsets = (
        ('Health Facility Type', {
            'fields':('name','shortname','description',) #afrocode may be null
        }),
    )
    list_display=['name','code','shortname','description']
    list_display_links =('code', 'name',)
    search_fields = ('code','translations__name',) #display search field
    list_per_page = 30 #limit records displayed on admin site to 15
    # Hide auto-maintained audit fields and the auto-generated code.
    exclude = ('date_created','date_lastupdated','code',)
@admin.register(StgFacilityOwnership)
class FacilityOwnership (TranslatableAdmin):
    # Admin for translated facility-ownership categories, tied to a location
    # and the staff user who entered the record.
    from django.db import models
    # Widen text widgets so long names and descriptions are editable in one view.
    formfield_overrides = {
        models.CharField: {'widget': TextInput(attrs={'size':'100'})},
        models.TextField: {'widget': Textarea(attrs={'rows':3, 'cols':100})},
    }
    """
    Serge requested that a user does not see other users or groups data.
    This method filters logged in users depending on group roles and permissions.
    Only the superuser can see all users and locations data while a users
    can only see data from registered location within his/her group/system role.
    If a user is not assigned to a group, he/she can only own data - 01/02/2021
    """
    def get_queryset(self, request):
        # Restrict the changelist to the request's active language, ordered by
        # translated name; distinct() guards against join duplicates.
        language = request.LANGUAGE_CODE
        qs = super().get_queryset(request).filter(
            translations__language_code=language).order_by(
            'translations__name').distinct()
        groups = list(request.user.groups.values_list('user', flat=True))
        user = request.user.id
        user_location = request.user.location.location_id
        db_locations = StgLocation.objects.all().order_by('location_id')
        # Returns data for all the locations to the lowest location level
        if request.user.is_superuser:
            qs  # NOTE(review): bare expression is a no-op; superusers fall through to the final return
        # returns data for AFRO and member countries
        elif user in groups and user_location==1:
            # NOTE(review): qs_admin is computed but never used — the intended
            # location-based filtering appears unfinished; confirm intent.
            qs_admin=db_locations.filter(
                locationlevel__locationlevel_id__gte=1,
                locationlevel__locationlevel_id__lte=2)
        return qs  # every branch returns the language-filtered queryset unchanged
    def formfield_for_foreignkey(self, db_field, request =None, **kwargs):
        # Limit FK dropdowns: non-superusers only see their own location code,
        # and the "user" dropdown is pinned to the logged-in account.
        # NOTE(review): qs, groups and countrycodes below are never used in
        # this method — candidates for removal once intent is confirmed.
        qs = super().get_queryset(request)
        groups = list(request.user.groups.values_list('user', flat=True))
        user = request.user.username
        countrycodes=StgLocationCodes.objects.values_list(
            'country_code',flat=True)
        # This queryset is used to load specific phone code for logged in user
        if db_field.name == "location":
            if request.user.is_superuser:
                kwargs["queryset"] = StgLocationCodes.objects.all().order_by(
                    'location_id')
            # Looks up for the location level upto the country level
            else:
                kwargs["queryset"] = StgLocationCodes.objects.filter(
                    location_id=request.user.location_id).order_by(
                    'location_id')
        if db_field.name == "user":
            kwargs["queryset"] = CustomUser.objects.filter(
                username=user)
        return super().formfield_for_foreignkey(db_field, request,**kwargs)
    fieldsets = (
        ('Facility Ownership Details', {
            'fields':('name','shortname','description','location',) #afrocode may be null
        }),
        ('Logged Admin/Staff', {
            'fields': ('user',)
        }),
    )
    list_display=['name','code','shortname','description','location',]
    # Avoid per-row queries for the location and user columns.
    list_select_related = ('location','user',)
    list_display_links =('code', 'name',)
    search_fields = ('code','translations__name','translations__shortname',) #display search field
    list_per_page = 30 #limit records displayed on admin site to 15
    # Hide auto-maintained audit fields and the auto-generated code.
    exclude = ('date_created','date_lastupdated','code',)
class FacilityServiceAvailabilityProxyForm(forms.ModelForm):
    """ModelForm for FacilityServiceAvailability that renders the
    assessment date with a calendar picker widget."""
    class Meta:
        model = FacilityServiceAvailability
        fields = ('facility','domain','intervention','service','provided',
            'specialunit','staff','infrastructure','supplies','date_assessed',)
        widgets = {
            'date_assessed': DatePickerInput(), # # default date-format %m/%d/%Y will be used
        }
class FacilityServiceAvailabilityInline(admin.TabularInline):
    """
    Serge requested that a user does not see other users or groups data.
    This method filters logged in users depending on group roles and permissions.
    Only the superuser can see all users and locations data while a users
    can only see data from registered location within his/her group/system role.
    If a user is not assigned to a group, he/she can only own data - 01/02/2021
    """
    def get_queryset(self, request):
        # Inline rows for facility service availability, filtered by role.
        qs = super().get_queryset(request)
        # Get a query of groups the user belongs and flatten it to list object
        groups = list(request.user.groups.values_list('user', flat=True))
        user = request.user.username
        user_location = request.user.location.location_id
        db_locations = StgLocation.objects.all().order_by('location_id')
        # Returns data for all the locations to the lowest location level
        if request.user.is_superuser:
            qs  # NOTE(review): bare expression is a no-op; superusers fall through to the final return
        # returns data for AFRO and member countries
        elif user in groups and user_location==1:
            # NOTE(review): qs_admin is computed but never used — the intended
            # location-based filtering appears unfinished; confirm intent.
            qs_admin=db_locations.filter(
                locationlevel__locationlevel_id__gte=1,
                locationlevel__locationlevel_id__lte=2)
        # return data based on the location of the user logged/request location
        elif user in groups and user_location>1:
            # NOTE(review): filters FacilityServiceAvailability on a
            # `username` field — confirm this field exists on the model.
            qs=qs.filter(username=user)
        return qs
    """
    Serge requested that the form for data input be restricted to user's country.
    Thus, this function is for filtering location to display country level.
    The location is used to filter the dropdownlist based on the request
    object's USER, If the user has superuser privileges or is a member of
    AFRO-DataAdmins, he/she can enter data for all the AFRO member countries
    otherwise, can only enter data for his/her country.=== modified 02/02/2021
    """
    def formfield_for_foreignkey(self, db_field, request =None, **kwargs):
        # Restrict the "domain" dropdown to service sub-domains (category 1,
        # i.e. availability) that have a parent domain.
        # NOTE(review): qs below is never used — candidate for removal.
        qs = super().get_queryset(request)
        db_sevicedomains = StgServiceDomain.objects.all()
        db_sevicesubdomains=db_sevicedomains.exclude(
            parent_id__isnull=True).filter(category=1)
        if db_field.name == "domain":
            kwargs["queryset"]=db_sevicesubdomains
        return super().formfield_for_foreignkey(db_field, request,**kwargs)
    # form = FacilityServiceAvailabilityProxyForm #overrides the default model form
    model = FacilityServiceAvailability
    # formset = LimitModelFormset
    extra = 1 # Used to control number of empty rows displayed.
    # Avoid per-row queries for the related columns.
    list_select_related = ('facility','domain','intervention','service',)
    fields = ('facility','domain','intervention','service','provided',
        'specialunit','staff','infrastructure','supplies','date_assessed',)
class FacilityServiceCapacityInline(admin.TabularInline):
    """
    Serge requested that a user does not see other users or groups data.
    This method filters logged in users depending on group roles and permissions.
    Only the superuser can see all users and locations data while a users
    can only see data from registered location within his/her group/system role.
    If a user is not assigned to a group, he/she can only own data - 01/02/2021
    """
    def get_queryset(self, request):
        # Inline rows for facility service provision/capacity, filtered by role.
        qs = super().get_queryset(request)
        # Get a query of groups the user belongs and flatten it to list object
        groups = list(request.user.groups.values_list('user', flat=True))
        user = request.user.username
        user_location = request.user.location.location_id
        db_locations = StgLocation.objects.all().order_by('location_id')
        # Returns data for all the locations to the lowest location level
        if request.user.is_superuser:
            qs  # NOTE(review): bare expression is a no-op; superusers fall through to the final return
        # returns data for AFRO and member countries
        elif user in groups and user_location==1:
            # NOTE(review): qs_admin is computed but never used — the intended
            # location-based filtering appears unfinished; confirm intent.
            qs_admin=db_locations.filter(
                locationlevel__locationlevel_id__gte=1,
                locationlevel__locationlevel_id__lte=2)
        # return data based on the location of the user logged/request location
        elif user in groups and user_location>1:
            # NOTE(review): filters FacilityServiceProvision on a
            # `username` field — confirm this field exists on the model.
            qs=qs.filter(username=user)
        return qs
    def formfield_for_foreignkey(self, db_field, request =None, **kwargs):
        # Restrict the "domain" dropdown to category-2 (provision/capacity)
        # sub-domains at Level 2.
        db_sevicedomains = StgServiceDomain.objects.all()
        db_sevicesubdomains=db_sevicedomains.exclude(
            parent_id__isnull=True).filter(category=2).filter(
            level='Level 2')
        # NOTE(review): db_provisionunits is never used in this method —
        # candidate for removal once intent is confirmed.
        db_provisionunits=StgFacilityServiceMeasureUnits.objects.select_related(
            'domain') #good
        if db_field.name == "domain":
            kwargs["queryset"]=db_sevicesubdomains
        return super().formfield_for_foreignkey(db_field, request,**kwargs)
    model = FacilityServiceProvision
    # formset = LimitModelFormset
    extra = 1 # Used to control number of empty rows displayed.
    # Avoid per-row queries for the related columns.
    list_select_related = ('facility','domain','units')
    fields = ('facility','domain','units','available','functional',
        'date_assessed',)
class FacilityServiceReadinessInline(admin.TabularInline):
def get_queryset(self, request):
qs = super().get_queryset(request)
groups = list(request.user.groups.values_list('user', flat=True))
user = request.user.username
user_location = request.user.location.location_id
db_locations = StgLocation.objects.all().order_by('location_id')
# Returns data for all the locations to the lowest location level
if request.user.is_superuser:
qs
# returns data for AFRO and member countries
elif user in groups and user_location<=2:
qs_admin=db_locations.filter(
locationlevel__locationlevel_id__gt=2,
locationlevel__locationlevel_id__lte=3)
# return data based on the location of the user logged/request location
elif user in groups and user_location>1:
qs=qs.filter(username=user)
return qs
"""
Serge requested that the form for data input be restricted to user's country.
Thus, this function is for filtering location to display country level.
The location is used to filter the dropdownlist based on the request
object's USER, If the user has superuser privileges or is a member of
AFRO-DataAdmins, he/she can enter data for all the AFRO member countries
otherwise, can only enter data for his/her country.=== modified 02/02/2021
"""
def formfield_for_foreignkey(self, db_field, request =None, | |
<gh_stars>1-10
"""Core async functions."""
import asyncio
from pathlib import Path
from ssl import SSLContext
from typing import Any, Awaitable, Dict, List, Optional, Sequence, Tuple, Union
import cytoolz as tlz
import ujson as json
from aiohttp import TCPConnector
from aiohttp.typedefs import StrOrURL
from aiohttp_client_cache import CachedSession, SQLiteBackend
from . import utils
from .exceptions import InvalidInputValue
from .utils import EXPIRE, BaseRetriever
__all__ = ["retrieve", "delete_url_cache", "retrieve_text", "retrieve_json", "retrieve_binary"]
async def async_session(
    url_kwds: Tuple[Tuple[int, StrOrURL, Dict[StrOrURL, Any]], ...],
    read: str,
    r_kwds: Dict[str, Any],
    request_method: str,
    cache_name: Path,
    family: int,
    timeout: float = 5.0,
    expire_after: float = EXPIRE,
    ssl: Union[SSLContext, bool, None] = None,
    disable: bool = False,
) -> List[Union[str, bytes, Dict[str, Any]]]:
    """Create an async session and retrieve all requests concurrently.
    Parameters
    ----------
    url_kwds : list of tuples of urls and payloads
        A list of URLs or URLs with their payloads to be retrieved.
    read : str
        The method for returning the request; ``binary`` (bytes), ``json``, and ``text``.
    r_kwds : dict
        Keywords to pass to the response read function. ``{"content_type": None}`` if read
        is ``json`` else it's empty.
    request_method : str
        The request type; GET or POST.
    cache_name : pathlib.Path
        Path to a file for caching the session, defaults to
        ``./cache/aiohttp_cache.sqlite``.
    family : int
        TCP socket family
    timeout : float, optional
        Timeout for the request, defaults to 5.0.
    expire_after : int, optional
        Expiration time for the cache in seconds, defaults to -1 (never expire).
    ssl : bool or SSLContext, optional
        SSLContext to use for the connection, defaults to None. Set to False to disable
        SSL certificate verification.
    disable : bool, optional
        If ``True`` temporarily disable caching requests and get new responses
        from the server, defaults to False.
    Returns
    -------
    list
        The read responses, in the same order as ``url_kwds``.
    """
    # SQLite-backed response cache; only GET/POST responses are cached.
    # NOTE(review): ``timeout`` here is the backend's timeout, presumably for
    # the SQLite connection rather than the HTTP request — confirm intent.
    cache = SQLiteBackend(
        cache_name=cache_name,
        expire_after=expire_after,
        allowed_methods=("GET", "POST"),
        timeout=timeout,
    )
    connector = TCPConnector(family=family, ssl=ssl)
    async with CachedSession(
        json_serialize=json.dumps,
        cache=cache,
        connector=connector,
        trust_env=True,
    ) as session:
        # ``disabled()`` yields a context in which caching is bypassed;
        # otherwise re-enter the session itself (aiohttp's __aenter__ is
        # effectively a no-op returning the session).
        _session = session.disabled() if disable else session
        async with _session:
            # Resolve session.get/session.post from the request method name.
            request_func = getattr(session, request_method.lower())
            # Fan out all requests and gather results in input order.
            tasks = (
                utils.retriever(uid, url, kwds, request_func, read, r_kwds)
                for uid, url, kwds in url_kwds
            )
            return await asyncio.gather(*tasks)  # type: ignore
def delete_url_cache(
    url: StrOrURL,
    request_method: str = "GET",
    cache_name: Optional[Union[Path, str]] = None,
    **kwargs: Dict[str, Any],
) -> None:
    """Delete cached response associated with ``url``, along with its history (if applicable).

    Parameters
    ----------
    url : str
        URL to be deleted from the cache
    request_method : str, optional
        HTTP request method to be deleted from the cache, defaults to ``GET``.
    cache_name : str, optional
        Path to a file for caching the session, defaults to
        ``./cache/aiohttp_cache.sqlite``.
    kwargs : dict, optional
        Keywords to pass to the ``cache.delete_url()``.

    Raises
    ------
    InvalidInputValue
        If ``request_method`` is neither ``GET`` nor ``POST``.
    """
    # Validate input BEFORE touching the event loop: the original code called
    # utils.get_event_loop()/set_event_loop first, so an invalid method leaked
    # a freshly created loop (raised before the closing branch could run).
    request_method = request_method.upper()
    valid_methods = ["GET", "POST"]
    if request_method not in valid_methods:
        raise InvalidInputValue("method", valid_methods)
    loop, new_loop = utils.get_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(
        utils.delete_url(url, request_method, utils.create_cachefile(cache_name), **kwargs)
    )
    # Only close loops this function created; never close a caller's loop.
    if new_loop:
        loop.close()
def retrieve(
    urls: Sequence[StrOrURL],
    read: str,
    request_kwds: Optional[Sequence[Dict[str, Any]]] = None,
    request_method: str = "GET",
    max_workers: int = 8,
    cache_name: Optional[Union[Path, str]] = None,
    family: str = "both",
    timeout: float = 5.0,
    expire_after: float = EXPIRE,
    ssl: Union[SSLContext, bool, None] = None,
    disable: bool = False,
) -> List[Union[str, Dict[str, Any], bytes]]:
    r"""Send async requests and collect the responses in input order.

    Parameters
    ----------
    urls : list of str
        List of URLs.
    read : str
        Method for returning the request; ``binary``, ``json``, and ``text``.
    request_kwds : list of dict, optional
        List of requests keywords corresponding to input URLs (1 on 1 mapping),
        defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``.
    request_method : str, optional
        Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``.
    max_workers : int, optional
        Maximum number of async processes, defaults to 8.
    cache_name : str, optional
        Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``.
    family : str, optional
        TCP socket family, defaults to both, i.e., IPv4 and IPv6. For IPv4
        or IPv6 only pass ``ipv4`` or ``ipv6``, respectively.
    timeout : float, optional
        Timeout for the request, defaults to 5.0.
    expire_after : int, optional
        Expiration time for response caching in seconds, defaults to -1 (never expire).
    ssl : bool or SSLContext, optional
        SSLContext to use for the connection, defaults to None. Set to False to disable
        SSL certificate verification.
    disable : bool, optional
        If ``True`` temporarily disable caching requests and get new responses
        from the server, defaults to False.

    Returns
    -------
    list
        List of responses in the order of input URLs.

    Examples
    --------
    >>> import async_retriever as ar
    >>> stations = ["01646500", "08072300", "11073495"]
    >>> url = "https://waterservices.usgs.gov/nwis/site"
    >>> urls, kwds = zip(
    ...     *[
    ...         (url, {"params": {"format": "rdb", "sites": s, "siteStatus": "all"}})
    ...         for s in stations
    ...     ]
    ... )
    >>> resp = ar.retrieve(urls, "text", request_kwds=kwds)
    >>> resp[0].split('\n')[-2].split('\t')[1]
    '01646500'
    """
    # Validate and normalize all inputs in one place.
    inp = BaseRetriever(urls, read, request_kwds, request_method, cache_name, family)
    loop, new_loop = utils.get_event_loop()
    asyncio.set_event_loop(loop)
    # A session factory with everything bound except the URL batch itself.
    send_batch = tlz.partial(
        async_session,
        read=inp.read,
        r_kwds=inp.r_kwds,
        request_method=inp.request_method,
        cache_name=inp.cache_name,
        family=inp.family,
        timeout=timeout,
        expire_after=expire_after,
        ssl=ssl,
        disable=disable,
    )
    # Run the requests in batches of at most ``max_workers`` concurrent tasks.
    tagged: List[Any] = []
    for chunk in tlz.partition_all(max_workers, inp.url_kwds):
        tagged.extend(loop.run_until_complete(send_batch(url_kwds=chunk)))
    if new_loop:
        loop.close()
    # Each result carries its input position as the first tuple element;
    # sorting on it restores the original URL order.
    return [resp for _, resp in sorted(tagged)]
def retrieve_text(
    urls: Sequence[StrOrURL],
    request_kwds: Optional[Sequence[Dict[str, Any]]] = None,
    request_method: str = "GET",
    max_workers: int = 8,
    cache_name: Optional[Union[Path, str]] = None,
    family: str = "both",
    timeout: float = 5.0,
    expire_after: float = EXPIRE,
    ssl: Union[SSLContext, bool, None] = None,
    disable: bool = False,
) -> List[str]:
    r"""Send async requests and get the response as ``text``.

    A thin convenience wrapper around :func:`retrieve` with ``read="text"``;
    all parameters have the same meaning as there.

    Parameters
    ----------
    urls : list of str
        List of URLs.
    request_kwds : list of dict, optional
        List of requests keywords corresponding to input URLs (1 on 1 mapping),
        defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``.
    request_method : str, optional
        Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``.
    max_workers : int, optional
        Maximum number of async processes, defaults to 8.
    cache_name : str, optional
        Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``.
    family : str, optional
        TCP socket family, defaults to both, i.e., IPv4 and IPv6. For IPv4
        or IPv6 only pass ``ipv4`` or ``ipv6``, respectively.
    timeout : float, optional
        Timeout for the request in seconds, defaults to 5.0.
    expire_after : int, optional
        Expiration time for response caching in seconds, defaults to -1 (never expire).
    ssl : bool or SSLContext, optional
        SSLContext to use for the connection, defaults to None. Set to False to disable
        SSL certificate verification.
    disable : bool, optional
        If ``True`` temporarily disable caching requests and get new responses
        from the server, defaults to False.

    Returns
    -------
    list
        List of responses in the order of input URLs.

    Examples
    --------
    >>> import async_retriever as ar
    >>> stations = ["01646500", "08072300", "11073495"]
    >>> url = "https://waterservices.usgs.gov/nwis/site"
    >>> urls, kwds = zip(
    ...     *[
    ...         (url, {"params": {"format": "rdb", "sites": s, "siteStatus": "all"}})
    ...         for s in stations
    ...     ]
    ... )
    >>> resp = ar.retrieve_text(urls, kwds)
    >>> resp[0].split('\n')[-2].split('\t')[1]
    '01646500'
    """
    return retrieve(  # type: ignore
        urls,
        "text",
        request_kwds=request_kwds,
        request_method=request_method,
        max_workers=max_workers,
        cache_name=cache_name,
        family=family,
        timeout=timeout,
        expire_after=expire_after,
        ssl=ssl,
        disable=disable,
    )
def retrieve_json(
urls: Sequence[StrOrURL],
request_kwds: Optional[Sequence[Dict[str, Any]]] = None,
request_method: str = "GET",
max_workers: int = 8,
cache_name: Optional[Union[Path, str]] = None,
family: str = "both",
timeout: float = 5.0,
expire_after: float = EXPIRE,
ssl: Union[SSLContext, bool, None] = None,
disable: bool = False,
) -> List[Dict[str, Any]]:
r"""Send async requests and get the response as ``json``.
Parameters
----------
urls : list of str
List of URLs.
request_kwds : list of dict, optional
List of requests keywords corresponding to input URLs (1 on 1 mapping),
defaults to ``None``. For example, ``[{"params": {...}, "headers": {...}}, ...]``.
request_method : str, optional
Request type; ``GET`` (``get``) or ``POST`` (``post``). Defaults to ``GET``.
max_workers : int, optional
Maximum number of async processes, defaults to 8.
cache_name : str, optional
Path to a file for caching the session, defaults to ``./cache/aiohttp_cache.sqlite``.
family : str, optional
TCP socket family, defaults to both, i.e., IPv4 and IPv6. For IPv4
or IPv6 only pass ``ipv4`` or ``ipv6``, respectively.
timeout : float, optional
Timeout for the request, defaults to 5.0.
expire_after : int, optional
Expiration time for response caching | |
<reponame>ssalonen/pandas<filename>pandas/io/html.py<gh_stars>0
""":mod:`pandas.io.html` is a module containing functionality for dealing with
HTML IO.
"""
import os
import re
import numbers
import collections
from distutils.version import LooseVersion
import numpy as np
from pandas import DataFrame, MultiIndex, isnull
from pandas.io.common import _is_url, urlopen, parse_url
from pandas.compat import range, lrange, lmap, u, map
from pandas import compat
try:
import bs4
except ImportError:
_HAS_BS4 = False
else:
_HAS_BS4 = True
try:
import lxml
except ImportError:
_HAS_LXML = False
else:
_HAS_LXML = True
try:
import html5lib
except ImportError:
_HAS_HTML5LIB = False
else:
_HAS_HTML5LIB = True
#############
# READ HTML #
#############
_RE_WHITESPACE = re.compile(r'([\r\n]+|\s{2,})')
def _remove_whitespace(s, regex=_RE_WHITESPACE):
"""Replace extra whitespace inside of a string with a single space.
Parameters
----------
s : str or unicode
The string from which to remove extra whitespace.
regex : regex
The regular expression to use to remove extra whitespace.
Returns
-------
subd : str or unicode
`s` with all extra whitespace replaced with a single space.
"""
return regex.sub(' ', s.strip())
def _get_skiprows_iter(skiprows):
    """Get an iterator given an integer, slice or container.

    Parameters
    ----------
    skiprows : int, slice, container
        The iterator to use to skip rows; can also be a slice.

    Raises
    ------
    TypeError
        * If `skiprows` is not a slice, integer, or Container

    Returns
    -------
    it : iterable
        A proper iterator to use to skip rows of a DataFrame.
    """
    if isinstance(skiprows, slice):
        # Default omitted start/step; a slice with stop=None still raises
        # TypeError from range(), as before.
        return lrange(skiprows.start or 0, skiprows.stop, skiprows.step or 1)
    elif isinstance(skiprows, numbers.Integral):
        # An integer n means "skip the first n rows".
        return lrange(skiprows)
    elif isinstance(skiprows, collections.Container):
        # NOTE(review): ``collections.Container`` is the pre-3.3 alias of
        # ``collections.abc.Container``; kept for this file's py2/py3 compat era.
        return skiprows
    raise TypeError('{0} is not a valid type for skipping'
                    ' rows'.format(type(skiprows)))
def _read(io):
    """Try to read from a url, file or string.

    Parameters
    ----------
    io : str, unicode, or file-like

    Returns
    -------
    raw_text : str

    Raises
    ------
    TypeError
        If `io` is neither a URL, a readable object, a file path, nor a string.
    """
    if _is_url(io):
        with urlopen(io) as url:
            raw_text = url.read()
    elif hasattr(io, 'read'):
        raw_text = io.read()
    elif os.path.isfile(io):
        with open(io) as f:
            raw_text = f.read()
    elif isinstance(io, compat.string_types):
        raw_text = io
    else:
        # BUG FIX: the old message used "'{0.__class__.__name__!r}'" — the !r
        # conversion already adds quotes, so users saw doubled quotes like
        # ''int''. Use {0!r} on the bare type name instead.
        raise TypeError("Cannot read object of type "
                        "{0!r}".format(type(io).__name__))
    return raw_text
class _HtmlFrameParser(object):
    """Base class for parsers that parse HTML into DataFrames.

    Parameters
    ----------
    io : str or file-like
        This can be either a string of raw HTML, a valid URL using the HTTP,
        FTP, or FILE protocols or a file-like object.
    match : str or regex
        The text to match in the document.
    attrs : dict
        List of HTML <table> element attributes to match.

    Attributes
    ----------
    io : str or file-like
        raw HTML, URL, or file-like object
    match : regex
        The text to match in the raw HTML
    attrs : dict-like
        A dictionary of valid table attributes to use to search for table
        elements.

    Notes
    -----
    To subclass this class effectively you must override the following methods:
        * :func:`_build_doc`
        * :func:`_text_getter`
        * :func:`_parse_td`
        * :func:`_parse_th`
        * :func:`_parse_tables`
        * :func:`_parse_tr`
        * :func:`_parse_thead`
        * :func:`_parse_tbody`
        * :func:`_parse_tfoot`

    (``_parse_th`` is called by :meth:`_parse_raw_thead` and must be provided
    by concrete subclasses even though no abstract stub exists for it here.)

    See each method's respective documentation for details on their
    functionality.
    """
    def __init__(self, io, match, attrs):
        self.io = io
        self.match = match
        self.attrs = attrs
    def parse_tables(self):
        # Returns a generator of (header, body, footer) triples, one per
        # matching <table> element in the document.
        tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
        return (self._build_table(table) for table in tables)
    def _parse_raw_data(self, rows):
        """Parse the raw data into a list of lists.

        Parameters
        ----------
        rows : iterable of node-like
            A list of row elements.
        text_getter : callable
            A callable that gets the text from an individual node. This must be
            defined by subclasses.
        column_finder : callable
            A callable that takes a row node as input and returns a list of the
            column node in that row. This must be defined by subclasses.

        Returns
        -------
        data : list of list of strings
            One inner list per row; each cell's text has its extra whitespace
            collapsed by :func:`_remove_whitespace`.
        """
        data = [[_remove_whitespace(self._text_getter(col)) for col in
                 self._parse_td(row)] for row in rows]
        return data
    def _text_getter(self, obj):
        """Return the text of an individual DOM node.

        Parameters
        ----------
        obj : node-like
            A DOM node.

        Returns
        -------
        text : str or unicode
            The text from an individual DOM node.
        """
        raise NotImplementedError
    def _parse_td(self, obj):
        """Return the td elements from a row element.

        Parameters
        ----------
        obj : node-like

        Returns
        -------
        columns : list of node-like
            These are the elements of each row, i.e., the columns.
        """
        raise NotImplementedError
    def _parse_tables(self, doc, match, attrs):
        """Return all tables from the parsed DOM.

        Parameters
        ----------
        doc : tree-like
            The DOM from which to parse the table element.
        match : str or regular expression
            The text to search for in the DOM tree.
        attrs : dict
            A dictionary of table attributes that can be used to disambiguate
            multiple tables on a page.

        Raises
        ------
        AssertionError
            * If `match` does not match any text in the document.

        Returns
        -------
        tables : list of node-like
            A list of <table> elements to be parsed into raw data.
        """
        raise NotImplementedError
    def _parse_tr(self, table):
        """Return the list of row elements from the parsed table element.

        Parameters
        ----------
        table : node-like
            A table element that contains row elements.

        Returns
        -------
        rows : list of node-like
            A list row elements of a table, usually <tr> or <th> elements.
        """
        raise NotImplementedError
    def _parse_thead(self, table):
        """Return the header of a table.

        Parameters
        ----------
        table : node-like
            A table element that contains row elements.

        Returns
        -------
        thead : node-like
            A <thead>...</thead> element.
        """
        raise NotImplementedError
    def _parse_tbody(self, table):
        """Return the body of the table.

        Parameters
        ----------
        table : node-like
            A table element that contains row elements.

        Returns
        -------
        tbody : node-like
            A <tbody>...</tbody> element.
        """
        raise NotImplementedError
    def _parse_tfoot(self, table):
        """Return the footer of the table if any.

        Parameters
        ----------
        table : node-like
            A table element that contains row elements.

        Returns
        -------
        tfoot : node-like
            A <tfoot>...</tfoot> element.
        """
        raise NotImplementedError
    def _build_doc(self):
        """Return a tree-like object that can be used to iterate over the DOM.

        Returns
        -------
        obj : tree-like
        """
        raise NotImplementedError
    def _build_table(self, table):
        # Assemble the three sections of a single table.
        header = self._parse_raw_thead(table)
        body = self._parse_raw_tbody(table)
        footer = self._parse_raw_tfoot(table)
        return header, body, footer
    def _parse_raw_thead(self, table):
        # Extract header cell texts from the first <thead>, if present.
        thead = self._parse_thead(table)
        res = []
        if thead:
            res = lmap(self._text_getter, self._parse_th(thead[0]))
        # A single-cell header is squeezed to a 0-d numpy array; multi-cell
        # headers (and missing headers) come back as a plain list.
        return np.array(res).squeeze() if res and len(res) == 1 else res
    def _parse_raw_tfoot(self, table):
        # Extract footer cell texts from the first <tfoot>, if present;
        # same single-element squeeze behavior as the header.
        tfoot = self._parse_tfoot(table)
        res = []
        if tfoot:
            res = lmap(self._text_getter, self._parse_td(tfoot[0]))
        return np.array(res).squeeze() if res and len(res) == 1 else res
    def _parse_raw_tbody(self, table):
        tbody = self._parse_tbody(table)
        try:
            res = self._parse_tr(tbody[0])
        except IndexError:
            # No <tbody> element: fall back to rows directly under <table>.
            res = self._parse_tr(table)
        return self._parse_raw_data(res)
class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser):
    """HTML to DataFrame parser that uses BeautifulSoup under the hood.

    See Also
    --------
    pandas.io.html._HtmlFrameParser
    pandas.io.html._LxmlFrameParser

    Notes
    -----
    Documentation strings for this class are in the base class
    :class:`pandas.io.html._HtmlFrameParser`.
    """
    def __init__(self, *args, **kwargs):
        super(_BeautifulSoupHtml5LibFrameParser, self).__init__(*args,
                                                                **kwargs)
        from bs4 import SoupStrainer
        # Only <table> elements are of interest to this parser.
        self._strainer = SoupStrainer('table')

    def _text_getter(self, obj):
        return obj.text

    def _parse_td(self, row):
        # Both <td> and <th> cells count as column data within a row.
        return row.find_all(('td', 'th'))

    def _parse_tr(self, element):
        return element.find_all('tr')

    def _parse_th(self, element):
        return element.find_all('th')

    def _parse_thead(self, table):
        return table.find_all('thead')

    def _parse_tbody(self, table):
        return table.find_all('tbody')

    def _parse_tfoot(self, table):
        return table.find_all('tfoot')

    def _parse_tables(self, doc, match, attrs):
        element_name = self._strainer.name
        candidates = doc.find_all(element_name, attrs=attrs)
        if not candidates:
            raise AssertionError('No tables found')
        # Keep only tables whose text matches, deduplicated by parent table.
        hits = (table.find(text=match) for table in candidates)
        unique_parents = set(hit.find_parent(element_name)
                             for hit in hits if hit is not None)
        matched = list(unique_parents)
        if not matched:
            raise AssertionError("No tables found matching "
                                 "'{0}'".format(match.pattern))
        return matched

    def _setup_build_doc(self):
        raw_text = _read(self.io)
        if not raw_text:
            raise AssertionError('No text parsed from document: '
                                 '{0}'.format(self.io))
        return raw_text

    def _build_doc(self):
        from bs4 import BeautifulSoup
        return BeautifulSoup(self._setup_build_doc(), features='html5lib')
def _build_node_xpath_expr(attrs):
    """Build an xpath expression to simulate bs4's ability to pass in kwargs to
    search for attributes when using the lxml parser.

    Parameters
    ----------
    attrs : dict
        A dict of HTML attributes. These are NOT checked for validity. The
        caller's dict is never modified.

    Returns
    -------
    expr : unicode
        An XPath expression that checks for the given HTML attributes.
    """
    # BUG FIX: operate on a copy — the previous implementation popped
    # 'class_' from the caller's dict, mutating the argument as a side effect.
    attrs = dict(attrs)
    # give class attribute as class_ because class is a python keyword
    if 'class_' in attrs:
        attrs['class'] = attrs.pop('class_')
    s = (u("@{k}='{v}'").format(k=k, v=v) for k, v in compat.iteritems(attrs))
    return u('[{0}]').format(' and '.join(s))
# XPath namespace enabling EXSLT regular-expression functions (re:test etc.).
_re_namespace = {'re': 'http://exslt.org/regular-expressions'}
# URL schemes considered valid input — presumably consumed by the lxml-based
# parser below (its body is outside this chunk); verify before relying on it.
_valid_schemes = 'http', 'file', 'ftp'
class _LxmlFrameParser(_HtmlFrameParser):
"""HTML to DataFrame parser that uses lxml under the hood.
Warning
-------
This parser | |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
from flaky import flaky
# Bokeh imports
from bokeh._testing.util.selenium import (
RECORD,
alt_click,
enter_text_in_cell,
get_table_cell,
get_table_column_cells,
get_table_row,
get_table_selected_rows,
shift_click,
sort_table_column,
)
from bokeh.layouts import column
from bokeh.models import (
Button,
ColumnDataSource,
CustomAction,
CustomJS,
DataTable,
NumberEditor,
Plot,
Range1d,
Rect,
TableColumn,
TapTool,
)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.project",
)
def _is_cds_data_patch(evt):
return evt['kind'] == 'ModelChanged' and evt['attr'] == 'data'
def has_cds_data_patches(msgs):
for msg in msgs:
if msg.msgtype == "PATCH-DOC":
if any(_is_cds_data_patch(evt) for evt in msg.content.get('events', [])):
return True
return False
@pytest.mark.selenium
class Test_DataTableSource(object):
    def test_server_source_patch_does_not_duplicate_data_update_event(self, bokeh_server_page) -> None:
        """A server-side ``source.patch`` must not make the client echo a full
        ``data`` update (PATCH-DOC) back to the server; see comment below."""
        def modify_doc(doc):
            data = {'x': [1,2,3,4], 'y': [10,20,30,40]}
            source = ColumnDataSource(data)
            plot = Plot(plot_height=400, plot_width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            # Custom action records the source's current data for inspection.
            plot.add_tools(CustomAction(callback=CustomJS(args=dict(s=source), code=RECORD("data", "s.data"))))
            table = DataTable(columns=[
                TableColumn(field="x"),
                TableColumn(field="y")
            ], source=source, editable=False)
            btn = Button(label="Click Me!", css_classes=["foo"])
            @btn.on_click
            def btn_click():
                # Patch a single cell: x[0] -> 42.
                source.patch({"x": [(0, 42)]})
            doc.add_root(column(plot, table, btn))
        page = bokeh_server_page(modify_doc)
        page.click_custom_action()
        results = page.results
        assert results == {'data': {'x': [1,2,3,4], 'y': [10,20,30,40]}}
        button = page.driver.find_element_by_class_name('foo')
        button.click()
        page.click_custom_action()
        results = page.results
        assert results == {'data': {'x': [42,2,3,4], 'y': [10,20,30,40]}}
        # if the server receives something back like:
        #
        # Message 'PATCH-DOC' (revision 1) content: {
        #     'events': [{
        #         'kind': 'ModelChanged',
        #         'model': {'id': '1001'},
        #         'attr': 'data', 'new': {'x': [42, 2, 3, 4], 'y': [10, 20, 30, 40]}
        #     }],
        #     'references': []
        # }
        #
        # Then that means the client got our patch message and erroneously ping
        # ponged a full data update back to us
        assert not has_cds_data_patches(page.message_test_port.received)
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        #assert page.has_no_console_errors()
    def test_server_source_stream_does_not_duplicate_data_update_event(self, bokeh_server_page) -> None:
        """A server-side ``source.stream`` must not make the client echo a full
        ``data`` update (PATCH-DOC) back to the server; see comment below."""
        def modify_doc(doc):
            data = {'x': [1,2,3,4], 'y': [10,20,30,40]}
            source = ColumnDataSource(data)
            plot = Plot(plot_height=400, plot_width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            # Custom action records the source's current data for inspection.
            plot.add_tools(CustomAction(callback=CustomJS(args=dict(s=source), code=RECORD("data", "s.data"))))
            table = DataTable(columns=[
                TableColumn(field="x"),
                TableColumn(field="y")
            ], source=source, editable=False)
            btn = Button(label="Click Me!", css_classes=["foo"])
            @btn.on_click
            def btn_click():
                # Append one new row to the source.
                source.stream({"x": [5], "y": [50]})
            doc.add_root(column(plot, table, btn))
        page = bokeh_server_page(modify_doc)
        page.click_custom_action()
        results = page.results
        assert results == {'data': {'x': [1,2,3,4], 'y': [10,20,30,40]}}
        button = page.driver.find_element_by_class_name('foo')
        button.click()
        page.click_custom_action()
        results = page.results
        assert results == {'data': {'x': [1,2,3,4,5], 'y': [10,20,30,40,50]}}
        # if the server receives something back like:
        #
        # Message 'PATCH-DOC' (revision 1) content: {
        #     'events': [{
        #         'kind': 'ModelChanged',
        #         'model': {'id': '1001'},
        #         'attr': 'data', 'new': {'x': [1, 2, 3, 4, 5], 'y': [10, 20, 30, 40, 50]}
        #     }],
        #     'references': []
        # }
        #
        # Then that means the client got our stream message and erroneously ping
        # ponged a full data update back to us
        assert not has_cds_data_patches(page.message_test_port.received)
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        #assert page.has_no_console_errors()
    def test_server_source_update_does_not_duplicate_data_update_event(self, bokeh_server_page) -> None:
        """A full server-side ``source.data`` replacement must not make the
        client echo the data update (PATCH-DOC) back to the server."""
        def modify_doc(doc):
            data = {'x': [1,2,3,4], 'y': [10,20,30,40]}
            source = ColumnDataSource(data)
            plot = Plot(plot_height=400, plot_width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            # Custom action records the source's current data for inspection.
            plot.add_tools(CustomAction(callback=CustomJS(args=dict(s=source), code=RECORD("data", "s.data"))))
            table = DataTable(columns=[
                TableColumn(field="x"),
                TableColumn(field="y")
            ], source=source, editable=False)
            btn = Button(label="Click Me!", css_classes=["foo"])
            @btn.on_click
            def btn_click():
                # Replace the entire data dict at once.
                source.data = {'x': [5,6,7,8], 'y': [50,60,70,80]}
            doc.add_root(column(plot, table, btn))
        page = bokeh_server_page(modify_doc)
        page.click_custom_action()
        results = page.results
        assert results == {'data': {'x': [1,2,3,4], 'y': [10,20,30,40]}}
        button = page.driver.find_element_by_class_name('foo')
        button.click()
        page.click_custom_action()
        results = page.results
        assert results == {'data': {'x': [5,6,7,8], 'y': [50,60,70,80]}}
        # if the server receives something back like:
        #
        # Message 'PATCH-DOC' (revision 1) content: {
        #     'events': [{
        #         'kind': 'ModelChanged',
        #         'model': {'id': '1001'},
        #         'attr': 'data', 'new': {'x': [1, 2, 3, 4, 5], 'y': [10, 20, 30, 40, 50]}
        #     }],
        #     'references': []
        # }
        #
        # Then that means the client got our stream message and erroneously ping
        # ponged a full data update back to us
        assert not has_cds_data_patches(page.message_test_port.received)
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        #assert page.has_no_console_errors()
    def test_server_edit_does_not_duplicate_data_update_event(self, bokeh_server_page) -> None:
        """Editing a cell in the browser must propagate to the server source
        without triggering a duplicate full ``data`` update round-trip."""
        def modify_doc(doc):
            data = {'x': [1,2,3,4], 'y': [10,20,30,40]}
            source = ColumnDataSource(data)
            plot = Plot(plot_height=400, plot_width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            # Custom action records the source's current data for inspection.
            plot.add_tools(CustomAction(callback=CustomJS(args=dict(s=source), code=RECORD("data", "s.data"))))
            # The y column is editable via a NumberEditor; the table allows edits.
            table = DataTable(columns=[
                TableColumn(field="x"),
                TableColumn(field="y", editor=NumberEditor())
            ], source=source, editable=True)
            doc.add_root(column(plot, table))
        page = bokeh_server_page(modify_doc)
        page.click_custom_action()
        results = page.results
        assert results == {'data': {'x': [1,2,3,4], 'y': [10,20,30,40]}}
        # Edit row 3, column 2 (y value 30) through the browser UI.
        cell = get_table_cell(page.driver, 3, 2)
        assert cell.text == '30'
        enter_text_in_cell(page.driver, cell, '100')
        page.click_custom_action()
        results = page.results
        assert results == {'data': {'x': [1,2,3,4], 'y': [10, 20, 100, 40]}}
        # if the server receives something back like:
        #
        # Message 'PATCH-DOC' (revision 1) content: {
        #     'events': [{
        #         'kind': 'ModelChanged',
        #         'model': {'id': '1001'},
        #         'attr': 'data', 'new': {'x': [1,2,3,4], 'y': [10, 20, 100, 40]}
        #     }],
        #     'references': []
        # }
        #
        # Then that means the client got our stream message and erroneously ping
        # ponged a full data update back to us
        assert not has_cds_data_patches(page.message_test_port.received)
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        #assert page.has_no_console_errors()
    def test_server_basic_selection(self, bokeh_server_page) -> None:
        """Clicking single table rows updates ``source.selected.indices`` on the
        server and the visual row selection in the browser (1-based rows map to
        0-based indices)."""
        data = {'x': [1,2,3,4,5,6], 'y': [60,50,40,30,20,10]}
        source = ColumnDataSource(data)
        def modify_doc(doc):
            plot = Plot(plot_height=400, plot_width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            # Custom action records the current selection for inspection.
            plot.add_tools(CustomAction(callback=CustomJS(args=dict(s=source), code=RECORD("indices", "s.selected.indices"))))
            table = DataTable(columns=[
                TableColumn(field="x"),
                TableColumn(field="y")
            ], source=source, editable=False)
            doc.add_root(column(plot, table))
        page = bokeh_server_page(modify_doc)
        page.click_custom_action()
        results = page.results
        # Nothing is selected initially.
        assert results == {'indices': []}
        assert set(source.selected.indices) == set([])
        assert get_table_selected_rows(page.driver) == set([])
        # select the third row
        row = get_table_row(page.driver, 3)
        row.click()
        page.click_custom_action()
        results = page.results
        assert results == {'indices': [2]}
        assert source.selected.indices == [2]
        assert get_table_selected_rows(page.driver) == set([2])
        # select the first row
        row = get_table_row(page.driver, 1)
        row.click()
        page.click_custom_action()
        results = page.results
        assert results == {'indices': [0]}
        assert source.selected.indices == [0]
        assert get_table_selected_rows(page.driver) == set([0])
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        #assert page.has_no_console_errors()
    def test_server_basic_mulitselection(self, bokeh_server_page) -> None:
        """Shift-click (range) and alt-click (additive) multi-row selection is
        reflected in ``source.selected.indices`` and the browser.

        NOTE(review): method name contains a typo ("mulitselection"); renaming
        would change test discovery, so it is documented rather than fixed here.
        """
        data = {'x': [1,2,3,4,5,6], 'y': [60,50,40,30,20,10]}
        source = ColumnDataSource(data)
        def modify_doc(doc):
            plot = Plot(plot_height=400, plot_width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            # Custom action records the current selection for inspection.
            plot.add_tools(CustomAction(callback=CustomJS(args=dict(s=source), code=RECORD("indices", "s.selected.indices"))))
            table = DataTable(columns=[
                TableColumn(field="x"),
                TableColumn(field="y")
            ], source=source, editable=False)
            doc.add_root(column(plot, table))
        page = bokeh_server_page(modify_doc)
        page.click_custom_action()
        results = page.results
        assert results == {'indices': []}
        assert set(source.selected.indices) == set([])
        assert get_table_selected_rows(page.driver) == set([])
        # select the third row
        row = get_table_row(page.driver, 2)
        row.click()
        row = get_table_row(page.driver, 4)
        shift_click(page.driver, row)
        page.click_custom_action()
        results = page.results
        assert set(results['indices']) == set([1, 2, 3])
        assert set(source.selected.indices) == set([1, 2, 3])
        assert get_table_selected_rows(page.driver) == set([1, 2, 3])
        # Alt-click adds a disjoint row (index 5) to the existing selection.
        row = get_table_row(page.driver, 6)
        alt_click(page.driver, row)
        page.click_custom_action()
        results = page.results
        assert set(results['indices']) == set([1, 2, 3, 5])
        assert set(source.selected.indices) == set([1, 2, 3, 5])
        assert get_table_selected_rows(page.driver) == set([1, 2, 3, 5])
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        #assert page.has_no_console_errors()
    @flaky(max_runs=5)
    def test_server_sorted_after_data_update(self, bokeh_server_page) -> None:
        """A column sort applied in the browser must persist after the server
        replaces ``source.data`` — the new data is shown re-sorted, not raw."""
        data = {'x': [1,2,5,6], 'y': [60,50,20,10]}
        source = ColumnDataSource(data)
        def modify_doc(doc):
            plot = Plot(plot_height=400, plot_width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            # Custom action records the source's current data for inspection.
            plot.add_tools(CustomAction(callback=CustomJS(args=dict(s=source), code=RECORD("data", "s.data"))))
            table = DataTable(columns=[
                TableColumn(field="x", title="x", sortable=True),
                TableColumn(field="y", title="y", sortable=True)
            ], source=source, editable=False)
            button = Button(css_classes=["foo"])
            def cb():
                # Replace the data entirely (more rows than before).
                source.data = {'x': [0,1,2,3,4,5,6,7], 'y': [70,60,50,40,30,20,10,0]}
            button.on_click(cb)
            doc.add_root(column(plot, table, button))
        page = bokeh_server_page(modify_doc)
        page.click_custom_action()
        results = page.results
        assert results == {'data': {'x': [1,2,5,6], 'y': [60,50,20,10]}}
        assert get_table_column_cells(page.driver, 1) == ['1', '2', '5', '6']
        assert get_table_column_cells(page.driver, 2) == ['60', '50', '20', '10']
        # Sorting on x (already ascending) leaves the display unchanged.
        sort_table_column(page.driver, 1)
        assert get_table_column_cells(page.driver, 1) == ['1', '2', '5', '6']
        assert get_table_column_cells(page.driver, 2) == ['60', '50', '20', '10']
        # Sort descending on y, which reverses the display order.
        sort_table_column(page.driver, 2, True)
        assert get_table_column_cells(page.driver, 1) == ['6', '5', '2', '1']
        assert get_table_column_cells(page.driver, 2) == ['10', '20', '50', '60']
        button = page.driver.find_element_by_class_name('foo')
        button.click()
        page.click_custom_action()
        results = page.results
        assert results == {'data': {'x': [0,1,2,3,4,5,6,7], 'y': [70,60,50,40,30,20,10,0]}}
        assert source.data == {'x': [0,1,2,3,4,5,6,7], 'y': [70,60,50,40,30,20,10,0]}
        # The new data appears with the previous sort (y descending) applied.
        assert get_table_column_cells(page.driver, 1) == ['7', '6', '5', '4', '3', '2', '1', '0']
        assert get_table_column_cells(page.driver, 2) == ['0', '10', '20', '30', '40', '50', '60', '70']
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        #assert page.has_no_console_errors()
@pytest.mark.skip
def test_server_sorted_after_patch(self, bokeh_server_page) -> None:
data = {'x': [1,2,5,6], 'y': [60,50,20,10]}
source = ColumnDataSource(data)
def modify_doc(doc):
plot = Plot(plot_height=400, plot_width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
plot.add_tools(CustomAction(callback=CustomJS(args=dict(s=source), code=RECORD("data", "s.data"))))
table = DataTable(columns=[
TableColumn(field="x", title="x", sortable=True),
TableColumn(field="y", title="y", sortable=True)
], source=source, editable=False)
button = Button(css_classes=["foo"])
def cb():
source.patch({'y': [[2, 100]]})
button.on_click(cb)
doc.add_root(column(plot, table, button))
page = bokeh_server_page(modify_doc)
page.click_custom_action()
results = page.results
assert results == {'data': {'x': [1,2,5,6], 'y': [60,50,20,10]}}
assert get_table_column_cells(page.driver, 1) == ['1', '2', '5', | |
import pandas as pd
import sys
import numpy as np
import scipy as sp
import json
import os
from decimal import Decimal
import scipy.optimize as opt
from scipy.optimize import minimize, curve_fit
from scipy.special import erfc
from scipy.stats import crystalball
from scipy.signal import medfilt, find_peaks
import pygama.analysis.histograms as pgh
import pygama.utils as pgu
import pygama.analysis.peak_fitting as pga
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
plt.style.use('style.mplstyle')
def main():
    """Fit peaks from thorium's first-pass calibration.

    The fitted peak parameters are then used to do a non-linear, second-pass
    calibration. Only the 2614.5 keV (Tl-208) fit is currently enabled;
    uncomment the other calls to fit additional peaks.
    """
    peak_2615()
    #peak_1765()
    #peak_1460()
    #peak_609()
    #peak_352()
def peak_2615():
    """Fit the 2614.51 keV (Tl-208) line of a first-pass-calibrated spectrum.

    Reads the run spectrum named on the command line, fits a Radford peak
    shape, prints/plots the result, and reports FWHM, centroid, residual,
    and a reduced chi-square estimate.
    """
    if(len(sys.argv) != 2):
        print('Usage: fit_bkg_peaks.py [run number]')
        sys.exit()

    with open("runDB.json") as f:
        runDB = json.load(f)
    meta_dir = os.path.expandvars(runDB["meta_dir"])

    #df = pd.read_hdf("{}/Spectrum_280-329.hdf5".format(meta_dir), key="df")
    df = pd.read_hdf("{}/Spectrum_{}.hdf5".format(meta_dir,sys.argv[1]), key="df")

    def gauss(x, mu, sigma, A=1):
        """
        define a gaussian distribution, w/ args: mu, sigma, area (optional).
        """
        return A * (1. / sigma / np.sqrt(2 * np.pi)) * np.exp(-(x - mu)**2 / (2. * sigma**2))

    def radford_peak(x, mu, sigma, hstep, htail, tau, bg0, a=1):
        """
        David Radford's HPGe peak shape function
        """
        # make sure the fractional amplitude parameters stay reasonable...
        if htail < 0 or htail > 1:
            return np.zeros_like(x)
        if hstep < 0 or hstep > 1:
            return np.zeros_like(x)

        bg_term = bg0  #+ x*bg1
        if np.any(bg_term < 0):
            return np.zeros_like(x)

        # compute the step and the low energy tail
        step = a * hstep * erfc((x - mu) / (sigma * np.sqrt(2)))
        le_tail = a * htail
        le_tail *= erfc((x - mu) / (sigma * np.sqrt(2)) + sigma / (tau * np.sqrt(2)))
        le_tail *= np.exp((x - mu) / tau)
        le_tail /= (2 * tau * np.exp(-(sigma / (np.sqrt(2) * tau))**2))

        # add up all the peak shape components
        return (1 - htail) * gauss(x, mu, sigma, a) + bg_term + step + le_tail

    # 0.5 keV bins around the 2614.51 keV line
    hist, bins, var = pgh.get_hist(df['e_cal'], range=(2540,2680), dx=0.5)
    pgh.plot_hist(hist, bins, var=hist, label="data")
    pars, cov = pga.fit_hist(radford_peak, hist, bins, var=hist, guess=[2608.5, 1.05, 0.001, 0.02, 5, 1, 4000])
    pgu.print_fit_results(pars, cov, radford_peak)
    pgu.plot_func(radford_peak, pars, label="chi2 fit", color='red')
    #x_vals = np.arange(2540,2680,0.5)
    #plt.plot(x_vals, radford_peak(x_vals, 2608.5, 1.05, .001, 0.02, 5, 1, 4000))

    # NOTE(review): pars[1] is sigma, so this is 2*sigma, not 2.355*sigma —
    # confirm this is the intended "FWHM" convention.
    FWHM = '%.2f' % Decimal(pars[1]*2)
    FWHM_uncertainty = '%.2f' % Decimal(np.sqrt(cov[1][1])*2)
    peak = '%.2f' % Decimal(pars[0])
    peak_uncertainty = '%.2f' % Decimal(np.sqrt(cov[0][0]))
    residual = '%.2f' % (2614.51 - float(peak))

    # Pearson-style chi-square against the fitted model, evaluated at the
    # left bin edges.  NOTE(review): divides by the bin count, not the
    # degrees of freedom (bins - n_params) — confirm intended.
    chi_2_element_list = []
    for i in range(len(hist)):
        chi_2_element = abs((radford_peak(bins[i], *pars) - hist[i])**2/radford_peak(bins[i], *pars))
        chi_2_element_list.append(chi_2_element)
    chi_2 = sum(chi_2_element_list)
    reduced_chi_2 = '%.2f' % Decimal(chi_2/len(hist))
    print(reduced_chi_2)

    # Legend entries rendered as colored proxy lines.
    label_01 = '2614.51 keV peak fit'
    label_02 = 'FWHM = '+str(FWHM)+r' $\pm$ '+str(FWHM_uncertainty)
    label_03 = 'Peak = '+str(peak)+r' $\pm$ '+str(peak_uncertainty)
    label_04 = 'Residual = '+str(residual)+r' $\pm$ '+str(peak_uncertainty)
    colors = ['red', 'red','red', 'red']
    lines = [Line2D([0], [0], color=c, lw=2) for c in colors]
    labels = [label_01, label_02, label_03, label_04]

    plt.xlim(2540,2680)
    plt.ylim(0,plt.ylim()[1])
    plt.xlabel('Energy (keV)', ha='right', x=1.0)
    plt.ylabel('Counts', ha='right', y=1.0)
    # NOTE(review): title mentions Kr83m but this routine fits the
    # 2614.51 keV Tl-208 line — confirm the intended title.
    plt.title('Fit of First-Pass Kr83m Calibration Peak')
    plt.tight_layout()
    #plt.semilogy()
    plt.legend(lines, labels, frameon=False, loc='upper right', fontsize='small')
    plt.show()
def peak_1765():
    """Fit the 1764.49 keV (Bi-214) line of a first-pass-calibrated spectrum.

    Same flow as peak_2615(): load the run spectrum named on the command
    line, fit a Radford peak shape, and plot FWHM/centroid/residual.
    """
    if(len(sys.argv) != 2):
        print('Usage: fit_bkg_peaks.py [run number]')
        sys.exit()

    with open("runDB.json") as f:
        runDB = json.load(f)
    meta_dir = os.path.expandvars(runDB["meta_dir"])

    #df = pd.read_hdf("{}/Spectrum_280-329.hdf5".format(meta_dir), key="df")
    df = pd.read_hdf("{}/Spectrum_{}.hdf5".format(meta_dir,sys.argv[1]), key="df")

    def gauss(x, mu, sigma, A=1):
        """
        define a gaussian distribution, w/ args: mu, sigma, area (optional).
        """
        return A * (1. / sigma / np.sqrt(2 * np.pi)) * np.exp(-(x - mu)**2 / (2. * sigma**2))

    def radford_peak(x, mu, sigma, hstep, htail, tau, bg0, a=1):
        """
        <NAME>'s HPGe peak shape function
        """
        # make sure the fractional amplitude parameters stay reasonable...
        if htail < 0 or htail > 1:
            return np.zeros_like(x)
        if hstep < 0 or hstep > 1:
            return np.zeros_like(x)

        bg_term = bg0  #+ x*bg1
        if np.any(bg_term < 0):
            return np.zeros_like(x)

        # compute the step and the low energy tail
        step = a * hstep * erfc((x - mu) / (sigma * np.sqrt(2)))
        le_tail = a * htail
        le_tail *= erfc((x - mu) / (sigma * np.sqrt(2)) + sigma / (tau * np.sqrt(2)))
        le_tail *= np.exp((x - mu) / tau)
        le_tail /= (2 * tau * np.exp(-(sigma / (np.sqrt(2) * tau))**2))

        # add up all the peak shape components
        return (1 - htail) * gauss(x, mu, sigma, a) + bg_term + step + le_tail

    # 0.5 keV bins around the 1764.49 keV line
    hist, bins, var = pgh.get_hist(df['e_cal'], range=(1740,1780), dx=0.5)
    pgh.plot_hist(hist, bins, var=hist, label="data")
    pars, cov = pga.fit_hist(radford_peak, hist, bins, var=hist, guess=[1761, 1.85, 0.001, 0.02, 5, 1, 4000])
    pgu.print_fit_results(pars, cov, radford_peak)
    pgu.plot_func(radford_peak, pars, label="chi2 fit", color='red')
    #x_vals = np.arange(1740,1780,0.5)
    #plt.plot(x_vals, radford_peak(x_vals, 1761, 1.85, .001, 0.02, 5, 1, 4000))

    # NOTE(review): pars[1] is sigma, so this is 2*sigma, not 2.355*sigma —
    # confirm this is the intended "FWHM" convention.
    FWHM = '%.2f' % Decimal(pars[1]*2)
    FWHM_uncertainty = '%.2f' % Decimal(np.sqrt(cov[1][1])*2)
    peak = '%.2f' % Decimal(pars[0])
    peak_uncertainty = '%.2f' % Decimal(np.sqrt(cov[0][0]))
    residual = '%.2f' % (1764.49 - float(peak))

    #chi_2_element_list = []
    #for i in range(len(hist)):
        #chi_2_element = abs((radford_peak(bins[i], *pars) - hist[i])**2/radford_peak(bins[i], *pars))
        #chi_2_element_list.append(chi_2_element)
    #chi_2 = sum(chi_2_element_list)
    #reduced_chi_2 = '%.2f' % Decimal(chi_2/len(hist))

    # Legend entries rendered as colored proxy lines.
    label_01 = '1764.49 keV peak fit'
    label_02 = 'FWHM = '+str(FWHM)+r' $\pm$ '+str(FWHM_uncertainty)
    label_03 = 'Peak = '+str(peak)+r' $\pm$ '+str(peak_uncertainty)
    label_04 = 'Residual = '+str(residual)+r' $\pm$ '+str(peak_uncertainty)
    colors = ['red', 'red','red', 'red']
    lines = [Line2D([0], [0], color=c, lw=2) for c in colors]
    labels = [label_01, label_02, label_03, label_04]

    plt.xlim(1740,1780)
    plt.ylim(0,plt.ylim()[1])
    plt.xlabel('Energy (keV)', ha='right', x=1.0)
    plt.ylabel('Counts', ha='right', y=1.0)
    plt.tight_layout()
    #plt.semilogy()
    plt.legend(lines, labels, frameon=False, loc='upper right', fontsize='small')
    plt.show()
def peak_1460():
    """Fit the 1460.82 keV (K-40) line of a first-pass-calibrated spectrum.

    Same flow as peak_2615(): load the run spectrum named on the command
    line, fit a Radford peak shape, and plot FWHM/centroid/residual.
    """
    if(len(sys.argv) != 2):
        print('Usage: fit_bkg_peaks.py [run number]')
        sys.exit()

    with open("runDB.json") as f:
        runDB = json.load(f)
    meta_dir = os.path.expandvars(runDB["meta_dir"])
    tier_dir = os.path.expandvars(runDB["tier_dir"])

    #df = pd.read_hdf("{}/Spectrum_280-329.hdf5".format(meta_dir), key="df")
    df = pd.read_hdf("{}/Spectrum_{}.hdf5".format(meta_dir,sys.argv[1]), key="df")
    #df = pd.read_hdf("{}/t2_run{}.h5".format(tier_dir,sys.argv[1]))
    #df['e_cal'] = 0.4054761904761905 * df['e_ftp'] + 3.113095238095184

    def gauss(x, mu, sigma, A=1):
        """
        define a gaussian distribution, w/ args: mu, sigma, area (optional).
        """
        return A * (1. / sigma / np.sqrt(2 * np.pi)) * np.exp(-(x - mu)**2 / (2. * sigma**2))

    def radford_peak(x, mu, sigma, hstep, htail, tau, bg0, a=1):
        """
        <NAME>'s HPGe peak shape function
        """
        # make sure the fractional amplitude parameters stay reasonable...
        if htail < 0 or htail > 1:
            return np.zeros_like(x)
        if hstep < 0 or hstep > 1:
            return np.zeros_like(x)

        bg_term = bg0  #+ x*bg1
        if np.any(bg_term < 0):
            return np.zeros_like(x)

        # compute the step and the low energy tail
        step = a * hstep * erfc((x - mu) / (sigma * np.sqrt(2)))
        le_tail = a * htail
        le_tail *= erfc((x - mu) / (sigma * np.sqrt(2)) + sigma / (tau * np.sqrt(2)))
        le_tail *= np.exp((x - mu) / tau)
        le_tail /= (2 * tau * np.exp(-(sigma / (np.sqrt(2) * tau))**2))

        # add up all the peak shape components
        return (1 - htail) * gauss(x, mu, sigma, a) + bg_term + step + le_tail

    # 0.5 keV bins around the 1460.82 keV line
    hist, bins, var = pgh.get_hist(df['e_cal'], range=(1420,1500), dx=0.5)
    pgh.plot_hist(hist, bins, var=hist, label="data")
    pars, cov = pga.fit_hist(radford_peak, hist, bins, var=hist, guess=[1460.8, 1.95, 0.001, 0.03, 4, 1, 100000])
    pgu.print_fit_results(pars, cov, radford_peak)
    pgu.plot_func(radford_peak, pars, label="chi2 fit", color='red')
    #x_vals = np.arange(1420,1500,0.5)
    #plt.plot(x_vals, radford_peak(x_vals, 1460.8, 2.95, .001, 0.03, 5, 1, 100000))

    # NOTE(review): pars[1] is sigma, so this is 2*sigma, not 2.355*sigma —
    # confirm this is the intended "FWHM" convention.
    FWHM = '%.2f' % Decimal(pars[1]*2)
    FWHM_uncertainty = '%.2f' % Decimal(np.sqrt(cov[1][1])*2)
    peak = '%.2f' % Decimal(pars[0])
    peak_uncertainty = '%.2f' % Decimal(np.sqrt(cov[0][0]))
    residual = '%.2f' % (1460.82 - float(peak))

    #chi_2_element_list = []
    #for i in range(len(hist)):
        #chi_2_element = abs((radford_peak(bins[i], *pars) - hist[i])**2/radford_peak(bins[i], *pars))
        #chi_2_element_list.append(chi_2_element)
    #chi_2 = sum(chi_2_element_list)
    #reduced_chi_2 = '%.2f' % Decimal(chi_2/len(hist))

    # Legend entries rendered as colored proxy lines.
    label_01 = '1460.82 keV peak fit'
    label_02 = 'FWHM = '+str(FWHM)+r' $\pm$ '+str(FWHM_uncertainty)
    label_03 = 'Peak = '+str(peak)+r' $\pm$ '+str(peak_uncertainty)
    label_04 = 'Residual = '+str(residual)+r' $\pm$ '+str(peak_uncertainty)
    colors = ['red', 'red','red', 'red']
    lines = [Line2D([0], [0], color=c, lw=2) for c in colors]
    labels = [label_01, label_02, label_03, label_04]

    plt.xlim(1420,1500)
    plt.ylim(0,plt.ylim()[1])
    plt.xlabel('Energy (keV)', ha='right', x=1.0)
    plt.ylabel('Counts', ha='right', y=1.0)
    plt.tight_layout()
    plt.legend(lines, labels, frameon=False, loc='upper right', fontsize='small')
    #plt.semilogy()
    plt.show()
def peak_609():
if(len(sys.argv) != 2):
print('Usage: fit_bkg_peaks.py [run number]')
sys.exit()
with open("runDB.json") as f:
runDB = json.load(f)
meta_dir = os.path.expandvars(runDB["meta_dir"])
#df = pd.read_hdf("{}/Spectrum_280-329.hdf5".format(meta_dir), key="df")
df = pd.read_hdf("{}/Spectrum_{}.hdf5".format(meta_dir,sys.argv[1]), key="df")
def gauss(x, mu, sigma, A=1):
"""
define a gaussian distribution, w/ args: mu, sigma, area (optional).
"""
return A * (1. / sigma / np.sqrt(2 * np.pi)) * np.exp(-(x - mu)**2 / (2. * sigma**2))
def radford_peak(x, mu, sigma, hstep, htail, tau, bg0, a=1):
"""
<NAME>'s HPGe peak shape function
"""
# make sure the fractional amplitude | |
condition(self):
""" Return a non-numerical health status """
status = "unknown"
if self.getHitPoints() <= 0:
status = "dead"
elif self.getHitPoints() < self.getMaxHP() * 0.10:
# Less than 10% of health remains
status = "desperate"
elif self.getHitPoints() < self.getMaxHP() * 0.25:
# 11-25% of health remains
status = "injured"
elif self.getHitPoints() < self.getMaxHP() * 0.50:
# 26-50% of health remains
status = "drained"
elif self.getHitPoints() < self.getMaxHP() * 0.75:
# 51-75% of health remains
status = "fatigued"
elif self.getHitPoints() < self.getMaxHP() * 0.99:
# 76-99% of health remains
status = "healthy"
elif self.getHitPoints() == self.getMaxHP():
# totally healthy
status = "fresh"
return status
def dodge(self, basePercent=100, dodgeTxt=""):
    """Return True when the character dodges.

    * A higher basePercent makes the dodge harder.
    * Odds improve with dexterity, rogue class, dodge skill, and dodge bonus.
    """
    roll = random.randint(1, 100)
    classMult = 2 if self.getClassName().lower() == "rogue" else 1
    skillMult = self._dodge + self._dodgeBonus
    advantage = self.getDexterity() * (classMult + skillMult) / 10
    dodgeCalc = (roll + advantage) * 2

    result = "dodge failed"
    if dodgeCalc > basePercent:
        result = "dodged"
        if dodgeTxt != "":
            dodgeTxt += " "
    dLog(
        "{0}{1} - character dodge calc ({2}) >? {0}odds ({3})".format(
            dodgeTxt, result, dodgeCalc, basePercent
        ),
        self._instanceDebug,
    )
    return result == "dodged"
def acDamageReduction(self, damage):
    """Scale incoming damage down according to armor class (5% per AC point)."""
    effectiveAc = self.getAc()
    # Broken protective gear no longer contributes its AC.
    for gear in self.getEquippedProtection():
        if gear.isBroken():
            effectiveAc -= gear.getAc()
    # Percentage-based reduction; damage never goes negative.
    damage -= int(damage * (0.05 * effectiveAc))
    return max(0, damage)
def getCircleSecs(self):
    """Seconds a creature waits after a successful circle.

    Scales with the character's level (lower bound) and dexterity
    (upper bound).
    """
    return random.randint(self.getLevel(), 20 + self.getDexterity())
def damageIsLethal(self, num=0):
    """True when taking num damage would drop hit points to zero or below."""
    return num >= self.getHitPoints()
def takeDamage(self, damage=0, nokill=False):
    """ Take damage and check for death """
    self.subtractHP(damage)
    # nokill keeps the character at the brink instead of letting them die.
    if nokill and self.getHitPoints() <= 0:
        self.setNearDeathExperience()
    # Capture the condition string before any death processing resets HP.
    condition = self.condition()
    dLog(self.getName() + " takes " + str(damage) + " damage", self._instanceDebug)
    self.save()
    if self.getHitPoints() <= 0:
        if self.isDm():
            # DMs never die; restore them to max HP instead.
            self._spoolOut(
                "You would be dead if you weren't a dm."
                + " Resetting hp to maxhp.\n"
            )
            self.setHitPoints(self._maxhp)
        else:
            self.processDeath()
    return condition
def obituary(self):
    """Broadcast a character's death to the game and record it in the log."""
    announcement = self.describe() + " has died"
    self.client.getGameObj().gameMsg(self.client.txtBanner(announcement) + "\n")
    logger.info("obituary: " + announcement)
def processDeath(self, calculateLevelsToLose=True, silent=False):
    """ Do all the things related to dying """
    # Penalty defaults to a single level when the calculation is disabled.
    levelsToLose = 1
    if calculateLevelsToLose:
        levelsToLose = self.levelsToLose()
    for numlvl in range(1, levelsToLose + 1):
        self.levelDownStats()
        # Never drop below level 1.
        if self.getLevel() > 1:
            self.subtractlevel()
    # Death clears ailments and restores full health.
    self.setHitPoints(self.getMaxHP())
    self.setPoisoned(False)
    self.setPlagued(False)
    self.save()
    if not silent:  # primarily used for testing hundreds of deaths
        self._spoolOut("You are dead!\n")
        self.obituary()
    # return to starting room or guild
    # NOTE(review): 58 appears to be the respawn room id — confirm.
    self.client.gameObj.joinRoom(58, self)
    self._spoolOut(self.getRoom().display(self))
    return True
def searchSucceeds(self, obj, basePercent=30):
    """Return True when a search attempt reveals obj.

    Success chance is based on dexterity, level, and luck; there is
    always a minimum 5% chance of success.
    """
    logPrefix = __class__.__name__ + " searchSucceeds: "

    if self.canSeeHidden():
        dLog(logPrefix + "Pass - Character can see hidden", self._instanceDebug)
        return True

    odds = basePercent + self.getDexterity() + self.getLevel() + self.getLuck()
    if obj.getType() in ("Creature", "Character"):
        # +/- 10% per level difference
        odds += (self.getLevel() - obj.getLevel()) * 10

    if random.randint(1, 20) == 1:  # Always a 5 percent chance of success
        dLog(logPrefix + "Pass - Always 5% Chance", self._instanceDebug)
        return True

    roll = random.randint(1, 100)
    if roll <= odds:
        dLog(
            logPrefix + "Pass - Roll - " + str(roll) + " < " + str(odds),
            self._instanceDebug,
        )
        return True

    dLog(logPrefix + "Failed", self._instanceDebug)
    return False
def equipFist(self):
    """Equip the default weapon: a fist.

    The fist is a special weapon that is not tracked in any inventory.
    """
    fist = Weapon()
    fist.setName("fist")
    fist._article = "a"
    fist._singledesc = "fist"
    fist.setMaximumDamage(self.getFistDamage())
    self.equip(fist)
def getFistDamage(self):
    """Damage dealt by the default fist weapon.

    Based on strength and level, plus the class base damage, minus a
    small random amount; never negative.
    """
    base = int((self.getStrength() / 5) + (self.getLevel() / 2))
    base += self.classDict[self.getClassKey()]["baseDamage"]
    base -= random.randint(0, 3)
    return max(0, base)
def equip(self, obj):
    """Equip obj in its slot, vacating whatever occupied that slot first.

    Returns True on success (or when obj is already equipped)."""
    # Deal with currently equipped item
    equippedObj = getattr(self, obj.getEquippedSlotName())
    if equippedObj is None:  # Nothing is currently equipped
        pass
    elif equippedObj == obj:  # desired object is already in use
        return True
    elif obj is not None:  # wearing some other item
        # NOTE(review): passes the new obj rather than equippedObj; unEquip
        # only uses it to derive the slot name, which is the same for both.
        self.unEquip(obj)  # Pass object so we know which slot to vacate
    slotName = obj.getEquippedSlotName()
    if slotName:
        setattr(self, slotName, obj)
        self.setAc()
        return True
    return False
def unEquip(self, obj=None, slotName=""):
    """Vacate an equipment slot, given either the object or the slot name.

    Re-equips the fist when no weapon remains equipped.  Returns True on
    success, False when no slot could be determined.
    """
    # Derive the slot from the object when no slot name was supplied.
    if slotName == "" and obj and obj.isEquippable():
        slotName = obj.getEquippedSlotName()
    if slotName == "":
        return False
    setattr(self, slotName, None)
    self.setAc()
    # Never leave the character weaponless — fall back to fists.
    if self.getEquippedWeapon() is None:
        self.equipFist()
    return True
def attemptToHide(self):
    """Try to hide; sets hidden and returns True on success.

    Odds scale with level and dexterity, double for rogues, are halved
    when creatures are present in the room, and are capped at 66%.
    """
    randX = random.randint(0, 99)

    hidechance = self.getLevel() * 20 + self.dexterity
    if self.getClassName().lower() == "rogue":
        hidechance *= 2  # double the chance of success for rogues
    # consider additional bonus for guild status
    # half the chance of success if there are already creatures in the room
    if len(self._roomObj.getCreatureList()) > 0:
        hidechance /= 2
    # Bug fix: max(66, ...) imposed a *floor* of 66%, guaranteeing success
    # ~66% of the time even for a level-1 character; min() actually caps
    # the chance at 66% as the comment intended.
    hidechance = min(66, hidechance)  # Chance to hide tops out at 66%

    if hidechance > randX:
        self.setHidden()
        return True
    return False
def hearsWhispers(self):
    """Whether the character can overhear whispers in a room.

    todo: make this more random and skill/luck based.
    """
    return self.getClassName().lower() == "ranger"
def adjustPrice(self, price):
    """Hook for attribute-based price adjustment.

    Currently a pass-through; charisma/alignment/luck adjustments may be
    added here later.  Non-character price changes occur elsewhere.
    """
    return price
def setMaxWeightForCharacter(self):
    """Recompute the inventory weight limit from attributes.

    10 units per point of strength, with an effective strength floor of 7.
    """
    self.setInventoryMaxWeight(10 * max(7, int(self.strength)))
def fumbles(self, basePercent=20):
    """Return True if the player fumbles their attack.

    * A fumble is a trip while attacking: the player's weapon and shield
      are unequipped and attacks pause for 30 seconds.
    * Chance is random, reduced by dexterity; fist attacks never fumble.
    """
    logPrefix = "char.fumbles: "
    if self.isAttackingWithFist():
        return False

    fumbleRoll = random.randint(1, 100)
    threshold = basePercent - self.getDexterity()

    tripped = False
    if fumbleRoll == 1:  # always a 1% change of fumbling
        dLog(logPrefix + "Bad luck - 1% fumble triggered", self._instanceDebug)
        tripped = True
    elif fumbleRoll < threshold:
        dLog(
            logPrefix
            + "Standard Roll: "
            + str(fumbleRoll)
            + " < "
            + str(threshold),
            self._instanceDebug,
        )
        tripped = True

    if tripped:
        self.unEquip(slotName="_equippedWeapon")
        self.unEquip(slotName="_equippedShield")
        self.setSecondsUntilNextAttack(30)
    return tripped
def discardsEquippedWeapon(self):
    """Unequip the current weapon and drop it into the character's room."""
    if self.isAttackingWithFist():
        return True  # fists can't be dropped
    weapon = self.getEquippedWeapon()
    self.unEquip(slotName="_equippedWeapon")
    self.removeFromInventory(weapon)
    room = self.getRoom()
    if room:
        room.addToInventory(weapon)
    return True
def possibilyLoseHiddenWhenMoving(self):
    """Possibly drop out of hiding while moving.

    * when moving, there is a chance that you will not remain hidden
    * base chance of remaining hidden is 60% + dex
    * rangers and rogues get an improved chance (+dex again); a
      ranger/rogue with 20 dex has a 99% chance of staying hidden
    Returns False when not hidden; True when the check was made.
    """
    # Bug fix: the original tested "not self.isHidden", which evaluated the
    # bound method object itself (always truthy), so the early return never
    # fired.  The class's convention is is*() getter methods (isPoisoned(),
    # isDm(), isAttackingWithFist(), ...), so call it.
    if not self.isHidden():
        return False
    oddsOfStayingHidden = 60 + self.getDexterity()
    if self.getClassName() in ["rogue", "ranger"]:
        oddsOfStayingHidden += self.getDexterity()
    if random.randint(1, 100) >= oddsOfStayingHidden:
        self.setHidden(False)
    return True
def processPoisonAndRegen(self, regenInterval=90, poisonInterval=60):
""" At certain intervals, poison and hp regeneration kick in
* poison should be faster and/or stronger than regen """
conAdj = self.getConstitution() - 12
intAdj = self.getIntelligence() - 12
regenHp = max(1, int(self.getMaxHP() / 10) + conAdj)
regenMana = max(1, int(self.getMaxMana() / 8) + intAdj)
poisonHp = max(1, int(self.getLevel() - conAdj))
if not self.isPlagued(): # no regen if plagued
# Check the time
if self.getLastRegenDate() == getNeverDate():
regenSecsRemaining = 0
else:
regenSecsRemaining = regenInterval - secsSinceDate(
self.getLastRegenDate()
)
dLog(
"regen counter: "
+ str(regenSecsRemaining)
+ " secs - "
+ str(self.getLastRegenDate())
+ " - "
+ str(secsSinceDate(self.getLastRegenDate())),
False,
)
if regenSecsRemaining <= 0:
self.addHP(regenHp)
self.addMana(regenMana)
self.setLastRegen()
if self.isPoisoned(): # take damage if poisoned
# Check the time
if self.getLastPoisonDate() == getNeverDate():
poisonSecsRemaining = 0
else:
poisonSecsRemaining = poisonInterval - secsSinceDate(
self.getLastPoisonDate()
)
dLog("poison cntr: " + str(regenSecsRemaining) + " secs", False)
if poisonSecsRemaining <= 0:
self._spoolOut(
"As | |
:class:`int`
:param invite_link: If user has joined the chat using an invite link, the invite link; may be null, defaults to None
:type invite_link: :class:`ChatInviteLink`, optional
:param old_chat_member: Previous chat member
:type old_chat_member: :class:`ChatMember`
:param new_chat_member: New chat member
:type new_chat_member: :class:`ChatMember`
"""
ID: str = Field("updateChatMember", alias="@type")
chat_id: int
actor_user_id: int
date: int
invite_link: typing.Optional[ChatInviteLink] = None
old_chat_member: ChatMember
new_chat_member: ChatMember
@staticmethod
def read(q: dict) -> UpdateChatMember:
return UpdateChatMember.construct(**q)
class UpdateChatMessageSender(Update):
    """
    The message sender that is selected to send messages in a chat has changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param message_sender_id: New value of message_sender_id; may be null if the user can't change message sender, defaults to None
    :type message_sender_id: :class:`MessageSender`, optional
    """

    ID: str = Field("updateChatMessageSender", alias="@type")
    chat_id: int
    message_sender_id: typing.Optional[MessageSender] = None

    @staticmethod
    def read(q: dict) -> UpdateChatMessageSender:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatMessageSender.construct(**q)
class UpdateChatMessageTtl(Update):
    """
    The message Time To Live setting for a chat was changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param message_ttl: New value of message_ttl
    :type message_ttl: :class:`int`
    """

    ID: str = Field("updateChatMessageTtl", alias="@type")
    chat_id: int
    message_ttl: int

    @staticmethod
    def read(q: dict) -> UpdateChatMessageTtl:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatMessageTtl.construct(**q)
class UpdateChatNotificationSettings(Update):
    """
    Notification settings for a chat were changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param notification_settings: The new notification settings
    :type notification_settings: :class:`ChatNotificationSettings`
    """

    ID: str = Field("updateChatNotificationSettings", alias="@type")
    chat_id: int
    notification_settings: ChatNotificationSettings

    @staticmethod
    def read(q: dict) -> UpdateChatNotificationSettings:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatNotificationSettings.construct(**q)
class UpdateChatOnlineMemberCount(Update):
    """
    The number of online group members has changed. This update with non-zero count is sent only for currently opened chats. There is no guarantee that it will be sent just after the count has changed

    :param chat_id: Identifier of the chat
    :type chat_id: :class:`int`
    :param online_member_count: New number of online members in the chat, or 0 if unknown
    :type online_member_count: :class:`int`
    """

    ID: str = Field("updateChatOnlineMemberCount", alias="@type")
    chat_id: int
    online_member_count: int

    @staticmethod
    def read(q: dict) -> UpdateChatOnlineMemberCount:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatOnlineMemberCount.construct(**q)
class UpdateChatPendingJoinRequests(Update):
    """
    The chat pending join requests were changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param pending_join_requests: The new data about pending join requests; may be null, defaults to None
    :type pending_join_requests: :class:`ChatJoinRequestsInfo`, optional
    """

    ID: str = Field("updateChatPendingJoinRequests", alias="@type")
    chat_id: int
    pending_join_requests: typing.Optional[ChatJoinRequestsInfo] = None

    @staticmethod
    def read(q: dict) -> UpdateChatPendingJoinRequests:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatPendingJoinRequests.construct(**q)
class UpdateChatPermissions(Update):
    """
    Chat permissions was changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param permissions: The new chat permissions
    :type permissions: :class:`ChatPermissions`
    """

    ID: str = Field("updateChatPermissions", alias="@type")
    chat_id: int
    permissions: ChatPermissions

    @staticmethod
    def read(q: dict) -> UpdateChatPermissions:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatPermissions.construct(**q)
class UpdateChatPhoto(Update):
    """
    A chat photo was changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param photo: The new chat photo; may be null, defaults to None
    :type photo: :class:`ChatPhotoInfo`, optional
    """

    ID: str = Field("updateChatPhoto", alias="@type")
    chat_id: int
    photo: typing.Optional[ChatPhotoInfo] = None

    @staticmethod
    def read(q: dict) -> UpdateChatPhoto:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatPhoto.construct(**q)
class UpdateChatPosition(Update):
    """
    The position of a chat in a chat list has changed. Instead of this update updateChatLastMessage or updateChatDraftMessage might be sent

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param position: New chat position. If new order is 0, then the chat needs to be removed from the list
    :type position: :class:`ChatPosition`
    """

    ID: str = Field("updateChatPosition", alias="@type")
    chat_id: int
    position: ChatPosition

    @staticmethod
    def read(q: dict) -> UpdateChatPosition:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatPosition.construct(**q)
class UpdateChatReadInbox(Update):
    """
    Incoming messages were read or the number of unread messages has been changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param last_read_inbox_message_id: Identifier of the last read incoming message
    :type last_read_inbox_message_id: :class:`int`
    :param unread_count: The number of unread messages left in the chat
    :type unread_count: :class:`int`
    """

    ID: str = Field("updateChatReadInbox", alias="@type")
    chat_id: int
    last_read_inbox_message_id: int
    unread_count: int

    @staticmethod
    def read(q: dict) -> UpdateChatReadInbox:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatReadInbox.construct(**q)
class UpdateChatReadOutbox(Update):
    """
    Outgoing messages were read

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param last_read_outbox_message_id: Identifier of last read outgoing message
    :type last_read_outbox_message_id: :class:`int`
    """

    ID: str = Field("updateChatReadOutbox", alias="@type")
    chat_id: int
    last_read_outbox_message_id: int

    @staticmethod
    def read(q: dict) -> UpdateChatReadOutbox:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatReadOutbox.construct(**q)
class UpdateChatReplyMarkup(Update):
    """
    The default chat reply markup was changed. Can occur because new messages with reply markup were received or because an old reply markup was hidden by the user

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param reply_markup_message_id: Identifier of the message from which reply markup needs to be used; 0 if there is no default custom reply markup in the chat
    :type reply_markup_message_id: :class:`int`
    """

    ID: str = Field("updateChatReplyMarkup", alias="@type")
    chat_id: int
    reply_markup_message_id: int

    @staticmethod
    def read(q: dict) -> UpdateChatReplyMarkup:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatReplyMarkup.construct(**q)
class UpdateChatTheme(Update):
    """
    The chat theme was changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param theme_name: The new name of the chat theme; may be empty if theme was reset to default
    :type theme_name: :class:`str`
    """

    ID: str = Field("updateChatTheme", alias="@type")
    chat_id: int
    theme_name: str

    @staticmethod
    def read(q: dict) -> UpdateChatTheme:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatTheme.construct(**q)
class UpdateChatThemes(Update):
    """
    The list of available chat themes has changed

    :param chat_themes: The new list of chat themes
    :type chat_themes: :class:`list[ChatTheme]`
    """

    ID: str = Field("updateChatThemes", alias="@type")
    chat_themes: list[ChatTheme]

    @staticmethod
    def read(q: dict) -> UpdateChatThemes:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatThemes.construct(**q)
class UpdateChatTitle(Update):
    """
    The title of a chat was changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param title: The new chat title
    :type title: :class:`str`
    """

    ID: str = Field("updateChatTitle", alias="@type")
    chat_id: int
    title: str

    @staticmethod
    def read(q: dict) -> UpdateChatTitle:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatTitle.construct(**q)
class UpdateChatUnreadMentionCount(Update):
    """
    The chat unread_mention_count has changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param unread_mention_count: The number of unread mention messages left in the chat
    :type unread_mention_count: :class:`int`
    """

    ID: str = Field("updateChatUnreadMentionCount", alias="@type")
    chat_id: int
    unread_mention_count: int

    @staticmethod
    def read(q: dict) -> UpdateChatUnreadMentionCount:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatUnreadMentionCount.construct(**q)
class UpdateChatVideoChat(Update):
    """
    A chat video chat state has changed

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param video_chat: New value of video_chat
    :type video_chat: :class:`VideoChat`
    """

    ID: str = Field("updateChatVideoChat", alias="@type")
    chat_id: int
    video_chat: VideoChat

    @staticmethod
    def read(q: dict) -> UpdateChatVideoChat:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateChatVideoChat.construct(**q)
class UpdateConnectionState(Update):
    """
    The connection state has changed. This update must be used only to show a human-readable description of the connection state

    :param state: The new connection state
    :type state: :class:`ConnectionState`
    """

    ID: str = Field("updateConnectionState", alias="@type")
    state: ConnectionState

    @staticmethod
    def read(q: dict) -> UpdateConnectionState:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateConnectionState.construct(**q)
class UpdateDeleteMessages(Update):
    """
    Some messages were deleted

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`
    :param message_ids: Identifiers of the deleted messages
    :type message_ids: :class:`list[int]`
    :param is_permanent: True, if the messages are permanently deleted by a user (as opposed to just becoming inaccessible)
    :type is_permanent: :class:`bool`
    :param from_cache: True, if the messages are deleted only from the cache and can possibly be retrieved again in the future
    :type from_cache: :class:`bool`
    """

    ID: str = Field("updateDeleteMessages", alias="@type")
    chat_id: int
    message_ids: list[int]
    is_permanent: bool
    from_cache: bool

    @staticmethod
    def read(q: dict) -> UpdateDeleteMessages:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateDeleteMessages.construct(**q)
class UpdateDiceEmojis(Update):
    """
    The list of supported dice emojis has changed

    :param emojis: The new list of supported dice emojis
    :type emojis: :class:`list[str]`
    """

    ID: str = Field("updateDiceEmojis", alias="@type")
    emojis: list[str]

    @staticmethod
    def read(q: dict) -> UpdateDiceEmojis:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateDiceEmojis.construct(**q)
class UpdateFavoriteStickers(Update):
    """
    The list of favorite stickers was updated

    :param sticker_ids: The new list of file identifiers of favorite stickers
    :type sticker_ids: :class:`list[int]`
    """

    ID: str = Field("updateFavoriteStickers", alias="@type")
    sticker_ids: list[int]

    @staticmethod
    def read(q: dict) -> UpdateFavoriteStickers:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateFavoriteStickers.construct(**q)
class UpdateFile(Update):
    """
    Information about a file was updated

    :param file: New data about the file
    :type file: :class:`File`
    """

    ID: str = Field("updateFile", alias="@type")
    file: File

    @staticmethod
    def read(q: dict) -> UpdateFile:
        # Deserialize a raw TDLib dict; construct() skips pydantic validation.
        return UpdateFile.construct(**q)
class UpdateFileGenerationStart(Update):
"""
The file generation process needs to be started by the application
:param generation_id: Unique identifier for the generation process
:type generation_id: :class:`int`
:param | |
nav_only["NAV_fx"][0]) * 100
nav_only["NAV_ret"] = nav_only["NAV_norm"].pct_change()
table = {}
table["meta"] = {}
table["meta"]["start_date"] = (nav_only.index[0]).strftime("%m-%d-%Y")
table["meta"]["end_date"] = nav_only.index[-1].strftime("%m-%d-%Y")
table["meta"]["number_of_days"] = (
(nav_only.index[-1] - nav_only.index[0])).days
table["meta"]["count_of_points"] = nav_only["NAV_fx"].count().astype(float)
table["NAV"] = {}
table["NAV"]["start"] = nav_only["NAV_fx"][0]
table["NAV"]["end"] = nav_only["NAV_fx"][-1]
table["NAV"]["return"] = (nav_only["NAV_fx"][-1] / nav_only["NAV_fx"][0]) - 1
table["NAV"]["avg_return"] = nav_only["NAV_ret"].mean()
table["NAV"]["ann_std_dev"] = nav_only["NAV_ret"].std() * math.sqrt(365)
# Include avg return + and -
for ticker in tickers:
if messages[ticker] == "ok":
# Include new columns for return and normalized data
nav_only[ticker + "_norm"] = (
nav_only[ticker + "_price"] / nav_only[ticker + "_price"][0]
) * 100
nav_only[ticker + "_ret"] = nav_only[ticker + "_norm"].pct_change()
# Create Metadata
table[ticker] = {}
table[ticker]["start"] = nav_only[ticker + "_price"][0]
table[ticker]["end"] = nav_only[ticker + "_price"][-1]
table[ticker]["return"] = (
nav_only[ticker + "_price"][-1] /
nav_only[ticker + "_price"][0]
) - 1
table[ticker]["comp2nav"] = table[ticker]["return"] - \
table["NAV"]["return"]
table[ticker]["avg_return"] = nav_only[ticker + "_ret"].mean()
table[ticker]["ann_std_dev"] = nav_only[ticker + "_ret"].std() * math.sqrt(
365
)
logging.info("[scatter_json] Success")
# Create Correlation Matrix
filter_col = [col for col in nav_only if col.endswith("_ret")]
nav_matrix = nav_only[filter_col]
corr_matrix = nav_matrix.corr(method="pearson").round(2)
corr_html = corr_matrix.to_html(
classes="table small text-center", border=0, justify="center"
)
# Create series data for HighCharts in scatter plot format
# series : [{
# name: 'NAV / BTC',
# color: '[blue]',
# data: [[-0.01,-0.02], [0.02, 0.04]]
# },{
# name: .....}]
series_hc = []
# Append NAV ticker to list of tickers, remove duplicates
tickers.append("NAV")
tickers = list(set(tickers))
for ticker in tickers:
tmp_dict = {}
if ticker == market:
continue
tmp_dict["name"] = "x: " + market + ", y: " + ticker
tmp_dict["regression"] = 1
tmp_df = nav_matrix[[market + "_ret", ticker + "_ret"]]
tmp_df.fillna(0, inplace=True)
tmp_dict["data"] = list(
zip(tmp_df[market + "_ret"], tmp_df[ticker + "_ret"]))
series_hc.append(tmp_dict)
# Now, let's return the data in the correct format as requested
return jsonify(
{
"chart_data": series_hc,
"messages": messages,
"meta_data": meta_data,
"table": table,
"corr_html": corr_html,
}
)
@api.route("/transactionsandcost_json", methods=["GET"])
@login_required
# Return daily data on transactions and cost for a single ticker
# Takes arguments:
# ticker - single ticker for filter
# start - start date in the format YYMMDD (defaults to 1st transaction on ticker)
# end - end date in the format YYMMDD (defaults to today)
def transactionsandcost_json():
# Get arguments and assign values if needed
if request.method == "GET":
start_date = request.args.get("start")
ticker = request.args.get("ticker")
# Check if start and end dates exist, if not assign values
try:
start_date = datetime.strptime(start_date, "%Y-%m-%d")
except (ValueError, TypeError) as e:
logging.info(
f"[transactionsandcost_json] Warning: {e}, " +
"setting start_date to zero"
)
start_date = datetime(2000, 1, 1)
end_date = request.args.get("end")
try:
end_date = datetime.strptime(end_date, "%Y-%m-%d")
except (ValueError, TypeError) as e:
logging.info(
f"[transactionsandcost_json] Warning: {e}, " +
"setting end_date to now"
)
end_date = datetime.now()
# Get Transaction List
df = transactions_fx()
# Filter only to requested ticker
# if no ticker, use BTC as default, if not BTC then the 1st in list
tickers = df.trade_asset_ticker.unique().tolist()
try:
tickers.remove(current_user.fx())
except ValueError:
pass
if not ticker:
if "BTC" in tickers:
ticker = "BTC"
else:
ticker = tickers[0]
# Filter only the trades for current user
df = df[(df.trade_asset_ticker == ticker)]
# Filter only buy and sells, ignore deposit / withdraw
# For now, including Deposits and Withdrawns as well but
# may consider only B and S as line below.
df = df[(df.trade_operation == "B") | (df.trade_operation == "S")]
df.drop("user_id", axis=1, inplace=True)
# Create a cash_flow column - so we can calculate
# average price for days with multiple buys and/or sells
df["cash_flow"] = df["trade_quantity"] * \
df["trade_price_fx"] + df["trade_fees_fx"]
# Consolidate all transactions from a single day by grouping
df = df.groupby(["date"])[["cash_value", "trade_fees",
"trade_quantity", "cash_value_fx"]].agg([
"sum", "count"])
# Remove the double index for column and consolidate under one row
df.columns = ["_".join(col).strip() for col in df.columns.values]
# Filter to Start and End Dates passed as arguments
mask = (df.index >= start_date) & (df.index <= end_date)
df = df.loc[mask]
# ---------------------------------------------------------
# Get price of ticker passed as argument and merge into df
message = {}
data = price_data_fx(ticker)
# If notification is an error, skip this ticker
if data is None:
messages = data.errors
return jsonify(messages)
data = data.rename(columns={'close_converted': ticker})
data = data.astype(float)
# Create a DF, fill with dates and fill with operation and prices
start_date = df.index.min()
daily_df = pd.DataFrame(columns=["date"])
daily_df["date"] = pd.date_range(start=start_date, end=end_date)
daily_df = daily_df.set_index("date")
# Fill dailyNAV with prices for each ticker
daily_df = pd.merge(daily_df, df, on="date", how="left")
daily_df.fillna(0, inplace=True)
if type(daily_df) != type(data):
data = data.to_frame()
daily_df = pd.merge(daily_df, data, on="date", how="left")
daily_df[ticker].fillna(method="ffill", inplace=True)
message = "ok"
logging.info(f"[transactionandcost_json] {ticker}: Success - Merged OK")
# ---------------------------------------------------------
# Create additional columns on df
# ---------------------------------------------------------
daily_df.loc[daily_df.trade_quantity_sum > 0, "traded"] = 1
daily_df.loc[daily_df.trade_quantity_sum <= 0, "traded"] = 0
daily_df["q_cum_sum"] = daily_df["trade_quantity_sum"].cumsum()
daily_df["cv_cum_sum"] = daily_df["cash_value_sum"].cumsum()
daily_df["cv_fx_cum_sum"] = daily_df["cash_value_fx_sum"].cumsum()
daily_df["avg_cost"] = daily_df["cv_fx_cum_sum"] / daily_df["q_cum_sum"]
daily_df["price_over_cost_usd"] = daily_df[ticker] - daily_df["avg_cost"]
daily_df["price_over_cost_perc"] = (
daily_df[ticker] / daily_df["avg_cost"]) - 1
daily_df["impact_on_cost_usd"] = daily_df["avg_cost"].diff()
daily_df["impact_on_cost_per"] = daily_df["impact_on_cost_usd"] / \
daily_df[ticker]
# Remove cost if position is too small - this avoids large numbers
# Also, remove cost calculation if positions are open (from zero)
daily_df.loc[daily_df.q_cum_sum <= 0.009, "price_over_cost_usd"] = np.NaN
daily_df.loc[daily_df.q_cum_sum <= 0.009, "avg_cost"] = np.NaN
daily_df.loc[daily_df.q_cum_sum.shift(
1) <= 0.009, "impact_on_cost_usd"] = np.NaN
daily_df.loc[daily_df.q_cum_sum <= 0.009, "impact_on_cost_usd"] = np.NaN
daily_df.loc[daily_df.q_cum_sum <= 0.009, "impact_on_cost_per"] = np.NaN
return_dict = {}
return_dict["data"] = daily_df.to_json()
return_dict["message"] = message
return_dict["fx"] = fxsymbol(current_user.fx())
logging.info(f"[transactionandcost_json] Success generating data")
return jsonify(return_dict)
@api.route("/heatmapbenchmark_json", methods=["GET"])
@login_required
# Return Monthly returns for Benchmark and Benchmark difference from NAV
# Takes arguments:
# ticker - single ticker for filter
def heatmapbenchmark_json():
# Get portfolio data first
heatmap_gen, heatmap_stats, years, cols = heatmap_generator()
# Now get the ticker information and run comparison
if request.method == "GET":
ticker = request.args.get("ticker")
# Defaults to king BTC
if not ticker:
ticker = "BTC"
# Gather the first trade date in portfolio and store
# used to match the matrixes later
# Panda dataframe with transactions
df = pd.read_sql_table("trades", db.engine)
df = df[(df.user_id == current_user.username)]
# Filter the df acccoring to filter passed as arguments
df["trade_date"] = pd.to_datetime(df["trade_date"])
start_date = df["trade_date"].min()
start_date -= timedelta(days=1) # start on t-1 of first trade
# Generate price Table now for the ticker and trim to match portfolio
data = price_data_fx(ticker)
mask = data.index >= start_date
data = data.loc[mask]
# If notification is an error, skip this ticker
if data is None:
messages = data.errors
return jsonify(messages)
data = data.rename(columns={'close_converted': ticker+'_price'})
data = data[[ticker+'_price']]
data.sort_index(ascending=True, inplace=True)
data["pchange"] = (data / data.shift(1)) - 1
# Run the mrh function to generate heapmap table
heatmap = mrh.get(data["pchange"], eoy=True)
heatmap_stats = heatmap
cols = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
"eoy",
]
cols_months = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
years = heatmap.index.tolist()
# Create summary stats for the Ticker
heatmap_stats["MAX"] = heatmap_stats[heatmap_stats[cols_months] != 0].max(
axis=1)
heatmap_stats["MIN"] = heatmap_stats[heatmap_stats[cols_months] != 0].min(
axis=1)
heatmap_stats["POSITIVES"] = heatmap_stats[heatmap_stats[cols_months] > 0].count(
axis=1
)
heatmap_stats["NEGATIVES"] = heatmap_stats[heatmap_stats[cols_months] < 0].count(
axis=1
)
heatmap_stats["POS_MEAN"] = heatmap_stats[heatmap_stats[cols_months] > 0].mean(
axis=1
)
heatmap_stats["NEG_MEAN"] = heatmap_stats[heatmap_stats[cols_months] < 0].mean(
axis=1
)
heatmap_stats["MEAN"] = heatmap_stats[heatmap_stats[cols_months] != 0].mean(
axis=1)
# Create the difference between the 2 df - Pandas is cool!
heatmap_difference = heatmap_gen - heatmap
# return (heatmap, heatmap_stats, years, cols, ticker, heatmap_diff)
return simplejson.dumps(
{
"heatmap": heatmap.to_dict(),
"heatmap_stats": heatmap_stats.to_dict(),
"cols": cols,
"years": years,
"ticker": ticker,
"heatmap_diff": heatmap_difference.to_dict(),
},
ignore_nan=True,
default=datetime.isoformat,
)
@api.route("/drawdown_json", methods=["GET"])
@login_required
# Return the largest drawdowns in a time period
# Takes arguments:
# ticker: Single ticker for filter (default = NAV)
# start_date: If none, defaults to all available
# end_date: If none, defaults to today
# n_dd: Top n drawdowns to be calculated
# chart: Boolean - return data for chart
def drawdown_json():
# Get the arguments and store
if request.method == "GET":
start_date = request.args.get("start")
ticker = request.args.get("ticker")
n_dd = request.args.get("n_dd")
chart = request.args.get("chart")
if not ticker:
ticker = "NAV"
ticker = ticker.upper()
if n_dd:
try:
n_dd = int(n_dd)
except TypeError:
n_dd = 2
if not n_dd:
n_dd = 2
| |
return not (self == other)
class EndMaintenanceResult:
  """
  Thrift-generated result struct for the endMaintenance RPC.

  Attributes:
   - statuses: set of HostStatus structs, one per affected host
  """

  # (field-id, wire type, name, type args, default) -- field 1 is an
  # optional set<HostStatus>.
  thrift_spec = (
    None, # 0
    (1, TType.SET, 'statuses', (TType.STRUCT,(HostStatus, HostStatus.thrift_spec)), None, ), # 1
  )

  def __init__(self, statuses=None,):
    self.statuses = statuses

  def read(self, iprot):
    # Deserialize from iprot; use the C fastbinary accelerator when the
    # protocol and transport support it, otherwise decode field by field.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.SET:
          self.statuses = set()
          (_etype231, _size228) = iprot.readSetBegin()
          for _i232 in range(_size228):
            _elem233 = HostStatus()
            _elem233.read(iprot)
            self.statuses.add(_elem233)
          iprot.readSetEnd()
        else:
          # Unexpected wire type for this field id: skip it.
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize to oprot; C accelerator fast path mirrors read().
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('EndMaintenanceResult')
    if self.statuses is not None:
      oprot.writeFieldBegin('statuses', TType.SET, 1)
      oprot.writeSetBegin(TType.STRUCT, len(self.statuses))
      for iter234 in self.statuses:
        iter234.write(oprot)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __hash__(self):
    # NOTE(review): statuses is a built-in set, which is unhashable --
    # hash(self) raises TypeError whenever statuses is not None. Known
    # quirk of thrift-generated code; confirm before using as a dict key.
    value = 17
    value = (value * 31) ^ hash(self.statuses)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class RoleSummaryResult:
  """
  Thrift-generated result struct: per-role summaries.

  Attributes:
   - summaries: set of RoleSummary structs
  """

  # (field-id, wire type, name, type args, default) -- field 1 is an
  # optional set<RoleSummary>.
  thrift_spec = (
    None, # 0
    (1, TType.SET, 'summaries', (TType.STRUCT,(RoleSummary, RoleSummary.thrift_spec)), None, ), # 1
  )

  def __init__(self, summaries=None,):
    self.summaries = summaries

  def read(self, iprot):
    # Deserialize from iprot; C fastbinary fast path when supported.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.SET:
          self.summaries = set()
          (_etype238, _size235) = iprot.readSetBegin()
          for _i239 in range(_size235):
            _elem240 = RoleSummary()
            _elem240.read(iprot)
            self.summaries.add(_elem240)
          iprot.readSetEnd()
        else:
          # Unexpected wire type for this field id: skip it.
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize to oprot; C accelerator fast path mirrors read().
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('RoleSummaryResult')
    if self.summaries is not None:
      oprot.writeFieldBegin('summaries', TType.SET, 1)
      oprot.writeSetBegin(TType.STRUCT, len(self.summaries))
      for iter241 in self.summaries:
        iter241.write(oprot)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __hash__(self):
    # NOTE(review): summaries is a built-in set, which is unhashable --
    # hash(self) raises TypeError whenever summaries is not None. Known
    # quirk of thrift-generated code; confirm before using as a dict key.
    value = 17
    value = (value * 31) ^ hash(self.summaries)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class JobSummaryResult:
  """
  Thrift-generated result struct: per-job summaries.

  Attributes:
   - summaries: set of JobSummary structs
  """

  # (field-id, wire type, name, type args, default) -- field 1 is an
  # optional set<JobSummary>.
  thrift_spec = (
    None, # 0
    (1, TType.SET, 'summaries', (TType.STRUCT,(JobSummary, JobSummary.thrift_spec)), None, ), # 1
  )

  def __init__(self, summaries=None,):
    self.summaries = summaries

  def read(self, iprot):
    # Deserialize from iprot; C fastbinary fast path when supported.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.SET:
          self.summaries = set()
          (_etype245, _size242) = iprot.readSetBegin()
          for _i246 in range(_size242):
            _elem247 = JobSummary()
            _elem247.read(iprot)
            self.summaries.add(_elem247)
          iprot.readSetEnd()
        else:
          # Unexpected wire type for this field id: skip it.
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize to oprot; C accelerator fast path mirrors read().
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('JobSummaryResult')
    if self.summaries is not None:
      oprot.writeFieldBegin('summaries', TType.SET, 1)
      oprot.writeSetBegin(TType.STRUCT, len(self.summaries))
      for iter248 in self.summaries:
        iter248.write(oprot)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __hash__(self):
    # NOTE(review): summaries is a built-in set, which is unhashable --
    # hash(self) raises TypeError whenever summaries is not None. Known
    # quirk of thrift-generated code; confirm before using as a dict key.
    value = 17
    value = (value * 31) ^ hash(self.summaries)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class GetLocksResult:
  """
  Thrift-generated result struct: currently held locks.

  Attributes:
   - locks: set of Lock structs
  """

  # (field-id, wire type, name, type args, default) -- field 1 is an
  # optional set<Lock>.
  thrift_spec = (
    None, # 0
    (1, TType.SET, 'locks', (TType.STRUCT,(Lock, Lock.thrift_spec)), None, ), # 1
  )

  def __init__(self, locks=None,):
    self.locks = locks

  def read(self, iprot):
    # Deserialize from iprot; C fastbinary fast path when supported.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.SET:
          self.locks = set()
          (_etype252, _size249) = iprot.readSetBegin()
          for _i253 in range(_size249):
            _elem254 = Lock()
            _elem254.read(iprot)
            self.locks.add(_elem254)
          iprot.readSetEnd()
        else:
          # Unexpected wire type for this field id: skip it.
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize to oprot; C accelerator fast path mirrors read().
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('GetLocksResult')
    if self.locks is not None:
      oprot.writeFieldBegin('locks', TType.SET, 1)
      oprot.writeSetBegin(TType.STRUCT, len(self.locks))
      for iter255 in self.locks:
        iter255.write(oprot)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __hash__(self):
    # NOTE(review): locks is a built-in set, which is unhashable --
    # hash(self) raises TypeError whenever locks is not None. Known
    # quirk of thrift-generated code; confirm before using as a dict key.
    value = 17
    value = (value * 31) ^ hash(self.locks)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class ConfigSummaryResult:
  """
  Thrift-generated result struct wrapping a single ConfigSummary.

  Attributes:
   - summary: ConfigSummary struct
  """

  # (field-id, wire type, name, type args, default) -- field 1 is an
  # optional ConfigSummary struct.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'summary', (ConfigSummary, ConfigSummary.thrift_spec), None, ), # 1
  )

  def __init__(self, summary=None,):
    self.summary = summary

  def read(self, iprot):
    # Deserialize from iprot; C fastbinary fast path when supported.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.summary = ConfigSummary()
          self.summary.read(iprot)
        else:
          # Unexpected wire type for this field id: skip it.
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize to oprot; C accelerator fast path mirrors read().
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ConfigSummaryResult')
    if self.summary is not None:
      oprot.writeFieldBegin('summary', TType.STRUCT, 1)
      self.summary.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.summary)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class GetPendingReasonResult:
  """
  Thrift-generated result struct: reasons tasks are pending.

  Attributes:
   - reasons: set of PendingReason structs
  """

  # (field-id, wire type, name, type args, default) -- field 1 is an
  # optional set<PendingReason>.
  thrift_spec = (
    None, # 0
    (1, TType.SET, 'reasons', (TType.STRUCT,(PendingReason, PendingReason.thrift_spec)), None, ), # 1
  )

  def __init__(self, reasons=None,):
    self.reasons = reasons

  def read(self, iprot):
    # Deserialize from iprot; C fastbinary fast path when supported.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.SET:
          self.reasons = set()
          (_etype259, _size256) = iprot.readSetBegin()
          for _i260 in range(_size256):
            _elem261 = PendingReason()
            _elem261.read(iprot)
            self.reasons.add(_elem261)
          iprot.readSetEnd()
        else:
          # Unexpected wire type for this field id: skip it.
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Serialize to oprot; C accelerator fast path mirrors read().
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('GetPendingReasonResult')
    if self.reasons is not None:
      oprot.writeFieldBegin('reasons', TType.SET, 1)
      oprot.writeSetBegin(TType.STRUCT, len(self.reasons))
      for iter262 in self.reasons:
        iter262.write(oprot)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __hash__(self):
    # NOTE(review): reasons is a built-in set, which is unhashable --
    # hash(self) raises TypeError whenever reasons is not None. Known
    # quirk of thrift-generated code; confirm before using as a dict key.
    value = 17
    value = (value * 31) ^ hash(self.reasons)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class StartJobUpdateResult:
"""
Result of the startUpdate call.
Attributes:
- key: Unique identifier for the job update.
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'key', (JobUpdateKey, JobUpdateKey.thrift_spec), None, ), # 1
)
def __init__(self, key=None,):
self.key = key
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.key = JobUpdateKey()
self.key.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('StartJobUpdateResult')
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRUCT, 1)
self.key.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ | |
#!/usr/bin/env python3
#
# Copyright (c) <NAME> and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import contextlib
import logging
import os
import subprocess
import sys
import time
from collections import OrderedDict, defaultdict
from textwrap import dedent
import pandas as pd
import pkg_resources
from ipykernel.ipkernel import IPythonKernel
from IPython.core.display import HTML
from IPython.utils.tokenutil import line_at_cursor, token_at_cursor
from jupyter_client import manager
from sos._version import __sos_version__, __version__
from sos.eval import SoS_eval, SoS_exec, interpolate
from sos.syntax import SOS_SECTION_HEADER
from sos.utils import (format_duration, WorkflowDict, env, log_to_file,
short_repr)
from ._version import __version__ as __notebook_version__
from .completer import SoS_Completer
from .inspector import SoS_Inspector
from .step_executor import PendingTasks
from .workflow_executor import runfile, NotebookLoggingHandler
from .magics import SoS_Magics
class FlushableStringIO:
    '''File-like sink that forwards written text to the kernel's iopub
    stream channel. A write whose text begins with ``HINT: `` has that
    first line peeled off and rendered as an HTML hint box instead of
    plain stream output.
    '''

    def __init__(self, kernel, name, *args, **kwargs):
        self.kernel = kernel
        self.name = name

    def write(self, content):
        if content.startswith('HINT: '):
            first, *rest = content.splitlines()
            hint_line = first[6:].strip()
            content = '\n'.join(rest)
            self.kernel.send_response(
                self.kernel.iopub_socket, 'display_data',
                {
                    'metadata': {},
                    'data': {'text/html': HTML(
                        f'<div class="sos_hint">{hint_line}</div>').data}
                })
        if content:
            if self.kernel._meta['capture_result'] is not None:
                self.kernel._meta['capture_result'].append(
                    ('stream', {'name': self.name, 'text': content}))
            self.kernel.send_response(
                self.kernel.iopub_socket, 'stream',
                {'name': self.name, 'text': content})

    def flush(self):
        # Output is forwarded immediately in write(); nothing is buffered.
        pass
# Public API of this module: only the kernel class is exported via `import *`.
__all__ = ['SoS_Kernel']
class subkernel(object):
    '''Record describing a Jupyter subkernel known to SoS.

    Holds the displayed name, the underlying Jupyter kernel name, the
    language name, the prompt background color and a dict of
    language-plugin options.
    '''

    def __init__(self, name=None, kernel=None, language='', color='', options=None):
        self.name = name
        self.kernel = kernel
        self.language = language
        self.color = color
        # BUGFIX: the previous signature used a mutable default (options={}),
        # so all instances created without options shared one dict and a
        # mutation through one instance leaked into the others. Each
        # instance now gets its own dict unless one is supplied.
        self.options = {} if options is None else options

    def __repr__(self):
        return f'subkernel {self.name} with kernel {self.kernel} for language {self.language} with color {self.color}'
def make_transient_msg(msg_type, content, title, append=False, page='Info'):
    '''Translate a display_data or stream message into the payload of a
    transient_display_data message.

    Args:
        msg_type: 'display_data' or 'stream'.
        content: original message content dict.
        title: title shown in the transient display panel.
        append: whether the frontend should append rather than replace.
        page: target panel page name.

    Raises:
        ValueError: for any other msg_type.
    '''
    meta = {'append': append, 'page': page}
    if msg_type == 'display_data':
        return {
            'title': title,
            'data': content.get('data', {}),
            'metadata': meta,
        }
    if msg_type == 'stream':
        text = content['text']
        # stdout and stderr are tagged with their own jupyter mime types
        mime = ('application/vnd.jupyter.stdout'
                if content['name'] == 'stdout'
                else 'application/vnd.jupyter.stderr')
        return {
            'title': title,
            'data': {'text/plain': text, mime: text},
            'metadata': meta,
        }
    raise ValueError(
        f"failed to translate message {msg_type} to transient_display_data message")
class Subkernels(object):
# a collection of subkernels
    def __init__(self, kernel):
        '''Build the list of available subkernels from the installed
        Jupyter kernelspecs, mapping each spec to a language plugin when
        one claims it (e.g. kernel "ir" -> language "R").
        '''
        self.sos_kernel = kernel
        # assumes kernel.supported_languages maps language name -> plugin
        # object with a .supported_kernels dict -- TODO confirm upstream
        self.language_info = kernel.supported_languages
        from jupyter_client.kernelspec import KernelSpecManager
        km = KernelSpecManager()
        specs = km.find_kernel_specs()
        # get supported languages
        self._kernel_list = []
        # kernel name -> (language name, color, plugin options)
        lan_map = {}
        for x in self.language_info.keys():
            for lname, knames in kernel.supported_languages[x].supported_kernels.items():
                for kname in knames:
                    if x != kname:
                        lan_map[kname] = (lname, self.get_background_color(self.language_info[x], lname),
                                          getattr(self.language_info[x], 'options', {}))
        # kernel_list has the following items
        #
        # 1. displayed name
        # 2. kernel name
        # 3. language name
        # 4. color
        for spec in specs.keys():
            if spec == 'sos':
                # the SoS kernel will be default theme color.
                self._kernel_list.append(
                    subkernel(name='SoS', kernel='sos', options={
                        'variable_pattern': r'^\s*[_A-Za-z0-9\.]+\s*$',
                        'assignment_pattern': r'^\s*([_A-Za-z0-9\.]+)\s*=.*$'}))
            elif spec in lan_map:
                # e.g. ir ==> R
                self._kernel_list.append(
                    subkernel(name=lan_map[spec][0], kernel=spec, language=lan_map[spec][0],
                              color=lan_map[spec][1], options=lan_map[spec][2]))
            else:
                # undefined language also use default theme color
                self._kernel_list.append(subkernel(name=spec, kernel=spec))
    def kernel_list(self):
        # Accessor for the live list of subkernel definitions (callers
        # receive the internal list itself, not a copy).
        return self._kernel_list
# now, no kernel is found, name has to be a new name and we need some definition
# if kernel is defined
def add_or_replace(self, kdef):
for idx, x in enumerate(self._kernel_list):
if x.name == kdef.name:
self._kernel_list[idx] = kdef
return self._kernel_list[idx]
else:
self._kernel_list.append(kdef)
return self._kernel_list[-1]
def get_background_color(self, plugin, lan):
# if a single color is defined, it is used for all supported
# languages
if isinstance(plugin.background_color, str):
# return the same background color for all inquiry
return plugin.background_color
else:
# return color for specified, or any color if unknown inquiry is made
return plugin.background_color.get(lan, next(iter(plugin.background_color.values())))
def find(self, name, kernel=None, language=None, color=None, notify_frontend=True):
# find from subkernel name
def update_existing(idx):
x = self._kernel_list[idx]
if (kernel is not None and kernel != x.kernel) or (language not in (None, '', 'None') and language != x.language):
raise ValueError(
f'Cannot change kernel or language of predefined subkernel {name} {x}')
if color is not None:
if color == 'default':
if self._kernel_list[idx].language:
self._kernel_list[idx].color = self.get_background_color(
self.language_info[self._kernel_list[idx].language], self._kernel_list[idx].language)
else:
self._kernel_list[idx].color = ''
else:
self._kernel_list[idx].color = color
if notify_frontend:
self.notify_frontend()
# if the language module cannot be loaded for some reason
if name in self.sos_kernel._failed_languages:
raise self.sos_kernel._failed_languages[name]
# find from language name (subkernel name, which is usually language name)
for idx, x in enumerate(self._kernel_list):
if x.name == name:
if x.name == 'SoS' or x.language or language is None:
update_existing(idx)
return x
else:
if not kernel:
kernel = name
break
# find from kernel name
for idx, x in enumerate(self._kernel_list):
if x.kernel == name:
# if exist language or no new language defined.
if x.language or language is None:
update_existing(idx)
return x
else:
# otherwise, try to use the new language
kernel = name
break
if kernel is not None:
# in this case kernel should have been defined in kernel list
if kernel not in [x.kernel for x in self._kernel_list]:
raise ValueError(
f'Unrecognized Jupyter kernel name {kernel}. Please make sure it is properly installed and appear in the output of command "jupyter kenelspec list"')
# now this a new instance for an existing kernel
kdef = [x for x in self._kernel_list if x.kernel == kernel][0]
if not language:
if color == 'default':
if kdef.language:
color = self.get_background_color(
self.language_info[kdef.language], kdef.language)
else:
color = kdef.color
new_def = self.add_or_replace(subkernel(name, kdef.kernel, kdef.language, kdef.color if color is None else color,
getattr(self.language_info[kdef.language], 'options', {}) if kdef.language else {}))
if notify_frontend:
self.notify_frontend()
return new_def
else:
# if language is defined,
if ':' in language:
# if this is a new module, let us create an entry point and load
from pkg_resources import EntryPoint
mn, attr = language.split(':', 1)
ep = EntryPoint(name=kernel, module_name=mn,
attrs=tuple(attr.split('.')))
try:
plugin = ep.resolve()
self.language_info[name] = plugin
# for convenience, we create two entries for, e.g. R and ir
# but only if there is no existing definition
for supported_lan, supported_kernels in plugin.supported_kernels.items():
for supported_kernel in supported_kernels:
if name != supported_kernel and supported_kernel not in self.language_info:
self.language_info[supported_kernel] = plugin
if supported_lan not in self.language_info:
self.language_info[supported_lan] = plugin
except Exception as e:
raise RuntimeError(
f'Failed to load language {language}: {e}')
#
if color == 'default':
color = self.get_background_color(plugin, kernel)
new_def = self.add_or_replace(subkernel(name, kdef.kernel, kernel, kdef.color if color is None else color,
getattr(plugin, 'options', {})))
else:
# if should be defined ...
if language not in self.language_info:
raise RuntimeError(
f'Unrecognized language definition {language}, which should be a known language name or a class in the format of package.module:class')
#
self.language_info[name] = self.language_info[language]
if color == 'default':
color = self.get_background_color(
self.language_info[name], language)
new_def = self.add_or_replace(subkernel(name, kdef.kernel, language, kdef.color if color is None else color,
getattr(self.language_info[name], 'options', {})))
if notify_frontend:
self.notify_frontend()
return new_def
elif language is not None:
# kernel is not defined and we only have language
if ':' in language:
# if this is a new module, let us create an entry point and load
from pkg_resources import EntryPoint
mn, attr = language.split(':', 1)
ep = EntryPoint(name='__unknown__', module_name=mn,
attrs=tuple(attr.split('.')))
try:
plugin = ep.resolve()
self.language_info[name] = plugin
except Exception as e:
raise RuntimeError(
f'Failed to load language {language}: {e}')
if name in plugin.supported_kernels:
# if name is defined in the module, only search kernels for this language
avail_kernels = [x for x in plugin.supported_kernels[name] if
x in [y.kernel for y in self._kernel_list]]
else:
# otherwise we search all supported kernels
avail_kernels = [x for x in sum(plugin.supported_kernels.values(), []) if
x in [y.kernel for y in self._kernel_list]]
if not avail_kernels:
raise ValueError(
'Failed to find any of the kernels {} supported by language {}. Please make sure it is properly installed and appear in the output of command "jupyter kenelspec list"'.format(
', '.join(sum(plugin.supported_kernels.values(), [])), language))
# use the first available kernel
# find the language that has the kernel
lan_name = list({x: y for x, y in plugin.supported_kernels.items(
) if avail_kernels[0] in y}.keys())[0]
if color == 'default':
color = self.get_background_color(plugin, lan_name)
new_def = self.add_or_replace(subkernel(name, avail_kernels[0], lan_name, self.get_background_color(plugin, lan_name) if color is None else color,
getattr(plugin, 'options', {})))
else:
# if a language name is specified (not a path to module), if should be defined in setup.py
if language not in self.language_info:
raise RuntimeError(
f'Unrecognized language definition {language}')
#
plugin = self.language_info[language]
if language in plugin.supported_kernels:
avail_kernels = | |
<filename>venv/Lib/site-packages/dash/testing/browser.py
# pylint: disable=missing-docstring
import os
import sys
import time
import logging
import warnings
import percy
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import (
WebDriverException,
TimeoutException,
MoveTargetOutOfBoundsException,
)
from dash.testing.wait import (
text_to_equal,
style_to_equal,
contains_text,
until,
)
from dash.testing.dash_page import DashPageMixin
from dash.testing.errors import (
DashAppLoadingError,
BrowserError,
TestingTimeoutError,
)
from dash.testing.consts import SELENIUM_GRID_DEFAULT
logger = logging.getLogger(__name__)
class Browser(DashPageMixin):
    def __init__(
        self,
        browser,
        remote=False,
        remote_url=None,
        headless=False,
        options=None,
        download_path="",
        percy_run=True,
        percy_finalize=True,
        percy_assets_root="",
        wait_timeout=10,
    ):
        """Create a webdriver-backed testing browser.

        Args:
            browser: backend name, e.g. "chrome" or "firefox"; lower-cased
                and used to dispatch to a `_get_<browser>` factory.
            remote: use a remote webdriver when True.
            remote_url: remote webdriver URL; when set and different from
                SELENIUM_GRID_DEFAULT it forces remote mode regardless of
                the `remote` flag.
            headless: run the browser without a visible window.
            options: webdriver options; if a list, its first item is used.
            download_path: directory used for browser downloads.
            percy_run: start a percy visual-testing runner when True.
            percy_finalize: finalize the percy build on exit (otherwise the
                CI job is expected to finalize it).
            percy_assets_root: local root directory for percy static assets.
            wait_timeout: default explicit-wait timeout in seconds.
        """
        self._browser = browser.lower()
        self._remote_url = remote_url
        # remote_url equal to the grid default does NOT force remote mode;
        # only a custom remote_url does.
        self._remote = (
            True if remote_url and remote_url != SELENIUM_GRID_DEFAULT else remote
        )
        self._headless = headless
        self._options = options
        self._download_path = download_path
        self._wait_timeout = wait_timeout
        self._percy_finalize = percy_finalize
        self._percy_run = percy_run
        # get_webdriver returns None on failure, so `until` retries briefly
        self._driver = until(self.get_webdriver, timeout=1)
        self._driver.implicitly_wait(2)
        self._wd_wait = WebDriverWait(self.driver, wait_timeout)
        self._last_ts = 0
        self._url = None
        self._window_idx = 0  # switch browser tabs
        if self._percy_run:
            self.percy_runner = percy.Runner(
                loader=percy.ResourceLoader(
                    webdriver=self.driver,
                    base_url="/assets",
                    root_dir=percy_assets_root,
                )
            )
            self.percy_runner.initialize_build()
        logger.info("initialize browser with arguments")
        logger.info(" headless => %s", self._headless)
        logger.info(" download_path => %s", self._download_path)
        logger.info(" percy asset root => %s", os.path.abspath(percy_assets_root))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, traceback):
try:
self.driver.quit()
if self._percy_run and self._percy_finalize:
logger.info("percy runner finalize build now")
self.percy_runner.finalize_build()
else:
logger.info("percy finalize relies on CI job")
except WebDriverException:
logger.exception("webdriver quit was not successful")
except percy.errors.Error:
logger.exception("percy runner failed to finalize properly")
def visit_and_snapshot(
self,
resource_path,
hook_id,
wait_for_callbacks=True,
assert_check=True,
stay_on_page=False,
):
try:
path = resource_path.lstrip("/")
if path != resource_path:
logger.warning("we stripped the left '/' in resource_path")
self.driver.get("{}/{}".format(self.server_url.rstrip("/"), path))
# wait for the hook_id to present and all callbacks get fired
self.wait_for_element_by_id(hook_id)
self.percy_snapshot(path, wait_for_callbacks=wait_for_callbacks)
if assert_check:
assert not self.driver.find_elements_by_css_selector(
"div.dash-debug-alert"
), "devtools should not raise an error alert"
if not stay_on_page:
self.driver.back()
except WebDriverException as e:
logger.exception("snapshot at resource %s error", path)
raise e
    def percy_snapshot(self, name="", wait_for_callbacks=False):
        """percy_snapshot - visual test api shortcut to `percy_runner.snapshot`.

        It also combines the snapshot `name` with the python version, so the
        same test produces distinct snapshots per interpreter version.
        """
        snapshot_name = "{} - py{}.{}".format(
            name, sys.version_info.major, sys.version_info.minor
        )
        logger.info("taking snapshot name => %s", snapshot_name)
        try:
            if wait_for_callbacks:
                # the extra one second sleep adds safe margin in the context
                # of wait_for_callbacks
                time.sleep(1)
                until(self._wait_for_callbacks, timeout=40, poll=0.3)
        except TestingTimeoutError:
            # API will log the error but this TimeoutError should not block
            # the test execution to continue and it will still do a snapshot
            # as diff reference for the build run.
            logger.error(
                "wait_for_callbacks failed => status of invalid rqs %s",
                list(_ for _ in self.redux_state_rqs if not _.get("responseTime")),
            )
            logger.debug("full content of the rqs => %s", self.redux_state_rqs)
        # the snapshot is taken even when waiting for callbacks timed out
        self.percy_runner.snapshot(name=snapshot_name)
def take_snapshot(self, name):
"""Hook method to take snapshot when a selenium test fails. The
snapshot is placed under.
- `/tmp/dash_artifacts` in linux
- `%TEMP` in windows
with a filename combining test case name and the
running selenium session id
"""
target = "/tmp/dash_artifacts" if not self._is_windows() else os.getenv("TEMP")
if not os.path.exists(target):
try:
os.mkdir(target)
except OSError:
logger.exception("cannot make artifacts")
self.driver.save_screenshot(
"{}/{}_{}.png".format(target, name, self.session_id)
)
def find_element(self, selector):
"""find_element returns the first found element by the css `selector`
shortcut to `driver.find_element_by_css_selector`."""
return self.driver.find_element_by_css_selector(selector)
def find_elements(self, selector):
"""find_elements returns a list of all elements matching the css
`selector`.
shortcut to `driver.find_elements_by_css_selector`.
"""
return self.driver.find_elements_by_css_selector(selector)
def _get_element(self, elem_or_selector):
if isinstance(elem_or_selector, str):
return self.find_element(elem_or_selector)
return elem_or_selector
    def _wait_for(self, method, args, timeout, msg):
        """Abstract generic pattern for explicit WebDriverWait.

        Args:
            method: an expected-condition factory (EC.* or a custom one).
            args: tuple of positional args used to build the condition.
            timeout: seconds; None falls back to the fixture's default wait.
            msg: message attached to the TimeoutException on failure.
        """
        # reuse the shared waiter when no custom timeout is requested
        _wait = (
            self._wd_wait if timeout is None else WebDriverWait(self.driver, timeout)
        )
        logger.debug(
            "method, timeout, poll => %s %s %s",
            method,
            _wait._timeout,  # pylint: disable=protected-access
            _wait._poll,  # pylint: disable=protected-access
        )
        return _wait.until(method(*args), msg)
def wait_for_element(self, selector, timeout=None):
"""wait_for_element is shortcut to `wait_for_element_by_css_selector`
timeout if not set, equals to the fixture's `wait_timeout`."""
return self.wait_for_element_by_css_selector(selector, timeout)
def wait_for_element_by_css_selector(self, selector, timeout=None):
"""Explicit wait until the element is present, timeout if not set,
equals to the fixture's `wait_timeout` shortcut to `WebDriverWait` with
`EC.presence_of_element_located`."""
return self._wait_for(
EC.presence_of_element_located,
((By.CSS_SELECTOR, selector),),
timeout,
"timeout {}s => waiting for selector {}".format(
timeout if timeout else self._wait_timeout, selector
),
)
def wait_for_element_by_id(self, element_id, timeout=None):
"""Explicit wait until the element is present, timeout if not set,
equals to the fixture's `wait_timeout` shortcut to `WebDriverWait` with
`EC.presence_of_element_located`."""
return self._wait_for(
EC.presence_of_element_located,
((By.ID, element_id),),
timeout,
"timeout {}s => waiting for element id {}".format(
timeout if timeout else self._wait_timeout, element_id
),
)
def wait_for_style_to_equal(self, selector, style, val, timeout=None):
"""Explicit wait until the element's style has expected `value` timeout
if not set, equals to the fixture's `wait_timeout` shortcut to
`WebDriverWait` with customized `style_to_equal` condition."""
return self._wait_for(
method=style_to_equal,
args=(selector, style, val),
timeout=timeout,
msg="style val => {} {} not found within {}s".format(
style, val, timeout if timeout else self._wait_timeout
),
)
def wait_for_text_to_equal(self, selector, text, timeout=None):
"""Explicit wait until the element's text equals the expected `text`.
timeout if not set, equals to the fixture's `wait_timeout`
shortcut to `WebDriverWait` with customized `text_to_equal`
condition.
"""
return self._wait_for(
method=text_to_equal,
args=(selector, text),
timeout=timeout,
msg="text -> {} not found within {}s".format(
text, timeout if timeout else self._wait_timeout
),
)
def wait_for_contains_text(self, selector, text, timeout=None):
"""Explicit wait until the element's text contains the expected `text`.
timeout if not set, equals to the fixture's `wait_timeout`
shortcut to `WebDriverWait` with customized `contains_text`
condition.
"""
return self._wait_for(
method=contains_text,
args=(selector, text),
timeout=timeout,
msg="text -> {} not found inside element within {}s".format(
text, timeout if timeout else self._wait_timeout
),
)
    def wait_for_page(self, url=None, timeout=10):
        """Navigate to `url` and wait until the dash renderer is loaded.

        Uses `server_url` when `url` is not provided.

        Raises:
            DashAppLoadingError: when the react entry point does not appear
                within `timeout` seconds; the message embeds the page body
                HTML and the browser console logs to ease debugging.
        """
        self.driver.get(self.server_url if url is None else url)
        try:
            self.wait_for_element_by_css_selector(
                self.dash_entry_locator, timeout=timeout
            )
        except TimeoutException:
            logger.exception("dash server is not loaded within %s seconds", timeout)
            logger.debug(self.get_logs())
            raise DashAppLoadingError(
                "the expected Dash react entry point cannot be loaded"
                " in browser\n HTML => {}\n Console Logs => {}\n".format(
                    self.driver.find_element_by_tag_name("body").get_property(
                        "innerHTML"
                    ),
                    "\n".join((str(log) for log in self.get_logs())),
                )
            )
def select_dcc_dropdown(self, elem_or_selector, value=None, index=None):
dropdown = self._get_element(elem_or_selector)
dropdown.click()
menu = dropdown.find_element_by_css_selector("div.Select-menu-outer")
logger.debug("the available options are %s", "|".join(menu.text.split("\n")))
options = menu.find_elements_by_css_selector("div.VirtualizedSelectOption")
if options:
if isinstance(index, int):
options[index].click()
return
for option in options:
if option.text == value:
option.click()
return
logger.error(
"cannot find matching option using value=%s or index=%s", value, index,
)
def toggle_window(self):
"""Switch between the current working window and the new opened one."""
idx = (self._window_idx + 1) % 2
self.switch_window(idx=idx)
self._window_idx += 1
def switch_window(self, idx=0):
"""Switch to window by window index shortcut to
`driver.switch_to.window`."""
if len(self.driver.window_handles) <= idx:
raise BrowserError("there is no second window in Browser")
self.driver.switch_to.window(self.driver.window_handles[idx])
def open_new_tab(self, url=None):
"""Open a new tab in browser url is not set, equals to `server_url`."""
self.driver.execute_script(
'window.open("{}", "new window")'.format(
self.server_url if url is None else url
)
)
    def get_webdriver(self):
        """Construct a webdriver via the `_get_<browser>` factory.

        Returns None (implicitly) instead of raising when construction fails,
        so the `until(...)` retry loop in __init__ can poll again.
        """
        try:
            return getattr(self, "_get_{}".format(self._browser))()
        except WebDriverException:
            logger.exception("<<<Webdriver not initialized correctly>>>")
def _get_wd_options(self):
options = (
self._options[0]
if self._options and isinstance(self._options, list)
else getattr(webdriver, self._browser).options.Options()
)
if self._headless:
options.headless = True
return options
    def _get_chrome(self):
        """Build a Chrome webdriver (local or remote) with download prefs.

        Honors the DASH_TEST_CHROMEPATH env var for a custom Chrome binary
        and enables downloads in headless mode via a DevTools command.
        """
        options = self._get_wd_options()
        capabilities = DesiredCapabilities.CHROME
        capabilities["loggingPrefs"] = {"browser": "SEVERE"}
        # allow pointing the tests at a specific Chrome binary
        if "DASH_TEST_CHROMEPATH" in os.environ:
            options.binary_location = os.environ["DASH_TEST_CHROMEPATH"]
        options.add_experimental_option(
            "prefs",
            {
                "download.default_directory": self.download_path,
                "download.prompt_for_download": False,
                "download.directory_upgrade": True,
                "safebrowsing.enabled": False,
                "safebrowsing.disable_download_protection": True,
            },
        )
        chrome = (
            webdriver.Remote(
                command_executor=self._remote_url,
                options=options,
                desired_capabilities=capabilities,
            )
            if self._remote
            else webdriver.Chrome(options=options, desired_capabilities=capabilities)
        )
        # headless Chrome needs an explicit DevTools command to allow downloads
        # https://bugs.chromium.org/p/chromium/issues/detail?id=696481
        if self._headless:
            # pylint: disable=protected-access
            chrome.command_executor._commands["send_command"] = (
                "POST",
                "/session/$sessionId/chromium/send_command",
            )
            params = {
                "cmd": "Page.setDownloadBehavior",
                "params": {"behavior": "allow", "downloadPath": self.download_path},
            }
            res = chrome.execute("send_command", params)
            logger.debug("enabled headless download returns %s", res)
        chrome.set_window_position(0, 0)
        return chrome
    def _get_firefox(self):
        """Build a Firefox webdriver (local or remote) with download prefs.

        Configures a profile that saves binary downloads to `download_path`
        without prompting.
        """
        options = self._get_wd_options()
        capabilities = DesiredCapabilities.FIREFOX
        capabilities["loggingPrefs"] = {"browser": "SEVERE"}
        capabilities["marionette"] = True
        # https://developer.mozilla.org/en-US/docs/Download_Manager_preferences
        fp = webdriver.FirefoxProfile()
        fp.set_preference("browser.download.dir", self.download_path)
        # folderList=2 means "use the custom download dir" per the docs above
        fp.set_preference("browser.download.folderList", 2)
        fp.set_preference(
            "browser.helperApps.neverAsk.saveToDisk",
            "application/octet-stream",  # this MIME is generic for binary
        )
        return (
            webdriver.Remote(
                command_executor=self._remote_url,
                options=options,
                desired_capabilities=capabilities,
            )
            if self._remote
            else webdriver.Firefox(
                firefox_profile=fp, options=options, capabilities=capabilities
            )
        )
@staticmethod
def _is_windows():
return sys.platform == "win32"
def multiple_click(self, elem_or_selector, clicks):
"""multiple_click click the element with number of `clicks`."""
for _ in range(clicks):
self._get_element(elem_or_selector).click()
    def clear_input(self, elem_or_selector):
        """Simulate key presses to clear the input.

        Clicks the input, jumps to the end, shift-selects back to the start
        and deletes the selection; the step order is load-bearing.
        """
        elem = self._get_element(elem_or_selector)
        logger.debug("clear input with %s => %s", elem_or_selector, elem)
        (
            ActionChains(self.driver)
            .move_to_element(elem)
            .pause(0.2)
            .click(elem)
            .send_keys(Keys.END)
            .key_down(Keys.SHIFT)
            .send_keys(Keys.HOME)
            .key_up(Keys.SHIFT)
            .send_keys(Keys.DELETE)
        ).perform()
def zoom_in_graph_by_ratio(
self, elem_or_selector, start_fraction=0.5, zoom_box_fraction=0.2, | |
#!/usr/bin/env python
"""Web server for the NDVI Time Series Tool application.
The code in this file runs on App Engine. It's called when the user loads the
web page, requests a map or chart, or exports an image.
The App Engine code does most of the communication with EE. It uses the
EE Python library and the service account specified in config.py. The
exception is that when the browser loads map tiles it talks directly with EE.
The map handler generates a unique client ID for the Channel API connection,
injects it into the index.html template, and returns the page contents.
When the user changes the options in the UI and clicks the compute button, the /mapid handler
generates map IDs for each image band.
When the user requests a chart the /chart handler generates and returns a small chart over the Channel API.
A full-screen version is also temporarily available (ids are saved with the Memcache API), where the chart
can be saved as an image or a table.
When the user exports a file, the /export handler then kicks off an export
runner (running asynchronously) to create the EE task and poll for the task's
completion. When the EE task completes, the file is stored for 5 hours in the service
account's Drive folder and a download link is sent to the user's browser using the Channel API.
To clear the service account's Drive folder a cron job runs every hour and deletes all files older than 5 hours.
Another export method is the /download handler that generates a download url directly from the EE.
With this method the computation is done on the fly; because of that the download is not very stable and
the file size is limited to 1024 MB.
"""
import math
import traceback
import json
import logging
import os
import random
import socket
import string
import time
import calendar
import urlparse
import re
from datetime import datetime
# ctypes PATH KeyError fix
os.environ.setdefault("PATH", '')
import httplib2
import firebase_admin
from firebase_admin import auth as firebase_auth
import ee
import jinja2
from oauth2client.service_account import ServiceAccountCredentials
import webapp2
import gviz_api
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.api import memcache
from google.appengine.api import users
import config
import drive
###############################################################################
#                               Initialization.                               #
###############################################################################

# Debug flag controls the output of the stacktrace if errors occur
DEBUG = True

# The timeout for URL Fetch, Socket and Earth Engine (seconds).
# Note: normal requests are terminated after 60 seconds, background requests
# after 10 minutes.
URL_FETCH_TIMEOUT = 600 # 10 minutes

# Check https://developers.google.com/drive/scopes for all available scopes.
# Combines the Drive, Earth Engine and Firebase scopes.
OAUTH_SCOPES = ["https://www.googleapis.com/auth/drive"] + ["https://www.googleapis.com/auth/earthengine","https://www.googleapis.com/auth/devstorage.full_control"] + ["https://www.googleapis.com/auth/userinfo.email","https://www.googleapis.com/auth/firebase.database"]

# Our App Engine service account's credentials for Earth Engine and Google Drive
CREDENTIALS = ServiceAccountCredentials.from_json_keyfile_name(config.SERVICE_ACC_JSON_KEYFILE, OAUTH_SCOPES)

# Initialize the EE API.
ee.Initialize(CREDENTIALS)

# Set some timeouts (EE deadline is in milliseconds; default is no limit)
ee.data.setDeadline(URL_FETCH_TIMEOUT*1000) # in milliseconds (default no limit)
socket.setdefaulttimeout(URL_FETCH_TIMEOUT)
urlfetch.set_default_fetch_deadline(URL_FETCH_TIMEOUT)

# The Jinja templating system we use to dynamically generate HTML. See:
# http://jinja.pocoo.org/docs/dev/
JINJA2_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    autoescape=True,
    extensions=["jinja2.ext.autoescape"])

# An authenticated Drive helper object for the app service account.
DRIVE_HELPER = drive.DriveHelper(CREDENTIALS)

# The resolution of the exported images (meters per pixel).
EXPORT_RESOLUTION = 30

# The maximum number of pixels in an exported image.
EXPORT_MAX_PIXELS = 10e10

# The frequency to poll for export EE task completion (seconds).
TASK_POLL_FREQUENCY = 10
###############################################################################
# Web request handlers. #
###############################################################################
class DataHandler(webapp2.RequestHandler):
    """A servlet base class for responding to data queries.

    Wraps the concrete DoGet/DoPost handlers with a try/except and writes
    the result (or the error) back to the client as JSON.
    """

    def get(self):
        self.Handle(self.DoGet)

    def post(self):
        self.Handle(self.DoPost)

    def DoGet(self):
        """Processes a GET request and returns a JSON-encodable result."""
        raise NotImplementedError()

    def DoPost(self):
        """Processes a POST request and returns a JSON-encodable result."""
        raise NotImplementedError()

    def Handle(self, handle_function):
        """Responds with the result of the handle_function or errors, if any."""
        try:
            response = handle_function()
        except Exception as e:
            message = str(e)
            if DEBUG:
                # include the stacktrace while debugging
                message = message + " - " + traceback.format_exc()
            response = {"error": message}
        if response:
            self.response.headers["Content-Type"] = "application/json"
            self.response.out.write(json.dumps(response))
class MapHandler(DataHandler):
    """A servlet to handle requests to load the main web page."""

    def DoGet(self):
        """Returns the main web page with Firebase details included."""
        client_id = _GetUniqueString()
        template = JINJA2_ENVIRONMENT.get_template("templates/index.html")
        context = {
            # channel token expire in 24 hours
            "clientId": client_id,
            "firebaseToken": create_custom_token(client_id),
            "firebaseConfig": "templates/%s" % config.FIREBASE_CONFIG,
            "display_splash": "none",
        }
        self.response.out.write(template.render(context))
class MapIdHandler(DataHandler):
    """A servlet that generates the map IDs for each band based on the selected options."""

    def DoPost(self):
        """Returns the map IDs of the requested options.

        HTTP Parameters:
          regression: the regression type [poly1,poly2,poly3,zhuWood]
          source: the source satellite [all,land5,land7,land8]
          start: the start year to filter the satellite images (including)
          end: the end year to filter the satellite images (including)
          cloudscore: the max cloudscore for ee.Algorithms.Landsat.simpleCloudScore
              [1-100]; higher means the pixel is more likely to be a cloud
          point: [<longitude>,<latitude>] coordinates
          region: an array of [<longitude>,<latitude>] pairs describing a region
          client_id: the unique id that is used for the channel api

        Returns:
          A dictionary with a key called 'bands' containing an array of
          dictionaries like {"name":<band name>,"mapid":<mapid>,"token":<token>}.
        """
        options = _ReadOptions(self.request)
        image = _GetImage(options)
        # _GetImage returns None if the collection is empty
        if image is None:
            return {"error": "No images in collection. Change your options."}
        layers = []
        for band in image.bandNames().getInfo():
            # create a map overlay for each band
            mapid = image.select(band).visualize().getMapId()
            layers.append(
                {"name": band, "mapid": mapid["mapid"], "token": mapid["token"]})
        return {"bands": layers}
class ChartHandler(DataHandler):
    """A servlet to handle chart requests."""

    def DoGet(self):
        """Returns the full screen view of a chart.

        HTTP Parameters:
          id: the unique chart id (key value for the Memcache API).

        Returns:
          A html page with the full screen chart, or an error dict when the
          chart id is unknown.
        """
        chart_id = self.request.get("id")
        # load chart options from Memcache API
        chart_options = memcache.get(chart_id)
        if chart_options is None:
            return {"error":"Chart id doesn't exist!"}
        # FIX: read the template with a context manager so the file handle
        # is closed even when read() raises (it previously leaked on error).
        with open("templates/full_chart.html", "r") as f:
            full_chart = f.read()
        # style chart view corresponding to the regression type
        if chart_options["regression"] == "zhuWood":
            chart_options["chart_style"] = "height: 40%;"
            chart_options["chartArea"] = "{width: \"80%\"}"
        else:
            chart_options["chart_style"] = "height: 60%; max-width: 1000px;"
            chart_options["chartArea"] = "{width: \"70%\"}"
        # output html page
        self.response.set_status(200)
        self.response.headers["Content-Type"] = "text/html"
        self.response.out.write(full_chart % chart_options)
        return

    def DoPost(self):
        """Starts a ChartRunnerHandler to asynchronously generate a chart.

        HTTP Parameters:
          regression: the regression type [poly1,poly2,poly3,zhuWood]
          source: the source satellite [all,land5,land7,land8]
          start: the start year to filter the satellite images (including)
          end: the end year to filter the satellite images (including)
          cloudscore: the max cloudscore for ee.Algorithms.Landsat.simpleCloudScore
          point: [<longitude>,<latitude>] coordinates
          client_id: the unique id that is used for the channel api
        """
        # read request options
        options = _ReadOptions(self.request)
        # Kick off an export runner to start and monitor the EE export task.
        # Note: The word "task" is used by both Earth Engine and App Engine to
        # refer to two different things. "TaskQueue" is an async App Engine
        # service. retry_limit=0: only execute once even if the task fails.
        taskqueue.add(url="/chartrunner", params={"options":json.dumps(options)}, retry_options=taskqueue.TaskRetryOptions(task_retry_limit=0,task_age_limit=1))
        # notify client browser that the chart creation has started
        _SendMessage(options["client_id"],"chart-" + options["filename"],"info","Chart creation at [%s/%s] in progress." % (options["point"][1],options["point"][0]))
class ChartRunnerHandler(webapp2.RequestHandler):
"""A servlet for handling async chart task requests."""
def post(self):
"""Generates a small chart that is displayed as alert in the clients browser
and creates the full screen version that is saved with the Memcache API.
HTTP Parameters:
regression: the regression type [poly1,poly2,poly3,zhuWood]
source: the source satellite [all,land5,land7,land8]
start: the start year to filter the satellite images (including)
end: the end year to filter the satellite images (including)
cloudscore: the max cloudscore for the ee.Algorithms.Landsat.simpleCloudScore [1-100]
Higher means that the pixel is more likley to be a cloud
point: an array of two double values representing coordinates like [<longitude>,<latitude>]
client_id: the unique id that is used for the channel api
"""
# load the options
options = json.loads(self.request.get("options"))
# create the chart
try:
chart = _GetChart(options)
except Exception as e:
if DEBUG:
_SendMessage(options["client_id"],"chart-" + options["filename"],"danger","Chart creation failed.", str(e) + " - " + traceback.format_exc())
else:
_SendMessage(options["client_id"],"chart-" + options["filename"],"danger","Chart creation failed.", str(e))
return
# _GetChart returns None if the collection is | |
# Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from joulescope.usb import core as usb_core
from joulescope.usb.api import DeviceEvent
from joulescope.usb.impl_tools import RunUntilDone
from joulescope.usb.core import SetupPacket, ControlTransferResponse
from joulescope.usb.scan_info import INFO
from typing import List
import time
import threading
from contextlib import contextmanager
import platform
import numpy as np
import os
import sys
import struct
import ctypes
import ctypes.util
from ctypes import Structure, c_uint8, c_uint16, c_uint32, c_uint, \
c_int, c_char, c_ssize_t, c_void_p, POINTER, pointer, byref
import logging
log = logging.getLogger(__name__)
STRING_LENGTH_MAX = 255  # max bytes fetched for a USB string descriptor
CONTROL_TRANSFER_TIMEOUT_MS = 1000  # default in milliseconds

# Locate the libusb-1.0 shared library. PyInstaller-frozen macOS builds
# bundle a per-OS-version dylib in the unpack directory (sys._MEIPASS)
# instead of relying on the system search path.
find_lib = ctypes.util.find_library('usb-1.0')
if find_lib is None:
    if platform.system() == 'Darwin' and getattr(sys, 'frozen', False):
        os_version = platform.uname().release.split('.')[0]
        find_lib = os.path.join(sys._MEIPASS, '%s_libusb-1.0.0.dylib' % os_version)
        log.info('Darwin lib: %s', find_lib)
    else:
        raise RuntimeError('Could not import libusb')
_lib = ctypes.cdll.LoadLibrary(find_lib)
class DescriptorType:
    """USB descriptor type codes (mirrors enum libusb_descriptor_type)."""
    DEVICE = 0x01
    CONFIG = 0x02
    STRING = 0x03
    INTERFACE = 0x04
    ENDPOINT = 0x05
    BOS = 0x0f
    DEVICE_CAPABILITY = 0x10
    HID = 0x21
    REPORT = 0x22
    PHYSICAL = 0x23
    HUB = 0x29
    SUPERSPEED_HUB = 0x2a
    SS_ENDPOINT_COMPANION = 0x30
class TransferType:
    """USB endpoint transfer types (mirrors enum libusb_transfer_type)."""
    CONTROL = 0
    ISOCHRONOUS = 1
    BULK = 2
    INTERRUPT = 3
    BULK_STREAM = 4
class TransferStatus:
    """Async transfer completion codes (mirrors enum libusb_transfer_status)."""
    COMPLETED = 0
    ERROR = 1
    TIMED_OUT = 2
    CANCELLED = 3
    STALL = 4
    NO_DEVICE = 5
    OVERFLOW = 6
class TransferFlags:
    """Bit flags for libusb_transfer.flags (mirrors enum libusb_transfer_flags)."""
    SHORT_NOT_OK = 1 << 0
    FREE_BUFFER = 1 << 1
    FREE_TRANSFER = 1 << 2
    ADD_ZERO_PACKET = 1 << 3
class ReturnCodes:
    """libusb return/error codes (mirrors enum libusb_error)."""
    SUCCESS = 0
    ERROR_IO = -1
    ERROR_INVALID_PARAM = -2
    ERROR_ACCESS = -3
    ERROR_NO_DEVICE = -4
    ERROR_NOT_FOUND = -5
    ERROR_BUSY = -6
    ERROR_TIMEOUT = -7
    ERROR_OVERFLOW = -8
    ERROR_PIPE = -9
    ERROR_INTERRUPTED = -10
    ERROR_NO_MEM = -11
    ERROR_NOT_SUPPORTED = -12
    ERROR_OTHER = -99
class _libusb_device_descriptor(Structure):
    """ctypes mirror of struct libusb_device_descriptor (USB device descriptor)."""
    _fields_ = [
        ('bLength', c_uint8),
        ('bDescriptorType', c_uint8),
        ('bcdUSB', c_uint16),
        ('bDeviceClass', c_uint8),
        ('bDeviceSubClass', c_uint8),
        ('bDeviceProtocol', c_uint8),
        ('bMaxPacketSize0', c_uint8),
        ('idVendor', c_uint16),
        ('idProduct', c_uint16),
        ('bcdDevice', c_uint16),
        ('iManufacturer', c_uint8),
        ('iProduct', c_uint8),
        ('iSerialNumber', c_uint8),
        ('bNumConfigurations', c_uint8)]
# typedef void (LIBUSB_CALL *libusb_transfer_cb_fn)(struct libusb_transfer *transfer);
# Completion callback type for async transfers; the argument is an opaque
# pointer to the finished libusb_transfer.
libusb_transfer_cb_fn = ctypes.CFUNCTYPE(None, c_void_p)
class _libusb_transfer(Structure):
    """ctypes mirror of struct libusb_transfer (async transfer descriptor).

    The trailing zero-sized iso_packet_desc array of the C struct is omitted.
    """
    _fields_ = [
        ('dev_handle', c_void_p),
        ('flags', c_uint8),
        ('endpoint_id', c_uint8),
        ('endpoint_type', c_uint8),
        ('timeout_ms', c_uint),
        ('status', c_uint),
        ('length', c_int),
        ('actual_length', c_int),
        ('callback', libusb_transfer_cb_fn),
        ('user_data', c_void_p),
        ('buffer', POINTER(c_uint8)),
        ('num_iso_packets', c_int),
        # struct libusb_iso_packet_descriptor iso_packet_desc[ZERO_SIZED_ARRAY];
    ]
# typedef struct libusb_context libusb_context; - c_void_p
# typedef struct libusb_device libusb_device; - c_void_p
# typedef struct libusb_device_handle libusb_device_handle; c_void_p

# C prototypes for the libusb-1.0 entry points used by this module.
# Declaring restype/argtypes lets ctypes marshal arguments correctly.

# int LIBUSB_CALL libusb_init(libusb_context **ctx);
_lib.libusb_init.restype = c_int
_lib.libusb_init.argtypes = [POINTER(c_void_p)]

# void LIBUSB_CALL libusb_exit(libusb_context *ctx);
_lib.libusb_exit.restype = None
_lib.libusb_exit.argtypes = [c_void_p]

# ssize_t LIBUSB_CALL libusb_get_device_list(libusb_context *ctx,
#     libusb_device ***list);
_lib.libusb_get_device_list.restype = c_ssize_t
_lib.libusb_get_device_list.argtypes = [c_void_p, POINTER(POINTER(c_void_p))]

# void LIBUSB_CALL libusb_free_device_list(libusb_device **list,
#     int unref_devices);
_lib.libusb_free_device_list.restype = None
_lib.libusb_free_device_list.argtypes = [POINTER(c_void_p), c_int]

# int LIBUSB_CALL libusb_open(libusb_device *dev, libusb_device_handle **dev_handle);
_lib.libusb_open.restype = c_int
_lib.libusb_open.argtypes = [c_void_p, POINTER(c_void_p)]

# void LIBUSB_CALL libusb_close(libusb_device_handle *dev_handle);
_lib.libusb_close.restype = None
_lib.libusb_close.argtypes = [c_void_p]

# int LIBUSB_CALL libusb_set_configuration(libusb_device_handle *dev_handle,
#     int configuration);
_lib.libusb_set_configuration.restype = c_int
_lib.libusb_set_configuration.argtypes = [c_void_p, c_int]

# int LIBUSB_CALL libusb_claim_interface(libusb_device_handle *dev_handle,
#     int interface_number);
_lib.libusb_claim_interface.restype = c_int
_lib.libusb_claim_interface.argtypes = [c_void_p, c_int]

# int LIBUSB_CALL libusb_release_interface(libusb_device_handle *dev_handle,
#     int interface_number);
_lib.libusb_release_interface.restype = c_int
_lib.libusb_release_interface.argtypes = [c_void_p, c_int]

# int LIBUSB_CALL libusb_set_interface_alt_setting(libusb_device_handle *dev_handle,
#     int interface_number, int alternate_setting);
_lib.libusb_set_interface_alt_setting.restype = c_int
_lib.libusb_set_interface_alt_setting.argtypes = [c_void_p, c_int, c_int]

# int LIBUSB_CALL libusb_get_device_descriptor(libusb_device *dev,
#     struct libusb_device_descriptor *desc);
_lib.libusb_get_device_descriptor.restype = c_int
_lib.libusb_get_device_descriptor.argtypes = [c_void_p, POINTER(_libusb_device_descriptor)]

# int LIBUSB_CALL libusb_control_transfer(libusb_device_handle *dev_handle,
#     uint8_t request_type, uint8_t bRequest, uint16_t wValue, uint16_t wIndex,
#     unsigned char *data, uint16_t wLength, unsigned int timeout);
_lib.libusb_control_transfer.restype = c_int
_lib.libusb_control_transfer.argtypes = [c_void_p, c_uint8, c_uint8, c_uint16, c_uint16,
                                         POINTER(c_uint8), c_uint16, c_int]

# struct libusb_transfer * LIBUSB_CALL libusb_alloc_transfer(int iso_packets);
_lib.libusb_alloc_transfer.restype = POINTER(_libusb_transfer)
_lib.libusb_alloc_transfer.argtypes = [c_int]

# int LIBUSB_CALL libusb_submit_transfer(struct libusb_transfer *transfer);
_lib.libusb_submit_transfer.restype = c_int
_lib.libusb_submit_transfer.argtypes = [POINTER(_libusb_transfer)]

# int LIBUSB_CALL libusb_cancel_transfer(struct libusb_transfer *transfer);
_lib.libusb_cancel_transfer.restype = c_int
_lib.libusb_cancel_transfer.argtypes = [POINTER(_libusb_transfer)]

# void LIBUSB_CALL libusb_free_transfer(struct libusb_transfer *transfer);
_lib.libusb_free_transfer.restype = None
_lib.libusb_free_transfer.argtypes = [POINTER(_libusb_transfer)]
class TimeVal(Structure):
    """ctypes mirror of struct timeval (seconds + microseconds)."""
    _fields_ = [
        ("tv_sec", ctypes.c_long),
        ("tv_usec", ctypes.c_long)
    ]
# Event-loop entry points for processing pending libusb events.

# int LIBUSB_CALL libusb_handle_events_timeout(libusb_context *ctx,
#     struct timeval *tv);
_lib.libusb_handle_events_timeout.restype = c_int
_lib.libusb_handle_events_timeout.argtypes = [c_void_p, POINTER(TimeVal)]

# int LIBUSB_CALL libusb_handle_events(libusb_context *ctx)
_lib.libusb_handle_events.restype = c_int
_lib.libusb_handle_events.argtypes = [c_void_p]
class HotplugFlag:
    """Flags for hotplug registration (mirrors enum libusb_hotplug_flag)."""
    NONE = 0
    ENUMERATE = 1 << 0
class HotplugEvent:
    """Values of the ``libusb_hotplug_event`` enumeration."""
    DEVICE_ARRIVED = 0x01  # device plugged in
    DEVICE_LEFT = 0x02     # device removed
# Wildcard for vendor_id / product_id / dev_class in hotplug registration.
HOTPLUG_MATCH_ANY = -1
# typedef int (LIBUSB_CALL *libusb_hotplug_callback_fn)(libusb_context *ctx,
#     libusb_device *device,
#     libusb_hotplug_event event,
#     void *user_data);
_libusb_hotplug_callback_fn = ctypes.CFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_void_p)
# int LIBUSB_CALL libusb_hotplug_register_callback(libusb_context *ctx,
#     libusb_hotplug_event events,
#     libusb_hotplug_flag flags,
#     int vendor_id, int product_id,
#     int dev_class,
#     libusb_hotplug_callback_fn cb_fn,
#     void *user_data,
#     libusb_hotplug_callback_handle *callback_handle);
_lib.libusb_hotplug_register_callback.restype = c_int
_lib.libusb_hotplug_register_callback.argtypes = [c_void_p, c_int, c_int, c_int, c_int, c_int,
                                                  _libusb_hotplug_callback_fn, c_void_p, POINTER(c_int)]
# void LIBUSB_CALL libusb_hotplug_deregister_callback(libusb_context *ctx,
#     libusb_hotplug_callback_handle callback_handle);
_lib.libusb_hotplug_deregister_callback.restype = c_int
_lib.libusb_hotplug_deregister_callback.argtypes = [c_void_p, c_int]
class Capability:
    """Values of the ``libusb_capability`` enumeration, queried through
    ``libusb_has_capability``."""
    HAS_CAPABILITY = 0x0000
    HAS_HOTPLUG = 0x0001
    HAS_HID_ACCESS = 0x0100
    SUPPORTS_DETACH_KERNEL_DRIVER = 0x0101
# Capability probe; returns non-zero when the capability is available.
# int LIBUSB_CALL libusb_has_capability(uint32_t capability);
_lib.libusb_has_capability.restype = c_int
_lib.libusb_has_capability.argtypes = [c_uint32]
def _libusb_context_create():
    """Initialize a new libusb context.

    :return: The context as a ``c_void_p``.
    :raise RuntimeError: If ``libusb_init`` reports a non-zero status.
    """
    context = c_void_p()
    if _lib.libusb_init(pointer(context)):
        raise RuntimeError('Could not open libusb')
    return context
def _libusb_context_destroy(ctx):
    """Release a libusb context produced by :func:`_libusb_context_create`."""
    _lib.libusb_exit(ctx)
@contextmanager
def _libusb_context():
    """Context manager yielding a fresh libusb context, destroyed on exit."""
    context = _libusb_context_create()
    try:
        yield context
    finally:
        _libusb_context_destroy(context)
def _path_split(path):
vid, pid, serial_number = path.split('/')
return int(vid, 16), int(pid, 16), serial_number
def _get_string_descriptor(device, index):
    """Read a USB string descriptor from an open device.

    :param device: The open libusb device handle.
    :param index: The string descriptor index to fetch.
    :return: The descriptor text decoded from UTF-16-LE.
    :raise RuntimeError: If either control transfer fails (negative return).
    """
    request_type = usb_core.RequestType(direction='in', type_='standard', recipient='device').u8
    byte_buffer = bytearray(STRING_LENGTH_MAX)
    buffer_type = c_uint8 * STRING_LENGTH_MAX
    # `buffer` aliases `byte_buffer` (from_buffer), so the C side writes
    # directly into the bytearray read below.
    buffer = buffer_type.from_buffer(byte_buffer)
    # determine default language: descriptor index 0 lists supported LANGIDs
    rv = _lib.libusb_control_transfer(device, request_type, usb_core.Request.GET_DESCRIPTOR,
                                      (DescriptorType.STRING << 8), 0,
                                      buffer, STRING_LENGTH_MAX,
                                      1000)
    if rv < 0:
        raise RuntimeError('control_transfer could not get language: %d' % (rv, ))
    # first LANGID, little-endian, at offset 2
    langid = int(byte_buffer[2]) | (int(byte_buffer[3]) << 8)
    rv = _lib.libusb_control_transfer(device, request_type, usb_core.Request.GET_DESCRIPTOR,
                                      (DescriptorType.STRING << 8) | (index & 0xff), langid,
                                      buffer, STRING_LENGTH_MAX,
                                      1000)
    if rv < 0:
        raise RuntimeError('control transfer could not get string descriptor: %d' % (rv, ))
    # clamp to both the transfer's actual length and the descriptor's own
    # declared length byte
    buffer_len = min(rv, byte_buffer[0])
    # byte 0 is length, byte 1 is string identifier
    return byte_buffer[2:buffer_len].decode('UTF-16-LE')
# Shared no-op completion callback: installed as the safe default on new
# transfers and swapped in before cancellation so a late completion from
# libusb is silently dropped.
_transfer_callback_discard_fn = libusb_transfer_cb_fn(lambda x: None)
"""Default null callback that is always safe."""
class Transfer:
    """Own a ``libusb_transfer`` struct together with its Python-side buffer."""
    def __init__(self, size):
        """Allocate a transfer.

        :param size: Either an int byte count (an empty buffer is allocated)
            or a bytes-like object used as the transfer payload.
        """
        try:
            self.size = len(size)  # also serves as list-like duck-typing test
            # np.frombuffer shares memory with `size` rather than copying it
            self.buffer = np.frombuffer(size, dtype=np.uint8)
            log.debug('Transfer: copy buffer %d', self.size)
        except TypeError:
            self.size = size
            self.buffer = np.empty(self.size, dtype=np.uint8)
            log.debug('Transfer: create buffer %d', self.size)
        self.transfer = _lib.libusb_alloc_transfer(0)  # type: _libusb_transfer
        # integer address used to match completed transfers back to this object
        self.addr = ctypes.addressof(self.transfer.contents)
        transfer = self.transfer[0]
        # kept as an attribute so the pointed-at buffer stays referenced for
        # the lifetime of this Transfer (and the C-side struct)
        self.buffer_ptr = self.buffer.ctypes.data_as(POINTER(c_uint8))
        transfer.buffer = self.buffer_ptr
        transfer.flags = 0
        transfer.length = self.size
        transfer.actual_length = 0
        transfer.user_data = None
        transfer.num_iso_packets = 0
        transfer.status = TransferStatus.COMPLETED
        transfer.timeout_ms = 1000  # milliseconds
        # no-op default; owners install a real callback before submitting
        transfer.callback = _transfer_callback_discard_fn
    def __del__(self):
        # return the libusb_transfer struct to libusb when this wrapper dies
        _lib.libusb_free_transfer(self.transfer)
class ControlTransferAsync:
def __init__(self, handle):
    """Manage asynchronous control transfers.

    :param handle: The device handle.
    """
    self._handle = handle
    # Hold the ctypes callback wrapper on the instance so it is not garbage
    # collected while libusb may still invoke it.
    self._transfer_callback_fn = libusb_transfer_cb_fn(self._transfer_callback)
    self._commands = []  # Pending control transfer commands as list of [cbk_fn, setup_packet, buffer]
    self._transfer_pending = None  # type: Transfer
    self._time_start = None  # time.time() at submission of the in-flight transfer
    self.stop_code = None  # once set, every new pend() fails fast with this code
def __str__(self):
    # Fixed label; no per-instance state is included.
    return 'ControlTransferAsync()'
def _transfer_callback(self, transfer_void_ptr):
    """libusb completion callback for the in-flight control transfer.

    :param transfer_void_ptr: Address of the completed ``libusb_transfer``,
        matched against the pending :class:`Transfer`'s ``addr``.
    """
    if self._transfer_pending is None:
        log.warning('Transfer callback when none pending')
        return
    if self._transfer_pending.addr != transfer_void_ptr:
        log.warning('Transfer mismatch')
        return
    # Clear the pending slot before dispatching so re-entrant pend() calls
    # observe a consistent state.
    transfer, self._transfer_pending = self._transfer_pending, None
    if self._commands:
        self._finish(self._commands.pop(0), transfer)
    else:
        log.warning('Transfer callback when no commands')
    # Submit the next queued command, if any.
    self._issue()
def _abort_all(self):
    """Drain the command queue, notifying every callback with CANCELLED."""
    aborted, self._commands = self._commands, []
    for cbk_fn, setup_packet, _ in aborted:
        try:
            cbk_fn(usb_core.ControlTransferResponse(
                setup_packet, TransferStatus.CANCELLED, None))
        except Exception:
            # A misbehaving callback must not prevent aborting the rest.
            log.exception('in callback while aborting')
def close(self):
    """Close, cancelling any in-flight transfer and aborting queued commands."""
    if self._handle and self._transfer_pending:
        log.info('ControlTransferAsync.close cancel pending transfer, %d', len(self._commands))
        transfer, self._transfer_pending = self._transfer_pending, None
        # Swap in the discard callback first so the eventual completion of
        # the cancelled transfer is ignored rather than dispatched.
        transfer.transfer[0].callback = _transfer_callback_discard_fn
        _lib.libusb_cancel_transfer(transfer.transfer)
        # callback function will be invoked later
    else:
        log.info('ControlTransferAsync.close %d', len(self._commands))
    self._handle = None
    self._abort_all()
def pend(self, cbk_fn, setup_packet: usb_core.SetupPacket, buffer=None):
    """Pend an asynchronous Control Transfer.

    :param cbk_fn: The function to call when the control transfer completes.
        A :class:`usb_core.ControlTransferResponse` is the sole argument.
    :param setup_packet: The control transfer setup packet.
    :param buffer: The buffer (if length > 0) for write transactions.
    :return: True if pending, False on error.
    """
    if self.stop_code is not None:
        # Device already stopped: fail fast without touching the queue.
        cbk_fn(usb_core.ControlTransferResponse(setup_packet, self.stop_code, None))
        return False
    self._commands.append([cbk_fn, setup_packet, buffer])
    if len(self._commands) == 1:
        # Queue was idle, so submit this command immediately.
        return self._issue()
    return True
def _issue(self):
if not self._commands:
return True
if not self._handle:
log.info('_issue but handle not valid')
self._abort_all()
return False
log.debug('preparing')
_, setup_packet, buffer = self._commands[0]
hdr = struct.pack('<BBHHH', setup_packet.request_type, setup_packet.request,
setup_packet.value, setup_packet.index, setup_packet.length)
if buffer is not None:
transfer = Transfer(hdr + buffer)
else:
transfer = Transfer(len(hdr) + setup_packet.length)
transfer.buffer[:len(hdr)] = np.frombuffer(hdr, dtype=np.uint8)
t = transfer.transfer[0]
t.dev_handle = self._handle
t.endpoint_id = 0
t.endpoint_type = TransferType.CONTROL
t.callback = self._transfer_callback_fn
self._transfer_pending = transfer
self._time_start = time.time()
rv = _lib.libusb_submit_transfer(transfer.transfer)
if 0 == rv:
log.debug('libusb_submit_transfer [control]')
else:
log.warning('libusb_submit_transfer [control] => %d', rv)
if t.status == 0:
if rv == ReturnCodes.ERROR_NO_DEVICE:
log.info('control transfer but no device')
t.status = TransferStatus.NO_DEVICE
else:
t.status = TransferStatus.ERROR
if self.stop_code is None:
self.stop_code = DeviceEvent.COMMUNICATION_ERROR
# Source artifact (extraction boundary): kustodian/google-cloud-sdk
"""Generated client library for cloudbuild version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.cloudbuild.v1 import cloudbuild_v1_messages as messages
class CloudbuildV1(base_api.BaseApiClient):
"""Generated client library for service cloudbuild version v1."""
# Class-level configuration consumed by apitools' BaseApiClient.
MESSAGES_MODULE = messages
BASE_URL = u'https://cloudbuild.googleapis.com/'
_PACKAGE = u'cloudbuild'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform']
_VERSION = u'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
# NOTE(review): _USER_AGENT duplicates _CLIENT_SECRET — looks like a
# generator quirk; confirm against other generated clients.
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'CloudbuildV1'
_URL_VERSION = u'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
             get_credentials=True, http=None, model=None,
             log_request=False, log_response=False,
             credentials_args=None, default_global_params=None,
             additional_http_headers=None, response_encoding=None):
    """Create a new cloudbuild handle."""
    url = url or self.BASE_URL
    super(CloudbuildV1, self).__init__(
        url, credentials=credentials,
        get_credentials=get_credentials, http=http, model=model,
        log_request=log_request, log_response=log_response,
        credentials_args=credentials_args,
        default_global_params=default_global_params,
        additional_http_headers=additional_http_headers,
        response_encoding=response_encoding)
    # Per-resource service handles.
    self.operations = self.OperationsService(self)
    self.projects_builds = self.ProjectsBuildsService(self)
    self.projects_triggers = self.ProjectsTriggersService(self)
    self.projects = self.ProjectsService(self)
# NOTE: autogenerated by apitools; regenerate rather than hand-editing.
class OperationsService(base_api.BaseApiService):
    """Service class for the operations resource."""
    _NAME = u'operations'
    def __init__(self, client):
        super(CloudbuildV1.OperationsService, self).__init__(client)
        # No media-upload configuration for this service.
        self._upload_configs = {
        }
    def Cancel(self, request, global_params=None):
        r"""Starts asynchronous cancellation on a long-running operation. The server.
makes a best effort to cancel the operation, but success is not
guaranteed. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`. Clients can use
Operations.GetOperation or
other methods to check whether the cancellation succeeded or whether the
operation completed despite cancellation. On successful cancellation,
the operation is not deleted; instead, it becomes an operation with
an Operation.error value with a google.rpc.Status.code of 1,
corresponding to `Code.CANCELLED`.

      Args:
        request: (CloudbuildOperationsCancelRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Empty) The response message.
        """
        config = self.GetMethodConfig('Cancel')
        return self._RunMethod(
            config, request, global_params=global_params)
    # Request metadata consumed lazily by base_api when the method runs.
    Cancel.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/operations/{operationsId}:cancel',
        http_method=u'POST',
        method_id=u'cloudbuild.operations.cancel',
        ordered_params=[u'name'],
        path_params=[u'name'],
        query_params=[],
        relative_path=u'v1/{+name}:cancel',
        request_field=u'cancelOperationRequest',
        request_type_name=u'CloudbuildOperationsCancelRequest',
        response_type_name=u'Empty',
        supports_download=False,
    )
    def Get(self, request, global_params=None):
        r"""Gets the latest state of a long-running operation. Clients can use this.
method to poll the operation result at intervals as recommended by the API
service.

      Args:
        request: (CloudbuildOperationsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)
    Get.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/operations/{operationsId}',
        http_method=u'GET',
        method_id=u'cloudbuild.operations.get',
        ordered_params=[u'name'],
        path_params=[u'name'],
        query_params=[],
        relative_path=u'v1/{+name}',
        request_field='',
        request_type_name=u'CloudbuildOperationsGetRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )
    def List(self, request, global_params=None):
        r"""Lists operations that match the specified filter in the request. If the.
server doesn't support this method, it returns `UNIMPLEMENTED`.

NOTE: the `name` binding allows API services to override the binding
to use different resource name schemes, such as `users/*/operations`. To
override the binding, API services can add a binding such as
`"/v1/{name=users/*}/operations"` to their service configuration.
For backwards compatibility, the default name includes the operations
collection id, however overriding users must ensure the name binding
is the parent resource, without the operations collection id.

      Args:
        request: (CloudbuildOperationsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListOperationsResponse) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)
    List.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1/operations',
        http_method=u'GET',
        method_id=u'cloudbuild.operations.list',
        ordered_params=[u'name'],
        path_params=[u'name'],
        query_params=[u'filter', u'pageSize', u'pageToken'],
        relative_path=u'v1/{+name}',
        request_field='',
        request_type_name=u'CloudbuildOperationsListRequest',
        response_type_name=u'ListOperationsResponse',
        supports_download=False,
    )
# NOTE: autogenerated by apitools; regenerate rather than hand-editing.
class ProjectsBuildsService(base_api.BaseApiService):
    """Service class for the projects_builds resource."""
    _NAME = u'projects_builds'
    def __init__(self, client):
        super(CloudbuildV1.ProjectsBuildsService, self).__init__(client)
        # No media-upload configuration for this service.
        self._upload_configs = {
        }
    def Cancel(self, request, global_params=None):
        r"""Cancels a build in progress.

      Args:
        request: (CloudbuildProjectsBuildsCancelRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Build) The response message.
        """
        config = self.GetMethodConfig('Cancel')
        return self._RunMethod(
            config, request, global_params=global_params)
    # Request metadata consumed lazily by base_api when the method runs.
    Cancel.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'cloudbuild.projects.builds.cancel',
        ordered_params=[u'projectId', u'id'],
        path_params=[u'id', u'projectId'],
        query_params=[],
        relative_path=u'v1/projects/{projectId}/builds/{id}:cancel',
        request_field=u'cancelBuildRequest',
        request_type_name=u'CloudbuildProjectsBuildsCancelRequest',
        response_type_name=u'Build',
        supports_download=False,
    )
    def Create(self, request, global_params=None):
        r"""Starts a build with the specified configuration.

This method returns a long-running `Operation`, which includes the build
ID. Pass the build ID to `GetBuild` to determine the build status (such as
`SUCCESS` or `FAILURE`).

      Args:
        request: (CloudbuildProjectsBuildsCreateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
        """
        config = self.GetMethodConfig('Create')
        return self._RunMethod(
            config, request, global_params=global_params)
    Create.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'cloudbuild.projects.builds.create',
        ordered_params=[u'projectId'],
        path_params=[u'projectId'],
        query_params=[],
        relative_path=u'v1/projects/{projectId}/builds',
        request_field=u'build',
        request_type_name=u'CloudbuildProjectsBuildsCreateRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )
    def Get(self, request, global_params=None):
        r"""Returns information about a previously requested build.

The `Build` that is returned includes its status (such as `SUCCESS`,
`FAILURE`, or `WORKING`), and timing information.

      Args:
        request: (CloudbuildProjectsBuildsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Build) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)
    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'cloudbuild.projects.builds.get',
        ordered_params=[u'projectId', u'id'],
        path_params=[u'id', u'projectId'],
        query_params=[],
        relative_path=u'v1/projects/{projectId}/builds/{id}',
        request_field='',
        request_type_name=u'CloudbuildProjectsBuildsGetRequest',
        response_type_name=u'Build',
        supports_download=False,
    )
    def List(self, request, global_params=None):
        r"""Lists previously requested builds.

Previously requested builds may still be in-progress, or may have finished
successfully or unsuccessfully.

      Args:
        request: (CloudbuildProjectsBuildsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListBuildsResponse) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)
    List.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'cloudbuild.projects.builds.list',
        ordered_params=[u'projectId'],
        path_params=[u'projectId'],
        query_params=[u'filter', u'pageSize', u'pageToken'],
        relative_path=u'v1/projects/{projectId}/builds',
        request_field='',
        request_type_name=u'CloudbuildProjectsBuildsListRequest',
        response_type_name=u'ListBuildsResponse',
        supports_download=False,
    )
    def Retry(self, request, global_params=None):
        r"""Creates a new build based on the specified build.

This method creates a new build using the original build request, which may
or may not result in an identical build.

For triggered builds:

* Triggered builds resolve to a precise revision; therefore a retry of a
triggered build will result in a build that uses the same revision.

For non-triggered builds that specify `RepoSource`:

* If the original build built from the tip of a branch, the retried build
will build from the tip of that branch, which may not be the same revision
as the original build.
* If the original build specified a commit sha or revision ID, the retried
build will use the identical source.

For builds that specify `StorageSource`:

* If the original build pulled source from Google Cloud Storage without
specifying the generation of the object, the new build will use the current
object, which may be different from the original build source.
* If the original build pulled source from Cloud Storage and specified the
generation of the object, the new build will attempt to use the same
object, which may or may not be available depending on the bucket's
lifecycle management settings.

      Args:
        request: (CloudbuildProjectsBuildsRetryRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
        """
        config = self.GetMethodConfig('Retry')
        return self._RunMethod(
            config, request, global_params=global_params)
    Retry.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'cloudbuild.projects.builds.retry',
        ordered_params=[u'projectId', u'id'],
        path_params=[u'id', u'projectId'],
        query_params=[],
        relative_path=u'v1/projects/{projectId}/builds/{id}:retry',
        request_field=u'retryBuildRequest',
        request_type_name=u'CloudbuildProjectsBuildsRetryRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )
class ProjectsTriggersService(base_api.BaseApiService):
"""Service class for the projects_triggers resource."""
_NAME = u'projects_triggers'
def __init__(self, client):
    super(CloudbuildV1.ProjectsTriggersService, self).__init__(client)
    # No media-upload configuration for this service.
    self._upload_configs = {
    }
def Create(self, request, global_params=None):
    r"""Creates a new `BuildTrigger`.

This API is experimental.

      Args:
        request: (CloudbuildProjectsTriggersCreateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (BuildTrigger) The response message.
    """
    config = self.GetMethodConfig('Create')
    return self._RunMethod(
        config, request, global_params=global_params)
# Autogenerated request metadata consumed lazily by base_api.
Create.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'POST',
    method_id=u'cloudbuild.projects.triggers.create',
    ordered_params=[u'projectId'],
    path_params=[u'projectId'],
    query_params=[],
    relative_path=u'v1/projects/{projectId}/triggers',
    request_field=u'buildTrigger',
    request_type_name=u'CloudbuildProjectsTriggersCreateRequest',
    response_type_name=u'BuildTrigger',
    supports_download=False,
)
def Delete(self, request, global_params=None):
    r"""Deletes a `BuildTrigger` by its project ID and trigger ID.

This API is experimental.

      Args:
        request: (CloudbuildProjectsTriggersDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Empty) The response message.
    """
    config = self.GetMethodConfig('Delete')
    return self._RunMethod(
        config, request, global_params=global_params)
# Autogenerated request metadata consumed lazily by base_api.
Delete.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'DELETE',
    method_id=u'cloudbuild.projects.triggers.delete',
    ordered_params=[u'projectId', u'triggerId'],
    path_params=[u'projectId', u'triggerId'],
    query_params=[],
    relative_path=u'v1/projects/{projectId}/triggers/{triggerId}',
    request_field='',
    request_type_name=u'CloudbuildProjectsTriggersDeleteRequest',
    response_type_name=u'Empty',
    supports_download=False,
)
def Get(self, request, global_params=None):
    r"""Returns information about a `BuildTrigger`.

This API is experimental.

      Args:
        request: (CloudbuildProjectsTriggersGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (BuildTrigger) The response message.
    """
    config = self.GetMethodConfig('Get')
    return self._RunMethod(
        config, request, global_params=global_params)
# Autogenerated request metadata consumed lazily by base_api.
Get.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'GET',
    method_id=u'cloudbuild.projects.triggers.get',
    ordered_params=[u'projectId', u'triggerId'],
    path_params=[u'projectId', u'triggerId'],
    query_params=[],
    relative_path=u'v1/projects/{projectId}/triggers/{triggerId}',
    request_field='',
    request_type_name=u'CloudbuildProjectsTriggersGetRequest',
    response_type_name=u'BuildTrigger',
    supports_download=False,
)
def List(self, request, global_params=None):
    r"""Lists existing `BuildTrigger`s.

This API is experimental.

      Args:
        request: (CloudbuildProjectsTriggersListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListBuildTriggersResponse) The response message.
    """
    config = self.GetMethodConfig('List')
    return self._RunMethod(
        config, request, global_params=global_params)
# Autogenerated request metadata consumed lazily by base_api.
List.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'GET',
    method_id=u'cloudbuild.projects.triggers.list',
    ordered_params=[u'projectId'],
    path_params=[u'projectId'],
    query_params=[u'pageSize', u'pageToken'],
    relative_path=u'v1/projects/{projectId}/triggers',
    request_field='',
    request_type_name=u'CloudbuildProjectsTriggersListRequest',
    response_type_name=u'ListBuildTriggersResponse',
    supports_download=False,
)
def Patch(self, request, global_params=None):
    r"""Updates a `BuildTrigger` by its project ID and trigger ID.

This API is experimental.

      Args:
        request: (CloudbuildProjectsTriggersPatchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (BuildTrigger) The response message.
    """
    config = self.GetMethodConfig('Patch')
    return self._RunMethod(
        config, request, global_params=global_params)
# Autogenerated request metadata consumed lazily by base_api.
Patch.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'PATCH',
    method_id=u'cloudbuild.projects.triggers.patch',
    ordered_params=[u'projectId', u'triggerId'],
    path_params=[u'projectId', u'triggerId'],
    query_params=[],
    relative_path=u'v1/projects/{projectId}/triggers/{triggerId}',
    request_field=u'buildTrigger',
    request_type_name=u'CloudbuildProjectsTriggersPatchRequest',
    response_type_name=u'BuildTrigger',
    supports_download=False,
)
def Run(self, request, global_params=None):
r"""Runs a `BuildTrigger` at a particular source revision.
Args:
request: (CloudbuildProjectsTriggersRunRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Run')
return self._RunMethod(
config, request, global_params=global_params)
Run.method_config = lambda: base_api.ApiMethodInfo(
| |
method
PARAMS:
name: str
A name/alias given to the model by the user
layers: list of integers
List of neuron size for each layer
dropout: float
Level of dropout
recurrentDropout: float
Level of recurrent dropout
alpha: float
Alpha of the leaky relu function
training: boolean
Whether dropout should be use at time of prediction
enrolWindow: int
Number of samples used to make each prediction
RETURNS:
model: MachineLearningModel
Object with typical machine learning methods like train, predict etc.
"""
global _X_train, _y_train, _default_LSTM_args
model = models.kerasGRU(
params = {
'name': name,
'X_train': _X_train,
'y_train': _y_train,
'args': {
'activation': activation,
'loss': loss,
'optimizer': optimizer,
'metrics': metrics,
'epochs': epochs,
'batchSize': batchSize,
'verbose': verbose,
'callbacks': callbacks,
'enrolWindow': enrolWindow,
'validationSize': validationSize,
'testSize': testSize,
},
},
layers=layers,
dropout=dropout,
recurrentDropout=recurrentDropout,
alpha=alpha,
training=training,
)
return model
def Linear(name):
    """
    FUNCTION:
        Creates an ordinary (unregularized) linear regression model
    PARAMS:
        name: str
            User-provided alias for the model
    RETURNS:
        model: MachineLearningModel
            Wrapper exposing the usual train/predict interface
    """
    return models.sklearnLinear(
        params={'name': name, 'X_train': _X_train, 'y_train': _y_train},
    )
def Linear_Regularized(
    name,
    alphas=(0.1, 1.0, 10.0),
    folds=10,
):
    """
    FUNCTION:
        Creates a ridge-regularized linear model with built-in
        cross-validated selection of the regularization strength
    PARAMS:
        name: str
            User-provided alias for the model
        alphas: tuple of floats
            Candidate regularization strengths to evaluate
        folds: int
            Number of cross-validation folds
    RETURNS:
        model: MachineLearningModel
            Wrapper exposing the usual train/predict interface
    """
    return models.sklearnRidgeCV(
        params={'name': name, 'X_train': _X_train, 'y_train': _y_train},
        alphas=alphas,
        folds=folds,
    )
def ElasticNet(
    name,
    alphas=(0.1, 1.0, 10.0),
    l1_ratio=0.5,
):
    """
    FUNCTION:
        Creates an ElasticNet model fit along an iterative
        regularization path with cross validation
    PARAMS:
        name: str
            User-provided alias for the model
        alphas: tuple of floats
            Candidate regularization strengths to evaluate
        l1_ratio: float
            Mixing ratio between L1 and L2 regularization
    RETURNS:
        model: MachineLearningModel
            Wrapper exposing the usual train/predict interface
    """
    return models.sklearnElasticNetCV(
        params={'name': name, 'X_train': _X_train, 'y_train': _y_train},
        alphas=alphas,
        l1_ratio=l1_ratio,
    )
def DecisionTree(name):
    """
    FUNCTION:
        Creates a decision tree regressor
    PARAMS:
        name: str
            User-provided alias for the model
    RETURNS:
        model: MachineLearningModel
            Wrapper exposing the usual train/predict interface
    """
    return models.sklearnDecisionTree(
        params={'name': name, 'X_train': _X_train, 'y_train': _y_train},
    )
def RandomForest(name):
    """
    FUNCTION:
        Creates a random forest (ensemble of decision trees) regressor
    PARAMS:
        name: str
            User-provided alias for the model
    RETURNS:
        model: MachineLearningModel
            Wrapper exposing the usual train/predict interface
    """
    return models.sklearnRandomForest(
        params={'name': name, 'X_train': _X_train, 'y_train': _y_train},
    )
def BaggingRegressor(name):
    """
    FUNCTION:
        Creates a bagging regressor, which aggregates base regressors
        to achieve a final prediction
    PARAMS:
        name: str
            User-provided alias for the model
    RETURNS:
        model: MachineLearningModel
            Wrapper exposing the usual train/predict interface
    """
    return models.sklearnBagging(
        params={'name': name, 'X_train': _X_train, 'y_train': _y_train},
    )
def AdaBoostRegressor(name):
    """
    FUNCTION:
        Creates an AdaBoost regressor, which fits additional regressor
        copies reweighted according to previous prediction errors
    PARAMS:
        name: str
            User-provided alias for the model
    RETURNS:
        model: MachineLearningModel
            Wrapper exposing the usual train/predict interface
    """
    return models.sklearnAdaBoost(
        params={'name': name, 'X_train': _X_train, 'y_train': _y_train},
    )
def SupportVectorMachine(name):
    """
    FUNCTION:
        Creates a Support Vector Machine regressor
    PARAMS:
        name: str
            User-provided alias for the model
    RETURNS:
        model: MachineLearningModel
            Wrapper exposing the usual train/predict interface
    """
    return models.sklearnSVM(
        params={'name': name, 'X_train': _X_train, 'y_train': _y_train},
    )
def Ensemble(name, modelList):
    """
    FUNCTION:
        Creates an Ensemble model that combines the predictions of
        two or more machine learning models via a linear regressor
    PARAMS:
        name: str
            User-provided alias for the model
        modelList: list of MachineLearningModel objects
            Submodels used to construct the Ensemble
    RETURNS:
        model: EnsembleModel
            Behaves the same as any other MachineLearningModel
    """
    return models.ensembleModel(
        params={'name': name, 'X_train': _X_train, 'y_train': _y_train},
        models=modelList,
    )
def Autoencoder_Regularized(
    name,
    l1_rate=10e-4,
    encodingDim=3,
    activation=_default_MLP_args['activation'],
    loss=_default_MLP_args['loss'],
    optimizer=_default_MLP_args['optimizer'],
    metrics=_default_MLP_args['metrics'],
    epochs=_default_MLP_args['epochs'],
    batchSize=_default_MLP_args['batchSize'],
    verbose=_default_MLP_args['verbose'],
    validationSize=_default_MLP_args['validationSize'],
    testSize=_default_MLP_args['testSize'],
    callbacks=_default_MLP_args['callbacks'],
):
    """
    FUNCTION:
        Creates a multilayer perceptron Autoencoder model
        regularized with an L1 (Lasso) penalty
        NB: Autoencoder models SHOULD NOT and CAN NOT
        be used together with other models, or
        as submodels to Ensemble models
    PARAMS:
        name: str
            User-provided alias for the model
        l1_rate: float
            L1 regularization strength
        encodingDim: int
            Size of the autoencoder bottleneck (middle) layer
    RETURNS:
        model: AutoencoderModel
            Wrapper exposing the usual train/predict interface
    """
    args = {
        'activation': activation,
        'loss': loss,
        'optimizer': optimizer,
        'metrics': metrics,
        'epochs': epochs,
        'batchSize': batchSize,
        'verbose': verbose,
        'callbacks': callbacks,
        'enrolWindow': 0,  # autoencoders do not use an enrol window
        'validationSize': validationSize,
        'testSize': testSize,
    }
    return models.autoencoder_Regularized(
        params={'name': name, 'X_train': _X_train, 'args': args},
        l1_rate=l1_rate,
        encodingDim=encodingDim,
    )
def Autoencoder_Dropout(
    name,
    dropout=0.0,
    encodingDim=3,
    activation=_default_MLP_args['activation'],
    loss=_default_MLP_args['loss'],
    optimizer=_default_MLP_args['optimizer'],
    metrics=_default_MLP_args['metrics'],
    epochs=_default_MLP_args['epochs'],
    batchSize=_default_MLP_args['batchSize'],
    verbose=_default_MLP_args['verbose'],
    validationSize=_default_MLP_args['validationSize'],
    testSize=_default_MLP_args['testSize'],
    callbacks=_default_MLP_args['callbacks'],
):
    """
    FUNCTION:
        Creates a multilayer perceptron Autoencoder model
        regularized with dropout
        NB: Autoencoder models SHOULD NOT and CAN NOT
        be used together with other models, or
        as submodels to Ensemble models
    PARAMS:
        name: str
            User-provided alias for the model
        dropout: float
            Level of dropout
        encodingDim: int
            Size of the autoencoder bottleneck (middle) layer
    RETURNS:
        model: AutoencoderModel
            Wrapper exposing the usual train/predict interface
    """
    args = {
        'activation': activation,
        'loss': loss,
        'optimizer': optimizer,
        'metrics': metrics,
        'epochs': epochs,
        'batchSize': batchSize,
        'verbose': verbose,
        'callbacks': callbacks,
        'enrolWindow': 0,  # autoencoders do not use an enrol window
        'validationSize': validationSize,
        'testSize': testSize,
    }
    return models.autoencoder_Dropout(
        params={'name': name, 'X_train': _X_train, 'args': args},
        dropout=dropout,
        encodingDim=encodingDim,
    )
def reset():
    """
    FUNCTION:
        Resets the state of the module
    PARAMS:
        None
    RETURNS:
        None
    """
    global _filename, _names, _descriptions, _units, _relevantColumns, _columnDescriptions, _columnUnits, _columnNames, _df, _traintime, _testtime, _df_train, _df_test, _targetColumns, _modelList, _X_train, _y_train, _X_test, _y_test, _maxEnrolWindow, _indexColumn
    # clear every piece of module state in one chained assignment
    _filename = _names = _descriptions = _units = None
    _relevantColumns = _columnDescriptions = _columnUnits = _columnNames = None
    _df = _traintime = _testtime = _df_train = _df_test = None
    _targetColumns = _modelList = None
    _X_train = _y_train = _X_test = _y_test = None
    _maxEnrolWindow = _indexColumn = None
def getCallbacks(patience_es, patience_rlr):
    """
    FUNCTION:
        Returns a list of callbacks with the provided properties
    PARAMS:
        patience_es: int
            Number of iterations to wait before EarlyStopping is performed
        patience_rlr: int
            Number of iterations to wait before ReduceLearningRate is performed
    RETURNS:
        List of callbacks
    """
    callbacks = modelFuncs.getBasicCallbacks(
        patience_es=patience_es,
        patience_rlr=patience_rlr,
    )
    return callbacks
def setMLPCallbacks(patience_es, patience_rlr):
    """
    FUNCTION:
        Redefines the default MLP callbacks
        NB: only for current state
    PARAMS:
        patience_es: int
            Number of iterations to wait before EarlyStopping is performed
        patience_rlr: int
            Number of iterations to wait before ReduceLearningRate is performed
    RETURNS:
        None
    """
    global _default_MLP_args
    # fixed NameError: parameter was misspelled 'patrience_rlr' in the call
    _default_MLP_args['callbacks'] = modelFuncs.getBasicCallbacks(patience_es=patience_es, patience_rlr=patience_rlr)
def setLSTMCallbacks(patience_es, patience_rlr):
    """
    FUNCTION:
        Redefines the default LSTM callbacks
        NB: only for current state
    PARAMS:
        patience_es: int
            Number of iterations to wait before EarlyStopping is performed
        patience_rlr: int
            Number of iterations to wait before ReduceLearningRate is performed
    RETURNS:
        None
    """
    global _default_LSTM_args
    # fixed NameError: parameter was misspelled 'patrience_rlr' in the call
    _default_LSTM_args['callbacks'] = modelFuncs.getBasicCallbacks(patience_es=patience_es, patience_rlr=patience_rlr)
def correlationMatrix(df):
    """Compute the correlation matrix of df by delegating to the analysis module."""
    corr_matrix = analysis.correlationMatrix(df)
    return corr_matrix
def pca(df, numberOfComponents, | |
1: ['a', 'e'],
2: ['b', 'c'],
3: ['d'],
}
"""
key_to_vals = defaultdict(list)
for key, val in zip(key_list, val_list):
key_to_vals[key].append(val)
return key_to_vals
def assert_keys_are_subset(dict1, dict2):
    """
    Assert that every key of dict2 also exists in dict1.

    Example:
        >>> # DISABLE_DOCTEST
        >>> dict1 = {1: 1, 2: 2, 3: 3}
        >>> dict2 = {2: 3, 3: 3}
        >>> assert_keys_are_subset(dict1, dict2)
    """
    # iterating a dict yields its keys, so set() gives the key sets directly
    unknown_keys = set(dict2) - set(dict1)
    assert len(unknown_keys) == 0, 'unknown_keys=%r' % (unknown_keys,)
def augdict(dict1, dict2=None, **kwargs):
    """Return a deep copy of dict1 augmented with existing keys from dict2 / kwargs.

    Only keys already present in dict1 may be overridden; unknown keys
    trigger an assertion inside update_existing.
    """
    merged = copy.deepcopy(dict1)
    if dict2 is not None:
        merged = update_existing(merged, dict2, assert_exists=True)
    if kwargs:
        merged = update_existing(merged, kwargs, assert_exists=True)
    return merged
def update_existing(dict1, dict2, copy=False, assert_exists=False,
                    iswarning=False, alias_dict=None):
    r"""
    Update values in dict1 from dict2, but only for keys dict1 already has.

    Args:
        dict1 (dict): dictionary to update
        dict2 (dict): source of new values
        copy (bool): if True, update a shallow copy instead of dict1 itself
        assert_exists (bool): if True, error (or warn) when dict2 holds keys
            unknown to dict1
        iswarning (bool): downgrade the assert_exists failure to a warning
        alias_dict (dict): optional remapping of dict2 keys before lookup

    Returns:
        dict: the updated dictionary

    CommandLine:
        python -m utool.util_dict --test-update_existing

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> dict1 = {'a': 1, 'b': 2, 'c': 3}
        >>> dict2 = {'a': 2, 'd': 3}
        >>> dict1_ = update_existing(dict1, dict2)
        >>> assert 'd' not in dict1
        >>> assert dict1['a'] == 2
        >>> assert dict1_ is dict1
    """
    if assert_exists:
        try:
            assert_keys_are_subset(dict1, dict2)
        except AssertionError as ex:
            from utool import util_dbg
            util_dbg.printex(ex, iswarning=iswarning, N=1)
            if not iswarning:
                raise
    if copy:
        dict1 = dict(dict1)
    aliases = {} if alias_dict is None else alias_dict
    for src_key, val in six.iteritems(dict2):
        dst_key = aliases.get(src_key, src_key)
        if dst_key in dict1:
            dict1[dst_key] = val
    return dict1
def update_dict(dict1, dict2, copy=False, alias_dict=None):
    """Update dict1 with every item of dict2, optionally remapping dict2 keys.

    If copy is True a shallow copy of dict1 is updated and returned instead.
    """
    if copy:
        dict1 = dict(dict1)
    aliases = {} if alias_dict is None else alias_dict
    for src_key, val in six.iteritems(dict2):
        dict1[aliases.get(src_key, src_key)] = val
    return dict1
def dict_update_newkeys(dict_, dict2):
    """ Like dict.update, but does not overwrite items """
    for key, val in six.iteritems(dict2):
        # setdefault only inserts when the key is absent
        dict_.setdefault(key, val)
def is_dicteq(dict1_, dict2_, almosteq_ok=True, verbose_err=True):
    """ Checks to see if dicts are the same. Performs recursion. Handles numpy

    Compares items pairwise in iteration order, so both dicts are assumed
    to yield their keys in the same order.

    Args:
        dict1_ (dict):
        dict2_ (dict):
        almosteq_ok (bool): compare iterable float values within a threshold
        verbose_err (bool): print exception info on mismatch

    Returns:
        bool: True if the dicts match, otherwise False
    """
    import utool as ut
    assert len(dict1_) == len(dict2_), 'dicts are not of same length'
    try:
        for (key1, val1), (key2, val2) in zip(dict1_.items(), dict2_.items()):
            assert key1 == key2, 'key mismatch'
            assert type(val1) == type(val2), 'vals are not same type'
            if HAVE_NUMPY and np.iterable(val1):
                if almosteq_ok and ut.is_float(val1):
                    assert np.all(ut.almost_eq(val1, val2)), 'float vals are not within thresh'
                else:
                    assert all([np.all(x1 == x2) for (x1, x2) in zip(val1, val2)]), 'np vals are different'
            elif isinstance(val1, dict):
                # FIX: the recursive result was previously discarded, so
                # unequal nested dicts were silently reported as equal
                assert is_dicteq(val1, val2, almosteq_ok=almosteq_ok,
                                 verbose_err=verbose_err), 'nested dicts are different'
            else:
                assert val1 == val2, 'vals are different'
    except AssertionError as ex:
        if verbose_err:
            ut.printex(ex)
        return False
    return True
def dict_subset(dict_, keys, default=util_const.NoParam):
    r"""
    Build an OrderedDict restricted to the given keys.

    Args:
        dict_ (dict): source dictionary
        keys (list): keys to extract, in order
        default: optional value used for missing keys; when omitted a
            missing key raises KeyError

    Returns:
        dict: subset dictionary
    """
    if default is util_const.NoParam:
        values = dict_take(dict_, keys)
    else:
        values = dict_take(dict_, keys, default)
    return OrderedDict(zip(keys, values))
def dict_to_keyvals(dict_):
    """Return the items of dict_ as a list of (key, value) tuples."""
    return [item for item in six.iteritems(dict_)]
def dict_setdiff(dict_, negative_keys):
    r"""
    Return a copy of dict_ without the keys listed in negative_keys.

    Args:
        dict_ (dict):
        negative_keys (list): keys to exclude
    """
    # build the exclusion set once instead of per loop iteration
    excluded = set(negative_keys)
    kept_keys = [key for key in six.iterkeys(dict_) if key not in excluded]
    return dict_subset(dict_, kept_keys)
def delete_dict_keys(dict_, key_list):
    r"""
    Remove items from a dictionary inplace. Keys that do not exist are
    ignored.

    Args:
        dict_ (dict): dict like object with a __del__ attribute
        key_list (list): list of keys that specify the items to remove

    Returns:
        dict: dict_ itself, for convenience

    CommandLine:
        python -m utool.util_dict --test-delete_dict_keys

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> import utool as ut
        >>> dict_ = {'bread': 1, 'churches': 1, 'cider': 2, 'very small rocks': 2}
        >>> key_list = ['duck', 'bread', 'cider']
        >>> delete_dict_keys(dict_, key_list)
        >>> result = ut.repr4(dict_, nl=False)
        >>> print(result)
        {'churches': 1, 'very small rocks': 2}
    """
    # only delete keys that actually exist (set intersection)
    for key in set(key_list) & set(dict_.keys()):
        del dict_[key]
    return dict_


delete_keys = delete_dict_keys
def dict_take_gen(dict_, keys, *d):
    r"""
    Generate values from a dictionary for each key in keys.

    Args:
        dict_ (dict):
        keys (list): keys to look up; a string is split on ', ' as a
            copy-paste convenience, and ndarray keys recurse

    Varargs:
        d: if specified is default for key errors

    CommandLine:
        python -m utool.util_dict --test-dict_take_gen

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> import utool as ut
        >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
        >>> keys = [1, 2, 3, 4, 5]
        >>> result = list(dict_take_gen(dict_, keys, None))
        >>> result = ut.repr4(result, nl=False)
        >>> print(result)
        ['a', 'b', 'c', None, None]

    Example2:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
        >>> keys = [1, 2, 3, 4, 5]
        >>> try:
        >>>     print(list(dict_take_gen(dict_, keys)))
        >>>     result = 'did not get key error'
        >>> except KeyError:
        >>>     result = 'correctly got key error'
        >>> print(result)
        correctly got key error
    """
    if isinstance(keys, six.string_types):
        # hack for string keys that makes copy-past easier
        keys = keys.split(', ')
    if len(d) > 1:
        raise ValueError('len(d) must be 1 or 0')
    # with a default use dict.get; without one let __getitem__ raise KeyError
    dictget = dict_.get if d else dict_.__getitem__
    for key in keys:
        if HAVE_NUMPY and isinstance(key, np.ndarray):
            # recursive call for ndarray keys
            yield list(dict_take_gen(dict_, key, *d))
        else:
            yield dictget(key, *d)
def dict_take(dict_, keys, *d):
    """ get multiple values from a dictionary

    Args:
        dict_ (dict):
        keys: an iterable of keys, or a single non-iterable key
        *d: optional single default for missing keys

    Returns:
        list of values, or a single value when ``keys`` is a single
        non-iterable key.
    """
    try:
        return list(dict_take_gen(dict_, keys, *d))
    except TypeError:
        # FIX: ``keys`` was a single non-iterable key; wrap it in a list
        # and unwrap the result (previously the same failing call was
        # repeated verbatim, re-raising the TypeError)
        return list(dict_take_gen(dict_, [keys], *d))[0]


dict_take_list = dict_take
#def dict_take(dict_, keys, *d):
# """ alias """
# try:
# return dict_take_list(dict_, keys, *d)
# except TypeError:
# return dict_take_list(dict_, [keys], *d)[0]
#def dict_unflat_take(dict_, unflat_key_list, *d):
# return [dict_unflat_take(dict_, xs, *d)
# if isinstance(xs, list) else
# dict_take(dict_, xs, *d)
# for xs in unflat_key_list]
def dict_take_asnametup(dict_, keys, name='_NamedTup'):
    """Return the values of keys packed into a namedtuple whose fields are keys."""
    from collections import namedtuple
    tup_cls = namedtuple(name, keys)
    return tup_cls(*dict_take(dict_, keys))
def dict_take_pop(dict_, keys, *d):
    """ like dict_take but pops values off

    With no default, a missing key raises KeyError; with a single default
    vararg, missing keys yield the default instead.

    CommandLine:
        python -m utool.util_dict --test-dict_take_pop

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> import utool as ut
        >>> dict_ = {1: 'a', 'other': None, 'another': 'foo', 2: 'b', 3: 'c'}
        >>> keys = [1, 2, 3, 4, 5]
        >>> print('before: ' + ut.repr4(dict_))
        >>> result = list(dict_take_pop(dict_, keys, None))
        >>> result = ut.repr4(result, nl=False)
        >>> print('after: ' + ut.repr4(dict_))
        >>> assert len(dict_) == 2
        >>> print(result)
        ['a', 'b', 'c', None, None]

    Example2:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dict import *  # NOQA
        >>> import utool as ut
        >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
        >>> keys = [1, 2, 3, 4, 5]
        >>> print('before: ' + ut.repr4(dict_))
        >>> try:
        >>>     print(list(dict_take_pop(dict_, keys)))
        >>>     result = 'did not get key error'
        >>> except KeyError:
        >>>     result = 'correctly got key error'
        >>> assert len(dict_) == 0
        >>> print('after: ' + ut.repr4(dict_))
        >>> print(result)
        correctly got key error
    """
    if len(d) > 1:
        raise ValueError('len(d) must be 1 or 0')
    if d:
        default = d[0]
        return [dict_.pop(key, default) for key in keys]
    return [dict_.pop(key) for key in keys]
def dict_assign(dict_, keys, vals):
    """Pairwise-assign dict_[key] = val; the inverse operation of dict_take.

    Assignment stops at the shorter of keys/vals (zip semantics).
    """
    for key, value in zip(keys, vals):
        dict_[key] = value
def dict_where_len0(dict_):
    """
    Accepts a dict of lists. Returns keys that have vals with no length.

    Returns:
        np.ndarray: array of the keys whose values are empty
    """
    # list() is required on Python 3: dict.keys() is a view object that
    # numpy would otherwise wrap as a useless 0-d object array
    keys = np.array(list(dict_.keys()))
    flags = np.array(list(map(len, dict_.values()))) == 0
    indices = np.where(flags)[0]
    return keys[indices]
def get_dict_column(dict_, colx):
r"""
Args:
dict_ (dict_): a dictionary of lists
colx (int):
CommandLine:
python -m utool.util_dict --test-get_dict_column
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': [0, 1, 2], 'b': | |
and try to find the biggest space for the segment
segments = [
segment.seg
for segment
in sorted(self._machoCtx.segmentsI, key=lambda x: x.seg.vmaddr)
]
# check to make that __TEXT and __LINKEDIT segments are at the edges
if segments[0].segname != b"__TEXT":
raise _ObjCFixerError("MachO file does not start with __TEXT segment.")
if segments[-1].segname != b"__LINKEDIT":
raise _ObjCFixerError("MachO file does not end with __LINKEDIT segment.")
# find the biggest gap
maxGapSize = 0
gapStart = 0
for i in range(len(segments) - 1):
gapStart = segments[i].vmaddr + segments[i].vmsize
gapEnd = segments[i + 1].vmaddr
gapSize = gapEnd - gapStart
gapSize = (segments[i].vmaddr + segments[i].vmsize)
if gapSize > maxGapSize:
maxGapSize = gapSize
leftSeg = segments[i]
pass
pass
if maxGapSize == 0:
raise _ObjCFixerError("Unable to find space for the extra ObjC segment.")
# Get a starting address for the new segment
leftSegOff = self._dyldCtx.convertAddr(leftSeg.vmaddr)[0]
newSegStartAddr = (leftSeg.vmaddr + leftSeg.vmsize + 0x1000) & ~0xFFF
newSegStartOff = (leftSegOff + leftSeg.vmsize + 0x1000) & ~0xFFF
# adjust max gap size to account for page alignment
maxGapSize -= newSegStartAddr - (leftSeg.vmaddr + leftSeg.vmsize)
# create the new segment
newSegment = segment_command_64()
newSegment.cmd = LoadCommands.LC_SEGMENT_64
newSegment.cmdsize = segment_command_64.SIZE # no sections
newSegment.segname = self._extractionCtx.EXTRA_SEGMENT_NAME
newSegment.vmaddr = newSegStartAddr
newSegment.fileoff = newSegStartOff
newSegment.maxprot = 3 # read and write
newSegment.initprot = 3 # read and write
newSegment.nsects = 0
newSegment.flags = 0
self._extraSegment = newSegment
self._extraDataMaxSize = maxGapSize
self._extraDataHead = newSegStartAddr
self._extraData = bytearray()
pass
def _processSections(self) -> None:
    """Walk all sections of the MachO image and process its ObjC metadata.

    Handles the __objc_classlist, __objc_catlist, __objc_protolist and
    __objc_selrefs sections. Class pointers that cannot be fully resolved
    yet are queued in self._futureClasses; selector reference pointers are
    rewritten in place with their processed string addresses.
    """
    for segment in self._machoCtx.segmentsI:
        for sect in segment.sectsI:
            if sect.sectname == b"__objc_classlist":
                # section holds 8-byte pointers to class definitions
                for ptrAddr in range(sect.addr, sect.addr + sect.size, 8):
                    # self._statusBar.update(status="Processing Classes")
                    classAddr = self._slider.slideAddress(ptrAddr)
                    if self._machoCtx.containsAddr(classAddr):
                        if self._processClass(classAddr)[1]:
                            # class not fully resolved; patch the pointer later
                            self._futureClasses.append((ptrAddr, classAddr))
                            pass
                        continue
                    # self._logger.warning(f"Class pointer at {hex(ptrAddr)} points to class outside MachO file.")  # noqa
                    pass
                pass
            elif sect.sectname == b"__objc_catlist":
                for ptrAddr in range(sect.addr, sect.addr + sect.size, 8):
                    # self._statusBar.update(status="Processing Categories")
                    categoryAddr = self._slider.slideAddress(ptrAddr)
                    if self._machoCtx.containsAddr(categoryAddr):
                        self._processCategory(categoryAddr)
                        continue
                    # self._logger.warning(f"Category pointer at {hex(ptrAddr)} points to category outside MachO file.")  # noqa
                    pass
                pass
            elif sect.sectname == b"__objc_protolist":
                for ptrAddr in range(sect.addr, sect.addr + sect.size, 8):
                    # self._statusBar.update(status="Processing Protocols")
                    protoAddr = self._slider.slideAddress(ptrAddr)
                    if self._machoCtx.containsAddr(protoAddr):
                        self._processProtocol(protoAddr)
                        continue
                    # self._logger.warning(f"Protocol pointer at {hex(ptrAddr)} points to protocol outside MachO file.")  # noqa
                    pass
                pass
            elif sect.sectname == b"__objc_selrefs":
                # rewrite each selector reference with the processed string
                file = self._machoCtx.ctxForAddr(sect.addr)
                for ptrAddr in range(sect.addr, sect.addr + sect.size, 8):
                    # self._statusBar.update(status="Processing Selector References")
                    selRefAddr = self._slider.slideAddress(ptrAddr)
                    self._selRefCache[selRefAddr] = ptrAddr
                    newPtr = self._processString(selRefAddr)
                    file.writeBytes(
                        self._dyldCtx.convertAddr(ptrAddr)[0],
                        struct.pack("<Q", newPtr)
                    )
                    pass
                pass
            pass
        pass
    pass
def _addExtraData(self, data: bytes) -> None:
"""Adds the data to the extra data buffer.
Automatically pointer aligns and updates the
counter.
"""
data = bytes(data)
if mod := len(data) % 8:
data += b"\x00" * (8 - mod)
pass
self._extraData.extend(data)
self._extraDataHead += len(data)
pass
def _processCategory(self, categoryAddr: int) -> int:
    """Process an ObjC category definition.

    Recursively processes the category's name, target class, method
    lists, protocols and instance properties, then writes the updated
    definition in place (when inside the MachO file) or appends it to
    the extra data buffer. Results are cached per original address.

    Args:
        categoryAddr: address of the objc_category_t structure.
    Returns:
        The (possibly new) address of the category definition.
    """
    if categoryAddr in self._categoryCache:
        return self._categoryCache[categoryAddr]
    categoryDef = self._slider.slideStruct(categoryAddr, objc_category_t)
    if categoryDef.name:
        categoryDef.name = self._processString(categoryDef.name)
        pass
    needsFutureClass = False
    if categoryDef.cls:
        categoryDef.cls, needsFutureClass = self._processClass(categoryDef.cls)
        pass
    if categoryDef.instanceMethods:
        categoryDef.instanceMethods = self._processMethodList(
            categoryDef.instanceMethods
        )
        pass
    if categoryDef.classMethods:
        categoryDef.classMethods = self._processMethodList(categoryDef.classMethods)
        pass
    if categoryDef.protocols:
        categoryDef.protocols = self._processProtocolList(categoryDef.protocols)
        pass
    if categoryDef.instanceProperties:
        categoryDef.instanceProperties = self._processPropertyList(
            categoryDef.instanceProperties
        )
        pass
    # Add or update data
    if self._machoCtx.containsAddr(categoryAddr):
        newCategoryAddr = categoryAddr
        file = self._machoCtx.ctxForAddr(categoryAddr)
        defOff = self._dyldCtx.convertAddr(categoryAddr)[0]
        file.writeBytes(defOff, categoryDef)
        pass
    else:
        newCategoryAddr = self._extraDataHead
        self._addExtraData(categoryDef)
        pass
    if needsFutureClass:
        # the class pointer could not be resolved yet; patch it later
        futureClass = (
            newCategoryAddr + objc_category_t.cls.offset,
            categoryDef.cls
        )
        self._futureClasses.append(futureClass)
        pass
    self._categoryCache[categoryAddr] = newCategoryAddr
    return newCategoryAddr
def _processClass(self, classAddr: int) -> Tuple[int, bool]:
    """Process a class definition.

    Recursively processes the isa and superclass pointers and the class
    data, then writes the updated definition in place or into the extra
    data buffer. Recursion cycles are broken via self._classesProcessing:
    a class currently being processed is returned unresolved.

    Args:
        classAddr: The address of the class definition.
    Returns:
        If the class is fully defined the updated address
        of the class is returned along with False. Otherwise
        the original address of the class is returned, along
        with True.
    """
    # check if the class is already being processed.
    if classAddr in self._classesProcessing:
        return classAddr, True
    # check if the class was processed before
    if classAddr in self._classCache:
        return self._classCache[classAddr], False
    self._classesProcessing.append(classAddr)
    classDef = self._slider.slideStruct(classAddr, objc_class_t)
    needsFutureIsa = False
    if classDef.isa:
        classDef.isa, needsFutureIsa = self._processClass(classDef.isa)
        pass
    needsFutureSuper = False
    if classDef.superclass:
        classDef.superclass, needsFutureSuper = self._processClass(
            classDef.superclass
        )
        pass
    # zero out cache and vtable
    classDef.method_cache = 0
    classDef.vtable = 0
    if classDef.data:
        # Low bit marks Swift classes
        isStubClass = not self._machoCtx.containsAddr(classAddr)
        classDef.data = self._processClassData(
            classDef.data & ~0x3,
            isStubClass=isStubClass
        )
        pass
    # add or update data
    if self._machoCtx.containsAddr(classAddr):
        newClassAddr = classAddr
        file = self._machoCtx.ctxForAddr(classAddr)
        defOff = self._dyldCtx.convertAddr(classAddr)[0]
        file.writeBytes(defOff, classDef)
        pass
    else:
        newClassAddr = self._extraDataHead
        self._addExtraData(classDef)
        pass
    # add any future pointers if necessary
    if needsFutureIsa:
        futureClass = (
            newClassAddr + objc_class_t.isa.offset,
            classDef.isa
        )
        self._futureClasses.append(futureClass)
        pass
    if needsFutureSuper:
        futureClass = (
            newClassAddr + objc_class_t.superclass.offset,
            classDef.superclass
        )
        self._futureClasses.append(futureClass)
        pass
    self._classesProcessing.remove(classAddr)
    self._classCache[classAddr] = newClassAddr
    return newClassAddr, False
def _processClassData(self, classDataAddr: int, isStubClass=False) -> int:
    """Process an objc_class_data_t structure.

    Processes the referenced strings and the method, protocol, ivar and
    property lists, then writes the updated structure in place or into
    the extra data buffer. Results are cached per original address.

    Args:
        classDataAddr: address of the class data structure.
        isStubClass: when True, method imps in baseMethods are zeroed
            (forwarded to _processMethodList as noImp).
    Returns:
        The (possibly new) address of the class data.
    """
    if classDataAddr in self._classDataCache:
        return self._classDataCache[classDataAddr]
    classDataDef = self._slider.slideStruct(classDataAddr, objc_class_data_t)
    if classDataDef.ivarLayout:
        classDataDef.ivarLayout = self._processInt(classDataDef.ivarLayout, 1)
        pass
    if classDataDef.name:
        classDataDef.name = self._processString(classDataDef.name)
        pass
    if classDataDef.baseMethods:
        classDataDef.baseMethods = self._processMethodList(
            classDataDef.baseMethods,
            noImp=isStubClass
        )
        pass
    if classDataDef.baseProtocols:
        classDataDef.baseProtocols = self._processProtocolList(
            classDataDef.baseProtocols
        )
        pass
    if classDataDef.ivars:
        classDataDef.ivars = self._processIvarList(classDataDef.ivars)
        pass
    if classDataDef.weakIvarLayout:
        classDataDef.weakIvarLayout = self._processInt(
            classDataDef.weakIvarLayout,
            1
        )
        pass
    if classDataDef.baseProperties:
        classDataDef.baseProperties = self._processPropertyList(
            classDataDef.baseProperties
        )
        pass
    # add or update data
    if self._machoCtx.containsAddr(classDataAddr):
        newClassDataAddr = classDataAddr
        file = self._machoCtx.ctxForAddr(classDataAddr)
        defOff = self._dyldCtx.convertAddr(classDataAddr)[0]
        file.writeBytes(defOff, classDataDef)
        pass
    else:
        newClassDataAddr = self._extraDataHead
        self._addExtraData(classDataDef)
        pass
    self._classDataCache[classDataAddr] = newClassDataAddr
    return newClassDataAddr
def _processIvarList(self, ivarListAddr: int) -> int:
    """Process an objc_ivar_list_t and its entries.

    Each ivar's offset pointer, name and type strings are processed,
    and the rebuilt list is written in place or into the extra data
    buffer. Results are cached per original address.

    Args:
        ivarListAddr: address of the ivar list structure.
    Returns:
        The (possibly new) address of the ivar list, or 0 when the
        list's entsize does not match objc_ivar_t.
    """
    if ivarListAddr in self._ivarListCache:
        return self._ivarListCache[ivarListAddr]
    ivarListDef = self._slider.slideStruct(ivarListAddr, objc_ivar_list_t)
    ivarListData = bytearray(ivarListDef)
    # check size
    if ivarListDef.entsize != objc_ivar_t.SIZE:
        # self._logger.error(f"Ivar list at {hex(ivarListAddr)}, has an entsize that doesn't match objc_ivar_t")  # noqa
        return 0
    for i in range(ivarListDef.count):
        ivarAddr = (
            ivarListAddr
            + objc_ivar_list_t.SIZE
            + (i * ivarListDef.entsize)
        )
        ivarDef = self._slider.slideStruct(ivarAddr, objc_ivar_t)
        if ivarDef.offset:
            ivarDef.offset = self._processInt(ivarDef.offset, 4)
            pass
        if ivarDef.name:
            ivarDef.name = self._processString(ivarDef.name)
            pass
        if ivarDef.type:
            ivarDef.type = self._processString(ivarDef.type)
            pass
        ivarListData.extend(ivarDef)
        pass
    # add or update data
    if self._machoCtx.containsAddr(ivarListAddr):
        newIvarListAddr = ivarListAddr
        file = self._machoCtx.ctxForAddr(ivarListAddr)
        defOff = self._dyldCtx.convertAddr(ivarListAddr)[0]
        file.writeBytes(defOff, ivarListData)
        pass
    else:
        newIvarListAddr = self._extraDataHead
        self._addExtraData(ivarListData)
        pass
    self._ivarListCache[ivarListAddr] = newIvarListAddr
    return newIvarListAddr
def _processProtocolList(self, protoListAddr: int) -> int:
    """Process an objc_protocol_list_t and its 8-byte protocol pointers.

    Each protocol is processed and the rebuilt pointer list is written
    in place or into the extra data buffer. Results are cached per
    original address.

    Args:
        protoListAddr: address of the protocol list structure.
    Returns:
        The (possibly new) address of the protocol list.
    """
    if protoListAddr in self._protocolListCache:
        return self._protocolListCache[protoListAddr]
    protoListDef = self._slider.slideStruct(protoListAddr, objc_protocol_list_t)
    protoListData = bytearray(protoListDef)
    for i in range(protoListDef.count):
        protoAddr = self._slider.slideAddress(
            protoListAddr
            + objc_protocol_list_t.SIZE
            + (i * 8)
        )
        newProtoAddr = self._processProtocol(protoAddr)
        protoListData.extend(struct.pack("<Q", newProtoAddr))
        pass
    # Add or update data
    if self._machoCtx.containsAddr(protoListAddr):
        newProtoListAddr = protoListAddr
        file = self._machoCtx.ctxForAddr(protoListAddr)
        defOff = self._dyldCtx.convertAddr(protoListAddr)[0]
        file.writeBytes(defOff, protoListData)
        pass
    else:
        newProtoListAddr = self._extraDataHead
        self._addExtraData(protoListData)
        pass
    self._protocolListCache[protoListAddr] = newProtoListAddr
    return newProtoListAddr
def _processProtocol(self, protoAddr: int) -> int:
    """Process an ObjC protocol definition.

    Processes the protocol's name, nested protocols, method lists
    (with imps zeroed, as protocols carry no implementations) and
    properties, then writes the structure (truncated to its declared
    size) in place or into the extra data buffer. Results are cached.

    Args:
        protoAddr: address of the objc_protocol_t structure.
    Returns:
        The (possibly new) address of the protocol definition.
    """
    if protoAddr in self._protocolCache:
        return self._protocolCache[protoAddr]
    protoDef = self._slider.slideStruct(protoAddr, objc_protocol_t)
    # protocol isa's should be 0
    protoDef.isa = 0
    if protoDef.name:
        protoDef.name = self._processString(protoDef.name)
        pass
    if protoDef.protocols:
        protoDef.protocols = self._processProtocolList(protoDef.protocols)
        pass
    if protoDef.instanceMethods:
        protoDef.instanceMethods = self._processMethodList(
            protoDef.instanceMethods,
            noImp=True
        )
        pass
    if protoDef.classMethods:
        protoDef.classMethods = self._processMethodList(
            protoDef.classMethods,
            noImp=True
        )
        pass
    if protoDef.optionalInstanceMethods:
        protoDef.optionalInstanceMethods = self._processMethodList(
            protoDef.optionalInstanceMethods,
            noImp=True
        )
        pass
    if protoDef.optionalClassMethods:
        protoDef.optionalClassMethods = self._processMethodList(
            protoDef.optionalClassMethods,
            noImp=True
        )
        pass
    if protoDef.instanceProperties:
        protoDef.instanceProperties = self._processPropertyList(
            protoDef.instanceProperties
        )
        pass
    # FIX: the trailing fields exist only when the declared structure
    # size is large enough to contain them (the comparisons were
    # inverted, which skipped the fields exactly when they were present
    # and read garbage when they were not)
    hasExtendedMethodTypes = protoDef.size >= 80
    if protoDef.extendedMethodTypes and hasExtendedMethodTypes:
        # const char **extendedMethodTypes;
        oldPtr = self._slider.slideAddress(protoDef.extendedMethodTypes)
        newPtr = self._processString(oldPtr)
        if self._machoCtx.containsAddr(protoDef.extendedMethodTypes):
            file = self._machoCtx.ctxForAddr(protoDef.extendedMethodTypes)
            ptrOff = self._dyldCtx.convertAddr(protoDef.extendedMethodTypes)[0]
            struct.pack_into("<Q", file.file, ptrOff, newPtr)
            pass
        else:
            protoDef.extendedMethodTypes = self._extraDataHead
            ptrData = struct.pack("<Q", newPtr)
            self._addExtraData(ptrData)
            pass
        pass
    hasDemangledName = protoDef.size >= 88
    if protoDef.demangledName and hasDemangledName:
        protoDef.demangledName = self._processString(protoDef.demangledName)
        pass
    hasClassProperties = protoDef.size >= 96
    if protoDef.classProperties and hasClassProperties:
        protoDef.classProperties = self._processPropertyList(
            protoDef.classProperties
        )
        pass
    # Add or update data, truncated to the declared structure size
    protoData = bytes(protoDef)[:protoDef.size]
    if self._machoCtx.containsAddr(protoAddr):
        newProtoAddr = protoAddr
        file = self._machoCtx.ctxForAddr(protoAddr)
        defOff = self._dyldCtx.convertAddr(protoAddr)[0]
        file.writeBytes(defOff, protoData)
        pass
    else:
        newProtoAddr = self._extraDataHead
        self._addExtraData(protoData)
        pass
    self._protocolCache[protoAddr] = newProtoAddr
    return newProtoAddr
def _processPropertyList(self, propertyListAddr: int) -> int:
    """Process an objc_property_list_t and its entries.

    Each property's name and attribute strings are processed, and the
    rebuilt list is written in place or into the extra data buffer.
    Results are cached per original address.

    Args:
        propertyListAddr: address of the property list structure.
    Returns:
        The (possibly new) address of the property list, or 0 when the
        list's entsize does not match objc_property_t.
    """
    if propertyListAddr in self._propertyListCache:
        return self._propertyListCache[propertyListAddr]
    propertyListDef = self._slider.slideStruct(
        propertyListAddr,
        objc_property_list_t
    )
    # check size
    if propertyListDef.entsize != objc_property_t.SIZE:
        # self._logger.error(f"Property list at {hex(propertyListAddr)} has an entsize that doesn't match objc_property_t")  # noqa
        return 0
    propertyListData = bytearray(propertyListDef)
    for i in range(propertyListDef.count):
        propertyAddr = (
            propertyListAddr
            + propertyListDef.SIZE
            + (i * propertyListDef.entsize)
        )
        propertyDef = self._slider.slideStruct(propertyAddr, objc_property_t)
        if propertyDef.name:
            propertyDef.name = self._processString(propertyDef.name)
            pass
        if propertyDef.attributes:
            propertyDef.attributes = self._processString(propertyDef.attributes)
            pass
        propertyListData.extend(propertyDef)
        pass
    # Add or update data
    if self._machoCtx.containsAddr(propertyListAddr):
        newPropertyListAddr = propertyListAddr
        file = self._machoCtx.ctxForAddr(propertyListAddr)
        defOff = self._dyldCtx.convertAddr(propertyListAddr)[0]
        file.writeBytes(defOff, propertyListData)
        pass
    else:
        newPropertyListAddr = self._extraDataHead
        self._addExtraData(propertyListData)
        pass
    self._propertyListCache[propertyListAddr] = newPropertyListAddr
    return newPropertyListAddr
def _processMethodList(self, methodListAddr: int, noImp=False) -> int:
    """Process an ObjC method list in either small (relative offsets)
    or large (absolute pointers) layout.

    For small layouts, the 32-bit relative name/type offsets are
    recomputed; when the list is relocated into the extra data buffer
    the relative fields are fixed up against the new list address.
    Results are cached per original address.

    Args:
        methodListAddr: address of the objc_method_list_t structure.
        noImp: when True the imp field of every method is zeroed
            (used for protocols and stub classes).
    Returns:
        The (possibly new) address of the method list, or 0 when the
        entsize does not match the expected method struct size.
    """
    if methodListAddr in self._methodListCache:
        return self._methodListCache[methodListAddr]
    methodListDef = self._slider.slideStruct(methodListAddr, objc_method_list_t)
    methodListData = bytearray(methodListDef)
    usesRelativeMethods = methodListDef.usesRelativeMethods()
    entsize = methodListDef.getEntsize()
    # check if size is correct
    if usesRelativeMethods and entsize != objc_method_small_t.SIZE:
        # self._logger.error(f"Small method list at {hex(methodListAddr)}, has an entsize that doesn't match the size of objc_method_small_t")  # noqa
        return 0
    elif not usesRelativeMethods and entsize != objc_method_large_t.SIZE:
        # self._logger.error(f"Large method list at {hex(methodListAddr)}, has an entsize that doesn't match the size of objc_method_large_t")  # noqa
        return 0
    # fix relative pointers after we reserve a new address for the method list
    # contains a list of tuples of field offsets and their target addresses
    relativeFixups: List[Tuple[int, int]] = []
    for i in range(methodListDef.count):
        methodAddr = (
            methodListAddr
            + objc_method_list_t.SIZE
            + (i * entsize)
        )
        if usesRelativeMethods:
            methodDef = self._slider.slideStruct(methodAddr, objc_method_small_t)
            # offset of this method struct (== its name field) within the list
            methodOff = objc_method_list_t.SIZE + (i * entsize)
            if methodDef.name:
                if self._usesObjcRoRelativeNames:
                    # relative name offsets are based at the shared
                    # optimized method names table
                    baseAddr = self._optMethodNamesAddr
                    pass
                else:
                    baseAddr = methodAddr
                    pass
                nameAddr = baseAddr + methodDef.name
                newNamePtr = self._processMethodName(nameAddr)
                # make the name ptr relative to itself
                methodDef.name = newNamePtr - methodAddr
                relativeFixups.append((methodOff, newNamePtr))
                pass
            if methodDef.types:
                # types field sits 4 bytes into the method struct
                typesAddr = methodAddr + 4 + methodDef.types
                newTypesAddr = self._processString(typesAddr)
                methodDef.types = newTypesAddr - (methodAddr + 4)
                relativeFixups.append((methodOff + 4, newTypesAddr))
                pass
            if noImp:
                methodDef.imp = 0
                pass
            methodListData.extend(methodDef)
            pass
        else:
            methodDef = self._slider.slideStruct(methodAddr, objc_method_large_t)
            if methodDef.name:
                methodDef.name = self._processString(methodDef.name)
                pass
            if methodDef.types:
                methodDef.types = self._processString(methodDef.types)
                pass
            if noImp:
                methodDef.imp = 0
                pass
            methodListData.extend(methodDef)
            pass
        pass
    # add or update data
    if self._machoCtx.containsAddr(methodListAddr):
        newMethodListAddr = methodListAddr
        file = self._machoCtx.ctxForAddr(methodListAddr)
        defOff = self._dyldCtx.convertAddr(methodListAddr)[0]
        file.writeBytes(defOff, methodListData)
        pass
    else:
        newMethodListAddr = self._extraDataHead
        # fix relative offsets now that we changed the address
        for fieldOff, fieldTarget in relativeFixups:
            newValue = fieldTarget - (newMethodListAddr + fieldOff)
            struct.pack_into("<i", methodListData, fieldOff, newValue)
            pass
        self._addExtraData(methodListData)
        pass
    self._methodListCache[methodListAddr] = newMethodListAddr
    return newMethodListAddr
def _processString(self, stringAddr: int) -> int:
    """Ensure the C string at stringAddr lives inside the MachO file.

    Strings already inside the file keep their address; strings outside
    are copied into the extra data buffer. Results are cached per
    original address.

    Returns:
        The (possibly new) address of the string, or None when the
        address cannot be converted to a file offset.
    """
    if stringAddr in self._stringCache:
        return self._stringCache[stringAddr]
    # add or update data
    if self._machoCtx.containsAddr(stringAddr):
        newStringAddr = stringAddr
        pass
    else:
        newStringAddr = self._extraDataHead
        stringOff, ctx = self._dyldCtx.convertAddr(stringAddr) or (None, None)
        # FIX: compare against None explicitly; a zero file offset is
        # falsy but would be a valid conversion result
        if stringOff is None:
            return None
        stringData = ctx.readString(stringOff)
        self._addExtraData(stringData)
        pass
    self._stringCache[stringAddr] = newStringAddr
    return newStringAddr
def _processInt(self, intAddr: int, intSize: int) -> int:
    """Ensure the integer at intAddr lives inside the MachO file.

    When the address is outside the file, intSize bytes are copied into
    the extra data buffer. Results are cached per original address.

    Args:
        intAddr: address of the integer.
        intSize: size of the integer in bytes.
    Returns:
        The (possibly new) address of the integer.
    """
    if intAddr in self._intCache:
        return self._intCache[intAddr]
    if self._machoCtx.containsAddr(intAddr):
        newIntAddr = intAddr
        pass
    else:
        newIntAddr = self._extraDataHead
        intOff, ctx = self._dyldCtx.convertAddr(intAddr)
        intData = ctx.getBytes(intOff, intSize)
        self._addExtraData(intData)
        pass
    self._intCache[intAddr] = newIntAddr
    return newIntAddr
def _processMethodName(self, stringAddr: int) -> int:
    """Process a method name string.

    Returns:
        The address of a pointer (reserved in the extra data buffer)
        that points to the processed method name string. Results are
        cached per original string address.
    """
    if stringAddr in self._methodNameCache:
        return self._methodNameCache[stringAddr]
    # TODO: search selrefs first
    # process the string first: it may grow the extra data buffer, and
    # the pointer slot must be reserved at the head *after* that
    targetAddr = self._processString(stringAddr)
    namePtrAddr = self._extraDataHead
    self._addExtraData(struct.pack("<Q", targetAddr))
    self._methodNameCache[stringAddr] = namePtrAddr
    return namePtrAddr
def _finalizeFutureClasses(self) -> None:
    """Resolve and patch all deferred class pointers.

    Each queued (pointer address, class address) pair is re-processed;
    the resolved class address is written back either into the extra
    data buffer or directly into the MachO file.
    """
    extraSegStart = self._extraDataHead - len(self._extraData)
    while len(self._futureClasses):
        futureClass = self._futureClasses.pop()
        newAddr, needsFuture = self._processClass(futureClass[1])
        if needsFuture:
            # self._logger.error(f"Unable to resolve class pointer at {hex(futureClass[0])}")  # noqa
            continue
        destPtr = futureClass[0]
        if destPtr >= extraSegStart and destPtr < self._extraDataHead:
            # pointer lives inside the extra data buffer
            ptrOffset = destPtr - extraSegStart
            struct.pack_into("<Q", self._extraData, ptrOffset, newAddr)
            pass
        else:
            # pointer lives in the original file
            file = self._machoCtx.ctxForAddr(destPtr)
            ptrOffset = self._dyldCtx.convertAddr(destPtr)[0]
            # FIX: use explicit little-endian "<Q" like every other
            # pointer write ("Q" alone uses native mode and alignment)
            struct.pack_into("<Q", file.file, ptrOffset, newAddr)
            pass
        pass
    pass
def _checkSpaceConstraints(self) -> None:
    """Check if we have enough space to add the new segment.

    Verifies that the mach header has room for one more
    segment_command_64 (asking _makeHeaderSpace to free space if not),
    and that the accumulated extra data fits in the gap found earlier.

    Raises:
        _ObjCFixerError: if the extra ObjC data exceeds the available gap.
    """
    # Check header
    headerEnd = (
        self._machoCtx.segmentsI[0].seg.vmaddr
        + self._machoCtx.header.sizeofcmds
        + mach_header_64.SIZE
    )
    textSectStart = self._machoCtx.segments[b"__TEXT"].sects[b"__text"].addr
    if (headerEnd + segment_command_64.SIZE) > textSectStart:
        spaceNeeded = (headerEnd + segment_command_64.SIZE) - textSectStart
        self._makeHeaderSpace(spaceNeeded)
        pass
    # Check data space
    if len(self._extraData) > self._extraDataMaxSize:
        raise _ObjCFixerError("Not enough space to add ObjC data.")
    pass
def _makeHeaderSpace(self, spaceNeeded: int) -> None:
"""Attempt to make more space in the header.
"""
bytesSaved = 0
commandsToRemove = []
# LC_UUID
# self._logger.info("Not enough header space, removing UUID command.")
if uuidCmd | |
import os
from typing import Dict, Optional, Union
import geopandas as gpd
import numpy as np
import pandas as pd
from geopandas import GeoDataFrame
from shapely.geometry import Point
from pyproj import CRS
from .logger import RanchLogger
from .osm import add_two_way_osm, highway_attribute_list_to_value
from .parameters import Parameters
from .sharedstreets import extract_osm_link_from_shst_extraction, read_shst_extraction
from .utils import (
buffer1,
fill_na,
generate_centroid_connectors_link,
generate_centroid_connectors_shape,
get_non_near_connectors,
haversine_distance,
identify_dead_end_nodes,
)
from .parameters import standard_crs, alt_standard_crs
class Roadway(object):
"""
Roadway Network Object
"""
def __init__(
self,
nodes: GeoDataFrame,
links: GeoDataFrame,
shapes: GeoDataFrame,
parameters: Union[Parameters, dict] = {},
):
"""
Constructor
Args:
nodes: geodataframe of nodes
links: dataframe of links
shapes: geodataframe of shapes
parameters: dictionary of parameter settings (see Parameters class) or an instance of Parameters. If not specified, will use default parameters.
"""
# convert to standard crs
nodes_df = nodes.to_crs(parameters.standard_crs)
links_df = links.to_crs(parameters.standard_crs)
shapes_df = shapes.to_crs(parameters.standard_crs)
self.nodes_df = nodes_df
self.links_df = links_df
self.shapes_df = shapes_df
# will have to change if want to alter them
if type(parameters) is dict:
self.parameters = Parameters(**parameters)
elif isinstance(parameters, Parameters):
self.parameters = Parameters(**parameters.__dict__)
else:
msg = "Parameters should be a dict or instance of Parameters: found {} which is of type:{}".format(
parameters, type(parameters)
)
RanchLogger.error(msg)
raise ValueError(msg)
def create_roadway_network_from_extracts(
shst_extract_dir: str,
osm_extract_dir: str,
parameters: Dict,
):
"""
creates roadway network from shst and osm extracts
"""
if not shst_extract_dir:
msg = "Please specify directory for sharedstreet extraction files."
RanchLogger.error(msg)
raise ValueError(msg)
if not osm_extract_dir:
msg = "Please specify directory for osmnx extraction files."
RanchLogger.error(msg)
raise ValueError(msg)
if shst_extract_dir:
RanchLogger.info("Reading sharedstreets data")
shst_link_gdf = read_shst_extraction(shst_extract_dir, "*.out.geojson")
# shst geometry file might have duplicates, if multiple geometries has overlapping tiles
# drop duplicates
RanchLogger.info("Removing duplicates in shst extraction data")
RanchLogger.info(
"...before removing duplicates, shst extraction has {} geometries.".format(
shst_link_gdf.shape[0]
)
)
shst_link_non_dup_gdf = shst_link_gdf.drop_duplicates(
subset=[
"id",
"fromIntersectionId",
"toIntersectionId",
"forwardReferenceId",
"backReferenceId",
]
)
RanchLogger.info(
"...after removing duplicates, shst extraction has {} geometries.".format(
shst_link_non_dup_gdf.shape[0]
)
)
if osm_extract_dir:
RanchLogger.info("Reading osmnx data")
osmnx_link_gdf = gpd.read_file(
os.path.join(osm_extract_dir, "link.geojson")
)
osmnx_node_gdf = gpd.read_file(
os.path.join(osm_extract_dir, "node.geojson")
)
RanchLogger.info("Extracting corresponding osm ways for every shst geometry")
osm_from_shst_link_df = extract_osm_link_from_shst_extraction(
shst_link_non_dup_gdf
)
# add two-way osm links
osm_from_shst_link_df = add_two_way_osm(osm_from_shst_link_df, osmnx_link_gdf)
# fill na
osm_from_shst_link_df = fill_na(osm_from_shst_link_df)
# aggregate osm data back to shst geometry based links
link_gdf = Roadway.consolidate_osm_way_to_shst_link(osm_from_shst_link_df)
# calculate roadway property
highway_to_roadway_df = pd.read_csv(
parameters.highway_to_roadway_crosswalk_file
).fillna("")
highway_to_roadway_dict = pd.Series(
highway_to_roadway_df.roadway.values, index=highway_to_roadway_df.highway
).to_dict()
roadway_hierarchy_dict = pd.Series(
highway_to_roadway_df.hierarchy.values, index=highway_to_roadway_df.roadway
).to_dict()
link_gdf["roadway"] = link_gdf.apply(
lambda x: highway_attribute_list_to_value(
x, highway_to_roadway_dict, roadway_hierarchy_dict
),
axis=1,
)
# there are links with different shstgeomid, but same shstrefid, to/from nodes
# drop one of the links that have two shstGeomId
link_gdf.drop_duplicates(subset=["shstReferenceId"], inplace=True)
# add network type variables
network_type_df = pd.read_csv(parameters.network_type_file)
link_gdf = pd.merge(link_gdf, network_type_df, how="left", on="roadway")
# create node gdf
node_gdf = Roadway.create_node_gdf(link_gdf)
node_gdf = Roadway.add_network_type_for_nodes(link_gdf, node_gdf)
# create shape gdf
shape_gdf = shst_link_non_dup_gdf[
shst_link_non_dup_gdf.id.isin(link_gdf.shstGeometryId.tolist())
].copy()
roadway_network = Roadway(
nodes=node_gdf, links=link_gdf, shapes=shape_gdf, parameters=parameters
)
return roadway_network
def consolidate_osm_way_to_shst_link(osm_link):
"""
if a shst link has more than one osm ways, aggregate info into one, e.g. series([1,2,3]) to cell value [1,2,3]
Parameters
----------
osm link with shst info
return
----------
shst link with osm info
"""
osm_link_gdf = osm_link.copy()
agg_dict = {
"geometry": lambda x: x.iloc[0],
"u": lambda x: x.iloc[0],
"v": lambda x: x.iloc[-1],
}
for c in osm_link_gdf.columns:
if c in [
"link",
"nodeIds",
"oneWay",
"roadClass",
"roundabout",
"wayId",
"access",
"area",
"bridge",
"est_width",
"highway",
"junction",
"key",
"landuse",
"lanes",
"maxspeed",
"name",
"oneway",
"ref",
"service",
"tunnel",
"width",
]:
agg_dict.update(
{c: lambda x: list(x) if len(list(x)) > 1 else list(x)[0]}
)
RanchLogger.info(
"Start aggregating osm segments to one shst link for forward links"
)
forward_link_gdf = osm_link_gdf[osm_link_gdf.reverse_out == 0].copy()
if len(forward_link_gdf) > 0:
forward_link_gdf = (
forward_link_gdf.groupby(
[
"shstReferenceId",
"id",
"shstGeometryId",
"fromIntersectionId",
"toIntersectionId",
]
)
.agg(agg_dict)
.reset_index()
)
forward_link_gdf["forward"] = 1
else:
forward_link_gdf = None
RanchLogger.info(
"Start aggregating osm segments to one shst link for backward links"
)
backward_link_gdf = osm_link_gdf[osm_link_gdf.reverse_out == 1].copy()
if len(backward_link_gdf) > 0:
agg_dict.update({"u": lambda x: x.iloc[-1], "v": lambda x: x.iloc[0]})
backward_link_gdf = (
backward_link_gdf.groupby(
[
"shstReferenceId",
"id",
"shstGeometryId",
"fromIntersectionId",
"toIntersectionId",
]
)
.agg(agg_dict)
.reset_index()
)
else:
backward_link_gdf = None
shst_link_gdf = None
if forward_link_gdf is None:
RanchLogger.info("back")
shst_link_gdf = backward_link_gdf
if backward_link_gdf is None:
RanchLogger.info("for")
shst_link_gdf = forward_link_gdf
if (forward_link_gdf is not None) and (backward_link_gdf is not None):
RanchLogger.info("all")
shst_link_gdf = pd.concat([forward_link_gdf, backward_link_gdf],
sort = False,
ignore_index = True)
shst_link_gdf = GeoDataFrame(shst_link_gdf,
crs = standard_crs)
return shst_link_gdf
@staticmethod
def create_node_gdf(link_gdf) -> GeoDataFrame:
"""
create shst node gdf from shst geometry
Paramters
---------
link_gdf: shst links with osm info
return
---------
shst nodes with osm info
"""
RanchLogger.info("Start creating shst nodes")
# geometry only matches for forward direction
forward_link_gdf = link_gdf[link_gdf.forward == 1].copy()
# create point geometry from shst linestring
forward_link_gdf["u_point"] = forward_link_gdf.apply(
lambda x: Point(list(x.geometry.coords)[0]), axis=1
)
forward_link_gdf["v_point"] = forward_link_gdf.apply(
lambda x: Point(list(x.geometry.coords)[-1]), axis=1
)
# get from points
point_gdf = forward_link_gdf[["u", "fromIntersectionId", "u_point"]].copy()
point_gdf.rename(
columns={
"u": "osm_node_id",
"fromIntersectionId": "shst_node_id",
"u_point": "geometry",
},
inplace=True,
)
# append to points
point_gdf = pd.concat(
[
point_gdf,
forward_link_gdf[["v", "toIntersectionId", "v_point"]].rename(
columns={
"v": "osm_node_id",
"toIntersectionId": "shst_node_id",
"v_point": "geometry",
}
),
],
sort=False,
ignore_index=True,
)
# drop duplicates
point_gdf.drop_duplicates(subset = ["osm_node_id", "shst_node_id"], inplace = True)
point_gdf = GeoDataFrame(point_gdf,
crs = standard_crs)
return point_gdf
@staticmethod
def add_network_type_for_nodes(links, nodes):
"""
add network type variable for node
"""
A_B_df = pd.concat(
[
links[["u", "drive_access", "walk_access", "bike_access"]].rename(
columns={"u": "osm_node_id"}
),
links[["v", "drive_access", "walk_access", "bike_access"]].rename(
columns={"v": "osm_node_id"}
),
],
sort=False,
ignore_index=True,
)
A_B_df.drop_duplicates(inplace=True)
A_B_df = A_B_df.groupby("osm_node_id").max().reset_index()
node_gdf = pd.merge(nodes, A_B_df, how="left", on="osm_node_id")
return node_gdf
# step 5 tidy roadway
def tidy_roadway(
self,
county_boundary_file: str,
county_variable_name: str,
create_node_link_id: bool = False
):
"""
step 5: clean up roadway object
Args:
county_boundary_file: path to county polygon file with county variable name
county_variable_name: variable name in the county boundary file that has the name of county
create_node_link_id: Boolean, if create internal node and link id, which is in addition to osm and shst ids.
"""
if not county_boundary_file:
msg = "Missing polygon file for county boundary."
RanchLogger.error(msg)
raise ValueError(msg)
if county_boundary_file:
filename, file_extension = os.path.splitext(county_boundary_file)
if file_extension in [".shp", ".geojson"]:
county_gdf = gpd.read_file(county_boundary_file)
self.county_gdf = county_gdf
self.county_variable_name = county_variable_name
else:
msg = "Invalid boundary file, should be .shp or .geojson"
RanchLogger.error(msg)
raise ValueError(msg)
RanchLogger.info("Starting Step 5 Tidy Roadway")
## 5.0 join county name to shapes and nodes
self._calculate_county(
county_gdf=county_gdf, county_variable_name=county_variable_name
)
## 5.1 keep links within county boundary, keep nodes and shapes accordingly
self._keep_links_nodes_within_county()
## 5.2 drop circular links
self._drop_circular_links()
## 5.3 flag dead end
self._make_dead_end_non_drive()
## 5.4 drop duplicate links between same AB node pair
self._drop_alternative_links_between_same_AB_nodes()
## 5.5 link and node numbering
if create_node_link_id:
self._link_node_numbering()
    def _calculate_county(
        self,
        county_gdf: GeoDataFrame,
        county_variable_name: str,
    ):
        """Spatially join county names onto ``self.links_df`` and ``self.nodes_df``.

        Adds a ``county`` column to both frames. Nodes outside every county
        polygon get the literal value ``'outside'``; links outside keep NaN,
        which ``_keep_links_nodes_within_county`` later relies on to drop them.
        """
        links_df = self.links_df.copy()
        nodes_df = self.nodes_df.copy()
        # links_centroid_df['geometry'] = links_centroid_df["geometry"].centroid
        RanchLogger.info(
            "Joining network with county boundary file for {} county".format(
                county_gdf[county_variable_name].unique()
            )
        )
        # normalize an alternate-but-equivalent CRS label before reprojecting
        if county_gdf.crs == alt_standard_crs:
            county_gdf.crs = standard_crs
        # convert to lat-long
        county_gdf = county_gdf.to_crs(standard_crs)
        joined_links_gdf = gpd.sjoin(
            links_df,
            county_gdf,
            how="left",
            predicate="intersects"
        )
        # links that cross county boundaries may have been sjoin-ed to two
        # counties: drop duplicates, keeping one county match
        joined_links_gdf.drop_duplicates(subset=["shstReferenceId"], inplace=True)
        joined_links_gdf.rename(columns={county_variable_name: "county"}, inplace=True)
        joined_nodes_gdf = gpd.sjoin(
            nodes_df,
            county_gdf,
            how="left",
            predicate="intersects"
        )
        # nodes that sit on county boundaries may have been sjoin-ed to two
        # counties: drop duplicates, keeping one county match
        joined_nodes_gdf.drop_duplicates(
            subset=["osm_node_id", "shst_node_id"], inplace=True
        )
        joined_nodes_gdf.rename(
            columns = {county_variable_name : 'county'},
            inplace = True
        )
        # NOTE: only nodes are filled with 'outside'; link county stays NaN
        joined_nodes_gdf['county'].fillna('outside', inplace = True)
        # join the computed county back onto the roadway object's frames
        self.links_df = pd.merge(
            self.links_df,
            joined_links_gdf[["shstReferenceId", "county"]],
            how="left",
            on=["shstReferenceId"],
        )
        self.nodes_df = pd.merge(
            self.nodes_df,
            joined_nodes_gdf[["osm_node_id", "shst_node_id", "county"]],
            how="left",
            on=["osm_node_id", "shst_node_id"],
        )
def _keep_links_nodes_within_county(
self,
):
"""
drop links and nodes that are outside of the region
"""
RanchLogger.info(
"Dropping links and nodes that are outside of {} county".format(
self.links_df.county.dropna().unique()
)
)
self.links_df = self.links_df[self.links_df.county.notnull()]
self.nodes_df = self.nodes_df[
self.nodes_df.shst_node_id.isin(
self.links_df.fromIntersectionId.tolist()
+ self.links_df.toIntersectionId.tolist()
)
]
self.shapes_df = self.shapes_df[
self.shapes_df.id.isin(self.links_df.shstGeometryId.tolist())
]
def _make_dead_end_non_drive(
self,
):
"""
iterative process to identify dead end nodes
make dead end links and nodes drive_access = | |
# File: server/src/weblab/admin/script/creation.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
from __future__ import print_function, unicode_literals
try:
from PIL import Image
except ImportError:
PIL_AVAILABLE = False
else:
PIL_AVAILABLE = True
import os
import getpass
import sys
import stat
import uuid
import traceback
import sqlite3
import urlparse
import StringIO
from collections import OrderedDict
from optparse import OptionParser, OptionGroup
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
import sqlalchemy
import weblab.configuration_doc as configuration_doc
from weblab.util import data_filename
import weblab.db.model as model
from weblab.db.upgrade import DbSchedulingUpgrader
import weblab.admin.deploy as deploy
from weblab.admin.script.httpd_config_generate import httpd_config_generate
from .utils import ordered_dump
import voodoo.sessions.db_lock_data as DbLockData
import voodoo.sessions.sqlalchemy_data as SessionSqlalchemyData
from voodoo.dbutil import generate_getconn
#########################################################################################
#
#
#
# W E B L A B D I R E C T O R Y C R E A T I O N
#
#
#
class OptionWrapper(object):
    """Adapter around an OptionParser ``options`` object that also supports
    dict-style access: ``wrapper['force']`` works like ``options.force``.

    ``name in wrapper`` checks attribute existence, item get/set delegate to
    ``getattr``/``setattr``, and every other attribute access is forwarded to
    the wrapped object.
    """

    def __init__(self, options):
        self._options = options

    def __repr__(self):
        # mirror the wrapped object's repr for debuggability
        return repr(self._options)

    def __contains__(self, name):
        return hasattr(self._options, name)

    def __getitem__(self, name):
        return getattr(self._options, name)

    def __setitem__(self, name, value):
        return setattr(self._options, name, value)

    def __getattribute__(self, name):
        # Forward everything except the private backing field itself.
        if name == '_options':
            return object.__getattribute__(self, '_options')
        return getattr(object.__getattribute__(self, '_options'), name)
class Creation(object):
    """ This class wraps the options for creating a new WebLab-Deusto directory.

    Each constant is the key under which the corresponding value is stored in
    the options mapping (e.g. ``options[Creation.NOT_INTERACTIVE]``), as used
    together with OptionWrapper.

    NOTE(review): several values below read '<PASSWORD>' -- these look like
    anonymization placeholders, not real key names; confirm against the
    original source before relying on them.
    """
    FORCE = 'force'
    QUIET = 'quiet'
    VERBOSE = 'verbose'
    SOCKET_WAIT = 'socket_wait'

    # General information
    ADD_TEST_DATA = 'add_test_data'
    ADD_FEDERATED = 'add_fed_user'
    CORES = 'cores'
    START_PORTS = 'start_ports'
    SYSTEM_IDENTIFIER = 'system_identifier'
    ENABLE_HTTPS = 'enable_https'
    BASE_URL = 'base_url'
    ENTITY_LINK = 'entity_link'
    SERVER_HOST = 'server_host'
    POLL_TIME = 'poll_time'
    INLINE_LAB_SERV = 'inline_lab_serv'
    NO_LAB = 'no_lab'
    HTTP_SERVER_PORT = 'http_server_port'
    LAB_COPIES = 'lab_copies'
    ADMIN_USER = 'admin_user'
    ADMIN_NAME = 'admin_name'
    ADMIN_PASSWORD = '<PASSWORD>'
    ADMIN_MAIL = 'admin_mail'
    LOGO_PATH = 'logo_path'

    # XMLRPC experiment
    XMLRPC_EXPERIMENT = 'xmlrpc_experiment'
    XMLRPC_EXPERIMENT_PORT = 'xmlrpc_experiment_port'

    # Dummy experiment
    DUMMY_NAME = 'dummy_name'
    DUMMY_CATEGORY_NAME = 'dummy_category_name'
    DUMMY_COPIES = 'dummy_copies'
    DUMMY_SILENT = 'dummy_silent'

    # Visir
    VISIR_SERVER = 'visir_server'
    VISIR_SLOTS = 'visir_slots'
    VISIR_EXPERIMENT_NAME = 'visir_experiment_name'
    VISIR_BASE_URL = 'visir_base_url'
    VISIR_MEASUREMENT_SERVER = 'visir_measurement_server'
    VISIR_USE_PHP = 'visir_use_php'
    VISIR_LOGIN = 'visir_login'
    VISIR_PASSWORD = '<PASSWORD>'

    # Logic experiment
    LOGIC_SERVER = 'logic_server'

    # Virtual Machine experiment
    VM_SERVER = 'vm_server'
    VM_EXPERIMENT_NAME = 'vm_experiment_name'
    VM_STORAGE_DIR = 'vm_storage_dir'
    VBOX_VM_NAME = 'vbox_vm_name'
    VBOX_BASE_SNAPSHOT = 'vbox_base_snapshot'
    VM_URL = 'vm_url'
    HTTP_QUERY_USER_MANAGER_URL = 'http_query_user_manager_url'
    VM_ESTIMATED_LOAD_TIME = 'vm_estimated_load_time'

    # Federated laboratories
    ADD_FEDERATED_LOGIC = 'federated_logic'
    ADD_FEDERATED_ROBOT = 'federated_robot'
    ADD_FEDERATED_VISIR = 'federated_visir'
    ADD_FEDERATED_SUBMARINE = 'federated_submarine'

    # Sessions
    SESSION_STORAGE = 'session_storage'
    SESSION_DB_ENGINE = 'session_db_engine'
    SESSION_DB_HOST = 'session_db_host'
    SESSION_DB_PORT = 'session_db_port'
    SESSION_DB_NAME = 'session_db_name'
    SESSION_DB_USER = 'session_db_user'
    SESSION_DB_PASSWD = '<PASSWORD>'
    SESSION_REDIS_DB = 'session_redis_db'
    SESSION_REDIS_HOST = 'session_redis_host'
    SESSION_REDIS_PORT = 'session_redis_port'

    # Database
    DB_ENGINE = 'db_engine'
    DB_NAME = 'db_name'
    DB_HOST = 'db_host'
    DB_PORT = 'db_port'
    DB_USER = 'db_user'
    DB_PASSWD = '<PASSWORD>'

    # Coordination
    COORD_ENGINE = 'coord_engine'
    COORD_DB_ENGINE = 'coord_db_engine'
    COORD_DB_NAME = 'coord_db_name'
    COORD_DB_USER = 'coord_db_user'
    COORD_DB_PASSWD = '<PASSWORD>'
    COORD_DB_HOST = 'coord_db_host'
    COORD_DB_PORT = 'coord_db_port'
    COORD_REDIS_DB = 'coord_redis_db'
    COORD_REDIS_PASSWD = 'coord_redis_passwd'
    COORD_REDIS_PORT = 'coord_redis_port'
    COORD_REDIS_HOST = 'coord_redis_host'

    # Other
    NOT_INTERACTIVE = 'not_interactive'
    MYSQL_ADMIN_USER = 'mysql_admin_username'
    MYSQL_ADMIN_PASSWORD = '<PASSWORD>'
    IGNORE_LOCATIONS = 'ignore_locations'
class CreationFlags(object):
    """Literal command-line flag strings (distinct from the option-dict keys in Creation)."""
    HTTP_SERVER_PORT = '--http-server-port'
# Accepted backend names for coordination, main database and session storage.
# Presumably used to validate the corresponding --*-engine options -- the
# option-parsing code lives outside this chunk; confirm there.
COORDINATION_ENGINES = ['sql', 'redis' ]
DATABASE_ENGINES = ['mysql', 'sqlite' ]
SESSION_ENGINES = ['sql', 'redis', 'memory']
def load_template(name, stdout = sys.stdout, stderr = sys.stderr):
    """ Reads the specified template file. Only the name needs to be specified. The file should be located
    in the config_templates folder.

    :param name: bare file name of a template under
        ``weblab/admin/config_templates`` (relative to the current directory).
    :returns: the template contents as a string.
    :raises EnvironmentError: if the file cannot be opened/read. (The original
        code used the Python-2-only ``file()`` builtin, swallowed every error
        with a bare ``except`` and then crashed with a NameError on the
        unbound ``template`` variable; now the real error is reported on
        *stderr* and re-raised.)
    """
    path = "weblab" + os.sep + "admin" + os.sep + "config_templates" + os.sep + name
    try:
        # ``with open(...)`` guarantees the handle is closed on every path.
        with open(path, "r") as f:
            template = f.read()
    except EnvironmentError:
        print("Error: Could not load template file %s. Probably couldn't be found." % path, file=stderr)
        raise
    return template
def _test_redis(what, verbose, redis_port, redis_passwd, redis_db, redis_host, stdout, stderr, exit_func):
if verbose: print("Checking redis connection for %s..." % what, end="", file=stdout); stdout.flush()
kwargs = {}
if redis_port is not None: kwargs['port'] = redis_port
if redis_passwd is not None: kwargs['password'] = <PASSWORD>_passwd
if redis_db is not None: kwargs['db'] = redis_db
if redis_host is not None: kwargs['host'] = redis_host
try:
import redis
except ImportError:
print("redis selected for %s; but redis module is not available. Try installing it with 'pip install redis'" % what, file=stderr)
exit_func(-1)
else:
try:
client = redis.Redis(**kwargs)
client.get("this.should.not.exist")
except:
print("redis selected for %s; but could not use the provided configuration" % what, file=stderr)
traceback.print_exc(file=stderr)
exit_func(-1)
else:
if verbose: print("[done]", file=stdout)
def uncomment_json(lines):
    """ Strip ``//`` line comments from JSON-ish *lines*.

    A ``//`` inside a single- or double-quoted string (e.g. ``"http://..."``)
    is preserved. Returns one entry per input line; lines whose comment was
    removed keep a trailing newline so that joining the result cannot fuse
    adjacent tokens (the original dropped the newline in the no-quotes
    branch, so ``12//x`` followed by ``34`` could merge into ``1234``).
    """
    new_lines = []
    for line in lines:
        if '//' not in line:
            new_lines.append(line)
            continue
        if '"' not in line and "'" not in line:
            # No string literals on the line: everything after the first //
            # is a comment. Keep a newline (see docstring).
            new_lines.append(line.split('//')[0] + '\n')
            continue
        # Scan character by character, tracking quote state, to find a //
        # that sits outside every string literal.
        single_quote_open = False
        double_quote_open = False
        previous_slash = False
        counter = 0
        comment_found = False
        last_c = ''
        for c in line:
            if c == '/':
                if previous_slash and not single_quote_open and not double_quote_open:
                    comment_found = True
                    break  # counter points just past the first slash
                previous_slash = True
            else:
                previous_slash = False
            if c == '"' and last_c != '\\':
                double_quote_open = not double_quote_open
            if c == "'" and last_c != '\\':
                single_quote_open = not single_quote_open
            last_c = c
            counter += 1
        if comment_found:
            new_lines.append(line[:counter - 1] + '\n')
        else:
            new_lines.append(line)
    return new_lines
# Module-level cache of the MySQL administrator credentials, so that
# _check_database_connection() prompts the user at most once per run
# (it reads and updates these via ``global``).
DB_ROOT = None
DB_PASSWORD = None
def _check_database_connection(what, metadata, upgrader_class, directory, verbose, db_engine, db_host, db_port, db_name, db_user, db_passwd, options, stdout, stderr, exit_func):
if verbose: print("Checking database connection for %s..." % what, end="", file=stdout); stdout.flush()
if db_engine == 'sqlite':
base_location = os.path.join(os.path.abspath(directory), 'db', '%s.db' % db_name)
if sys.platform.startswith('win'):
sqlite_location = base_location
location = '/' + base_location
else:
sqlite_location = '/' + base_location
location = '/' + base_location
sqlite3.connect(database = sqlite_location).close()
else:
if db_port is not None:
port_str = ':%s' % db_port
else:
port_str = ''
if db_engine == 'mysql':
try:
import MySQLdb
assert MySQLdb is not None # Avoid warnings
except ImportError:
try:
import pymysql_sa
except ImportError:
pass
else:
pymysql_sa.make_default_mysql_dialect()
location = "%(user)s:%(password)s@%(host)s%(port)s/%(name)s" % {
'user' : db_user,
'password' : <PASSWORD>,
'host' : db_host,
'name' : db_name,
'port' : port_str,
}
db_str = "%(engine)s://%(location)s" % {
'engine' : db_engine,
'location' : location,
}
getconn = generate_getconn(db_engine, db_user, db_passwd, db_host, db_port, db_name, dirname = directory)
pool = sqlalchemy.pool.QueuePool(getconn)
try:
engine = create_engine(db_str, echo = False, pool = pool)
engine.execute("select 1")
except Exception as e:
print("error: database used for %s is misconfigured" % what, file=stderr)
print("error: %s" % str(e), file=stderr)
if verbose:
traceback.print_exc(file=stderr)
else:
print("error: Use -v to get more detailed information", file=stderr)
try:
create_database = deploy.generate_create_database(db_engine)
except Exception as e:
print("error: You must create the database and the db credentials", file=stderr)
print("error: reason: there was an error trying to offer you the creation of users:", str(e), file=stderr)
exit_func(-1)
else:
if create_database is None:
print("error: You must create the database and the db credentials", file=stderr)
print("error: reason: weblab does not support creating a database with engine %s" % db_engine, file=stderr)
exit_func(-1)
else:
if options[Creation.NOT_INTERACTIVE]:
should_create = True
else:
should_create = raw_input('Would you like to create it now? (y/N) ').lower().startswith('y')
if not should_create:
print("not creating", file=stderr)
exit_func(-1)
if db_engine == 'sqlite':
create_database(admin_username = None, admin_password = None, database_name = db_name, new_user = None, new_password = <PASSWORD>, db_dir = os.path.join(directory, 'db'))
elif db_engine == 'mysql':
if Creation.MYSQL_ADMIN_USER in options and Creation.MYSQL_ADMIN_PASSWORD in options:
admin_username = options[Creation.MYSQL_ADMIN_USER]
admin_password = options[Creation.MYSQL_ADMIN_PASSWORD]
else:
if options[Creation.NOT_INTERACTIVE]:
exit_func(-5)
global DB_ROOT, DB_PASSWORD
if DB_ROOT is None or DB_PASSWORD is None:
admin_username = raw_input("Enter the MySQL administrator username [default: root]: ") or 'root'
admin_password = <PASSWORD>("Enter the MySQL administrator password: ".encode('utf8'))
else:
admin_username = DB_ROOT
admin_password = <PASSWORD>
try:
create_database("Did you type your password incorrectly?", admin_username, admin_password, db_name, db_user, db_passwd, db_host, db_port)
except Exception as e:
print("error: could not create database. reason:", str(e), file=stderr)
exit_func(-1)
else:
DB_ROOT = admin_username
DB_PASSWORD = <PASSWORD>
else:
print("error: You must create the database and the db credentials", file=stderr)
print("error: reason: weblab does not support gathering information to create a database with engine %s" % db_engine, file=stderr)
exit_func(-1)
engine = create_engine(db_str, echo = False, pool = pool)
if verbose: print("[done]", file=stdout)
if verbose: print("Adding information to the %s database..." % what, end="", file=stdout); stdout.flush()
metadata.drop_all(engine)
metadata.create_all(engine)
if upgrader_class is not None:
if 'alembic_version' | |
# File: frb/dlas.py
""" Module for assessing impact of intervening galaxies
(DLAs) on FRB measurements
Based on calculations presented in Prochaska & Neeleman 2017
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
from scipy.interpolate import interp1d
from astropy import units as u
from frb.io import load_dla_fits
from frb.turb_scattering import Turbulence
def approx_avgDM(zeval, dla_model='atan', verbose=False):
    """ Calculate the average DM from intervening galaxies (DLAs).

    This method is approximate (and fast) and accurate
    to better than 1% in-so-far as the analysis is correct.
    From Prochaska & Neeleman 2017.

    Parameters
    ----------
    zeval : float or ndarray
      Redshift(s) for evaluation; must not exceed 5
    dla_model : str, optional
      Name of the l(z) model passed to _model_lz
    verbose : bool, optional
      Currently unused (only referenced inside the disabled block below)

    Returns
    -------
    avgDM : Quantity
      Units of pc/cm**3; always array-valued, even for scalar input

    Raises
    ------
    IOError
      If any requested redshift exceeds 5.
      NOTE(review): ValueError would be conventional for a range check, but
      the type is kept since callers may catch IOError.
    """
    # Init
    mlz = _model_lz(dla_model)
    if isinstance(zeval, float):
        flg_float = True
        zeval = np.array([zeval])
    else:
        flg_float = False
    # NOTE(review): flg_float is set but never used -- scalar input still
    # returns a length-1 Quantity array.
    # Error on upper bound
    if np.max(zeval) > 5.:
        raise IOError("Calculation is only valid to z=5")
    # Fine grid over the valid range; dz via median of diffs (robust to the
    # wrap-around element introduced by np.roll)
    zcalc = np.linspace(0., 5., 10000)
    dz = np.median(zcalc-np.roll(zcalc,1))
    # Load DLA fits model
    dla_fits = load_dla_fits()
    # Evaluate l(z)
    # NOTE(review): called without param=mlz['param'] (unlike monte_DM);
    # works because _atan_lz reloads the fits when param is None.
    lz = mlz['eval'](zcalc)
    # Average NHI (log10) for the double power-law f(N)
    avgNHI = _avgN_dbl_pow(dla_fits=dla_fits)
    # ne/nH from the log-log fit, anchored at log NHI = 20.3
    nenH_p = dla_fits['nenH']['loglog']
    nenH = nenH_p['bp'] + nenH_p['m'] * (avgNHI - 20.3)
    # Integrate l(z) for n(z)
    cumul = np.cumsum(lz * dz)
    # Average <z> of the intervening systems
    avgz = np.cumsum(zcalc * lz * dz) / cumul
    '''
    # <DM> for a single DLA (rest-frame)
    DM_DLA = 10. ** (avgNHI + nenH) / u.cm ** 2
    if verbose:
        print("DM for an average DLA = {} (rest-frame)".format(DM_DLA.to('pc/cm**3')))
    '''
    # Combine: <NHI> * <ne/nH> * n(z), redshift-corrected by (1+<z>)
    avgDM_values = 10 ** avgNHI * 10 ** nenH * cumul / (1 + avgz) #/ u.cm ** 2
    # Pick the grid value nearest each requested redshift
    DM_values = np.zeros_like(zeval)
    for kk,iz in enumerate(zeval):
        iminz = np.argmin(np.abs(iz - zcalc))
        DM_values[kk] = avgDM_values[iminz]
    # Attach units and convert
    return (DM_values / u.cm**2).to('pc/cm**3')
def monte_DM(zeval, model='atan', nrand=100, verbose=False):
    """ Monte-Carlo draws of the DM contributed by intervening DLAs.

    Parameters
    ----------
    zeval : float or ndarray
        Redshift(s) for evaluation
    model : str, optional
        l(z) model name passed to _model_lz
    nrand : int, optional
        Number of random sightlines per redshift
    verbose : bool, optional
        Currently unused

    Returns
    -------
    rand_DM : ndarray, shape (nrand, len(zeval))
        Random DM values, reported in pc/cm**3 (unitless array)
    """
    # Convert to array
    if isinstance(zeval, float):
        zeval = np.array([zeval])
    # Init: fit parameters and l(z) model
    dla_fits = load_dla_fits()
    nenH_param = dla_fits['nenH']['loglog']
    mlz = _model_lz(model)
    # Cumulative f(N) integral -> inverse-CDF sampling of log NHI
    lgNmax = np.linspace(20.3, 22., 10000)
    intfN = _int_dbl_pow(dla_fits['fN']['dpow'], lgNmax=lgNmax)
    # Interpolate (cubic is *very* slow)
    interp_fN = interp1d(intfN/intfN[-1], lgNmax)
    # Evaluate l(z) in small z intervals
    zmax = np.max(zeval)
    z = np.linspace(0., zmax, 50000)
    dz = np.median(z-np.roll(z,1))
    lz = mlz['eval'](z, param=mlz['param'])
    # Setup for n(z) and for drawing zdla via the inverse CDF
    nzc = np.cumsum(lz*dz)  # Note nzc[0] is not 0
    avgz = np.cumsum(z*lz*dz) / nzc
    # NOTE(review): interp_avgz is built but never used below
    interp_avgz = interp1d(z, avgz)
    nzc[0] = 0.
    interp_nz = interp1d(z, nzc)
    interp_revnz = interp1d((nzc-nzc[0])/nzc[-1], z) # Accurate to ~1%
    # Draw per-sightline DLA counts, NHI and redshifts
    rand_DM = np.zeros((nrand, zeval.size))
    nz = interp_nz(zeval)
    for kk,inz in enumerate(nz):
        # Random number of DLAs along each of the nrand sightlines
        rn = np.random.poisson(inz, size=nrand)
        ndla = np.sum(rn)
        if ndla == 0:
            continue
        # Draw NHI (inverse-CDF of f(N))
        rval = np.random.uniform(size=ndla)
        rNHI = interp_fN(rval)
        # ne/nH from the log-log fit anchored at log NHI = 20.3
        nenH = nenH_param['bp'] + nenH_param['m'] * (rNHI-20.3)
        # Draw zdla (inverse-CDF of n(z), truncated at this zeval)
        rval2 = np.random.uniform(size=ndla)
        zdla = interp_revnz(rval2*inz/nzc[-1])
        # Rest-frame DM of each DLA, redshift-diluted by (1+z)
        DMi = 10.**(rNHI + nenH) / (1+zdla)
        # Scatter the per-DLA values back onto their sightlines
        DMarr = np.zeros((nrand, max(rn)))
        cnt = 0
        for jj in range(nrand): # Fill
            if rn[jj] > 0:
                DMarr[jj,:rn[jj]] = DMi[cnt:cnt+rn[jj]]
                cnt += rn[jj]
        # Sum the DLAs on each sightline
        rand_DM[:,kk] = np.sum(DMarr,axis=1)
    # Convert cm**-2 -> pc/cm**3 and return as a plain ndarray
    unit_conv = (1/u.cm**2).to('pc/cm**3').value
    return rand_DM * unit_conv
def monte_tau(zeval, nrand=100, nHI=0.1, avg_ne=-2.6,
              sigma_ne=0.5, cosmo=None, lobs=50*u.cm, turb=None):
    """ Generate random draws of the temporal smearing tau at a series of redshifts.

    Parameters
    ----------
    zeval : ndarray
        Array of redshifts for evaluation (scalar input is NOT converted here,
        unlike monte_DM)
    nrand : int, optional
        Number of random sightlines per redshift
    avg_ne : float, optional
        Average log10 electron density / cm**3
    sigma_ne : float, optional
        Error in log10 ne
    nHI : float, optional
        Fiducial value for n_HI; used only to compute the DLA thickness DL
    lobs : Quantity
        Wavelength for analysis
    turb : Turbulence object, optional
        Usually defined internally and that is the highly recommended approach
    cosmo : astropy.cosmology, optional
        Defaults to Planck15

    Returns
    -------
    rand_tau : ndarray (nrand, nz)
        Random tau values reported in ms (but without explicit astropy Units)
    """
    # Init
    ne_param = dict(value=avg_ne, sigma=sigma_ne)  # Neeleman+15
    dla_fits = load_dla_fits()
    if cosmo is None:
        from astropy.cosmology import Planck15 as cosmo
    # Cumulative f(N) integral -> inverse-CDF sampling of log NHI
    lgNmax = np.linspace(20.3, 22., 10000)
    intfN = _int_dbl_pow(dla_fits['fN']['dpow'], lgNmax=lgNmax)
    # Spline
    interp_fN = interp1d(intfN/intfN[-1], lgNmax)#, kind='cubic')
    # n(z) on a grid out to z=7, forced to start at 0
    zvals = np.linspace(0., 7., 10000)
    nz_s = _dla_nz(zvals)
    nz_s[0] = 0.
    # Fiducial turbulence model; its cosmology factors are divided out so the
    # per-DLA draws below can be rescaled cheaply
    if turb is None:
        turb = _init_dla_turb()
    f_ne=turb.ne
    zsource = 2.
    turb.set_rdiff(lobs)
    fiducial_tau = turb.temporal_smearing(lobs, zsource)
    # Take out the cosmology
    f_D_S = cosmo.angular_diameter_distance(zsource)
    f_D_L = cosmo.angular_diameter_distance(turb.zL)
    f_D_LS = cosmo.angular_diameter_distance_z1z2(turb.zL, zsource)
    fiducial_tau = fiducial_tau / f_D_LS / f_D_L * f_D_S * (1+turb.zL)**3 # ms/Mpc
    kpc_cm = (1*u.kpc).to('cm').value
    rand_tau = np.zeros((nrand, zeval.size))
    # Loop on zeval
    for ss,izeval in enumerate(zeval):
        # Number of DLAs per sightline out to this redshift
        avg_nz = _dla_nz(izeval)
        rn = np.random.poisson(avg_nz, size=nrand)
        ndla = np.sum(rn)
        if ndla == 0:
            continue
        # Get random NHI
        rval = np.random.uniform(size=ndla)
        rNHI = interp_fN(rval)
        # NOTE(review): DL (DLA thickness in kpc) is computed but never used
        DL = 10.**rNHI / nHI / kpc_cm
        # Get random z from the inverse n(z) CDF truncated at izeval
        imin = np.argmin(np.abs(zvals-izeval))
        interp_z = interp1d(nz_s[0:imin]/nz_s[imin-1], zvals[0:imin])#, kind='cubic')
        rval = np.random.uniform(size=ndla)
        rz = interp_z(rval)
        # Cosmology factors for this source / these lenses
        D_S = cosmo.angular_diameter_distance(izeval)
        D_L = cosmo.angular_diameter_distance(rz)
        D_LS = cosmo.angular_diameter_distance_z1z2(rz, izeval)
        # Get random n_e (log-normal scatter around avg_ne)
        rne = 10.**(ne_param['value'] + ne_param['sigma']*np.random.normal(size=ndla))
        # Scale the fiducial tau: distances, (ne/f_ne)^2, (1+z)^-3
        rtau = fiducial_tau * (D_LS * D_L / D_S) * (rne/f_ne.to('cm**-3').value)**2 / (1+rz)**3
        # Scatter the per-DLA values back onto their sightlines
        taus = np.zeros((nrand, np.max(rn)))
        kk = 0
        for jj,irn in enumerate(rn):
            if irn > 0:
                taus[jj,0:irn] = rtau[kk:kk+irn]
                kk += irn
        # Combine the DLAs on each sightline in quadrature
        final_tau = np.sqrt(np.sum(taus**2, axis=1))
        # Save
        rand_tau[:,ss] = final_tau
    # Return
    return rand_tau
def _avgN_dbl_pow(lgNmin=20.3, dla_fits=None):
    """ Calculate log10 <NHI> for the double power-law f(N) model.

    Parameters
    ----------
    lgNmin : float, optional
        Lower limit of log10 NHI for the average
    dla_fits : dict, optional
        Fit parameters; loaded from disk when not supplied

    Returns
    -------
    avglgN : float
        log10 <NHI>
    """
    if dla_fits is None:
        dla_fits = load_dla_fits()
    p = dla_fits['fN']['dpow']

    # Numerator: integral of N h(N) dN over both power-law pieces
    first = 1 / (p['a3'] + 2) - 1. / (p['a4'] + 2)
    second = (10 ** (lgNmin - p['Nd'])) ** (p['a3'] + 2) / (p['a3'] + 2)
    numerator = (10 ** p['Nd']) ** 2 * (first - second)

    # Denominator: integral of h(N) dN
    denominator = _int_dbl_pow(p, lgNmin=lgNmin)

    return np.log10(numerator / denominator)
def _atan_lz(zval, param=None):
""" arctan l(z) model
Parameters
----------
zval : float or ndarray
Returns
-------
atan_lz : float or ndarray
"""
if param is None:
dfits = load_dla_fits()
param = dfits['lz']['atan']
lz = param['A'] + param['B'] * np.arctan(zval-param['C'])
return lz
def _dla_nz(zarr, mlz=None, model='atan'):
""" Calculate the number of DLAs intersected on average
to a given redshift
Parameters
----------
zarr : ndarray
mlz : model, optional
model : str, optional
Returns
-------
nz : ndarray
"""
# Load model
if mlz is None:
mlz = _model_lz(model)
z = np.linspace(0., 10., 10000)
dz = np.median(z-np.roll(z,1))
lz = mlz['eval'](z, param=mlz['param'])
# Sum
nz = np.cumsum(lz*dz)
# Interpolate onto input redshifts
interp_nz = interp1d(z, nz)
# Return
return interp_nz(zarr)
def _init_dla_turb(ne=4e-3/u.cm**3, zL=1.):
    """ Build the Turbulence object for a fiducial DLA.

    Parameters
    ----------
    ne : Quantity
        Electron density (default based on Neeleman+15).
    zL : float
        Redshift of the DLA.

    Returns
    -------
    turb : Turbulence object
    """
    # Fiducial turbulence scales and path length through the absorber
    inner_scale = 1 * u.AU
    outer_scale = 0.001 * u.pc
    path_length = 1 * u.kpc
    turb = Turbulence(ne, inner_scale, outer_scale, zL)
    turb.set_SM_obj(path_length)
    return turb
def _int_dbl_pow(param, lgNmin=20.3, lgNmax=None):
""" Integrate the double power-law for f(N)
For normalizing with l(z) and for doing random draws
Parameters
----------
lgNmin : float, optional
lgNmax : ndarray, optional
If None, integrate to Infinity
Returns
-------
val : float or ndarray
Integral of f(N) dN [modulo the j(z) term]
Really just the integral of h(N) dN
"""
# Calculate
if lgNmax is None: # Integrate to Infinity
fterm = 1/(param['a3']+1) - 1./(param['a4']+1)
else: # Indefinite integral
fterm = np.zeros_like(lgNmax)
high = lgNmax > param['Nd']
fterm[high] = 1/(param['a3']+1) - 1./(param['a4']+1)
fterm[high] += (10**(lgNmax[high]-param['Nd']))**(param['a4']+1) / (param['a4']+1)
fterm[~high] = (10**(lgNmax[~high]-param['Nd']))**(param['a3']+1) / (param['a3']+1)
# Nmin term
sterm = (10**(lgNmin-param['Nd']))**(param['a3']+1) / (param['a3']+1)
# Finish
val = 10**param['Nd'] * (fterm-sterm)
return val
def _model_lz(name):
""" Return the model for l(z)
Enables multiple ways to model the DLA observations
Returns
| |
<==> x<=y
"""
pass
    def __len__(*args, **kwargs):
        """
        x.__len__() <==> len(x)
        """
        # Stub: the compiled API module supplies the implementation.
        pass
    def __lt__(*args, **kwargs):
        """
        x.__lt__(y) <==> x<y
        """
        # Stub: the compiled API module supplies the implementation.
        pass
    def __mul__(*args, **kwargs):
        """
        x.__mul__(y) <==> x*y
        """
        # Stub: the compiled API module supplies the implementation.
        pass
    def __ne__(*args, **kwargs):
        """
        x.__ne__(y) <==> x!=y
        """
        # Stub: the compiled API module supplies the implementation.
        pass
    def __radd__(*args, **kwargs):
        """
        x.__radd__(y) <==> y+x
        """
        # Stub: the compiled API module supplies the implementation.
        pass
    def __rdiv__(*args, **kwargs):
        """
        x.__rdiv__(y) <==> y/x
        """
        # Stub (Python 2 division protocol): implementation in the compiled module.
        pass
    def __repr__(*args, **kwargs):
        """
        x.__repr__() <==> repr(x)
        """
        # Stub: the compiled API module supplies the implementation.
        pass
    def __rmul__(*args, **kwargs):
        """
        x.__rmul__(y) <==> y*x
        """
        # Stub: the compiled API module supplies the implementation.
        pass
    def __setitem__(*args, **kwargs):
        """
        x.__setitem__(i, y) <==> x[i]=y
        """
        # Stub: the compiled API module supplies the implementation.
        pass
    def __str__(*args, **kwargs):
        """
        x.__str__() <==> str(x)
        """
        # Stub: the compiled API module supplies the implementation.
        pass
    def getColor(*args, **kwargs):
        """
        Returns a list containing the color's components, in the specified color model.
        """
        # Stub: the compiled API module supplies the implementation.
        pass
    def setColor(*args, **kwargs):
        """
        Sets the color's components and color model.
        """
        # Stub: the compiled API module supplies the implementation.
        pass
a = None
b = None
g = None
r = None
__new__ = None
kByte = 1
kCMY = 2
kCMYK = 3
kFloat = 0
kHSV = 1
kOpaqueBlack = None
kRGB = 0
kShort = 2
class MSelectionList(object):
    """
    A heterogenous list of MObjects, MPlugs and MDagPaths.
    __init__()
    Initializes a new, empty MSelectionList object.
    __init__(MSelectionList other)
    Initializes a new MSelectionList object containing the same
    items as another list.
    """
    # NOTE: auto-generated stub of a compiled Maya API class -- every method
    # body is `pass`; the docstrings carry the actual contract.
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def __str__(*args, **kwargs):
        """
        x.__str__() <==> str(x)
        """
        pass
    def add(*args, **kwargs):
        """
        add(pattern, searchChildNamespaces=False) -> self
        add(item, mergeWithExisting=True) -> self
        The first version adds to the list any nodes, DAG paths, components
        or plugs which match the given the pattern string.
        The second version adds the specific item to the list, where the
        item can be a plug (MPlug), a node (MObject), a DAG path (MDagPath)
        or a component (tuple of (MDagPath, MObject) ).
        """
        pass
    def clear(*args, **kwargs):
        """
        clear() -> self
        Empties the selection list.
        """
        pass
    def copy(*args, **kwargs):
        """
        copy(src) -> self
        Replaces the contents of the selection list with a copy of those from src (MSelectionList).
        """
        pass
    def getComponent(*args, **kwargs):
        """
        getComponent(index) -> (MDagPath, MObject)
        Returns the index'th item of the list as a component, represented by
        a tuple containing an MDagPath and an MObject. If the item is just a
        DAG path without a component then MObject.kNullObj will be returned
        in the second element of the tuple. Raises TypeError if the item is
        neither a DAG path nor a component. Raises IndexError if index is
        out of range.
        """
        pass
    def getDagPath(*args, **kwargs):
        """
        getDagPath(index) -> MDagPath
        Returns the DAG path associated with the index'th item of the list.
        Raises TypeError if the item is neither a DAG path nor a component.
        Raises IndexError if index is out of range.
        """
        pass
    def getDependNode(*args, **kwargs):
        """
        getDependNode(index) -> MObject
        Returns the node associated with the index'th item, whether it be a
        dependency node, DAG path, component or plug. Raises IndexError if
        index is out of range.
        """
        pass
    def getPlug(*args, **kwargs):
        """
        getPlug(index) -> MPlug
        Returns the index'th item of the list as a plug. Raises TypeError if
        the item is not a plug. Raises IndexError if index is out of range.
        """
        pass
    def getSelectionStrings(*args, **kwargs):
        """
        getSelectionStrings(index=None) -> (string, string, ...)
        Returns a tuple containing the string representation of the
        specified item. For nodes, DAG paths, plugs and contiguous
        components the tuple will only contain a single string, but for non-
        contiguous components there will be a separate string for each
        distinct block of contiguous elements. If index is not specified
        then the string representations of all the items in the selection
        list are returned. Raises IndexError if index is out of bounds.
        """
        pass
    def hasItem(*args, **kwargs):
        """
        hasItem(item) -> bool
        Returns True if the given item is on the selection list. For a
        component this means that all of the elements of the component must
        be on the list. A component is passed as a tuple containing the
        MDagPath of the DAG node and an MObject containing the component.
        """
        pass
    def hasItemPartly(*args, **kwargs):
        """
        hasItemPartly(dagPath, component) -> bool
        Returns True if at least one of the component's elements is on the
        selection list. Raises TypeError if dagPath is invalid or component
        does not contain a component.
        """
        pass
    def isEmpty(*args, **kwargs):
        """
        isEmpty() -> bool
        Returns True if the selection list is empty.
        """
        pass
    def length(*args, **kwargs):
        """
        length() -> int
        Returns the number of items on the selection list.
        """
        pass
    def merge(*args, **kwargs):
        """
        merge(other, strategy=kMergeNormal) -> self
        merge(dagPath, component, strategy=kMergeNormal) -> self
        The first version merges the items from another selection list in
        with those already on the list, using the given strategy.
        The second version merges the specified component with those already
        on the list.
        """
        pass
    def remove(*args, **kwargs):
        """
        remove(index) -> self
        Removes the index'th item from the list. Raises IndexError if the
        index is out of range.
        """
        pass
    def replace(*args, **kwargs):
        """
        replace(index, newItem) -> self
        Replaces the index'th item on the list with a new item. A component
        is passed as a tuple containing the MDagPath of the DAG node and an
        MObject containing the component. Raises IndexError if the index is
        out of range.
        """
        pass
    def toggle(*args, **kwargs):
        """
        toggle(dagPath, component) -> self
        Removes from the list those elements of the given component which
        are already on it and adds those which are not.
        """
        pass
    # Instances are built by the compiled extension, not by Python code.
    __new__ = None
    # Merge strategies accepted by merge():
    kMergeNormal = 0
    kRemoveFromList = 2
    kXORWithList = 1
class MFn(object):
"""
Static class providing constants for all API types.
"""
kAISEnvFacade = 961
kAddDoubleLinear = 5
kAdskMaterial = 1049
kAffect = 6
kAimConstraint = 111
kAir = 257
kAlignCurve = 41
kAlignManip = 897
kAlignSurface = 42
kAmbientLight = 303
kAngle = 270
kAngleBetween = 21
kAnimBlend = 781
kAnimBlendInOut = 782
kAnimCurve = 7
kAnimCurveTimeToAngular = 8
kAnimCurveTimeToDistance = 9
kAnimCurveTimeToTime = 10
kAnimCurveTimeToUnitless = 11
kAnimCurveUnitlessToAngular = 12
kAnimCurveUnitlessToDistance = 13
kAnimCurveUnitlessToTime = 14
kAnimCurveUnitlessToUnitless = 15
kAnimLayer = 1002
kAnisotropy = 609
kAnnotation = 271
kAnyGeometryVarGroup = 115
kArcLength = 273
kAreaLight = 305
kArrayMapper = 517
kArrowManip = 123
kAssembly = 1063
kAsset = 1000
kAttachCurve = 43
kAttachSurface = 44
kAttribute = 554
kAttribute2Double = 734
kAttribute2Float = 735
kAttribute2Int = 737
kAttribute2Long = 737
kAttribute2Short = 736
kAttribute3Double = 738
kAttribute3Float = 739
kAttribute3Int = 741
kAttribute3Long = 741
kAttribute3Short = 740
| |
# <gh_stars>1-10
import os
import matplotlib as mpl
if os.environ.get('DISPLAY','') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import sys
import json
import re
import matplotlib.pyplot as plt
sys.path.insert(0, './include')
from plot_utils import *
from common import *
from utils import *
from data_utils import *
tick = 1 # mins -- interval between consecutive log snapshots
## plot utilization: number of busy nodes.
cap = 64  # per-node CPU capacity used to normalize utilization
mem_cap = 128  # per-node memory capacity used to normalize utilization
data_range=[10, 1000000]  # [start, end] tick window for averages (trimmed later)
target_qos = 0.99
cpuStr = 'cpu'  # resource keys as they appear in the JSON log
memStr = 'memory'
show=False
# Feature switches for which metrics to collect and which figures to emit.
plotObj = True
plotOverload = False
plotOverbook = False
plotQoS=True
plotPredictionPenalty=True
plotUtilization=True
# Flags consumed positionally by loadLog(); index 1 (busyNodes) is disabled.
loads = [plotUtilization, False, plotOverload, plotOverbook, plotQoS, plotPredictionPenalty]
showBarValue = True
path = "./log"  # log directory; can be overridden by the first CLI argument
arg_len = len(sys.argv) - 1
if arg_len > 0:
    path=sys.argv[1]
# path = "./"
line_num = -1 # 60*24 -- max log lines to read; <= 0 means read everything
def loadLog(filepath):
    """Parse one kubesim JSON-lines log file into per-tick metric series.

    Each log line is a JSON snapshot with a 'Nodes' dict (per-node usage,
    requests, allocations and capacities) and a 'Queue' dict (QoS and
    prediction-penalty figures).  Which series are accumulated is controlled
    by the module-level ``loads`` flags.

    Returns an 18-tuple of per-tick lists:
    (busyNodes, overloadNodes, overBookNodes, cpuUsages, memUsages,
     cpuRequests, memRequests, totalCpuAllocations, totalMemAllocations,
     maxCpuUsages, cpuAllocatables, memAllocatables, QoS, NumSatifiesPods,
     NumPods, PredPenalty, cpuUsageStd, memUsageStd)
    """
    cpuUsages = []
    maxCpuUsages = []
    cpuRequests = []
    memRequests = []
    totalCpuAllocations = []
    totalMemAllocations = []
    memUsages = []
    cpuAllocatables = []
    memAllocatables = []
    busyNodes = []
    overloadNodes = []
    overBookNodes = []
    QoS = []
    NumSatifiesPods = []
    NumPods = []
    PredPenalty = []
    cpuUsageStd = []
    memUsageStd = []
    with open(filepath) as fp:
        line = fp.readline()
        i = 0  # number of successfully parsed lines
        while line:
            # Per-tick accumulators over all nodes.
            busyNode = 0
            overloadNode = 0
            overBookNode = 0
            totalCpuUsage = 0
            totalMemUsage = 0
            totalCpuAllocation = 0
            totalMemAllocation = 0
            totalCpuCapacity = 0
            totalMemCapacity = 0
            maxCpuUsage = 0
            totalCpuRequest = 0
            totalMemRequest = 0
            maxMemUsage = 0
            try:
                data = json.loads(line)
            except ValueError:
                # BUG FIX: the original `continue`d without reading the next
                # line, which spun forever on the first malformed line.
                print("An json.loads(line) exception occurred")
                line = fp.readline()
                continue
            nodeDict = data['Nodes']
            cUsages = []  # per-node CPU utilization fractions (for std-dev)
            mUsages = []  # per-node memory utilization fractions (for std-dev)
            for nodeName, node in nodeDict.items():
                cpuUsage = 0
                memUsage = 0
                cpuAllocatable = 0
                memAllocatable = 0
                cpuRequest = 0
                memRequest = 0
                runningPodsNum = int(node['RunningPodsNum'])
                usageDict = node['TotalResourceUsage']
                for rsName in usageDict:
                    if rsName == cpuStr:
                        cpuUsage = formatCpuQuatity(usageDict[rsName])
                        cUsages.append(cpuUsage/cap)
                        totalCpuUsage = totalCpuUsage + cpuUsage
                        if cpuUsage > maxCpuUsage:
                            maxCpuUsage = cpuUsage
                    elif rsName == memStr:
                        memUsage = formatMemQuatity(usageDict[rsName])
                        mUsages.append(memUsage/mem_cap)
                        totalMemUsage = totalMemUsage + memUsage
                        if memUsage > maxMemUsage:
                            maxMemUsage = memUsage
                allocatableDict = node['Allocatable']
                for rsName in allocatableDict:
                    if rsName == cpuStr:
                        cpuAllocatable = formatCpuQuatity(allocatableDict[rsName])
                        totalCpuCapacity = totalCpuCapacity + cpuAllocatable
                    elif rsName == memStr:
                        memAllocatable = formatMemQuatity(allocatableDict[rsName])
                        totalMemCapacity = totalMemCapacity + memAllocatable
                requestDict = node['TotalResourceRequest']
                for rsName in requestDict:
                    if rsName == cpuStr:
                        cpuRequest = formatCpuQuatity(requestDict[rsName])
                        totalCpuRequest = totalCpuRequest + cpuRequest
                    elif rsName == memStr:
                        memRequest = formatMemQuatity(requestDict[rsName])
                        totalMemRequest = totalMemRequest + memRequest
                allocationDict = node['TotalResourceAllocation']
                for rsName in allocationDict:
                    if rsName == cpuStr:
                        cpuAllocation = formatCpuQuatity(allocationDict[rsName])
                        totalCpuAllocation = totalCpuAllocation + cpuAllocation
                    elif rsName == memStr:
                        memAllocation = formatMemQuatity(allocationDict[rsName])
                        totalMemAllocation = totalMemAllocation + memAllocation
                # Node-level health classification for this tick.
                if cpuUsage > cpuAllocatable or memUsage > memAllocatable:
                    overloadNode = overloadNode + 1
                if cpuRequest > cpuAllocatable or memRequest > memAllocatable:
                    overBookNode = overBookNode + 1
                if runningPodsNum > 0:
                    busyNode = busyNode + 1
            if loads[0]:
                cpuUsages.append(totalCpuUsage)
                memUsages.append(totalMemUsage)
                cpuAllocatables.append(totalCpuCapacity)
                memAllocatables.append(totalMemCapacity)
                cpuRequests.append(totalCpuRequest)
                memRequests.append(totalMemRequest)
                maxCpuUsages.append(maxCpuUsage)
                totalCpuAllocations.append(totalCpuAllocation)
                totalMemAllocations.append(totalMemAllocation)
                memUsageStd.append(numpy.std(mUsages))
                cpuUsageStd.append(numpy.std(cUsages))
            if loads[1]:
                busyNodes.append(busyNode)
            if loads[2]:
                overloadNodes.append(overloadNode)
            if loads[3]:
                overBookNodes.append(overBookNode)
            # Queue":{"PendingPodsNum":1,"QualityOfService":1,"PredictionPenalty":2.97}
            queue = data['Queue']
            if loads[4]:
                QoS.append(float(queue['QualityOfService']))
                # NOTE: 'NumSatifisedPods' is the key as written by the simulator.
                NumSatifiesPods.append(float(queue['NumSatifisedPods']))
                NumPods.append(float(queue['NumPods']))
            if loads[5]:
                PredPenalty.append(float(queue['PredictionPenalty']))
            i = i + 1
            if line_num > 0 and i >= line_num:
                break
            line = fp.readline()
    return busyNodes, overloadNodes, overBookNodes, cpuUsages, memUsages, cpuRequests, \
        memRequests, totalCpuAllocations, totalMemAllocations, maxCpuUsages, cpuAllocatables, memAllocatables, \
        QoS, NumSatifiesPods, NumPods, PredPenalty, cpuUsageStd, memUsageStd
def formatCpuQuatity(str):
    """Convert a Kubernetes CPU quantity string to a float number of cores.

    '500m' -> 0.5 (millicores), plain '4' -> 4.0.
    A 'Mi' suffix is divided by 1024 -- unusual for CPU quantities;
    NOTE(review): confirm this is intended for this log format.
    Unknown suffixes are reported and the raw number is returned.
    """
    parts = re.split(r'(\d+)', str)
    val = float(parts[1])
    scaleStr = parts[2]
    if scaleStr == "m":
        val = val/1000  # millicores -> cores
    elif scaleStr == "Mi":
        val = val/1024
    elif scaleStr == "":
        pass  # already expressed in cores
    else:
        # BUG FIX: this message previously named formatMemQuatity.
        print("error @ formatCpuQuatity "+str)
    return val
def formatMemQuatity(str):
    """Convert a Kubernetes memory quantity string to GB (Gi-scale) floats.

    'Gi' is the base unit (unchanged); 'Mi' / 'Ki' / plain bytes are scaled
    down accordingly; 'm' divides by 1e6 (NOTE(review): confirm the 'm'
    scaling -- it does not match the usual k8s milli-unit for memory).
    Unknown suffixes are reported and the raw number is returned.
    """
    parts = re.split(r'(\d+)', str)
    val = float(parts[1])
    scaleStr = parts[2]
    if scaleStr == "m":
        val = val/(1000*1000)
    elif scaleStr == "Mi":
        val = val/(1024)
    elif scaleStr == "Ki":
        val = val/(1024*1024)
    elif scaleStr == "Gi":
        pass  # already in the base unit (BUG FIX: dead 'va = val' typo removed)
    elif scaleStr == "":  # byte
        val = val/(1024*1024*1024)
    else:
        print("error @ formatMemQuatity "+str)
    return val
# Scheduling policies to compare; one log file per policy is expected.
methods = ["worstfit", "oversub", "proposed_list", "proposed_largest"]
methodNames = [STR_WORSTFIT, STR_OVERSUB, STR_FLEX_F, STR_FLEX_L]
colors = [COLOR_WORST_FIT, COLOR_OVER_SUB, COLOR_PROPOSED_1, COLOR_PROPOSED_2]
proposed_idx = 2
# methods = ["oneshot","worstfit"]
methodsNum = len(methods)
# One sub-list per method for every metric loadLog() returns.
busyNodes = []
overloadNodes = []
overbookNodes = []
cpuUsages = []
memUsages = []
maxCpuUsages = []
cpuAllocatables = []
memAllocatables = []
cpuAllocations = []
memAllocations = []
cpuRequests = []
memRequests = []
QoSs = []
NumSatifiesPods = []
NumPods = []
PredPenalties = []
cpuUsageStd = []
memUsageStd = []
# Parse each method's log and distribute the 18 returned series.
for m in methods:
    b, ol, ob, u_cpu, u_mem, ur_cpu, ur_mem, a_cpu, a_mem, mu, c_cpu, c_mem, q, nsp, nps, p, cStd, mStd = loadLog(path+"/kubesim_"+m+".log")
    busyNodes.append(b)
    overloadNodes.append(ol)
    overbookNodes.append(ob)
    cpuUsages.append(u_cpu)
    memUsages.append(u_mem)
    maxCpuUsages.append(mu)
    cpuAllocatables.append(c_cpu)
    memAllocatables.append(c_mem)
    cpuAllocations.append(a_cpu)
    memAllocations.append(a_mem)
    cpuRequests.append(ur_cpu)
    memRequests.append(ur_mem)
    QoSs.append(q)
    NumSatifiesPods.append(nsp)
    NumPods.append(nps)
    PredPenalties.append(p)
    cpuUsageStd.append(cStd)
    memUsageStd.append(mStd)
# Shrink the averaging window so it fits the shortest log.
# NOTE(review): the second clause zeroes the window start whenever a log is
# shorter than the configured start offset -- confirm that is intended.
for i in range(methodsNum):
    if (len(cpuRequests[i]) < data_range[1]):
        data_range[1] = len(cpuRequests[i])
    if (len(cpuRequests[i]) < data_range[0]):
        data_range[0] = 0
############# PLOTTING ##############
if not os.path.exists(FIG_PATH):
    os.makedirs(FIG_PATH)
if plotObj:
    # --- Max CPU usage per method over time, against the capacity line ---
    fig = plt.figure(figsize=FIG_ONE_COL)
    max_len = 0
    for i in range(methodsNum):
        plt.plot([x / 60.0 for x in range(0,len(maxCpuUsages[i])*tick,tick)], maxCpuUsages[i], color=colors[i])
        if max_len < len(maxCpuUsages[i]):
            max_len = len(maxCpuUsages[i])
    plt.plot([x / 60.0 for x in range(0,max_len*tick,tick)], [cap] * max_len, color=COLOR_CAP)
    # BUG FIX: build a copy.  The original `legends = methodNames;
    # legends.append('capacity')` aliased methodNames and mutated it, so the
    # shared label list grew by one 'capacity' entry per plotting section.
    legends = methodNames[:methodsNum] + ['capacity']
    plt.legend(legends, loc='best')
    plt.xlabel(STR_TIME_HOUR)
    plt.ylabel(STR_CPU_CORES)
    plt.suptitle("Max Cpu Usage")
    fig.savefig(FIG_PATH+"/max_cpu_usage.pdf", bbox_inches='tight')
    ## plot STD of memory usage across nodes (load-balance indicator)
    Y_MAX = 0.3
    fig = plt.figure(figsize=FIG_ONE_COL)
    for i in range(methodsNum):
        plt.plot([x / 60.0 for x in range(0,len(memUsageStd[i])*tick,tick)], memUsageStd[i], color=colors[i])
    plt.legend(legends, loc='best')
    plt.xlabel(STR_TIME_HOUR)
    plt.ylabel("Memory Usage std.")
    plt.ylim(0,Y_MAX)
    plt.xlim(0,24)
    fig.savefig(FIG_PATH+"/std_mem_usage.pdf", bbox_inches='tight')
    ## plot STD of CPU usage across nodes
    fig = plt.figure(figsize=FIG_ONE_COL)
    for i in range(methodsNum):
        plt.plot([x / 60.0 for x in range(0,len(cpuUsageStd[i])*tick,tick)], cpuUsageStd[i], color=colors[i])
    plt.legend(legends, loc='best')
    plt.xlabel(STR_TIME_HOUR)
    plt.ylabel("CPU Usage std.")
    fig.savefig(FIG_PATH+"/std_cpu_usage.pdf", bbox_inches='tight')
if plotUtilization:
    # --- Average request / demand / usage as % of capacity, per method ---
    cpuReqUtil = []
    memReqUtil = []
    cpuDemandUtil = []
    memDemandUtil = []
    cpuUsageUtil = []
    memUsageUtil = []
    cpuCap = np.average(cpuAllocatables[0])
    memCap = np.average(memAllocatables[0])
    # Guard against empty logs (division by zero below).
    if memCap == 0:
        memCap = 1.0
    if cpuCap == 0:
        cpuCap = 1.0
    Y_MAX = 300
    window = slice(data_range[0], data_range[1])
    for i in range(methodsNum):
        cpuReqUtil.append(int(round(np.average(cpuRequests[i][window])/cpuCap*100, 2)))
        memReqUtil.append(int(round(np.average(memRequests[i][window])/memCap*100, 2)))
        cpuDemandUtil.append(int(round(np.average(cpuUsages[i][window])/cpuCap*100, 2)))
        memDemandUtil.append(int(round(np.average(memUsages[i][window])/memCap*100, 2)))
        cpuUsageUtil.append(int(round(np.average(cpuAllocations[i][window])/cpuCap*100, 2)))
        memUsageUtil.append(int(round(np.average(memAllocations[i][window])/memCap*100, 2)))
    x = np.arange(methodsNum)
    width = GBAR_WIDTH/2
    # BUG FIX: slice to methodsNum -- elsewhere the script appended 'capacity'
    # to methodNames via an aliased list, which broke set_xticklabels here.
    labels = methodNames[:methodsNum]
    def _grouped_bars(cpu_vals, mem_vals, ylabel, figname):
        # One grouped CPU/memory bar chart per metric (request/demand/usage).
        fig, ax = plt.subplots(figsize=FIG_ONE_COL)
        rects = ax.bar(x - width, cpu_vals, width, label=STR_CPU, color=COLOR_CPU)
        if showBarValue:
            autolabel(rects, ax)
        rects = ax.bar(x, mem_vals, width, label=STR_MEM, color=COLOR_MEM)
        if showBarValue:
            autolabel(rects, ax)
        ax.set_ylabel(ylabel)
        ax.set_xticks(x)
        ax.set_xticklabels(labels)
        ax.legend( loc='best')
        plt.ylim(0,Y_MAX)
        fig.savefig(FIG_PATH+"/"+figname, bbox_inches='tight')
    _grouped_bars(cpuReqUtil, memReqUtil, 'Request (%)', "request-avg.pdf")
    _grouped_bars(cpuDemandUtil, memDemandUtil, 'Demand (%)', "demand-avg.pdf")
    _grouped_bars(cpuUsageUtil, memUsageUtil, 'Usage (%)', "usage-avg.pdf")
if plotUtilization:
    # --- Total request / demand / usage over time, against capacity ---
    def _plot_timeseries(series, cap_series, ylabel, figname):
        # One metric per method against time (hours), plus the capacity line.
        fig = plt.figure(figsize=FIG_ONE_COL)
        for i in range(methodsNum):
            plt.plot([x / 60.0 for x in range(0,len(series[i])*tick,tick)], series[i], color=colors[i])
        plt.plot([x / 60.0 for x in range(0,len(cap_series)*tick,tick)], cap_series, color=COLOR_CAP)
        # BUG FIX: copy instead of aliasing.  The original appended
        # 'capacity' to methodNames itself in each of the six sections,
        # corrupting the shared label list.
        plt.legend(methodNames[:methodsNum] + ['capacity'], loc='best')
        plt.xlabel(STR_TIME_HOUR)
        plt.ylabel(ylabel)
        fig.savefig(FIG_PATH+"/"+figname, bbox_inches='tight')
    _plot_timeseries(cpuRequests, cpuAllocatables[0], STR_CPU_CORES, "total-request-cpu.pdf")
    _plot_timeseries(memRequests, memAllocatables[0], STR_MEM_GB, "total-request-mem.pdf")
    _plot_timeseries(cpuUsages, cpuAllocatables[0], STR_CPU_CORES, "total-demand-cpu.pdf")
    _plot_timeseries(memUsages, memAllocatables[0], STR_MEM_GB, "total-demand-mem.pdf")
    _plot_timeseries(cpuAllocations, cpuAllocatables[0], STR_CPU_CORES, "total-usage-cpu.pdf")
    _plot_timeseries(memAllocations, memAllocatables[0], STR_MEM_GB, "total-usage-mem.pdf")
| |
# cartopy_fun.py
# Having fun with Cartopy.
# inspired by the work of @pythonmaps on Twitter
# eg. https://twitter.com/PythonMaps/status/1391056641546768388
# <NAME>, 10th of May 2021, MIT-License
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from numpy import genfromtxt
import time
import cartopy.crs as ccrs # https://www.lfd.uci.edu/~gohlke/pythonlibs/#cartopy
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
def get_data(url, delimiter):
    """Read a delimited text file (local path or URL) into a DataFrame."""
    return pd.read_csv(url, delimiter=delimiter, low_memory=False)
def save_df(df, name):
    """Write *df* to '<name>.csv' (no index) and report the destination."""
    OUTPUT_DIR = ""
    target = OUTPUT_DIR + name + ".csv"
    opts = dict(method=None, archive_name=target)
    df.to_csv(target, index=False, compression=opts)
    print("--- Saving " + target + " ---")
def show_cities():
    """Scatter a fixed set of Dutch points of interest on the current axes,
    labelling each with its name."""
    cities = pd.DataFrame({'City': ['Utrecht', 'Amsterdam', 'Rotterdam', 'The Hague', 'Arnhem', 'Hilversum', 'Amersfoort', 'Almere', 'Lelystad', 'Apeldoorn', 'Den Burg', 'Harlingen', 'Zwolle', 'Gorinchem'],
                           'Lon': [5.1214, 4.9041, 4.4777, 4.3007, 5.8987, 5.1669, 5.3878, 5.2647, 5.4714, 5.9699, 4.7997, 5.4252, 6.0830, 4.9758],
                           'Lat': [52.0907, 52.3676, 51.9244, 52.0705, 51.9851, 52.2292, 52.1561, 52.3508, 52.5185, 52.2112, 53.0546, 53.1746, 52.5168, 51.8372]})
    plt.scatter(cities.Lon, cities.Lat, marker = 'o', color = 'red', s = 50)
    # Slight longitude offset so labels don't sit on top of the markers.
    for _, row in cities.iterrows():
        plt.text(row.Lon + 0.02, row.Lat, row.City, color = 'red')
def show_locations(df, how, cities):
    '''
    Show the locations given in a dataframe
    df = dataframe (must contain 'type', 'longitude_deg', 'latitude_deg', 'iata_code' columns)
    how = points or scatter. The first reads the POI's in a loop, and adds the placename (or whatever field), the second plots
          a scatter which is (much) faster.
    cities = True/False Show the fixed POI's on the map
    '''
    # Keep only major airports.
    df = df[df['type']=="large_airport"]
    fig = plt.figure()
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.stock_img() #show a background
    ax.coastlines()
    if how == "points":
        # PLOTTING AS DIFFERENT POINTS - 12.8sec
        # Slow path: one plot/text call per row, labelled with the IATA code.
        for i in range (0, len(df)):
            plt.plot( df.iloc[i]["longitude_deg"], df.iloc[i]["latitude_deg"], markersize=1, marker='o', color='red')
            plt.text(df.iloc[i]["longitude_deg"], df.iloc[i]["latitude_deg"] , df.iloc[i]["iata_code"],
                 horizontalalignment='left',
                 fontsize='smaller',
                 transform=ccrs.PlateCarree())
    else:
        # PLOTTING AS SCATTERPOINTS - 5.1 seconds
        # Fast path: single scatter, no labels.
        plt.scatter(
            x=df["longitude_deg"],
            y=df["latitude_deg"],
            color="red",
            s=1,
            alpha=1,
            transform=ccrs.PlateCarree()
        )
    if cities:
        show_cities()
    plt.title("Large airports in the world")
    plt.show()
def show_heatmap_with_histogram2d(df, cities):
    '''
    # turn a CSV file with points of interests into a heatmap.
    # np.histogram2d calculates the number of instances in a certain area
    # Thanks to https://medium.com/analytics-vidhya/custom-strava-heatmap-231267dcd084
    '''
    # Keep only major airports, then bin their coordinates into a 100x100 grid.
    df = df[df['type']=="large_airport"]
    Z, xedges, yedges = np.histogram2d(np.array(df.longitude_deg, dtype=float),
                                       np.array(df.latitude_deg, dtype=float), bins = 100)
    fig = plt.figure() # I created a new figure and set up its size
    ax = plt.axes(projection=ccrs.PlateCarree())
    #extent = [-10, 30, 35, 75] # Europe
    # NOTE(review): the view is clipped (commented as "North America") while
    # the title below says "in the world" -- confirm which is intended.
    extent = [df.longitude_deg.min(),-20, 7,df.latitude_deg.max()] # North America
    ax.set_extent(extent, crs=ccrs.PlateCarree())
    ax.coastlines(resolution='10m', color='black', linewidth=.3)
    # vmax=5 saturates the colormap for any cell with 5 or more airports.
    heatmap = ax.pcolormesh(xedges, yedges, Z.T, vmin=0, vmax=5, cmap = 'Set1')
    plt.colorbar(heatmap)
    plt.title("Large airports in the world")
    if cities:
        show_cities()
    plt.show()
def show_heatmap_from_values(url, delimiter, resolution, extent, field_lon, field_lat, field_value, value_type,value_process,
                             min_colorbar_std, max_colorbar_std, colorbar_colors, colorbar_nodes, title, log_value, show_colorbar):
    '''
    # turn a CSV file with points of interests into a heatmap.
    # It takes the average of values or the frequence in a certain area
    # and creates a pivot table which serves as base for the heatmap
    # inspired by https://twitter.com/PythonMaps/status/1386727574894792707
    parameters :
    URL : URL (string)
    delimiter : the delimiter (string)
    resolution (integer/float)
    extent : the extent of what to show: lon-left, lon-right, lat-down, lat-up. (list with numbers)
             Take care : resolution and the extent have to be congruent otherwise there might be unexpected results.
             Use rounded numbers.
             If lon-left = -180, lon-right has to be 180, otherwise there might be unexpected results too.
    field_lon, field_lat, field_value : the fieldnames for resp. longitude, latitude and the value. (string)
    value_type : "frequence" -> it adds a field with the name field_value and fills it with the value 1.0
                 or something else : doesn't matter, it only checks if the value_type is "frequence" (string)
    value_process : "sum" or "mean" -> calculates the sum or the mean (string)
    min_colorbar_std : for the colorbar: (mean - (min_colorbar_std * std)). Put 999 to set minimum of the colorbar to 0 (integer)
    max_colorbar_std : for the colorbar: (mean + (max_colorbar_std * std)) (integer)
    colorbar_colors : colors of the colorbar (list with strings)
    colorbar_nodes : the nodes of the colorbar (list with strings)
    title : Title of the heatmap (string)
    log_value : calculate the 10log of the value -> True / False (boolean)
    show_colorbar : show the colorbar? -> True / False (boolean)
    '''
    t1=time.time()
    df = get_data(url,delimiter)
    # Clip the data to the requested extent.
    df = df[df[field_lon] >= extent[0] ]
    df = df[df[field_lon] <= extent[1] ]
    df = df[df[field_lat] >= extent[2] ]
    df = df[df[field_lat] <= extent[3] ]
    if value_type == "frequence":
        # Count occurrences: every row contributes 1.
        df.loc[:,field_value] = 1
    if log_value:
        df.loc[:,field_value]= np.log10(df.loc[:,field_value])
    # Snap coordinates to the resolution grid.
    df.loc[:, field_lat]= round( df.loc[:,field_lat]/resolution)*resolution
    df.loc[:, field_lon]= round( df.loc[:,field_lon]/resolution)*resolution
    if value_process == "mean":
        # BUG FIX: reset_index() was missing here; the merge below needs
        # field_lat/field_lon as columns (the "sum" branch already had it).
        df = df.groupby([field_lat, field_lon]).mean().reset_index()
    else:
        df = df.groupby([field_lat, field_lon]).sum().reset_index()
    # Colorbar limits derived from the value distribution.
    if min_colorbar_std == 999:
        min_value = 0
    else:
        min_value =df[field_value].mean() - (min_colorbar_std*df[field_value].std() )
    max_value =df[field_value].mean() + (max_colorbar_std*df[field_value].std() )
    # Make a dataframe with a grid of zero values which covers the total
    # area and merge it with the data dataframe.
    row_list = []
    for lon_x in np.arange (extent[0],extent[1]+resolution,resolution):
        for lat_x in np.arange (extent[3],(extent[2]-resolution),-1*resolution):
            row_list.append([lat_x, lon_x, 0])
    df_temp = pd.DataFrame(row_list, columns=[field_lat, field_lon,field_value])
    df = pd.merge(
        df_temp,
        df,
        how="outer",
        on=[field_lat, field_lon]
    )
    df = df.fillna(0) # fill the NaN so the two value-fields can be added
    field_value_x = field_value + "_x"
    field_value_y = field_value + "_y"
    # BUG FIX: was `field_value_y + field_value_y`, which double-counted the
    # data values and ignored the grid column entirely.
    df[field_value]= df[field_value_x] +df[field_value_y]
    df = df.drop(columns=[field_value_x], axis=1)
    df = df.drop(columns=[field_value_y], axis=1)
    # Repeat the rounding, otherwise NaN values and duplicate columns show up
    # in the pivot table when resolution < 1.
    df[field_lat]= round(df[field_lat],2)
    df[field_lon]= round(df[field_lon],2)
    df = df.groupby([field_lat, field_lon]).sum()
    df = df.sort_values(by=[field_lat,field_lon]).reset_index()
    df = df.pivot_table(index=field_lat, columns=field_lon, values=field_value, aggfunc = np.sum)
    df = df.sort_values(by=field_lat, ascending=False)
    df = df.replace(0.0, np.nan) # no color for 0 or NaN values
    xedges = df.columns.tolist()
    yedges = df.index.tolist()
    values__ = df.to_numpy()
    fig = plt.figure()
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines(resolution='10m', color='black', linewidth=.3)
    colorbar_cmap = LinearSegmentedColormap.from_list("mycmap", list(zip(colorbar_nodes, colorbar_colors)))
    heatmap = ax.pcolormesh(xedges, yedges, values__, vmin=min_value, vmax=max_value, cmap = colorbar_cmap)
    if show_colorbar:
        plt.colorbar(heatmap, orientation = 'horizontal')
    plt.title(title)
    t2=time.time()
    print (f"Done in {round(t2-t1,2)} seconds with {title}")
    plt.show()
def show_heatmap_from_array():
    '''
    Show a heatmap, generated from an array
    # thanks to https://stackoverflow.com/a/44952031/4173718
    '''
    extent = [-180, 180, -90, 90]
    # CH4 IN THE AIR
    # heat_data = genfromtxt('https://raw.githubusercontent.com/rcsmit/cartopy_fun/main/MOD_LSTD_M_2021-04-01_rgb_360x180.CSV', delimiter=',')
    # heat_data = genfromtxt('C:\\Users\\rcxsm\\Documents\\phyton_scripts\\MOD_LSTD_M_2021-04-01_rgb_1440x720.CSV', delimiter=',')
    # source : https://neo.sci.gsfc.nasa.gov/view.php?datasetId=MOD_LSTD_M - april 2021, csv, 1.0 degree
    # heat_data[heat_data == 99999.0] = np.nan # filter out the seas and ocean
    # POPULATION COUNT
    #heat_data = genfromtxt('C:\\Users\\rcxsm\\Documents\\phyton_scripts\\gpw_v4_population_count_rev11_2020_1_deg.csv', delimiter=',')
    #heat_data = genfromtxt('C:\\Users\\rcxsm\\Documents\\phyton_scripts\\gpw_v4_population_count_rev11_2020_15_min.asc', delimiter=' ')
    # Active input: population-count grid from the local working directory.
    heat_data = genfromtxt('gpw_v4_population_count_rev11_2020_2pt5_min.asc', delimiter=' ')
    # https://sedac.ciesin.columbia.edu/data/set/gpw-v4-population-count-rev11/data-download
    # 30 Second (approx. 1km)
    # 2.5 Minute (approx. 5km)
    # 15 Minute (approx. 30km)
    # 30 Minute (approx. 55km)
    # 60 Minute/1 Degree (approx. 110km)
    heat_data[heat_data == -9999] = np.nan # filter out the seas and ocean
    heat_data = np.flip(heat_data) #no idea why I have to flip the data twice
    heat_data = np.flip(heat_data,1)
    #heat_data = np.log10(heat_data)
    print (heat_data)
    #std = np.matrix.std(heat_data)
    #print (f"{mean} {std}")
    # nan-aware statistics: drive the colorbar ceiling below.
    mean = np.nanmean(heat_data)
    std = np.nanstd(heat_data)
    print (mean)
    fig, ax = plt.subplots()
    ax = plt.axes(projection=ccrs.PlateCarree())
    #ax.stock_img()
    ax.coastlines(resolution='10m', color='black', linewidth=.3, alpha=.5)
    # Build a lon/lat mesh matching the grid shape for pcolormesh.
    lon = np.linspace(extent[0],extent[1],heat_data.shape[1])
    lat = np.linspace(extent[2],extent[3],heat_data.shape[0])
    Lon, Lat = np.meshgrid(lon,lat)
    # NASA-NEO-style colormap (unused while the 'YlOrBr' line is active).
    neo_colors = [ "white", "cyan","blue", "purple", "magenta", "red", "orange", "yellow", "lightyellow"]
    nodes = [0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.825, 1.0]
    neo_cmap = LinearSegmentedColormap.from_list("mycmap", list(zip(nodes, neo_colors)))
    #heatmap = ax.pcolormesh(Lon,Lat,(heat_data) ,vmin=-25, vmax=50, cmap = neo_cmap)
    heatmap = ax.pcolormesh(Lon,Lat,(heat_data), vmin=0, vmax=(mean+(1*std)), cmap = 'YlOrBr')
    plt.colorbar(heatmap)
    # NOTE(review): title says land surface temperature but the active input
    # above is the population-count grid -- confirm which title is wanted.
    plt.title("LAND SURFACE TEMPERATURE [DAY] \nHeatmap based on a np.array from a CSV file")
    plt.show()
def rasterio_and_geotiff():
    """Placeholder for rasterio/GeoTIFF support.

    Not implemented yet; see https://geoscripting-wur.github.io/PythonRaster/
    for the intended approach.
    """
    pass
def main():
# url = 'C:\\Users\\rcxsm\\Documents\\phyton_scripts\\airports.csv' #10 mb big file, takes a lot of time to process !
# url = "https://ourairports.com/data/airports.csv" #10 mb big file, takes a lot of time to process !
# url = 'https://raw.githubusercontent.com/rcsmit/cartopy_fun/main/airports2.csv'
# df = get_data(url,",")
# SHOW THE LOCATIONS ON A MAP
# show_locations(df, "points", False) # show the airports on a map
# GENERATE A HEATMAP WITH A GENERATED HISTOGRAM
#show_show_heatmap_with_histogram2d(df, False) # number of airports in a certain area
# GENERATE A HEATMAP FROM AN ARRAY WITH VALUES
#show_heatmap_from_array() # land temperatures
# GENERATE A | |
# and does not need any further processing
return p
else:
return ctypes.c_void_p(p)
else:
error = yami4py.yami4_get_error(result)
yami4py.yami4_destroy_result(result)
raise YAMIError(str(error))
def _string(result):
    """Extracts the string result from underlying library.

    Destroys the C result object in both the success and the error path;
    raises YAMIError when result is missing or reports failure.
    """
    # PEP 8: singleton comparison with 'is', not '=='.
    if result is None:
        raise YAMIError("Not enough memory to allocate result object.")
    if yami4py.yami4_is_success(result):
        value = yami4py.yami4_get_string(result)
        yami4py.yami4_destroy_result(result)
        return str(value)
    error = yami4py.yami4_get_error(result)
    yami4py.yami4_destroy_result(result)
    raise YAMIError(str(error))
def _binary(result):
    """Extracts the binary result from underlying library.

    Returns the payload as an immutable byte string.

    BUGFIX: the original built the buffer with array.array("B", size * "\x00")
    (str initialisation is invalid for a 'B' array on Python 3) and returned
    it via array.tostring(), which was removed in Python 3.9. A bytearray
    covers both, and no longer shadows the builtin 'bin'.
    """
    if result is None:
        raise YAMIError("Not enough memory to allocate result object.")
    if not yami4py.yami4_is_success(result):
        error = yami4py.yami4_get_error(result)
        yami4py.yami4_destroy_result(result)
        raise YAMIError(str(error))
    p = yami4py.yami4_get_pointer(result)
    size = yami4py.yami4_get_int_i(result)
    yami4py.yami4_destroy_result(result)
    buf = bytearray(size)
    if _use_standard_extension_API:
        read_byte = yami4py.yami4_read_from_binary_array
        for i in range(size):
            # c_byte values are signed; masking maps them to 0..255
            # (equivalent to the original 'v if v >= 0 else 256 + v').
            buf[i] = read_byte(p, i) & 0xFF
    else:
        signed = ctypes.cast(p, ctypes.POINTER(ctypes.c_byte))
        for i in range(size):
            buf[i] = signed[i] & 0xFF
    return bytes(buf)
def _check(result):
    """Checks result for error condition.

    Raises YAMIError on allocation failure or library error; always
    destroys the C result object before returning.
    """
    # PEP 8: singleton comparison with 'is', not '=='.
    if result is None:
        raise YAMIError("Not enough memory to allocate result object.")
    if yami4py.yami4_is_success(result) == 0:
        error = yami4py.yami4_get_error(result)
        yami4py.yami4_destroy_result(result)
        raise YAMIError(str(error))
    yami4py.yami4_destroy_result(result)
def _utf8(s):
"""Converts the given string to sequence of bytes according to UTF-8."""
return s.encode("utf8")
# API bound to dynamic library:
class OutgoingMessage(object):
    """Outgoing message.
    The handler allowing to track the progress of outgoing message,
    inspect its state and to obtain the reply content.
    Note:
    The objects of this class can be safely used from multiple threads."""
    # Message progress states; first element of the get_state() tuple.
    POSTED = 1
    TRANSMITTED = 2
    ABANDONED = 3
    REPLIED = 4
    REJECTED = 5
    def __init__(self, msg):
        # msg: opaque handle produced by the underlying yami4 C library.
        self.__msg = msg
    def __enter__(self):
        # Context-manager support; __exit__ releases the C-side handle.
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
    def __del__(self):
        # Best-effort cleanup when the user never called close().
        if self.__msg != None:
            self.close()
    def close(self):
        """Deallocates internal resources associated with this object.
        This function is called automatically if the object is used
        as a context manager."""
        yami4py.yami4_destroy_outgoing_message(self.__msg)
        self.__msg = None
    def get_state(self):
        """Returns the state of this message.
        This function allows to inspect the progress of the message
        transmission and returns a 3-tuple: state, sent and total_byte_count.
        During transmission the sent value
        is always smaller than total_byte_count.
        When these two values become equal, it means that the transmission
        was either succesful or abandoned."""
        result = yami4py.yami4_outgoing_message_get_state(self.__msg)
        # The C result packs three ints (i, j, k); extract all of them
        # before destroying the result object.
        state = yami4py.yami4_get_int_i(result)
        bytes_sent = yami4py.yami4_get_int_j(result)
        total_byte_count = yami4py.yami4_get_int_k(result)
        yami4py.yami4_destroy_result(result)
        return state, bytes_sent, total_byte_count
    def wait_for_transmission(self, timeout = 0):
        """Waits for the transmission to finish.
        Waits for the transmission to finish - that is, to either send all
        the message data or to abandon it.
        If the timeout value is greater than 0, it means relative timeout
        in milliseconds; the function returns True if the transmission was
        finished before the timeout expired and False otherwise.
        If the timeout value is non-positive, there is no timeout and the
        function can wait indefinitely.
        After this function returns True the state of the message is either
        TRANSMITTED, ABANDONED, REPLIED or REJECTED."""
        # NOTE(review): _check raises YAMIError on failure and returns None,
        # so this wrapper never actually returns True/False as the docstring
        # (inherited from the C API) suggests — confirm.
        _check(yami4py.yami4_outgoing_message_wait_for_transmission(
            self.__msg, timeout))
    def wait_for_completion(self, timeout = 0):
        """Waits for the full message roundtrip.
        Waits for the full message roundtrip - that is, for some confirmation
        that the message has been received and reacted upon by the
        target agent.
        If the timeout value is greater than 0, it means relative timeout
        in milliseconds; the function returns True if the message was
        completed before the timeout expired and False otherwise.
        If the timeout value is non-positive, there is no timeout and the
        function can wait indefinitely.
        After this function returns True the state of the message is either
        ABANDONED, REPLIED or REJECTED.
        Note:
        This function should not be called if the intended semantics of the
        message is "one-way" - in this case this function would block
        indefinitely."""
        # NOTE(review): same True/False caveat as wait_for_transmission.
        _check(yami4py.yami4_outgoing_message_wait_for_completion(
            self.__msg, timeout))
    def get_reply(self):
        """Provides access to the reply content."""
        # Deserializes the raw reply bytes into a Parameters object;
        # Parameters and _binary are defined elsewhere in this module.
        params = Parameters()
        params.deserialize(_binary(
            yami4py.yami4_outgoing_message_get_raw_reply(self.__msg)))
        return params
    def get_exception_msg(self):
        """Returns the human-readable reason for message rejection."""
        return _string(
            yami4py.yami4_outgoing_message_get_exception_msg(self.__msg))
class IncomingMessage(object):
    """Incoming message.
    The handler allowing to inspect the details of the incoming message
    and sent back replies or rejection notifications.
    The user code interacts with objects of this type mainly in the
    functors that are provided during object registration and that are later
    called back when the incoming message arrives. The handler objects
    can be stored aside for further processing even after the callback
    returns, but should not be kept alive longer than the agent itself.
    Note:
    The objects of this class are not supposed to be used
    from multiple threads."""
    def __init__(self, msg):
        # msg: opaque handle produced by the underlying yami4 C library.
        self.__msg = msg
    def __del__(self):
        # Best-effort cleanup when the user never called close().
        if self.__msg != None:
            self.close()
    def close(self):
        """Deallocates internal resources associated with this object.
        This function is called automatically if the object is used
        as a context manager."""
        # NOTE(review): unlike OutgoingMessage, this class defines no
        # __enter__/__exit__, so the context-manager claim above does not
        # hold here — confirm.
        yami4py.yami4_destroy_incoming_message(self.__msg)
        self.__msg = None
    def get_source(self):
        """Returns the source of this incoming message."""
        return _string(
            yami4py.yami4_incoming_message_get_source(self.__msg))
    def get_object_name(self):
        """Returns the destination object name."""
        return _string(
            yami4py.yami4_incoming_message_get_object_name(self.__msg))
    def get_message_name(self):
        """Returns the message name."""
        return _string(
            yami4py.yami4_incoming_message_get_message_name(self.__msg))
    def get_parameters(self):
        """Provides access to the message content."""
        # Deserializes the raw content bytes into a Parameters object;
        # Parameters and _binary are defined elsewhere in this module.
        params = Parameters()
        params.deserialize(_binary(
            yami4py.yami4_incoming_message_get_raw_content(self.__msg)))
        return params
    def reply(self, content = {}, priority = 0):
        """Sends back the reply.
        Sends back the reply to the message identified by this object.
        The reply (or rejection) can be sent only once."""
        # NOTE(review): mutable default argument ({}); harmless only if
        # serialize() never mutates its input — confirm.
        serialized_content = serialize(content)
        yami4py.yami4_incoming_message_reply(
            self.__msg, serialized_content, len(serialized_content),
            priority)
    def reject(self, reason = "", priority = 0):
        """Sends back the rejection (exception) notification.
        Sends back the rejection to the message identified by this object.
        The rejection (or reply) can be sent only once."""
        yami4py.yami4_incoming_message_reject(
            self.__msg, _utf8(reason), priority)
class Agent(object):
"""Message broker.
The message broker that encapsulates physical channel management,
incoming and outgoing message queues, listeners and resource
management.
A single agent object can manage many listeners, which are responsible
for accepting remote connections, and many incoming and outgoing
connections.
The agent objects can be created and destroyed without constraints
on the stack, on the free store or as static objects.
The objects of this class can be safely used by multiple threads."""
# connection event values
NEW_INCOMING_CONNECTION = 1
NEW_OUTGOING_CONNECTION = 2
CONNECTION_CLOSED = 3
class OptionNames(object):
# core option names
TCP_LISTEN_BACKLOG = "tcp_listen_backlog"
TCP_REUSEADDR = "tcp_reuseaddr"
TCP_NONBLOCKING = "tcp_nonblocking"
TCP_CONNECT_TIMEOUT = "tcp_connect_timeout"
TCP_NODELAY = "tcp_nodelay"
TCP_KEEPALIVE = "tcp_keepalive"
TCP_FRAME_SIZE = "tcp_frame_size"
UDP_FRAME_SIZE = "udp_frame_size"
UNIX_LISTEN_BACKLOG = "unix_listen_backlog"
UNIX_NONBLOCKING = "unix_nonblocking"
UNIX_FRAME_SIZE = "unix_frame_size"
FILE_NONBLOCKING = "file_nonblocking"
FILE_FRAME_SIZE = "file_frame_size"
# C++ general-purpose option names
DISPATCHER_THREADS = "dispatcher_threads"
CONNECTION_RETRIES = "connection_retries"
CONNECTION_RETRY_DELAY_SPREAD = "connection_retry_delay_spread"
OUTGOING_HIGH_WATER_MARK = "outgoing_high_water_mark"
OUTGOING_LOW_WATER_MARK = "outgoing_low_water_mark"
INCOMING_HIGH_WATER_MARK = "incoming_high_water_mark"
INCOMING_LOW_WATER_MARK = "incoming_low_water_mark"
# note: this is not available in Python
# and hardcoded for the underlying C++ component
#DELIVER_AS_RAW_BINARY = "deliver_as_raw_binary"
# additional Python settings
INCOMING_QUEUE_MAX_LENGTH = "incoming_queue_max_length"
class __DispatcherThread(threading.Thread):
"""Dispatcher thread that consumes incoming messages from the queue
and delivers them to registered callable entities."""
def __init__(self, agent, object_map, object_map_lock,
connection_event_callback):
self.__agent = agent
self.__objects = object_map
self.__objects_lock = object_map_lock
self.__connection_event_callback = connection_event_callback
threading.Thread.__init__(self)
def run(self):
while True:
# first check if there is a regular incoming message
msg_ptr = yami4py.yami4_agent_get_next_incoming_message(
self.__agent)
if msg_ptr == None:
# there is no incoming message -> check connection events
conn_event = _string(
yami4py.yami4_agent_get_next_connection_event(
self.__agent))
if conn_event:
if self.__connection_event_callback:
if conn_event[0] == 'i':
event = Agent.NEW_INCOMING_CONNECTION
elif conn_event[0] == 'o':
event = Agent.NEW_OUTGOING_CONNECTION
else:
event = Agent.CONNECTION_CLOSED
connection_name = conn_event[2:]
try:
self.__connection_event_callback(
connection_name, event)
except:
# ignore exceptions from user code
pass
# continue checking the queue
continue
else:
# no incoming message and no connection event
# -> agent closing
return
else:
# process incoming message
msg = IncomingMessage(_pointer(msg_ptr))
object_name = msg.get_object_name()
handler = None
self.__objects_lock.acquire()
try:
try:
handler = self.__objects[object_name]
except KeyError:
# no such object -> try the default handler
if "*" in self.__objects:
handler = self.__objects["*"]
if handler != None:
# object handler found, call | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#****************************************************************************************************************************************************
# Copyright 2017 NXP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the NXP. nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#****************************************************************************************************************************************************
import argparse
import hashlib
import io
import json
import os
import subprocess
import sys
# Global verbosity/debug state for this scanner script.
__g_verbosityLevel = 0
__g_debugEnabled = False
# Name of the per-directory license metadata file the scanner looks for.
__g_licenseFilename = "License.json"
# Content-type labels used in the generated report.
__g_Image = "Image"
__g_Model = "Model"
__g_Video = "Video"
# (extension, content type) pairs recognised by the scanner.
# NOTE(review): '.fsf' is listed under both Model and Video; a dict built
# from this table (BuildExtensionDict) keeps the later (Video) entry —
# confirm which is intended.
__g_extensions = [
    ('.bmp', __g_Image),
    ('.dds', __g_Image),
    ('.hdr', __g_Image),
    ('.jpg', __g_Image),
    ('.ktx', __g_Image),
    ('.png', __g_Image),
    ('.psd', __g_Image),
    ('.tga', __g_Image),
    ('.tiff', __g_Image),
    ('.3ds', __g_Model),
    ('.fbx', __g_Model),
    ('.fsf', __g_Model),
    ('.obj', __g_Model),
    ('.nff', __g_Model),
    # video
    ('.avi', __g_Video),
    ('.fsf', __g_Video),
    ('.mp4', __g_Video),
    ('.mpg', __g_Video),
    ('.mpeg', __g_Video),
    ('.mkv', __g_Video),
    ]
# File name to skip during scanning.
# NOTE(review): ScanForFiles evaluates 'fileId in ignoreFiles'; with this
# plain string that is a substring test, not list membership — confirm.
__g_ignore = "example.jpg"
# Generated per-density Android resource directories that should not
# require license information.
__g_ignoreDir = [
    ".Config/Templates.gen/Android/Copy/res/drawable-hdpi",
    ".Config/Templates.gen/Android/Copy/res/drawable-ldpi",
    ".Config/Templates.gen/Android/Copy/res/drawable-mdpi",
    ".Config/Templates.gen/Android/Copy/res/drawable-xhdpi",
    ".Config/Templates.gen/Android/Copy/res/drawable-xxhdpi",
    ".Config/Templates.gen/Android/Copy/res/drawable-xxxhdpi",
    ".Config/Templates.gen/AndroidGradleCMake/Copy/app/src/main/res/mipmap-hdpi",
    ".Config/Templates.gen/AndroidGradleCMake/Copy/app/src/main/res/mipmap-ldpi",
    ".Config/Templates.gen/AndroidGradleCMake/Copy/app/src/main/res/mipmap-mdpi",
    ".Config/Templates.gen/AndroidGradleCMake/Copy/app/src/main/res/mipmap-xhdpi",
    ".Config/Templates.gen/AndroidGradleCMake/Copy/app/src/main/res/mipmap-xxhdpi",
    ".Config/Templates.gen/AndroidGradleCMake/Copy/app/src/main/res/mipmap-xxxhdpi",
    ]
class Config(object):
    """Carries the verbosity settings shared by the scanning helpers."""
    def __init__(self, verbosityLevel):
        super(Config, self).__init__()
        self.VerbosityLevel = verbosityLevel
        # Any level above zero enables LogPrint output.
        self.IsVerbose = verbosityLevel > 0

    def LogPrint(self, message):
        """Print *message* (and flush stdout) when verbose logging is on.

        BUGFIX: the parameter was named 'str', shadowing the builtin.
        """
        if self.IsVerbose:
            print(message)
            sys.stdout.flush()
class JsonLicense(object):
    """In-memory representation of one parsed License.json file."""
    def __init__(self, sourceDict=None):
        """Wrap *sourceDict* (raw parsed JSON); a fresh dict when omitted.

        BUGFIX: the default used to be a mutable '{}' literal that was
        shared by every instance constructed without an argument.
        """
        super(JsonLicense, self).__init__()
        self.Origin = ""
        self.License = ""
        self.Url = ""
        self.Tags = ""
        self.TagsIdList = []
        self.SourceDict = {} if sourceDict is None else sourceDict

    def SetTags(self, tags):
        """Store *tags* and cache the lower-cased ';'-separated id list."""
        self.Tags = tags
        self.TagsIdList = [entry.lower() for entry in tags.split(';') if len(entry) > 0]

    def Compare(self, license):
        """Field-wise equality check against another JsonLicense."""
        return (self.Origin == license.Origin and self.License == license.License
                and self.Url == license.Url and self.Tags == license.Tags
                and self.SourceDict == license.SourceDict)
def GetExtensionList(extensions):
    """Return just the extension strings from (extension, type) pairs."""
    return [ext for ext, _ in extensions]
def GetTitle():
    """Return the tool's name-and-version banner string."""
    return 'FslResourceScan V0.1.0 alpha'
def ShowTitleIfNecessary():
    """Print the tool banner, but only when running verbosely."""
    global __g_verbosityLevel
    if __g_verbosityLevel <= 0:
        return
    print(GetTitle())
def ReadBinaryFile(filename):
    """Return the entire contents of *filename* as bytes."""
    with open(filename, "rb") as sourceFile:
        return sourceFile.read()
def WriteFile(filename, content):
    """Overwrite *filename* with *content* (text mode)."""
    with open(filename, "w") as targetFile:
        targetFile.write(content)
def ReadJsonFile(filename):
    """Parse *filename* as JSON and return the resulting object."""
    return json.loads(ReadBinaryFile(filename))
def WriteJsonFile(filename, dict):
    """Serialize *dict* as pretty-printed UTF-8 JSON into *filename*."""
    serialized = json.dumps(dict, ensure_ascii=False, indent=2)
    with io.open(filename, 'w', encoding='utf-8') as targetFile:
        targetFile.write(str(serialized))
def ToUnixStylePath(path):
    """Normalize backslashes in *path* to forward slashes; None passes through.

    BUGFIX (idiom): compared against None with '==' instead of 'is'.
    """
    if path is None:
        return None
    return path.replace("\\", "/")
def GetDirectoryName(path):
    """Return the directory part of *path* with unix-style separators."""
    parent = os.path.dirname(path)
    return ToUnixStylePath(parent)
def Join(path1, path2):
    """os.path.join, normalized to unix-style separators."""
    combined = os.path.join(path1, path2)
    return ToUnixStylePath(combined)
class Resource(object):
    """A scanned resource file: its path, location and (optional) license."""
    def __init__(self, sourcePath, relativeSkipChars):
        super(Resource, self).__init__()
        self.SourcePath = sourcePath
        self.SourceDirectory = GetDirectoryName(sourcePath)
        self.RelativePath = sourcePath[relativeSkipChars:]
        # Filled in later by the license-tagging pass.
        self.License = None
def ScanForFiles(path, extensionList, ignoreFiles):
    """Recursively collect files under *path* whose lower-cased name ends
    with one of *extensionList* and is not covered by *ignoreFiles*.

    Note: 'in ignoreFiles' is a substring test when a plain string is passed.
    """
    matches = []
    for root, _dirs, files in os.walk(path):
        for candidate in files:
            lowered = candidate.lower()
            if lowered in ignoreFiles:
                continue
            if any(lowered.endswith(ext) for ext in extensionList):
                matches.append(ToUnixStylePath(os.path.join(root, candidate)))
    return matches
def HashFile(filename, blocksize=65536):
    """Return the SHA-256 hex digest of *filename*, read in *blocksize* chunks."""
    digest = hashlib.sha256()
    with open(filename, "rb") as sourceFile:
        for chunk in iter(lambda: sourceFile.read(blocksize), b""):
            digest.update(chunk)
    return digest.hexdigest()
def BuildFileContentHashDict(config, files):
    """Group *files* by SHA-256 content hash -> {hexdigest: [paths]}.

    NOTE(review): dead code — this definition is shadowed by the
    single-argument BuildFileContentHashDict defined later in this file,
    and the 'config' parameter is unused. Kept byte-identical; consider
    deleting it.
    """
    dictHash = {}
    for file in files:
        hash = HashFile(file)
        if hash in dictHash:
            dictHash[hash].append(file)
        else:
            dictHash[hash] = [file]
    return dictHash
def BuildFileLengthDict(files):
    """Group *files* by their size in bytes -> {size: [paths]}.

    BUGFIX (idiom): the accumulator was named 'dict', shadowing the builtin;
    setdefault replaces the manual membership test.
    """
    sizeDict = {}
    for filename in files:
        fileLength = os.stat(filename).st_size
        sizeDict.setdefault(fileLength, []).append(filename)
    return sizeDict
def BuildFileContentHashDict(files):
    """Group *files* by SHA-256 content hash -> {hexdigest: [paths]}.

    BUGFIX (idiom): locals were named 'dict' and 'hash', shadowing builtins;
    setdefault replaces the manual membership test.
    """
    hashDict = {}
    for filename in files:
        digest = HashFile(filename)
        hashDict.setdefault(digest, []).append(filename)
    return hashDict
def BuildDuplicatedList(fileName, files):
    """Return [fileName] plus every path in *files* whose content equals
    fileName's content byte-for-byte.

    BUGFIX: the original compared set(bytes-of-files[0]) intersections,
    which (a) never read fileName's own content and (b) treated files with
    the same byte *set* (ignoring order and repetition) as identical.
    Callers only pass files with equal size and SHA-256, so exact content
    equality is the intended check.
    """
    srcContent = ReadBinaryFile(fileName)
    matchingFiles = [fileName]
    for candidate in files:
        if ReadBinaryFile(candidate) == srcContent:
            matchingFiles.append(candidate)
    return matchingFiles
def BuildDuplicatedDict(config, files, uniqueFiles):
    """Partition *files* into {firstPath: [duplicates...]}; paths with no
    duplicate are appended to *uniqueFiles* instead.
    """
    duplicatedDict = {}
    while len(files) > 1:
        srcFile = files[0]
        remainingFiles = files[1:]
        matchingFiles = BuildDuplicatedList(srcFile, remainingFiles)
        if len(matchingFiles) > 1:
            duplicatedDict[srcFile] = matchingFiles
        else:
            # BUGFIX: the original referenced the undefined name 'key'
            # here (NameError at runtime); the singleton is srcFile.
            uniqueFiles.append(srcFile)
        # Keep only the not-yet-matched files for the next round.
        files = [f for f in remainingFiles if f not in matchingFiles]
    # BUGFIX: a final unmatched file used to be dropped silently —
    # it is unique by construction, so record it as such.
    if files:
        uniqueFiles.extend(files)
    return duplicatedDict
def BuildUniqueFileDictByContent(config, files, uniqueFiles):
    """Split *files* into duplicates (returned dict) and uniques (appended
    to *uniqueFiles*), pre-binning by content hash so that only files with
    identical hashes are byte-compared.
    """
    duplicationDict = {}
    for sameHashFiles in BuildFileContentHashDict(files).values():
        if len(sameHashFiles) == 1:
            uniqueFiles.append(sameHashFiles[0])
        else:
            duplicationDict.update(
                BuildDuplicatedDict(config, sameHashFiles, uniqueFiles))
    return duplicationDict
def BuildUniqueFileDict(config, files, uniqueFiles):
    """Split *files* into duplicates (returned dict) and uniques (appended
    to *uniqueFiles*), pre-binning by file size so that only same-sized
    files are hashed and compared.
    """
    duplicationDict = {}
    for sameSizeFiles in BuildFileLengthDict(files).values():
        if len(sameSizeFiles) == 1:
            uniqueFiles.append(sameSizeFiles[0])
        else:
            duplicationDict.update(
                BuildUniqueFileDictByContent(config, sameSizeFiles, uniqueFiles))
    return duplicationDict
def GetFileExtension(filename):
    """Return *filename*'s extension, including the leading dot ('' if none)."""
    return os.path.splitext(filename)[1]
def BuildExtensionDict(extensions):
    """Build {extension: contentType} from (extension, contentType) pairs.

    BUGFIX (idiom): the accumulator was named 'dict', shadowing the builtin;
    the dict constructor does the same job directly. Later pairs win on
    duplicate extensions (e.g. '.fsf'), exactly as before.
    """
    return dict(extensions)
def GetContentTypeByExtension(extensionDict, filename):
    """Look up the content type for *filename*'s extension ('' when unknown)."""
    key = GetFileExtension(filename).lower()
    return extensionDict.get(key, "")
def BuildResourceDirectorySet(uniqueFiles, duplicatedFilesDict):
    """Collect the set of directories containing any known resource file.

    Idiom fix: set.add is already idempotent, so the redundant
    'if not in set' membership checks were removed (same result).
    """
    resourceDirSet = set()
    for entry in uniqueFiles:
        resourceDirSet.add(GetDirectoryName(entry))
    for fileList in duplicatedFilesDict.values():
        for entry in fileList:
            resourceDirSet.add(GetDirectoryName(entry))
    return resourceDirSet
class LicenseManager(object):
    """Reads and writes per-directory License.json metadata files."""
    def __init__(self):
        super(LicenseManager, self).__init__()
        # JSON key names used in License.json files.
        self.KeyOrigin = "Origin"
        self.KeyLicense = "License"
        self.KeyComment = "Comment"
        self.KeyTags = "Tags"
        self.KeyURL = "URL"
    def TryReadLicense(self, config, filename):
        """Parse *filename* into a JsonLicense; None when the file is missing
        or lacks the mandatory Origin/License keys."""
        if not os.path.isfile(filename):
            return None
        content = None
        try:
            content = ReadJsonFile(filename)
        except (Exception) as ex:
            # Report which file failed, then let the parse error propagate.
            print("ERROR: Exception while parsing {0}".format(filename))
            raise
        if not self.KeyOrigin in content:
            config.LogPrint("ERROR: '{0}' not present in file '{1}'".format(self.KeyOrigin, filename));
            return None
        if not self.KeyLicense in content:
            config.LogPrint("ERROR: '{0}' not present in file '{1}'".format(self.KeyLicense, filename));
            return None
        license = JsonLicense(content)
        license.Origin = content[self.KeyOrigin]
        license.License = content[self.KeyLicense]
        # NOTE(review): 'Comment' and 'URL' are created here as new
        # attributes — JsonLicense.__init__ declares 'Url' (different
        # capitalisation) and no Comment, so Compare() never sees these
        # values while WriteCSV reads .Comment/.URL — confirm intended.
        license.Comment = content[self.KeyComment] if self.KeyComment in content else ""
        license.URL = content[self.KeyURL] if self.KeyURL in content else ""
        license.SetTags(content[self.KeyTags] if self.KeyTags in content else "")
        return license
    def SaveLicense(self, filename, license):
        """Write the license's raw source dictionary back to *filename*."""
        #contentDict = {}
        #self.__AddKeyIfNeeded(contentDict, self.KeyOrigin, license.Origin)
        #self.__AddKeyIfNeeded(contentDict, self.KeyLicense, license.License)
        #self.__AddKeyIfNeeded(contentDict, self.KeyURL, license.URL)
        WriteJsonFile(filename, license.SourceDict)
    def __AddKeyIfNeeded(self, dict, key, value):
        # Helper for the commented-out SaveLicense variant above:
        # only store non-empty values.
        if len(value) <= 0:
            return
        dict[key] = value
def BuildDirectoryLicenseDict(config, resourceDirectories, licenseFilename):
    """Map directory -> JsonLicense for every directory that carries a
    parseable *licenseFilename*.

    Idiom fixes: 'is not None' instead of '!= None'; locals no longer
    shadow the builtins 'dir' and 'license'.
    """
    licenseManager = LicenseManager()
    licenseDict = {}
    for directory in resourceDirectories:
        parsed = licenseManager.TryReadLicense(config, Join(directory, licenseFilename))
        if parsed is not None:
            licenseDict[directory] = parsed
    return licenseDict
def TagListWithLicenses(inputDirectory, files, directoryLicenseDict):
    """Wrap every path in *files* in a Resource, attaching the license
    registered for its directory (when any)."""
    inputDirectory = ToUnixStylePath(inputDirectory)
    # Strip the input directory prefix (plus the separator when it is
    # not already part of inputDirectory) to obtain relative paths.
    skipChars = len(inputDirectory) + (0 if inputDirectory.endswith('/') else 1)
    tagged = []
    for sourcePath in files:
        resource = Resource(sourcePath, skipChars)
        licenseEntry = directoryLicenseDict.get(resource.SourceDirectory)
        if licenseEntry is not None:
            resource.License = licenseEntry
        tagged.append(resource)
    return tagged
def TagDictWithLicenses(inputDirectory, fileDict, directoryLicenseDict):
    """Like TagListWithLicenses, but for a {path: [paths]} dict; keys are
    rewritten relative to *inputDirectory*."""
    inputDirectory = ToUnixStylePath(inputDirectory)
    skipChars = len(inputDirectory) + (0 if inputDirectory.endswith('/') else 1)
    return {
        key[skipChars:]: TagListWithLicenses(inputDirectory, value, directoryLicenseDict)
        for key, value in fileDict.items()
    }
def WriteCSV(dstFilename, extensions, uniqueEntries, duplicatedEntryDict):
    """Write a semicolon-separated report of unique and duplicated resources.

    uniqueEntries and the lists in duplicatedEntryDict hold Resource
    objects; entries without a license leave the license columns empty.
    Sorts its inputs in place (as before).

    Idiom fixes: 'is None' instead of '== None'; removed stray semicolons
    and dead commented-out code.
    """
    uniqueEntries.sort(key=lambda s: s.SourcePath.lower())
    sortedDuplicatedFiles = sorted(duplicatedEntryDict.keys())
    for fileList in duplicatedEntryDict.values():
        fileList.sort(key=lambda s: s.SourcePath.lower())
    extensionDict = BuildExtensionDict(extensions)
    lines = []
    lines.append("Unique files ({0});;Origin;License;Type;Comment;URL".format(len(uniqueEntries)))
    for entry in uniqueEntries:
        contentType = GetContentTypeByExtension(extensionDict, entry.RelativePath)
        if entry.License is None:
            lines.append("{0};;;;{1};;".format(entry.RelativePath, contentType))
        else:
            lines.append("{0};;{1};{2};{3};{4};{5}".format(entry.RelativePath, entry.License.Origin, entry.License.License, contentType, entry.License.Comment, entry.License.URL))
    lines.append("\n")
    lines.append("Duplicated files ({0})".format(len(duplicatedEntryDict)))
    for key in sortedDuplicatedFiles:
        lines.append("{0};;;;{1};;".format(key, GetContentTypeByExtension(extensionDict, key)))
        for entry in duplicatedEntryDict[key]:
            contentType = GetContentTypeByExtension(extensionDict, entry.RelativePath)
            if entry.License is None:
                lines.append(";{0};;;{1};;".format(entry.RelativePath, contentType))
            else:
                lines.append(";{0};{1};{2};{3};{4};{5}".format(entry.RelativePath, entry.License.Origin, entry.License.License, contentType, entry.License.Comment, entry.License.URL))
    WriteFile(dstFilename, "\n".join(lines))
def PrintIssueDirectories(fileList, dict):
    """Print the sorted set of directories containing entries that still
    need license investigation; silent when there are none.

    (The second parameter keeps its historical name 'dict' for caller
    compatibility, even though it shadows the builtin.)
    """
    uniqueDirs = {entry.SourceDirectory for entry in fileList}
    for value in dict.values():
        uniqueDirs.update(entry.SourceDirectory for entry in value)
    if uniqueDirs:
        print("Investigate license for the following directories:")
        for entry in sorted(uniqueDirs):
            print("  {0}".format(entry))
def Filter(config, ignoreDirList, inputDirectory, files):
inputDirectory = ToUnixStylePath(inputDirectory)
| |
start:
self.quiet += 1
else:
self.quiet -= 1
if tag == "style":
if start:
self.style += 1
else:
self.style -= 1
if tag in ["body"]:
self.quiet = 0 # sites like 9rules.com never close <head>
if tag == "blockquote":
if start:
self.p()
self.o("> ", force=True)
self.start = True
self.blockquote += 1
else:
self.blockquote -= 1
self.p()
def no_preceding_space(self: HTML2Text) -> bool:
return bool(
self.preceding_data and re.match(r"[^\s]", self.preceding_data[-1])
)
if tag in ["em", "i", "u"] and not self.ignore_emphasis:
if start and no_preceding_space(self):
emphasis = " " + self.emphasis_mark
else:
emphasis = self.emphasis_mark
self.o(emphasis)
if start:
self.stressed = True
if tag in ["strong", "b"] and not self.ignore_emphasis:
if start and no_preceding_space(self):
strong = " " + self.strong_mark
else:
strong = self.strong_mark
self.o(strong)
if start:
self.stressed = True
if tag in ["del", "strike", "s"]:
if start and no_preceding_space(self):
strike = " ~~"
else:
strike = "~~"
self.o(strike)
if start:
self.stressed = True
if self.google_doc:
if not self.inheader:
# handle some font attributes, but leave headers clean
self.handle_emphasis(start, tag_style, parent_style)
if tag in ["kbd", "code", "tt"] and not self.pre:
self.o("`") # TODO: `` `this` ``
self.code = not self.code
if tag == "abbr":
if start:
self.abbr_title = None
self.abbr_data = ""
if "title" in attrs:
self.abbr_title = attrs["title"]
else:
if self.abbr_title is not None:
assert self.abbr_data is not None
self.abbr_list[self.abbr_data] = self.abbr_title
self.abbr_title = None
self.abbr_data = None
if tag == "q":
if not self.quote:
self.o(self.open_quote)
else:
self.o(self.close_quote)
self.quote = not self.quote
def link_url(self: HTML2Text, link: str, title: str = "") -> None:
url = urlparse.urljoin(self.baseurl, link)
title = ' "{}"'.format(title) if title.strip() else ""
self.o("]({url}{title})".format(url=escape_md(url), title=title))
if tag == "a" and not self.ignore_links:
if start:
if (
"href" in attrs
and attrs["href"] is not None
and not (self.skip_internal_links and attrs["href"].startswith("#"))
):
self.astack.append(attrs)
self.maybe_automatic_link = attrs["href"]
self.empty_link = True
if self.protect_links:
attrs["href"] = "<" + attrs["href"] + ">"
else:
self.astack.append(None)
else:
if self.astack:
a = self.astack.pop()
if self.maybe_automatic_link and not self.empty_link:
self.maybe_automatic_link = None
elif a:
assert a["href"] is not None
if self.empty_link:
self.o("[")
self.empty_link = False
self.maybe_automatic_link = None
if self.inline_links:
title = a.get("title") or ""
title = escape_md(title)
link_url(self, a["href"], title)
else:
i = self.previousIndex(a)
if i is not None:
a_props = self.a[i]
else:
self.acount += 1
a_props = AnchorElement(a, self.acount, self.outcount)
self.a.append(a_props)
self.o("][" + str(a_props.count) + "]")
if tag == "img" and start and not self.ignore_images:
if "src" in attrs:
assert attrs["src"] is not None
if not self.images_to_alt:
attrs["href"] = attrs["src"]
alt = attrs.get("alt") or self.default_image_alt
# If we have images_with_size, write raw html including width,
# height, and alt attributes
if self.images_as_html or (
self.images_with_size and ("width" in attrs or "height" in attrs)
):
self.o("<img src='" + attrs["src"] + "' ")
if "width" in attrs:
assert attrs["width"] is not None
self.o("width='" + attrs["width"] + "' ")
if "height" in attrs:
assert attrs["height"] is not None
self.o("height='" + attrs["height"] + "' ")
if alt:
self.o("alt='" + alt + "' ")
self.o("/>")
return
# If we have a link to create, output the start
if self.maybe_automatic_link is not None:
href = self.maybe_automatic_link
if (
self.images_to_alt
and escape_md(alt) == href
and self.absolute_url_matcher.match(href)
):
self.o("<" + escape_md(alt) + ">")
self.empty_link = False
return
else:
self.o("[")
self.maybe_automatic_link = None
self.empty_link = False
# If we have images_to_alt, we discard the image itself,
# considering only the alt text.
if self.images_to_alt:
self.o(escape_md(alt))
else:
self.o("![" + escape_md(alt) + "]")
if self.inline_links:
href = attrs.get("href") or ""
self.o(
"(" + escape_md(urlparse.urljoin(self.baseurl, href)) + ")"
)
else:
i = self.previousIndex(attrs)
if i is not None:
a_props = self.a[i]
else:
self.acount += 1
a_props = AnchorElement(attrs, self.acount, self.outcount)
self.a.append(a_props)
self.o("[" + str(a_props.count) + "]")
if tag == "dl" and start:
self.p()
if tag == "dt" and not start:
self.pbr()
if tag == "dd" and start:
self.o(" ")
if tag == "dd" and not start:
self.pbr()
if tag in ["ol", "ul"]:
# Google Docs create sub lists as top level lists
if not self.list and not self.lastWasList:
self.p()
if start:
if self.google_doc:
list_style = google_list_style(tag_style)
else:
list_style = tag
numbering_start = list_numbering_start(attrs)
self.list.append(ListElement(list_style, numbering_start))
else:
if self.list:
self.list.pop()
if not self.google_doc and not self.list:
self.o("\n")
self.lastWasList = True
else:
self.lastWasList = False
if tag == "li":
self.pbr()
if start:
if self.list:
li = self.list[-1]
else:
li = ListElement("ul", 0)
if self.google_doc:
nest_count = self.google_nest_count(tag_style)
else:
nest_count = len(self.list)
# TODO: line up <ol><li>s > 9 correctly.
self.o(" " * nest_count)
if li.name == "ul":
self.o(self.ul_item_mark + " ")
elif li.name == "ol":
li.num += 1
self.o(str(li.num) + ". ")
self.start = True
if tag in ["table", "tr", "td", "th"]:
if self.ignore_tables:
if tag == "tr":
if start:
pass
else:
self.soft_br()
else:
pass
elif self.bypass_tables:
if start:
self.soft_br()
if tag in ["td", "th"]:
if start:
self.o("<{}>\n\n".format(tag))
else:
self.o("\n</{}>".format(tag))
else:
if start:
self.o("<{}>".format(tag))
else:
self.o("</{}>".format(tag))
else:
if tag == "table":
if start:
self.table_start = True
if self.pad_tables:
self.o("<" + config.TABLE_MARKER_FOR_PAD + ">")
self.o(" \n")
else:
if self.pad_tables:
self.o("</" + config.TABLE_MARKER_FOR_PAD + ">")
self.o(" \n")
if tag in ["td", "th"] and start:
if self.split_next_td:
self.o("| ")
self.split_next_td = True
if tag == "tr" and start:
self.td_count = 0
if tag == "tr" and not start:
self.split_next_td = False
self.soft_br()
if tag == "tr" and not start and self.table_start:
# Underline table header
self.o("|".join(["---"] * self.td_count))
self.soft_br()
self.table_start = False
if tag in ["td", "th"] and start:
self.td_count += 1
if tag == "pre":
if start:
self.startpre = True
self.pre = True
else:
self.pre = False
if self.mark_code:
self.out("\n[/code]")
self.p()
# TODO: Add docstring for these one letter functions
def pbr(self) -> None:
"Pretty print has a line break"
if self.p_p == 0:
self.p_p = 1
def p(self) -> None:
"Set pretty print to 1 or 2 lines"
self.p_p = 1 if self.single_line_break else 2
def soft_br(self) -> None:
"Soft breaks"
self.pbr()
self.br_toggle = " "
def o(
self, data: str, puredata: bool = False, force: Union[bool, str] = False
) -> None:
"""
Deal with indentation and whitespace
"""
if self.abbr_data is not None:
self.abbr_data += data
if not self.quiet:
if self.google_doc:
# prevent white space immediately after 'begin emphasis'
# marks ('**' and '_')
lstripped_data = data.lstrip()
if self.drop_white_space and not (self.pre or self.code):
data = lstripped_data
if lstripped_data != "":
self.drop_white_space = 0
if puredata and not self.pre:
# This is a very dangerous call ... it could mess up
# all handling of when not handled properly
# (see entityref)
data = re.sub(r"\s+", r" ", data)
if data and data[0] == " ":
self.space = True
data = data[1:]
if not data and not force:
return
if self.startpre:
# self.out(" :") #TODO: not output when already one there
if not data.startswith("\n") and not data.startswith("\r\n"):
# <pre>stuff...
data = "\n" + data
if self.mark_code:
self.out("\n[code]")
self.p_p = 0
bq = ">" * self.blockquote
if not (force and data and data[0] == ">") and self.blockquote:
bq += " "
if self.pre:
if not self.list:
bq += " "
# else: list content is already partially indented
bq += " " * len(self.list)
data = data.replace("\n", "\n" + bq)
if self.startpre:
self.startpre = False
if self.list:
# use existing initial indentation
data = data.lstrip("\n")
if self.start:
self.space = False
self.p_p = 0
self.start = False
if force == "end":
# It's the end.
self.p_p = 0
self.out("\n")
self.space = False
if self.p_p:
self.out((self.br_toggle + "\n" + bq) * self.p_p)
self.space = False
self.br_toggle = ""
if self.space:
if not self.lastWasNL:
self.out(" ")
self.space = False
if self.a and (
(self.p_p == 2 and self.links_each_paragraph) or force == "end"
):
if force == "end":
self.out("\n")
newa = []
for link in self.a:
if self.outcount > link.outcount:
self.out(
" ["
+ str(link.count)
+ "]: "
+ urlparse.urljoin(self.baseurl, link.attrs["href"])
)
if "title" in link.attrs:
assert link.attrs["title"] is not None
self.out(" (" + link.attrs["title"] + ")")
self.out("\n")
| |
os.getcwd()
self.run_dir = os.path.join(cwd, self.run_dir)
print(self.run_dir)
if os.path.isdir(self.run_dir):
shutil.rmtree(self.run_dir, ignore_errors=True)
if pgm_dir:
shutil.copytree(pgm_dir, self.run_dir)
if pgm_files:
os.makedirs(self.run_dir)
for f in pgm_files:
shutil.copy(f, self.run_dir)
# pre_passes_str (str): pre_passes_str is a string that contains specific passes that we want to use when we reinitialize the training (when we use reset)
self.pre_passes_str= "-prune-eh -functionattrs -ipsccp -globalopt -mem2reg -deadargelim -sroa -early-cse -loweratomic -instcombine -loop-simplify"
# pre_passes (list): pre_passes is a list of integer that contains the indices of the passes written in pre_passes_str.
self.pre_passes = getcycle.passes2indice(self.pre_passes_str)
self.passes = [] # passes (list): passes is a list that contains the passes used for the Rl training
self.best_passes = [] # best_passes (list): best_passes is a list that contains the best passes recorded. (we update the list when the recoded time of cycle count is less than min_cycles)
self.pgm = pgm # pgm_name (str): pgm_name is the file name of the program we are optimizing (which is written in C programming language)
self.pgm_name = pgm.replace('.c','') # bc (str): bs is the file name of the program we are optimizing after being compiled to IR (hardware-independent intermediate representation)
self.bc = self.pgm_name + '.prelto.2.bc'
self.original_obs = [] # original_obs(list): original_obs is a list that contains the original values of the observatyions features.
def __del__(self):
"""
This function closes the log_file (which is a file we use to record information about each episode) when delete_run_dir and log_results are True.
Also deletes the entire directory tree of run_dir (the running directory) if run_dir is an existing directory.
"""
if self.delete_run_dir:
if self.log_results:
self.log_file.close()
if os.path.isdir(self.run_dir):
shutil.rmtree(self.run_dir)
def get_Ox_rewards(self, level=3, sim=False, clang_opt=False):
"""
Examples :
>>> print(get_0x_rewards(self, level=3, clang_opt=False, sim=False))
-45
Args:
level (int): This is an integer that represents different groups of optimizations implemented in the compiler.
Each optimization level is hand-picked by the compiler-designer to benefit specific benchmarks. Defaults to 3.
sim (bool): sim is a Boolean that should be set to True if we want the subprocessor to run the “make clean p v -s” command,
and we should set it to False if we want the subprocessor to run the “make clean accelerationCycle -s” command instead. Defaults to False.
clang_opt (bool): clang_opt is a Boolean that should be set to True if we want to use the clang option when running the HLS, and should be set to False otherwise.
Returns:
Returns the negative number of cycle counts it took to run the synthesized circuit made by using the passes set in the 0x optimization. Which represents for the RL agent the reward.
"""
from gym_hls.envs.getox import getOxCycles
cycle = getOxCycles(self.pgm_name, self.run_dir, level=level, clang_opt=clang_opt, sim=sim)
return -cycle
def print_info(self,message, end = '\n'):
"""
This function is used to print information the episodes of the RL agent.
Args:
message (str): message is a string that will contain information about the episode of the Rl agent that we want to print on our terminal
end (str): end is a string that prints a new line.
"""
sys.stdout.write('\x1b[1;34m' + message.strip() + '\x1b[0m' + end)
def get_cycles(self, passes, sim=False):
"""
Examples :
>>>print(get_cycles(self, [“-correlated-propagation”, “-scalarrepl”, “-lowerinvoke”]))
(55, True)
Args:
passes (list): passes is a list that contains the passes used for the Rl training
sim (bool): sim (bool, optional): sim should be True if you want the arguments used to launch the process to be “make clean p v -s”, or sim should
be False if you want the argument used to launch the process to be "make clean accelerationCycle -s". Defaults to False
Returns:
Returns a tuple where the first element is an integer that represents the number of cycle counts it took to run the synthesized circuit
(the second element doesn’t matter).
"""
if self.shrink:
actual_passes = [self.eff_pass_indices[index] for index in passes]
else:
actual_passes = passes
cycle, _ = getcycle.getHWCycles(self.pgm_name, actual_passes, self.run_dir, sim=sim)
return cycle
def get_rewards(self, diff=True, sim=False):
"""
Examples :
>>>print(get_cycles(self))
-55
Args:
diff (bool): diff is a boolean that is set to True if we want the reward to be the difference of previous cycle count and the current cycle count.
Otherwise, if diff is False, the reward is equal to – the current cycle count.
sim (bool, optional): sim should be True if you want the arguments used to launch the process to be “make clean p v -s”, or sim should be False if you want the
argument used to launch the process to be "make clean accelerationCycle -s". Defaults to False
Returns:
Returns an integer that represents the reward for the RL agent (it shows the improvement of the circuit), and we get it either by calculating the difference
between previous cycle count and the current cycle count or the negative value of the current cycle count.
"""
if self.shrink:
actual_passes = [self.eff_pass_indices[index] for index in self.passes]
else:
actual_passes = self.passes
cycle, done = getcycle.getHWCycles(self.pgm_name, actual_passes, self.run_dir, sim=sim)
if cycle == 10000000:
cycle = 2 * self.O0_cycles
# print("pass: {}".format(self.passes))
# print("prev_cycles: {}".format(self.prev_cycles))
if(self.verbose):
self.print_info("passes: {}".format(actual_passes))
self.print_info("program: {} -- ".format(self.pgm_name)+" cycle: {} -- prev_cycles: {}".format(cycle, self.prev_cycles))
try:
cyc_dict = pickle.load(open('cycles_chstone.pkl','rb'))
except:
cyc_dict = {}
if self.pgm_name in cyc_dict:
if cyc_dict[self.pgm_name]['cycle']>cycle:
cyc_dict[self.pgm_name]['cycle'] = cycle
cyc_dict[self.pgm_name]['passes'] = self.passes
else:
cyc_dict[self.pgm_name] = {}
cyc_dict[self.pgm_name]['cycle'] = cycle
cyc_dict[self.pgm_name]['passes'] = self.passes
output = open('cycles_chstone.pkl', 'wb')
pickle.dump(cyc_dict, output)
output.close()
if (cycle < self.min_cycles):
self.min_cycles = cycle
self.best_passes = actual_passes
if (diff):
rew = self.prev_cycles - cycle
self.prev_cycles = cycle
else:
rew = -cycle
# print("rew: {}".format(rew))
return rew, done
def get_obs(self,get_normalizer=False):
"""
Examples :
>>>print(get_obs())
[1, 0, 0, 0, 1]
Args:
get_normalizer (bool): get_normalizer is a boolean that should be set to True if we want to get a normalizer value that is used to normalize the list of
observation features. Defaults to False.
Returns:
Returns a list or a tuple that contains the list of the observation features that we need to feed as input to the RL agent.
"""
feats = getfeatures.run_stats(self.bc, self.run_dir)
normalizer=feats[-5] + 1
if self.shrink:
actual_feats = [feats[index] for index in self.eff_feat_indices]
else:
actual_feats = feats
if self.binary_obs:
actual_feats = [1 if feat > 0 else 0 for feat in actual_feats]
if not get_normalizer:
return actual_feats
else:
return actual_feats,normalizer
return actual_feats
# reset() resets passes to []
# reset(init=[1,2,3]) resets passes to [1,2,3]
def reset(self, init=None, get_obs=True, get_rew=False, ret=True, sim=False):
"""
Examples :
>>>print(reset())
[0, 0, 0, 0]
Args:
init (list, optional): init is a list of integer that is equal to (set to) the new passes list. Defaults to None.
get_obs (bool, optional): get_obs is a Boolean that is set to True when we decide to get the list of observation features after we reset.
It should be set to False otherwise. Defaults to True.
get_rew (bool, optional): get_rew is a Boolean that is set to True when we decide to get the reward after we reset. It should be set to False otherwise.
Defaults to False.
ret (bool, optional): ret is a Boolean that is set to True when we decide to get the reward or the list of observation features after we reset.
It should be set to False otherwise. Defaults to True.
sim (bool, optional): sim should be True if you want the arguments used to launch the process to be “make clean p v -s”, or sim should be False if you want
the argument used to launch the process to be "make clean accelerationCycle -s". Defaults to False. Defaults to False.
Returns:
Returns an integer for the reward or a list for the observation features, or a tuple of both an integer and a list for the reward and the observation features, or zero if ret if False.
"""
#self.min_cycles = 10000000
self.passes = []
if self.feature_type == | |
for signature in signature_solutions_aggregate:
if signature['ss_id'] not in signature_list and signature['percentage'] != 0:
signature_list.append(signature['ss_id'])
signature_aggregate.append(signature)
if others['percentage'] != 0:
signature_aggregate.append(others)
project_mapping = get_project_aggregate(year, active_projects,
operating_unit=operating_unit,
budget_source=budget_source)
project_serializer = ProjectAggregateSerializer(project_mapping)
signature_serializer = SignatureSolutionsAggregateSerializer(signature_aggregate, many=True)
data = {
'project': project_serializer.data,
'signature_solutions': signature_serializer.data,
}
return self.jp_response(s_code='HTTP_200_OK', data=data)
except Exception as e:
print(e)
return self.jp_error_response('HTTP_500_INTERNAL_SERVER_ERROR', 'EXCEPTION', [str(e), ])
class SignatureSolutionsDetailsView(APIView, ResponseViewMixin):
    """Detail endpoint for one signature solution (ss_id) in a given year.

    Returns the top-10 recipient operating units and top-10 funding
    organisations (both by total budget) plus the solution's aggregate
    figures, serialized into a single response.
    """

    def get(self, request, *args, **kwargs):
        """Handle GET with query params ``year`` (required) and ``ss_id``.

        Returns 200 with the detail payload, 400 when ``year`` is missing,
        500 (with the exception text) on any other failure.
        """
        try:
            year = request.GET.get('year', '')
            ss_id = request.GET.get('ss_id', '')
            if not year:
                return self.jp_error_response('HTTP_400_BAD_REQUEST', 'UNKNOWN_QUERY',
                                              ['Please provide a year'])
            active_projects = get_active_projects_for_year(year)
            # Restrict fund split-ups to projects active in the requested year.
            donor_query = Q(project__in=active_projects) & Q(year=year) & \
                          Q(project__project_active__year=year)
            # Top 10 recipient operating units by total budget for this solution.
            top_recipient_offices = DonorFundSplitUp.objects.filter(donor_query &
                                                                    Q(project__operating_unit__isnull=False) &
                                                                    Q(output__signature_solution__ss_id=ss_id))\
                .values('project__operating_unit').annotate(
                total_expense=Coalesce(Sum('expense'), 0), total_budget=Coalesce(Sum('budget'), 0),
                name=F('project__operating_unit__name'),
                iso3=F('project__operating_unit__iso3')) \
                .order_by('-total_budget')[0:10]
            recipient_offices_serializer = SignatureSolutionOperatingUnitSerializer(top_recipient_offices, many=True)
            # Top 10 funding organisations by total budget for this solution.
            budget_sources = DonorFundSplitUp.objects.filter(donor_query &
                                                             Q(output__signature_solution__ss_id=ss_id)) \
                .values('organisation') \
                .annotate(total_expense=Coalesce(Sum('expense'), 0),
                          total_budget=Coalesce(Sum('budget'), 0),
                          short_name=F('organisation__short_name'),
                          organisation_name=F('organisation__org_name'))\
                .order_by('-total_budget')[0:10]
            budget_sources_serializer = SectorBudgetSourcesSerializer(budget_sources, many=True,
                                                                      context={'request': request})
            signature_aggregate = []
            signature_solutions_aggregate = []
            signature_list = []
            # Aggregate every SignatureSolution row sharing this ss_id, then
            # keep a single entry per ss_id, ordered by descending percentage.
            for signature_solutions in SignatureSolution.objects.filter(ss_id=ss_id):
                aggregate = get_signature_solutions_aggregate(year, active_projects,
                                                              signature_solution=signature_solutions)
                if aggregate:
                    signature_solutions_aggregate.append(aggregate)
            signature_solutions_aggregate = sorted(signature_solutions_aggregate,
                                                   key=lambda signature: signature['percentage'], reverse=True)
            for signature in signature_solutions_aggregate:
                if signature['ss_id'] not in signature_list:
                    signature_list.append(signature['ss_id'])
                    signature_aggregate.append(signature)
            signature_serializer = SignatureSolutionsAggregateSerializer(signature_aggregate, many=True)
            data = {
                'top_recipient_offices': recipient_offices_serializer.data,
                'budget_sources': budget_sources_serializer.data,
                'aggregate': signature_serializer.data
            }
            return self.jp_response(s_code='HTTP_200_OK', data=data)
        except Exception as e:
            return self.jp_error_response('HTTP_500_INTERNAL_SERVER_ERROR', 'EXCEPTION', [str(e), ])
class SignatureSolutionsOutcomeView(APIView, ResponseViewMixin):
    """Breaks one signature solution (ss_id) down into the sectors (sp_id)
    that contribute to it, with each sector's share, colour and budget."""

    def get(self, request, *args, **kwargs):
        """Handle GET with query params ``year`` (required) and ``ss_id``.

        Returns 200 with per-sector percentages, 400 when ``year`` is
        missing, 500 on any other failure.
        """
        try:
            year = request.GET.get('year', '')
            ss_id = request.GET.get('ss_id', '')
            if not year:
                return self.jp_error_response('HTTP_400_BAD_REQUEST', 'UNKNOWN_QUERY',
                                              ['Please provide a year'])
            active_projects = get_active_projects_for_year(year)
            sign = []
            signature_solution = SignatureSolution.objects.values('sp_id').filter(ss_id=ss_id)
            num_signature_solution = len(signature_solution)
            # Share of this signature solution's rows held by each sp_id,
            # as a percentage of all matching SignatureSolution rows.
            percent_signature_solution = {sp_id['sp_id']: sp_id['sp_id__count'] * 100/num_signature_solution
                                          for sp_id in signature_solution.annotate(Count('sp_id'))}
            for k, v in percent_signature_solution.items():
                sector_name = Sector.objects.get(code=k)
                aggregate = get_sector_aggregate(year, active_projects=active_projects, sector=sector_name)
                sector_budget = aggregate['budget']
                sector_color = sector_name.color
                sign.append({'sector_id': k, 'sector_name': sector_name, 'percent': v, 'sector_color': sector_color,
                             'budget': sector_budget})
            signature_outcome_serializer = SignatureSolutionOutcomeSerializer(sign, many=True,
                                                                              context={'request': request})
            data = {
                'percentage': signature_outcome_serializer.data
            }
            return self.jp_response(s_code='HTTP_200_OK', data=data)
        except Exception as e:
            return self.jp_error_response('HTTP_500_INTERNAL_SERVER_ERROR', 'EXCEPTION', [str(e), ])
class SectorSignatureSolutionView(APIView, ResponseViewMixin):
    """Maps one sector (code) onto the signature solutions it links to,
    with each solution's share of the sector's links plus its budget."""

    def get(self, request, *args, **kwargs):
        """Handle GET with query params ``year`` (required) and ``code``.

        Returns 200 with per-solution percentages sorted by solution id,
        400 when ``year`` is missing, 500 on any other failure.
        """
        try:
            year = request.GET.get('year', '')
            code = request.GET.get('code', '')
            if not year:
                return self.jp_error_response('HTTP_400_BAD_REQUEST', 'UNKNOWN_QUERY',
                                              ['Please provide a year'])
            active_projects = get_active_projects_for_year(year)
            sectors = []
            sector = Sector.objects.values('signaturesolution__ss_id').filter(code=code)
            num_sector = len(sector)
            # Percentage of the sector's signature-solution links held by each ss_id.
            percent_sector = {ss_id['signaturesolution__ss_id']: ss_id['signaturesolution__ss_id__count'] * 100/num_sector
                              for ss_id in sector.annotate(Count('signaturesolution__ss_id'))}
            for k, v in percent_sector.items():
                signature_solution = SignatureSolution.objects.values('name').filter(ss_id=k).distinct()
                signature_solution_name = signature_solution[0]['name']
                # NOTE(review): ss_budget ends up holding the aggregate of the
                # *last* SignatureSolution row iterated below — confirm that
                # overwriting on each iteration is intended.
                for signature in SignatureSolution.objects.filter(ss_id=k):
                    aggregate = get_signature_solutions_aggregate(year, active_projects=active_projects,
                                                                  signature_solution=signature)
                    ss_budget = aggregate['budget']
                sectors.append({'signature_solution_id': k, 'percent': v,
                                'signature_solution_name': signature_solution_name, 'budget': ss_budget})
            sector_sort = sorted(sectors, key=lambda k: k['signature_solution_id'])
            sector_signature_solution_serializer = SectorSignatureSolutionSerializer(sector_sort, many=True,
                                                                                     context={'request': request})
            data = {
                'percentage': sector_signature_solution_serializer.data
            }
            return self.jp_response(s_code='HTTP_200_OK', data=data)
        except Exception as e:
            return self.jp_error_response('HTTP_500_INTERNAL_SERVER_ERROR', 'EXCEPTION', [str(e), ])
# class SdgTargetDetailView(APIView, ResponseViewMixin):
# def get(self, request, *args, **kwargs):
# try:
# year = request.GET.get('year', '')
# sdg_target = request.GET.get('sdg_target', '')
# operating_unit = request.GET.get('operating_unit', '')
# budget_source = request.GET.get('budget_source', '')
# if not year:
# return self.jp_error_response('HTTP_400_BAD_REQUEST', 'UNKNOWN_QUERY',
# ['Please provide a year'])
# if year and int(year) >= SP_START_YEAR:
# active_projects = get_active_projects_for_year(year)
# donor_query = Q(project__in=active_projects) & Q(year=year) & \
# Q(project__project_active__year=year)
# query = Q(project_id__in=active_projects) & Q(donorfundsplitup__year=year) & \
# Q(project_active__year=year)
#
# if sdg_target and sdg_target != '0':
# query.add(Q(outputtarget__target_id=sdg_target), Q. AND)
# donor_query.add(Q(output__outputtarget__target_id=sdg_target), Q. AND)
# if operating_unit:
# query.add(Q(operating_unit=operating_unit) |
# Q(operating_unit__bureau__code=operating_unit), Q.AND)
# donor_query.add(Q(project__operating_unit=operating_unit) |
# Q(project__operating_unit__bureau__code=operating_unit), Q.AND)
# if budget_source:
# budget_query = Q(donorfundsplitup__organisation__ref_id=budget_source) | \
# Q(donorfundsplitup__organisation__type_level_3=budget_source)
# donor_budget_query = Q(organisation__ref_id=budget_source) | \
# Q(organisation__type_level_3=budget_source)
# query.add(budget_query, Q.AND)
# donor_query.add(donor_budget_query, Q.AND)
# target_percent = []
# sdg_obj = SdgTargets.objects.values('sdg', 'description').get(target_id=sdg_target)
# target_agg = get_target_aggregate(year, target=sdg_target, operating_unit=operating_unit,
# budget_source=budget_source)
# aggregate_results1 = Project.objects.filter(query)\
# .aggregate(projects=Count('project_id', distinct=True),
# budget_sources=Count('donorfundsplitup__organisation', distinct=True))
#
# target_percent.append({'total_budget': target_agg['target_budget'],
# 'total_expense': target_agg['target_expense'],
# 'total_projects': aggregate_results1['projects'],
# 'budget_sources': aggregate_results1['budget_sources'],
# 'target_desc': sdg_obj['description'],
# 'target_id': sdg_target,
# 'sdg': sdg_obj['sdg']
# })
# budget_sources = DonorFundSplitUp.objects.filter(donor_query).values('organisation') \
# .annotate(total_expense=Coalesce(Sum('expense'), 0),
# total_budget=Coalesce(Sum('budget'), 0),
# short_name=F('organisation__short_name'),
# organisation_name=F('organisation__org_name')) \
# .order_by('-total_budget')[0:10]
# budget_sources_serializer = SdgBudgetSourcesSerializer(budget_sources, many=True,
# context={'request': request})
# top_recipient_offices = DonorFundSplitUp.objects.filter(donor_query &
# Q(project__operating_unit__isnull=False)) \
# .values('project__operating_unit') \
# .annotate(total_expense=Coalesce(Sum('expense'), 0),
# total_budget=Coalesce(Sum('budget'), 0),
# name=F('project__operating_unit__name'),
# iso3=F('project__operating_unit__iso3')).order_by(
# '-total_budget')[0:10]
# recipient_offices_serializer = SdgOperatingUnitSerializer(top_recipient_offices, many=True)
#
# data = {
# 'aggregate': target_percent,
# 'budget_sources': budget_sources_serializer.data,
# 'top_recipient_offices': recipient_offices_serializer.data
# }
# return self.jp_response(s_code='HTTP_200_OK', data=data)
# except Exception as e:
# return self.jp_error_response('HTTP_500_INTERNAL_SERVER_ERROR', 'EXCEPTION', [str(e), ])
class SdgTargetDetailView(APIView, ResponseViewMixin):
    """Detail endpoint for a single SDG target: aggregate figures plus the
    top-10 budget sources and top-10 recipient offices for that target.

    Optionally filtered by operating unit and budget source. Years before
    SDG_START_YEAR return an empty ``data`` dict.
    """

    def get(self, request, *args, **kwargs):
        """Handle GET with ``year`` (required), ``sdg_target``,
        ``operating_unit`` and ``budget_source`` query params.

        Returns 200 with the detail payload (empty when the year predates
        SDG_START_YEAR), 400 when ``year`` is missing, 500 on any other
        failure.
        """
        try:
            year = request.GET.get('year', '')
            sdg_target = request.GET.get('sdg_target', '')
            operating_unit = request.GET.get('operating_unit', '')
            budget_source = request.GET.get('budget_source', '')
            if not year:
                return self.jp_error_response('HTTP_400_BAD_REQUEST', 'UNKNOWN_QUERY',
                                              ['Please provide a year'])
            data = dict()
            if year and int(year) >= SDG_START_YEAR:
                active_projects = get_active_projects_for_year(year)
                # donor_query filters DonorFundSplitUp rows; query filters
                # Project rows. Both are narrowed further by the optional params.
                donor_query = Q(project__in=active_projects) & Q(year=year) & \
                              Q(project__project_active__year=year)
                query = Q(project_id__in=active_projects) & Q(donorfundsplitup__year=year) & \
                        Q(project_active__year=year)
                if sdg_target and sdg_target != '0':
                    query.add(Q(outputtarget__target_id=sdg_target) & Q(outputtarget__year=year), Q. AND)
                    donor_query.add(Q(output__outputtarget__target_id=sdg_target)
                                    & Q(output__outputtarget__year=year), Q. AND)
                if operating_unit:
                    # operating_unit may be either a unit id or a bureau code.
                    query.add(Q(operating_unit=operating_unit) |
                              Q(operating_unit__bureau__code=operating_unit), Q.AND)
                    donor_query.add(Q(project__operating_unit=operating_unit) |
                                    Q(project__operating_unit__bureau__code=operating_unit), Q.AND)
                if budget_source:
                    # budget_source may match an organisation ref_id or its
                    # level-3 type code.
                    budget_query = Q(donorfundsplitup__organisation__ref_id=budget_source) | \
                                   Q(donorfundsplitup__organisation__type_level_3=budget_source)
                    donor_budget_query = Q(organisation__ref_id=budget_source) | \
                                         Q(organisation__type_level_3=budget_source)
                    query.add(budget_query, Q.AND)
                    donor_query.add(donor_budget_query, Q.AND)
                target_percent = []
                sdg_obj = SdgTargets.objects.values('sdg', 'description', 'sdg__name').get(target_id=sdg_target)
                target_agg = get_target_aggregate_new(year, sdg_target, sdg_obj['sdg'], operating_unit=operating_unit,
                                                      budget_source=budget_source, active_projects=active_projects)
                aggregate_results1 = Project.objects.filter(query)\
                    .aggregate(projects=Count('project_id', distinct=True),
                               budget_sources=Count('donorfundsplitup__organisation', distinct=True))
                # Single-element list holding the target's headline figures.
                target_percent.append({'total_budget': target_agg['target_budget'],
                                       'total_expense': target_agg['target_expense'],
                                       'total_projects': aggregate_results1['projects'],
                                       'budget_sources': aggregate_results1['budget_sources'],
                                       'target_desc': sdg_obj['description'],
                                       'target_id': sdg_target,
                                       'sdg': sdg_obj['sdg'],
                                       'sdg_name': sdg_obj['sdg__name'],
                                       })
                # Candidate top organisations by overall budget, then
                # re-aggregated per target and re-sorted before serializing.
                budget_sources = DonorFundSplitUp.objects.filter(donor_query).values('organisation') \
                    .annotate(total_expense=Coalesce(Sum('expense'), 0),
                              total_budget=Coalesce(Sum('budget'), 0),
                              short_name=F('organisation__short_name'),
                              organisation_name=F('organisation__org_name')) \
                    .order_by('-total_budget')[0:10]
                budget_sources_data = []
                for source in budget_sources:
                    budget_data = get_target_aggregate_new(year, target=sdg_target, sdg=sdg_obj['sdg'],
                                                           budget_source=source['organisation'],
                                                           active_projects=active_projects)
                    if budget_data:
                        top_budget_source = {'total_expense': budget_data['target_expense'],
                                             'total_budget': budget_data['target_budget'],
                                             'short_name': source['short_name'],
                                             'organisation_name': source['organisation_name']
                                             }
                        budget_sources_data.append(top_budget_source)
                top_budget_sources = sorted(budget_sources_data, key=lambda k: k['total_budget'], reverse=True)
                budget_sources_serializer = SdgBudgetSourcesSerializer(top_budget_sources, many=True,
                                                                       context={'request': request})
                # Same candidate-then-reaggregate pattern for recipient offices.
                top_recipient_offices = DonorFundSplitUp.objects.filter(donor_query &
                                                                        Q(project__operating_unit__isnull=False)) \
                    .values('project__operating_unit') \
                    .annotate(total_expense=Coalesce(Sum('expense'), 0),
                              total_budget=Coalesce(Sum('budget'), 0),
                              name=F('project__operating_unit__name'),
                              iso3=F('project__operating_unit__iso3')).order_by(
                    '-total_budget')[0:10]
                top_recipient_offices_data = []
                for recipient in top_recipient_offices:
                    recipient_data = get_target_aggregate_new(year, target=sdg_target, sdg=sdg_obj['sdg'],
                                                              operating_unit=recipient['project__operating_unit'],
                                                              active_projects=active_projects)
                    if recipient_data:
                        top_recipient = {'total_expense': recipient_data['target_expense'],
                                         'total_budget': recipient_data['target_budget'],
                                         'name': recipient['name'],
                                         'iso3': recipient['iso3'],
                                         }
                        top_recipient_offices_data.append(top_recipient)
                recipient_offices = sorted(top_recipient_offices_data, key=lambda k: k['total_budget'], reverse=True)
                recipient_offices_serializer = SdgOperatingUnitSerializer(recipient_offices, many=True)
                data = {
                    'aggregate': target_percent,
                    'budget_sources': budget_sources_serializer.data,
                    'top_recipient_offices': recipient_offices_serializer.data
                }
            # For years before SDG_START_YEAR this returns the empty dict.
            return self.jp_response(s_code='HTTP_200_OK', data=data)
        except Exception as e:
            return self.jp_error_response('HTTP_500_INTERNAL_SERVER_ERROR', 'EXCEPTION', [str(e), ])
class SdgTargetView(APIView, ResponseViewMixin):
    """Lists the budget/expense percentages of every target under one SDG
    for a year, optionally filtered by budget source and operating unit."""

    def get(self, request, *args, **kwargs):
        """Handle GET with ``year`` (required), ``sdg``, ``budget_source``
        and ``operating_unit`` query params.

        Returns 200 with targets whose budget percentage is positive
        (ordered by target-id suffix, numeric suffixes first), 400 when
        ``year`` is missing, 500 on any other failure.
        """
        try:
            year = request.GET.get('year', '')
            budget_source = request.GET.get('budget_source', '')
            operating_unit = request.GET.get('operating_unit', '')
            sdg = request.GET.get('sdg', '')
            if not year:
                return self.jp_error_response('HTTP_400_BAD_REQUEST', 'UNKNOWN_QUERY',
                                              ['Please provide a year'])
            target_percent = []
            active_projects = get_active_projects_for_year(year, operating_unit=operating_unit,
                                                           budget_source=budget_source)
            # SDG targets only exist from SDG_START_YEAR onwards; earlier
            # years produce an empty listing.
            if year and int(year) >= SDG_START_YEAR:
                targets = SdgTargets.objects.filter(sdg=sdg).values('target_id', 'description')
                for target in targets:
                    target_id = target['target_id']
                    target_agg = get_target_aggregate_new(year, target_id, sdg, operating_unit=operating_unit,
                                                          budget_source=budget_source, active_projects=active_projects)
                    if target_agg['budget_percentage'] > 0:
                        target_percent.append({'target_budget': target_agg['target_budget'],
                                               'target_expense': target_agg['target_expense'],
                                               'target_percentage': target_agg['budget_percentage'],
                                               'target_id': target_id,
                                               'target_description': target['description']})
            # Order by the first two characters after the "." in the target
            # id ("<sdg>.<suffix>"): numeric suffixes sorted first, then the
            # non-numeric ones.
            result = []
            result_str = []
            for target_val in target_percent:
                if target_val['target_id'].split('.')[1][0:2].isdigit():
                    result.append(int(target_val['target_id'].split('.')[1][0:2]))
                else:
                    result_str.append(target_val['target_id'].split('.')[1][0:2])
            outputs = sorted(result) + sorted(result_str)
            target_data = []
            for output in outputs:
                for target_per in target_percent:
                    if target_per['target_id'].split('.')[1][0:2] == str(output) :
                        target_data.append(target_per)
            sdg_target_serializer = SdgTargetSerializer(target_data, many=True, context={'request': request})
            data = {
                'percentage': sdg_target_serializer.data
            }
            return self.jp_response(s_code='HTTP_200_OK', data=data)
        except Exception as e:
            print(e)
            return self.jp_error_response('HTTP_500_INTERNAL_SERVER_ERROR', 'EXCEPTION', [str(e), ])
class SdgView(APIView, ResponseViewMixin):
    """Serves the SDG sunburst JSON for a year, preferring a cached copy."""

    def get(self, request, *args, **kwargs):
        """Return the sunburst payload; 400 without a year, 500 on failure."""
        try:
            import json

            params = request.GET
            year = params.get('year', '')
            if not year:
                return self.jp_error_response('HTTP_400_BAD_REQUEST', 'UNKNOWN_QUERY',
                                              ['Please provide a year'])
            operating_unit = params.get('operating_unit', '')
            budget_source = params.get('budget_source')
            sdg_code = params.get('sdg', None)
            # Prefer the precomputed sunburst row for the year; rebuild on any
            # lookup failure. NOTE(review): the cached row ignores the
            # operating_unit/budget_source/sdg filters — confirm intended.
            try:
                payload = SDGSunburst.objects.get(sdg_year=year).response
            except Exception:
                payload = get_sdg_sunburst(year, operating_unit, budget_source, sdg_code)
            return self.jp_response(s_code='HTTP_200_OK', data=json.loads(payload))
        except Exception as e:
            return self.jp_error_response('HTTP_500_INTERNAL_SERVER_ERROR', 'EXCEPTION', [str(e), ])
class SectorView(APIView, ResponseViewMixin):
    """Lists sectors split into the new focus areas and the 2015-2017 set,
    with an optional search filter and a synthetic "Others" entry."""

    def get(self, request, *args, **kwargs):
        """Handle GET with optional ``search``, ``year``, ``operating_unit``
        and ``donor`` query params; returns 200 with both sector groups,
        500 on failure."""
        try:
            search = self.request.GET.get('search', '')
            year = process_query_params(request.GET.get('year', ''))
            operating_unit = request.GET.get('operating_unit', '')
            donor = process_query_params(request.GET.get('donor', ''))
            new_qs = Sector.objects.filter(Q(code__in=NEW_SECTOR_CODES)).values()
            old_qs = Sector.objects.filter(Q(code__in=OLD_SECTOR_CODES)).values()
            # The search filter only narrows the 2015-2017 (old) sector set.
            if search:
                old_qs = old_qs.filter(Q(code__icontains=search) | Q(sector__icontains=search))
            old_sector = list(old_qs)
            new_sector = list(new_qs)
            # Append the synthetic "Others" bucket when it matches the search
            # and at least one unclassified sector exists for the filters.
            if search == '' or search.lower() == 'others' or search == '0':
                if get_valid_sectors(year, operating_unit, donor, sector='0'):
                    old_sector.append({'code': "0", 'sector': "Others", 'color': NULL_SECTOR_COLOR_CODE,
                                       'start_year': '2015', 'end_year': '2017'})
                    new_sector.append({'code': "0", 'sector': "Others", 'color': NULL_SECTOR_COLOR_CODE,
                                       'start_year': '2018', 'end_year': '2021'})
            data = {
                'sector': {
                    'new_focus': new_sector,
                    '2015-2017': old_sector
                }
            }
            return self.jp_response(s_code='HTTP_200_OK', data=data)
        except Exception as e:
            return self.jp_error_response('HTTP_500_INTERNAL_SERVER_ERROR', 'EXCEPTION', [str(e), ])
def get_map_data(year, sdg='', budget_source='', recipient_country='', sector='', project_id='', budget_type='',
signature_solution='', sdg_target='', marker_type='', marker_id='', provide_output=''):
active_projects = get_active_projects_for_year(year, operating_unit=recipient_country,
budget_source=budget_source)
projects_query = get_project_query(year, operating_unit=recipient_country, budget_source=budget_source,
sdg=sdg, sector=sector)
projects = Project.objects.filter(projects_query & Q(project_id__in=active_projects)).distinct()
fund_query = get_fund_split_query(year, budget_source=budget_source, operating_unit=recipient_country,
sdg=sdg, sector=sector, project_id=project_id, budget_type=budget_type,
signature_solution=signature_solution, sdg_target=sdg_target,
marker_type=marker_type, marker_id=marker_id)
donor_query = fund_query & Q(project__in=projects)
countries = DonorFundSplitUp.objects.filter(donor_query).distinct().prefetch_related('project', 'output')
countries = countries.values('output__operating_unit') \
.annotate(project_count=Count('project', distinct=True),
output_count=Count('output', distinct=True),
donor_count=Count('organisation', distinct=True),
total_budget=Coalesce(Sum('budget'), 0),
total_expense=Coalesce(Sum('expense'), 0),
operating_unit_name=F('output__operating_unit__name'),
operating_unit_iso3=F('output__operating_unit__iso3'),
operating_unit_iso2=F('output__operating_unit__iso2'),
operating_unit_unit_type=F('output__operating_unit__unit_type'),
operating_unit_latitude=F('output__operating_unit__latitude'),
operating_unit_longitude=F('output__operating_unit__longitude'),
)
if year and int(year) >= SDG_START_YEAR and sdg or int(year) >= SDG_START_YEAR and sdg_target:
serializer = MapDetailsSdgSerializer(countries, many=True, context={'year': | |
#!/usr/bin/env python
#
# Copyright (C) 2017 ShadowMan
#
"""
A high-level overview of the framing is given in the following figure.
B 0 * * * * * * * 1 * * * * * * * 2 * * * * * * * 3 * * * * * * * -
| | | | |
0 | 1 | 2 | 3 |
i 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 |
+-+-+-+-+-------+-+-------------+-------------------------------+
|F|R|R|R| opcode|M| Payload len | Extended payload length |
|I|S|S|S| (4) |A| (7) | (16/64) |
|N|V|V|V| |S| | (if payload len==126/127) |
| |1|2|3| |K| | |
+-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
| Extended payload length continued, if payload len == 127 |
+ - - - - - - - - - - - - - - - +-------------------------------+
| |Masking-key, if MASK set to 1 |
+-------------------------------+-------------------------------+
| Masking-key (continued) | Payload Data |
+-------------------------------- - - - - - - - - - - - - - - - +
: Payload Data continued ... :
+ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
| Payload Data continued ... |
+---------------------------------------------------------------+
FIN: 1 bit
Indicates that this is the final fragment in a message. The first
fragment MAY also be the final fragment.
RSV1, RSV2, RSV3: 1 bit each
MUST be 0 unless an extension is negotiated that defines meanings
for non-zero values. If a nonzero value is received and none of
the negotiated extensions defines the meaning of such a nonzero
value, the receiving endpoint MUST _Fail the WebSocket Connection_.
Opcode: 4 bits
Defines the interpretation of the "Payload data". If an unknown
opcode is received, the receiving endpoint MUST _Fail the
WebSocket Connection_. The following values are defined.
* %x0 denotes a continuation frame
* %x1 denotes a text frame
* %x2 denotes a binary frame
* %x3-7 are reserved for further non-control frames
* %x8 denotes a connection close
* %x9 denotes a ping
* %xA denotes a pong
* %xB-F are reserved for further control frames
Mask: 1 bit
Defines whether the "Payload data" is masked. If set to 1, a
masking key is present in masking-key, and this is used to unmask
the "Payload data" as per Section 5.3. All frames sent from
client to server have this bit set to 1.
Payload length: 7 bits, 7+16 bits, or 7+64 bits
The length of the "Payload data", in bytes: if 0-125, that is the
payload length. If 126, the following 2 bytes interpreted as a
16-bit unsigned integer are the payload length. If 127, the
following 8 bytes interpreted as a 64-bit unsigned integer (the
most significant bit MUST be 0) are the payload length. Multi-byte
length quantities are expressed in network byte order. Note that
in all cases, the minimal number of bytes MUST be used to encode
the length, for example, the length of a 124-byte-long string
can't be encoded as the sequence 126, 0, 124. The payload length
is the length of the "Extension data" + the length of the
"Application data". The length of the "Extension data" may be
zero, in which case the payload length is the length of the
"Application data".
Masking-key: 0 or 4 bytes
All frames sent from the client to the server are masked by a
32-bit value that is contained within the frame. This field is
present if the mask bit is set to 1 and is absent if the mask bit
is set to 0.
Payload data: (x+y) bytes
The "Payload data" is defined as "Extension data" concatenated
with "Application data".
Extension data: x bytes
The "Extension data" is 0 bytes unless an extension has been
negotiated. Any extension MUST specify the length of the
"Extension data", or how that length may be calculated, and how
the extension use MUST be negotiated during the opening handshake.
If present, the "Extension data" is included in the total payload
length.
Application data: y bytes
Arbitrary "Application data", taking up the remainder of the frame
after any "Extension data". The length of the "Application data"
is equal to the payload length minus the length of the "Extension
data".
"""
import os
import abc
import struct
from websocket.utils import (
generic, ws_utils, exceptions, packet, logger
)
def ws_transform_payload_data(data, mask_key):
    """XOR-(un)mask a WebSocket payload with a 32-bit masking key.

    Octet i of the transformed data is the XOR of octet i of the original
    data with octet ``i % 4`` of the masking key (RFC 6455, Section 5.3).
    The transform is its own inverse, so it both masks and unmasks.

    :param data: payload as ``str`` or ``bytes``
    :param mask_key: 32-bit masking key as an ``int``, or a hex string
    :return: transformed payload as ``bytes``
    :raises KeyError: on invalid argument types (kept as ``KeyError`` for
        backward compatibility, although ``TypeError`` would be conventional)
    """
    if not isinstance(mask_key, int):
        # from string transition to int
        if isinstance(mask_key, str):
            mask_key = int(mask_key, 16)
        else:
            raise KeyError('mask key must be hex int')
    if not isinstance(data, (str, bytes)):
        raise KeyError('data must be str or bytes type')
    # Unpack the key once into its four big-endian octets; indexing a bytes
    # object yields ints. ``& 0xffffffff`` normalizes oversized or negative
    # keys exactly as the original per-octet masking did.
    key_octets = struct.pack('!I', mask_key & 0xffffffff)
    # Single pass instead of the original ``bytes += struct.pack(...)`` loop,
    # which was quadratic in the payload length.
    return bytes(value ^ key_octets[index % 4]
                 for index, value in enumerate(generic.to_bytes(data)))
def parse_frame_length(frame_header):
    """Return the total frame length implied by *frame_header*.

    Decodes the 7-bit payload-length field and its 16/64-bit extended forms,
    then adds the header overhead - including the 4-byte masking key when
    the MASK bit is set (client-to-server frames).

    :param frame_header: the first header bytes (at least 2, up to 10)
    :raises KeyError: if *frame_header* is not ``str``/``bytes``
    :raises RuntimeError: if fewer than 2 bytes were supplied
    :raises exceptions.FrameHeaderParseError: if an extended length field is
        announced but not fully present in *frame_header*
    """
    if not isinstance(frame_header, (str, bytes)):
        raise KeyError('frame_header must be str or bytes type')
    header = packet.ByteArray(frame_header)
    if len(header) < 2:
        logger.warning('receive less than 2-bytes')
        raise RuntimeError('frame header less than 2-bytes')
    # Second byte: first bit is the MASK flag, remaining 7 bits the length.
    payload_length = packet.bits_to_integer(header.get_bits(1)[1:])
    # Fixed: the original compared ``header.get_bits(1)[0] is 1`` - identity
    # against an int literal relies on CPython small-int caching and raises
    # a SyntaxWarning on modern Pythons; equality is the correct test.
    masked = header.get_bits(1)[0] == 1
    # if 0-125, that is the payload length
    if payload_length <= 125:
        # 2 header bytes, plus the 4-byte masking key when MASK is set.
        return payload_length + (6 if masked else 2)
    # If 126, the following 2 bytes interpreted as a
    # 16-bit unsigned integer are the payload length
    elif payload_length == 126:
        # Payload length field is in [2-4)bytes
        if len(header) < 4:
            raise exceptions.FrameHeaderParseError(
                'payload length flag is 126, but header length is {}'.format(
                    len(header)))
        length = packet.bits_to_integer(
            generic.flatten_list(header.get_bits(2, 2)))
        return length + (8 if masked else 4)
    # If 127, the following 8 bytes interpreted as a
    # 64-bit unsigned integer (the most significant bit
    # MUST be 0) are the payload length.
    elif payload_length == 127:
        # Payload length field is in [2-10)bytes
        if len(header) < 10:
            raise exceptions.FrameHeaderParseError(
                'payload length flag is 127, but header length is {}'.format(
                    len(header)))
        # NOTE(review): this reads the same ``get_bits(2, 2)`` span as the
        # 16-bit case even though 8 bytes are announced - presumably it
        # should cover bytes [2, 10); confirm against ByteArray.get_bits.
        length = packet.bits_to_integer(
            generic.flatten_list(header.get_bits(2, 2)))
        return length + (14 if masked else 10)
    raise exceptions.FatalError('internal error')
# using for judge frame type
# Sentinel byte-strings matching entries of FrameBase._global_frame_type;
# callers compare a parsed frame's type label against these to classify it.
Text_Frame = b'Text Frame'
Binary_Frame = b'Binary Frame'
Close_Frame = b'Close Frame'
class FrameBase(object, metaclass=abc.ABCMeta):
    """Abstract base class for parsed WebSocket frames."""

    # RFC 6455 opcode (4-bit value, 0x0-0xF) -> human-readable type label.
    # 0x3-0x7 and 0xB-0xF are reserved ranges, mapped to generic labels.
    _global_frame_type = {
        0x0: b'Continuation Frame',
        0x1: b'Text Frame',
        0x2: b'Binary Frame',
        0x3: b'Non-Control Frame',
        0x4: b'Non-Control Frame',
        0x5: b'Non-Control Frame',
        0x6: b'Non-Control Frame',
        0x7: b'Non-Control Frame',
        0x8: b'Close Frame',
        0x9: b'Ping Frame',
        0xA: b'Pong Frame',
        0xB: b'Control Frame',
        0xC: b'Control Frame',
        0xD: b'Control Frame',
        0xE: b'Control Frame',
        0xF: b'Control Frame',
    }
    def __init__(self, byte_array):
        """Initialize flag defaults and immediately parse *byte_array*.

        :param byte_array: a ``packet.ByteArray`` holding the raw frame
        :raises RuntimeError: if *byte_array* is not a ``packet.ByteArray``
        """
        if not isinstance(byte_array, packet.ByteArray):
            raise RuntimeError('the byte array is invalid')
        # initializing all websocket-frame flags
        # Byte index: 0 - FIN/RSV bits and the 4-bit opcode.
        self._flag_fin = 1
        self._flag_rsv1 = 0
        self._flag_rsv2 = 0
        self._flag_rsv3 = 0
        self._flag_opcode = 1
        # Byte index: 2
        self._flag_mask = 0
        self._flag_payload_length = 0
        self._payload_length = 0
        # Byte index: [3,7)
        # NOTE(review): initialized to False rather than None - presumably
        # "no masking key yet"; confirm downstream checks expect False.
        self._mask_key = False
        # payload data
        self._payload_data = None
        self._byte_array = byte_array
        # parse frame
        # Parsing runs inside the constructor, so a fully-constructed
        # instance always reflects the supplied bytes.
        self.parse_octet()
def parse_octet(self):
# first byte(8-bits)
# +-+-+-+-+-------+
# |F|R|R|R| opcode|
# |I|S|S|S| (4) |
# |N|V|V|V| |
# | |1|2|3| |
# +-+-+-+-+-------+
self._flag_fin = self._byte_array.get_bit(0, 0)
self._flag_rsv1 = self._byte_array.get_bit(0, 1)
self._flag_rsv2 = self._byte_array.get_bit(0, 2)
self._flag_rsv3 = self._byte_array.get_bit(0, 3)
self._flag_opcode = packet.bits_to_integer(
self._byte_array.get_bits(0)[4:])
# second byte(8-bits)
# +-+-------------+
# |M| Payload len |
# |A| (7) |
# |S| |
# |K| |
# +-+-+-+-+-------+
self._flag_mask = self._byte_array.get_bit(1, 0)
| |
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 255, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 255, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 255, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(63, 255, 63))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 255, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 255, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 255, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(127, 255, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(63, 255, 63))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 170, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 255, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 255, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 255, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.main_progressBarUserName.setPalette(palette)
self.main_progressBarUserName.setProperty("value", 100)
self.main_progressBarUserName.setAlignment(QtCore.Qt.AlignCenter)
self.main_progressBarUserName.setTextDirection(QtWidgets.QProgressBar.TopToBottom)
self.main_progressBarUserName.setObjectName("main_progressBarUserName")
self.horizontalLayout_11.addWidget(self.main_progressBarUserName)
self.verticalLayout_9.addLayout(self.horizontalLayout_11)
self.verticalLayout_13.addLayout(self.verticalLayout_9)
self.groupBox_9 = QtWidgets.QGroupBox(self.splitter_8)
self.groupBox_9.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.groupBox_9.setObjectName("groupBox_9")
self.verticalLayout_48 = QtWidgets.QVBoxLayout(self.groupBox_9)
self.verticalLayout_48.setObjectName("verticalLayout_48")
self.verticalLayout_47 = QtWidgets.QVBoxLayout()
self.verticalLayout_47.setObjectName("verticalLayout_47")
self.verticalLayout_23 = QtWidgets.QVBoxLayout()
self.verticalLayout_23.setObjectName("verticalLayout_23")
self.main_editNikWidth = QtWidgets.QLineEdit(self.groupBox_9)
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.main_editNikWidth.setFont(font)
self.main_editNikWidth.setText("")
self.main_editNikWidth.setAlignment(QtCore.Qt.AlignCenter)
self.main_editNikWidth.setClearButtonEnabled(True)
self.main_editNikWidth.setObjectName("main_editNikWidth")
self.verticalLayout_23.addWidget(self.main_editNikWidth)
self.horizontalLayout_16 = QtWidgets.QHBoxLayout()
self.horizontalLayout_16.setObjectName("horizontalLayout_16")
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_16.addItem(spacerItem6)
self.main_pb_ShowNikWidth = QtWidgets.QPushButton(self.groupBox_9)
self.main_pb_ShowNikWidth.setMinimumSize(QtCore.QSize(100, 0))
self.main_pb_ShowNikWidth.setMaximumSize(QtCore.QSize(100, 23))
self.main_pb_ShowNikWidth.setAutoRepeat(False)
self.main_pb_ShowNikWidth.setAutoDefault(False)
self.main_pb_ShowNikWidth.setDefault(False)
self.main_pb_ShowNikWidth.setFlat(False)
self.main_pb_ShowNikWidth.setObjectName("main_pb_ShowNikWidth")
self.horizontalLayout_16.addWidget(self.main_pb_ShowNikWidth)
self.main_pb_clearNikWidth = QtWidgets.QPushButton(self.groupBox_9)
self.main_pb_clearNikWidth.setMinimumSize(QtCore.QSize(23, 23))
self.main_pb_clearNikWidth.setMaximumSize(QtCore.QSize(23, 23))
self.main_pb_clearNikWidth.setText("")
self.main_pb_clearNikWidth.setObjectName("main_pb_clearNikWidth")
self.horizontalLayout_16.addWidget(self.main_pb_clearNikWidth)
spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_16.addItem(spacerItem7)
self.verticalLayout_23.addLayout(self.horizontalLayout_16)
self.verticalLayout_47.addLayout(self.verticalLayout_23)
self.main_tbShowNikWidth = QtWidgets.QTextBrowser(self.groupBox_9)
font = QtGui.QFont()
font.setPointSize(9)
self.main_tbShowNikWidth.setFont(font)
self.main_tbShowNikWidth.setObjectName("main_tbShowNikWidth")
self.verticalLayout_47.addWidget(self.main_tbShowNikWidth)
self.verticalLayout_48.addLayout(self.verticalLayout_47)
self.groupBox_7 = QtWidgets.QGroupBox(self.splitter_9)
self.groupBox_7.setObjectName("groupBox_7")
self.horizontalLayout_37 = QtWidgets.QHBoxLayout(self.groupBox_7)
self.horizontalLayout_37.setObjectName("horizontalLayout_37")
self.splitter_7 = QtWidgets.QSplitter(self.groupBox_7)
self.splitter_7.setLineWidth(2)
self.splitter_7.setOrientation(QtCore.Qt.Horizontal)
self.splitter_7.setObjectName("splitter_7")
self.splitter_6 = QtWidgets.QSplitter(self.splitter_7)
self.splitter_6.setLineWidth(2)
self.splitter_6.setOrientation(QtCore.Qt.Vertical)
self.splitter_6.setObjectName("splitter_6")
self.groupBox_4 = QtWidgets.QGroupBox(self.splitter_6)
self.groupBox_4.setObjectName("groupBox_4")
self.verticalLayout_50 = QtWidgets.QVBoxLayout(self.groupBox_4)
self.verticalLayout_50.setObjectName("verticalLayout_50")
self.verticalLayout_49 = QtWidgets.QVBoxLayout()
self.verticalLayout_49.setObjectName("verticalLayout_49")
self.verticalLayout_17 = QtWidgets.QVBoxLayout()
self.verticalLayout_17.setObjectName("verticalLayout_17")
self.main_editIP = QtWidgets.QLineEdit(self.groupBox_4)
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.main_editIP.setFont(font)
self.main_editIP.setText("")
self.main_editIP.setAlignment(QtCore.Qt.AlignCenter)
self.main_editIP.setClearButtonEnabled(True)
self.main_editIP.setObjectName("main_editIP")
self.verticalLayout_17.addWidget(self.main_editIP)
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_14.addItem(spacerItem8)
self.main_pb_ShowIPInfo = QtWidgets.QPushButton(self.groupBox_4)
self.main_pb_ShowIPInfo.setMinimumSize(QtCore.QSize(100, 0))
self.main_pb_ShowIPInfo.setMaximumSize(QtCore.QSize(100, 23))
self.main_pb_ShowIPInfo.setAutoRepeat(False)
self.main_pb_ShowIPInfo.setAutoDefault(False)
self.main_pb_ShowIPInfo.setDefault(False)
self.main_pb_ShowIPInfo.setFlat(False)
self.main_pb_ShowIPInfo.setObjectName("main_pb_ShowIPInfo")
self.horizontalLayout_14.addWidget(self.main_pb_ShowIPInfo)
self.main_pb_clearSearchIP = QtWidgets.QPushButton(self.groupBox_4)
self.main_pb_clearSearchIP.setMinimumSize(QtCore.QSize(23, 23))
self.main_pb_clearSearchIP.setMaximumSize(QtCore.QSize(23, 23))
self.main_pb_clearSearchIP.setText("")
self.main_pb_clearSearchIP.setObjectName("main_pb_clearSearchIP")
self.horizontalLayout_14.addWidget(self.main_pb_clearSearchIP)
spacerItem9 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_14.addItem(spacerItem9)
self.verticalLayout_17.addLayout(self.horizontalLayout_14)
self.verticalLayout_49.addLayout(self.verticalLayout_17)
self.main_tbShowIPInfo = QtWidgets.QTextBrowser(self.groupBox_4)
font = QtGui.QFont()
font.setPointSize(9)
self.main_tbShowIPInfo.setFont(font)
self.main_tbShowIPInfo.setObjectName("main_tbShowIPInfo")
self.verticalLayout_49.addWidget(self.main_tbShowIPInfo)
self.verticalLayout_50.addLayout(self.verticalLayout_49)
self.groupBox_8 = QtWidgets.QGroupBox(self.splitter_6)
self.groupBox_8.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.groupBox_8.setObjectName("groupBox_8")
self.verticalLayout_52 = QtWidgets.QVBoxLayout(self.groupBox_8)
self.verticalLayout_52.setObjectName("verticalLayout_52")
self.verticalLayout_51 = QtWidgets.QVBoxLayout()
self.verticalLayout_51.setObjectName("verticalLayout_51")
self.verticalLayout_22 = QtWidgets.QVBoxLayout()
self.verticalLayout_22.setObjectName("verticalLayout_22")
self.main_editEMAIL = QtWidgets.QLineEdit(self.groupBox_8)
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.main_editEMAIL.setFont(font)
self.main_editEMAIL.setText("")
self.main_editEMAIL.setAlignment(QtCore.Qt.AlignCenter)
self.main_editEMAIL.setClearButtonEnabled(True)
self.main_editEMAIL.setObjectName("main_editEMAIL")
self.verticalLayout_22.addWidget(self.main_editEMAIL)
self.horizontalLayout_15 = QtWidgets.QHBoxLayout()
self.horizontalLayout_15.setObjectName("horizontalLayout_15")
spacerItem10 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_15.addItem(spacerItem10)
self.main_pb_ShowEMAIL = QtWidgets.QPushButton(self.groupBox_8)
self.main_pb_ShowEMAIL.setMinimumSize(QtCore.QSize(100, 0))
self.main_pb_ShowEMAIL.setMaximumSize(QtCore.QSize(100, 23))
self.main_pb_ShowEMAIL.setAutoRepeat(False)
self.main_pb_ShowEMAIL.setAutoDefault(False)
self.main_pb_ShowEMAIL.setDefault(False)
self.main_pb_ShowEMAIL.setFlat(False)
self.main_pb_ShowEMAIL.setObjectName("main_pb_ShowEMAIL")
self.horizontalLayout_15.addWidget(self.main_pb_ShowEMAIL)
self.main_pb_clearEMAIL = QtWidgets.QPushButton(self.groupBox_8)
self.main_pb_clearEMAIL.setMinimumSize(QtCore.QSize(23, 23))
self.main_pb_clearEMAIL.setMaximumSize(QtCore.QSize(23, 23))
self.main_pb_clearEMAIL.setText("")
self.main_pb_clearEMAIL.setObjectName("main_pb_clearEMAIL")
self.horizontalLayout_15.addWidget(self.main_pb_clearEMAIL)
spacerItem11 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_15.addItem(spacerItem11)
self.verticalLayout_22.addLayout(self.horizontalLayout_15)
self.verticalLayout_51.addLayout(self.verticalLayout_22)
self.main_tbShowEMAIL = QtWidgets.QTextBrowser(self.groupBox_8)
font = QtGui.QFont()
font.setPointSize(9)
self.main_tbShowEMAIL.setFont(font)
self.main_tbShowEMAIL.setObjectName("main_tbShowEMAIL")
self.verticalLayout_51.addWidget(self.main_tbShowEMAIL)
self.verticalLayout_52.addLayout(self.verticalLayout_51)
self.groupBox_11 = QtWidgets.QGroupBox(self.splitter_7)
self.groupBox_11.setMinimumSize(QtCore.QSize(250, 0))
self.groupBox_11.setMaximumSize(QtCore.QSize(340, 16777215))
self.groupBox_11.setObjectName("groupBox_11")
self.verticalLayout_54 = QtWidgets.QVBoxLayout(self.groupBox_11)
self.verticalLayout_54.setObjectName("verticalLayout_54")
self.verticalLayout_53 = QtWidgets.QVBoxLayout()
self.verticalLayout_53.setObjectName("verticalLayout_53")
self.verticalLayout_28 = QtWidgets.QVBoxLayout()
self.verticalLayout_28.setObjectName("verticalLayout_28")
self.label = QtWidgets.QLabel(self.groupBox_11)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.verticalLayout_28.addWidget(self.label)
self.horizontalLayout_36 = QtWidgets.QHBoxLayout()
self.horizontalLayout_36.setObjectName("horizontalLayout_36")
spacerItem12 = QtWidgets.QSpacerItem(94, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_36.addItem(spacerItem12)
self.main_editGeoObject = QtWidgets.QLineEdit(self.groupBox_11)
self.main_editGeoObject.setMinimumSize(QtCore.QSize(90, 0))
self.main_editGeoObject.setMaximumSize(QtCore.QSize(180, 16777215))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.main_editGeoObject.setFont(font)
self.main_editGeoObject.setText("")
self.main_editGeoObject.setAlignment(QtCore.Qt.AlignCenter)
self.main_editGeoObject.setClearButtonEnabled(True)
self.main_editGeoObject.setObjectName("main_editGeoObject")
self.horizontalLayout_36.addWidget(self.main_editGeoObject)
spacerItem13 = QtWidgets.QSpacerItem(94, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_36.addItem(spacerItem13)
self.verticalLayout_28.addLayout(self.horizontalLayout_36)
self.horizontalLayout_20 = QtWidgets.QHBoxLayout()
self.horizontalLayout_20.setObjectName("horizontalLayout_20")
spacerItem14 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_20.addItem(spacerItem14)
self.main_pb_ShowGeoLatLon = QtWidgets.QPushButton(self.groupBox_11)
self.main_pb_ShowGeoLatLon.setMinimumSize(QtCore.QSize(100, 23))
self.main_pb_ShowGeoLatLon.setMaximumSize(QtCore.QSize(100, 23))
self.main_pb_ShowGeoLatLon.setAutoRepeat(False)
self.main_pb_ShowGeoLatLon.setAutoDefault(False)
self.main_pb_ShowGeoLatLon.setDefault(False)
self.main_pb_ShowGeoLatLon.setFlat(False)
self.main_pb_ShowGeoLatLon.setObjectName("main_pb_ShowGeoLatLon")
self.horizontalLayout_20.addWidget(self.main_pb_ShowGeoLatLon)
self.main_pb_clearGeoObject = QtWidgets.QPushButton(self.groupBox_11)
self.main_pb_clearGeoObject.setMinimumSize(QtCore.QSize(23, 23))
self.main_pb_clearGeoObject.setMaximumSize(QtCore.QSize(23, 23))
self.main_pb_clearGeoObject.setText("")
self.main_pb_clearGeoObject.setObjectName("main_pb_clearGeoObject")
self.horizontalLayout_20.addWidget(self.main_pb_clearGeoObject)
spacerItem15 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_20.addItem(spacerItem15)
self.verticalLayout_28.addLayout(self.horizontalLayout_20)
self.verticalLayout_53.addLayout(self.verticalLayout_28)
self.main_tbShowGeoObjectLatLon = QtWidgets.QTextBrowser(self.groupBox_11)
font = QtGui.QFont()
font.setPointSize(8)
self.main_tbShowGeoObjectLatLon.setFont(font)
self.main_tbShowGeoObjectLatLon.setObjectName("main_tbShowGeoObjectLatLon")
self.verticalLayout_53.addWidget(self.main_tbShowGeoObjectLatLon)
self.line = QtWidgets.QFrame(self.groupBox_11)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout_53.addWidget(self.line)
self.verticalLayout_29 = QtWidgets.QVBoxLayout()
self.verticalLayout_29.setContentsMargins(-1, -1, -1, 6)
self.verticalLayout_29.setObjectName("verticalLayout_29")
self.horizontalLayout_27 = QtWidgets.QHBoxLayout()
self.horizontalLayout_27.setObjectName("horizontalLayout_27")
spacerItem16 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_27.addItem(spacerItem16)
self.label_2 = QtWidgets.QLabel(self.groupBox_11)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.horizontalLayout_27.addWidget(self.label_2)
spacerItem17 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_27.addItem(spacerItem17)
self.verticalLayout_29.addLayout(self.horizontalLayout_27)
self.horizontalLayout_22 = QtWidgets.QHBoxLayout()
self.horizontalLayout_22.setObjectName("horizontalLayout_22")
spacerItem18 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_22.addItem(spacerItem18)
self.main_editGeo_Lat = QtWidgets.QLineEdit(self.groupBox_11)
self.main_editGeo_Lat.setMaximumSize(QtCore.QSize(110, 16777215))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(9)
font.setBold(False)
font.setWeight(50)
self.main_editGeo_Lat.setFont(font)
self.main_editGeo_Lat.setText("")
self.main_editGeo_Lat.setAlignment(QtCore.Qt.AlignCenter)
self.main_editGeo_Lat.setClearButtonEnabled(False)
self.main_editGeo_Lat.setObjectName("main_editGeo_Lat")
self.horizontalLayout_22.addWidget(self.main_editGeo_Lat)
spacerItem19 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_22.addItem(spacerItem19)
self.main_editGeo_Lon = QtWidgets.QLineEdit(self.groupBox_11)
self.main_editGeo_Lon.setMaximumSize(QtCore.QSize(110, 16777215))
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(9)
font.setBold(False)
font.setWeight(50)
self.main_editGeo_Lon.setFont(font)
self.main_editGeo_Lon.setText("")
self.main_editGeo_Lon.setAlignment(QtCore.Qt.AlignCenter)
self.main_editGeo_Lon.setClearButtonEnabled(False)
self.main_editGeo_Lon.setObjectName("main_editGeo_Lon")
self.horizontalLayout_22.addWidget(self.main_editGeo_Lon)
spacerItem20 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_22.addItem(spacerItem20)
self.verticalLayout_29.addLayout(self.horizontalLayout_22)
self.horizontalLayout_21 = QtWidgets.QHBoxLayout()
self.horizontalLayout_21.setObjectName("horizontalLayout_21")
spacerItem21 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_21.addItem(spacerItem21)
self.main_pb_ShowGeoInfo = QtWidgets.QPushButton(self.groupBox_11)
self.main_pb_ShowGeoInfo.setMinimumSize(QtCore.QSize(100, 23))
self.main_pb_ShowGeoInfo.setMaximumSize(QtCore.QSize(100, 23))
self.main_pb_ShowGeoInfo.setAutoRepeat(False)
self.main_pb_ShowGeoInfo.setAutoDefault(False)
self.main_pb_ShowGeoInfo.setDefault(False)
self.main_pb_ShowGeoInfo.setFlat(False)
self.main_pb_ShowGeoInfo.setObjectName("main_pb_ShowGeoInfo")
self.horizontalLayout_21.addWidget(self.main_pb_ShowGeoInfo)
self.main_pb_clearGeoInfo = QtWidgets.QPushButton(self.groupBox_11)
self.main_pb_clearGeoInfo.setMinimumSize(QtCore.QSize(23, 23))
self.main_pb_clearGeoInfo.setMaximumSize(QtCore.QSize(23, 23))
self.main_pb_clearGeoInfo.setText("")
self.main_pb_clearGeoInfo.setObjectName("main_pb_clearGeoInfo")
self.horizontalLayout_21.addWidget(self.main_pb_clearGeoInfo)
spacerItem22 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_21.addItem(spacerItem22)
self.verticalLayout_29.addLayout(self.horizontalLayout_21)
self.verticalLayout_53.addLayout(self.verticalLayout_29)
self.main_tbShowGeoObjectInfo = QtWidgets.QTextBrowser(self.groupBox_11)
font = QtGui.QFont()
font.setPointSize(9)
self.main_tbShowGeoObjectInfo.setFont(font)
self.main_tbShowGeoObjectInfo.setObjectName("main_tbShowGeoObjectInfo")
self.verticalLayout_53.addWidget(self.main_tbShowGeoObjectInfo)
self.verticalLayout_54.addLayout(self.verticalLayout_53)
self.horizontalLayout_37.addWidget(self.splitter_7)
self.horizontalLayout_38.addWidget(self.splitter_9)
self.tabMain.addTab(self.tabCommon, "")
self.tabGSM = QtWidgets.QWidget()
self.tabGSM.setObjectName("tabGSM")
self.horizontalLayout_18 = QtWidgets.QHBoxLayout(self.tabGSM)
self.horizontalLayout_18.setObjectName("horizontalLayout_18")
self.splitter_13 = QtWidgets.QSplitter(self.tabGSM)
self.splitter_13.setOrientation(QtCore.Qt.Horizontal)
self.splitter_13.setObjectName("splitter_13")
self.splitter_12 = QtWidgets.QSplitter(self.splitter_13)
self.splitter_12.setOrientation(QtCore.Qt.Horizontal)
self.splitter_12.setObjectName("splitter_12")
self.splitter_10 = QtWidgets.QSplitter(self.splitter_12)
self.splitter_10.setOrientation(QtCore.Qt.Vertical)
self.splitter_10.setObjectName("splitter_10")
self.groupBox_10 = QtWidgets.QGroupBox(self.splitter_10)
self.groupBox_10.setObjectName("groupBox_10")
self.horizontalLayout_32 = QtWidgets.QHBoxLayout(self.groupBox_10)
self.horizontalLayout_32.setObjectName("horizontalLayout_32")
self.verticalLayout_55 = QtWidgets.QVBoxLayout()
self.verticalLayout_55.setObjectName("verticalLayout_55")
self.verticalLayout_24 = QtWidgets.QVBoxLayout()
self.verticalLayout_24.setObjectName("verticalLayout_24")
self.main_editIMSI = QtWidgets.QLineEdit(self.groupBox_10)
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.main_editIMSI.setFont(font)
self.main_editIMSI.setText("")
self.main_editIMSI.setAlignment(QtCore.Qt.AlignCenter)
self.main_editIMSI.setClearButtonEnabled(True)
self.main_editIMSI.setObjectName("main_editIMSI")
self.verticalLayout_24.addWidget(self.main_editIMSI)
self.horizontalLayout_31 = QtWidgets.QHBoxLayout()
self.horizontalLayout_31.setObjectName("horizontalLayout_31")
spacerItem23 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_31.addItem(spacerItem23)
self.main_pb_ShowIMSI = QtWidgets.QPushButton(self.groupBox_10)
self.main_pb_ShowIMSI.setMinimumSize(QtCore.QSize(100, 0))
self.main_pb_ShowIMSI.setMaximumSize(QtCore.QSize(100, 23))
self.main_pb_ShowIMSI.setAutoRepeat(False)
self.main_pb_ShowIMSI.setAutoDefault(False)
self.main_pb_ShowIMSI.setDefault(False)
self.main_pb_ShowIMSI.setFlat(False)
self.main_pb_ShowIMSI.setObjectName("main_pb_ShowIMSI")
self.horizontalLayout_31.addWidget(self.main_pb_ShowIMSI)
self.main_pb_clearIMSI = QtWidgets.QPushButton(self.groupBox_10)
self.main_pb_clearIMSI.setMinimumSize(QtCore.QSize(23, 23))
self.main_pb_clearIMSI.setMaximumSize(QtCore.QSize(23, 23))
self.main_pb_clearIMSI.setText("")
self.main_pb_clearIMSI.setObjectName("main_pb_clearIMSI")
self.horizontalLayout_31.addWidget(self.main_pb_clearIMSI)
spacerItem24 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_31.addItem(spacerItem24)
self.verticalLayout_24.addLayout(self.horizontalLayout_31)
self.verticalLayout_55.addLayout(self.verticalLayout_24)
self.main_tbShowIMSI = QtWidgets.QTextBrowser(self.groupBox_10)
font = QtGui.QFont()
font.setPointSize(9)
self.main_tbShowIMSI.setFont(font)
self.main_tbShowIMSI.setObjectName("main_tbShowIMSI")
self.verticalLayout_55.addWidget(self.main_tbShowIMSI)
self.horizontalLayout_32.addLayout(self.verticalLayout_55)
self.groupBox_13 = QtWidgets.QGroupBox(self.splitter_10)
self.groupBox_13.setObjectName("groupBox_13")
self.horizontalLayout_39 = QtWidgets.QHBoxLayout(self.groupBox_13)
self.horizontalLayout_39.setObjectName("horizontalLayout_39")
self.verticalLayout_56 = QtWidgets.QVBoxLayout()
self.verticalLayout_56.setObjectName("verticalLayout_56")
self.verticalLayout_25 = QtWidgets.QVBoxLayout()
self.verticalLayout_25.setObjectName("verticalLayout_25")
self.main_editIMEI = QtWidgets.QLineEdit(self.groupBox_13)
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.main_editIMEI.setFont(font)
self.main_editIMEI.setText("")
self.main_editIMEI.setAlignment(QtCore.Qt.AlignCenter)
self.main_editIMEI.setClearButtonEnabled(True)
self.main_editIMEI.setObjectName("main_editIMEI")
self.verticalLayout_25.addWidget(self.main_editIMEI)
self.horizontalLayout_40 = QtWidgets.QHBoxLayout()
self.horizontalLayout_40.setObjectName("horizontalLayout_40")
spacerItem25 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_40.addItem(spacerItem25)
self.main_pb_ShowIMEI = QtWidgets.QPushButton(self.groupBox_13)
self.main_pb_ShowIMEI.setMinimumSize(QtCore.QSize(100, 0))
self.main_pb_ShowIMEI.setMaximumSize(QtCore.QSize(100, 23))
self.main_pb_ShowIMEI.setAutoRepeat(False)
self.main_pb_ShowIMEI.setAutoDefault(False)
self.main_pb_ShowIMEI.setDefault(False)
self.main_pb_ShowIMEI.setFlat(False)
self.main_pb_ShowIMEI.setObjectName("main_pb_ShowIMEI")
self.horizontalLayout_40.addWidget(self.main_pb_ShowIMEI)
self.main_pb_clearIMEI = QtWidgets.QPushButton(self.groupBox_13)
self.main_pb_clearIMEI.setMinimumSize(QtCore.QSize(23, 23))
self.main_pb_clearIMEI.setMaximumSize(QtCore.QSize(23, 23))
self.main_pb_clearIMEI.setText("")
self.main_pb_clearIMEI.setObjectName("main_pb_clearIMEI")
self.horizontalLayout_40.addWidget(self.main_pb_clearIMEI)
spacerItem26 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_40.addItem(spacerItem26)
self.verticalLayout_25.addLayout(self.horizontalLayout_40)
self.verticalLayout_56.addLayout(self.verticalLayout_25)
self.main_tbShowIMEI = QtWidgets.QTextBrowser(self.groupBox_13)
font = QtGui.QFont()
font.setPointSize(9)
self.main_tbShowIMEI.setFont(font)
self.main_tbShowIMEI.setObjectName("main_tbShowIMEI")
self.verticalLayout_56.addWidget(self.main_tbShowIMEI)
self.horizontalLayout_39.addLayout(self.verticalLayout_56)
self.splitter_11 = QtWidgets.QSplitter(self.splitter_12)
self.splitter_11.setOrientation(QtCore.Qt.Vertical)
self.splitter_11.setObjectName("splitter_11")
self.groupBox_15 = QtWidgets.QGroupBox(self.splitter_11)
self.groupBox_15.setObjectName("groupBox_15")
self.horizontalLayout_44 = QtWidgets.QHBoxLayout(self.groupBox_15)
self.horizontalLayout_44.setObjectName("horizontalLayout_44")
self.verticalLayout_57 = QtWidgets.QVBoxLayout()
self.verticalLayout_57.setObjectName("verticalLayout_57")
self.verticalLayout_26 = QtWidgets.QVBoxLayout()
self.verticalLayout_26.setObjectName("verticalLayout_26")
self.main_editISDN = QtWidgets.QLineEdit(self.groupBox_15)
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.main_editISDN.setFont(font)
self.main_editISDN.setText("")
self.main_editISDN.setAlignment(QtCore.Qt.AlignCenter)
self.main_editISDN.setClearButtonEnabled(True)
self.main_editISDN.setObjectName("main_editISDN")
self.verticalLayout_26.addWidget(self.main_editISDN)
self.horizontalLayout_43 = QtWidgets.QHBoxLayout()
self.horizontalLayout_43.setObjectName("horizontalLayout_43")
spacerItem27 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_43.addItem(spacerItem27)
self.main_pb_ShowISDN = QtWidgets.QPushButton(self.groupBox_15)
self.main_pb_ShowISDN.setMinimumSize(QtCore.QSize(100, 0))
self.main_pb_ShowISDN.setMaximumSize(QtCore.QSize(100, 23))
self.main_pb_ShowISDN.setAutoRepeat(False)
self.main_pb_ShowISDN.setAutoDefault(False)
self.main_pb_ShowISDN.setDefault(False)
self.main_pb_ShowISDN.setFlat(False)
self.main_pb_ShowISDN.setObjectName("main_pb_ShowISDN")
self.horizontalLayout_43.addWidget(self.main_pb_ShowISDN)
self.main_pb_clearISDN = QtWidgets.QPushButton(self.groupBox_15)
self.main_pb_clearISDN.setMinimumSize(QtCore.QSize(23, 23))
self.main_pb_clearISDN.setMaximumSize(QtCore.QSize(23, 23))
self.main_pb_clearISDN.setText("")
self.main_pb_clearISDN.setObjectName("main_pb_clearISDN")
self.horizontalLayout_43.addWidget(self.main_pb_clearISDN)
spacerItem28 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_43.addItem(spacerItem28)
self.verticalLayout_26.addLayout(self.horizontalLayout_43)
self.verticalLayout_57.addLayout(self.verticalLayout_26)
self.main_tbShowMSISDN = QtWidgets.QTextBrowser(self.groupBox_15)
font = QtGui.QFont()
font.setPointSize(9)
self.main_tbShowMSISDN.setFont(font)
self.main_tbShowMSISDN.setObjectName("main_tbShowMSISDN")
self.verticalLayout_57.addWidget(self.main_tbShowMSISDN)
self.horizontalLayout_44.addLayout(self.verticalLayout_57)
self.groupBox_16 = QtWidgets.QGroupBox(self.splitter_11)
self.groupBox_16.setObjectName("groupBox_16")
self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.groupBox_16)
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.verticalLayout_58 = QtWidgets.QVBoxLayout()
self.verticalLayout_58.setObjectName("verticalLayout_58")
self.verticalLayout_27 = QtWidgets.QVBoxLayout()
self.verticalLayout_27.setObjectName("verticalLayout_27")
self.main_edit_Wifi = QtWidgets.QLineEdit(self.groupBox_16)
font = QtGui.QFont()
font.setFamily("Courier New")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.main_edit_Wifi.setFont(font)
self.main_edit_Wifi.setText("")
self.main_edit_Wifi.setAlignment(QtCore.Qt.AlignCenter)
self.main_edit_Wifi.setClearButtonEnabled(True)
self.main_edit_Wifi.setObjectName("main_edit_Wifi")
self.verticalLayout_27.addWidget(self.main_edit_Wifi)
self.horizontalLayout_45 = QtWidgets.QHBoxLayout()
self.horizontalLayout_45.setObjectName("horizontalLayout_45")
spacerItem29 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_45.addItem(spacerItem29)
self.main_pb_ShowWifi = QtWidgets.QPushButton(self.groupBox_16)
self.main_pb_ShowWifi.setMinimumSize(QtCore.QSize(100, 0))
self.main_pb_ShowWifi.setMaximumSize(QtCore.QSize(100, 23))
self.main_pb_ShowWifi.setAutoRepeat(False)
self.main_pb_ShowWifi.setAutoDefault(False)
self.main_pb_ShowWifi.setDefault(False)
self.main_pb_ShowWifi.setFlat(False)
self.main_pb_ShowWifi.setObjectName("main_pb_ShowWifi")
self.horizontalLayout_45.addWidget(self.main_pb_ShowWifi)
self.main_pb_clearWifi = QtWidgets.QPushButton(self.groupBox_16)
self.main_pb_clearWifi.setMinimumSize(QtCore.QSize(23, 23))
self.main_pb_clearWifi.setMaximumSize(QtCore.QSize(23, 23))
self.main_pb_clearWifi.setText("")
self.main_pb_clearWifi.setObjectName("main_pb_clearWifi")
self.horizontalLayout_45.addWidget(self.main_pb_clearWifi)
spacerItem30 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_45.addItem(spacerItem30)
self.verticalLayout_27.addLayout(self.horizontalLayout_45)
self.verticalLayout_58.addLayout(self.verticalLayout_27)
| |
that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two StructureGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the StructureGraph this method is called
from, not the 'other' StructureGraph: there
is no guarantee the node indices will be the
same if the underlying Structures are ordered
differently.
:param other: StructureGraph
:param strict: if False, will compare bonds
from different Structures, with node indices
replaced by Species strings, will not count
number of occurrences of bonds
:return:
"""
if self.structure != other.structure and strict:
return ValueError("Meaningless to compare StructureGraphs if " "corresponding Structures are different.")
if strict:
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
else:
edges = {
(str(self.structure[u].specie), str(self.structure[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)
}
edges_other = {
(str(other.structure[u].specie), str(other.structure[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)
}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
"self": edges - edges_other,
"other": edges_other - edges,
"both": edges.intersection(edges_other),
"dist": jaccard_dist,
}
def get_subgraphs_as_molecules(self, use_weights=False):
    """
    Retrieve subgraphs as molecules, useful for extracting
    molecules from periodic crystals.

    Will only return unique molecules, not any duplicates
    present in the crystal (a duplicate defined as an
    isomorphic subgraph).

    :param use_weights (bool): If True, only treat subgraphs
        as isomorphic if edges have the same weights. Typically,
        this means molecules will need to have the same bond
        lengths to be defined as duplicates, otherwise bond
        lengths can differ. This is a fairly robust approach,
        but will treat e.g. enantiomers as being duplicates.
    :return: list of unique Molecules in Structure
    """
    # creating a supercell is an easy way to extract
    # molecules (and not, e.g., layers of a 2D crystal)
    # without adding extra logic
    if getattr(self, "_supercell_sg", None) is None:
        self._supercell_sg = self * (3, 3, 3)
    # BUG FIX: supercell_sg was previously only bound inside the branch
    # above, so any call after the first (cache hit) raised NameError.
    supercell_sg = self._supercell_sg
    # make undirected to find connected subgraphs
    supercell_sg.graph = nx.Graph(supercell_sg.graph)
    # find subgraphs
    all_subgraphs = [supercell_sg.graph.subgraph(c) for c in nx.connected_components(supercell_sg.graph)]
    # discount subgraphs that lie across *supercell* boundaries
    # these will be subgraphs representing crystals
    molecule_subgraphs = []
    for subgraph in all_subgraphs:
        intersects_boundary = any(d["to_jimage"] != (0, 0, 0) for u, v, d in subgraph.edges(data=True))
        if not intersects_boundary:
            molecule_subgraphs.append(nx.MultiDiGraph(subgraph))
    # add specie names to graph to be able to test for isomorphism
    for subgraph in molecule_subgraphs:
        for n in subgraph:
            subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
    # now define how we test for isomorphism
    def node_match(n1, n2):
        # nodes match when they carry the same chemical species
        return n1["specie"] == n2["specie"]
    def edge_match(e1, e2):
        # optionally require identical edge weights (e.g. bond lengths)
        if use_weights:
            return e1["weight"] == e2["weight"]
        return True
    # prune duplicate subgraphs
    unique_subgraphs = []
    for subgraph in molecule_subgraphs:
        already_present = [
            nx.is_isomorphic(subgraph, g, node_match=node_match, edge_match=edge_match) for g in unique_subgraphs
        ]
        if not any(already_present):
            unique_subgraphs.append(subgraph)
    # get Molecule objects for each subgraph
    molecules = []
    for subgraph in unique_subgraphs:
        coords = [supercell_sg.structure[n].coords for n in subgraph.nodes()]
        species = [supercell_sg.structure[n].specie for n in subgraph.nodes()]
        molecule = Molecule(species, coords)
        # shift so origin is at center of mass
        molecule = molecule.get_centered_molecule()
        molecules.append(molecule)
    return molecules
class MolGraphSplitError(Exception):
    """Signals that a molecule graph could not be split into two
    disconnected subgraphs."""
class MoleculeGraph(MSONable):
"""
This is a class for annotating a Molecule with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, molecule, graph_data=None):
    """
    If constructing this class manually, use the `with_empty_graph`
    method or `with_local_env_strategy` method (using an algorithm
    provided by the `local_env` module, such as O'Keeffe).

    This class holds connection information (relationships between
    sites represented by a graph) alongside the molecule itself, and
    uses NetworkX to store and operate on the graph. Use cases include
    storing bonding information, NMR J-couplings, Heisenberg exchange
    parameters, etc.

    :param molecule: Molecule object
    :param graph_data: dict containing graph information in
        dict format (not intended to be constructed manually,
        see as_dict method for format)
    """
    if isinstance(molecule, MoleculeGraph):
        # copy-construction: reuse the serialized graph of the input.
        # NOTE(review): self.molecule then holds the MoleculeGraph
        # itself rather than its underlying Molecule — confirm intended.
        graph_data = molecule.as_dict()["graphs"]
    self.molecule = molecule
    self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
    # Round-tripping through JSON duplicates information into the edge
    # attribute dicts and turns image tuples into lists; tidy that up.
    for src, dst, key, attrs in self.graph.edges(keys=True, data=True):
        attrs.pop("id", None)
        attrs.pop("key", None)
        # periodic images must be hashable/immutable, so back to tuples
        for img in ("to_jimage", "from_jimage"):
            if img in attrs:
                attrs[img] = tuple(attrs[img])
    self.set_node_attributes()
@classmethod
def with_empty_graph(cls, molecule, name="bonds", edge_weight_name=None, edge_weight_units=None):
    """
    Build a MoleculeGraph with an empty graph: one graph node per Site
    in *molecule* and no edges.

    :param molecule (Molecule):
    :param name (str): name of graph, e.g. "bonds"
    :param edge_weight_name (str): name of edge weights,
        e.g. "bond_length" or "exchange_constant"
    :param edge_weight_units (str): name of edge weight units
        e.g. "Å" or "eV"
    :return (MoleculeGraph):
    """
    # Named weights must come with units; an empty string is accepted
    # for arbitrary/dimensionless weights, but it must be explicit.
    if edge_weight_name and (edge_weight_units is None):
        raise ValueError(
            "Please specify units associated with your edge weights. "
            "Can be empty string if arbitrary or dimensionless."
        )
    # Graph-level attributes are pure book-keeping; they don't change
    # NetworkX behavior.
    empty = nx.MultiDiGraph(
        edge_weight_name=edge_weight_name,
        edge_weight_units=edge_weight_units,
        name=name,
    )
    empty.add_nodes_from(range(len(molecule)))
    return cls(molecule, graph_data=json_graph.adjacency_data(empty))
@staticmethod
def with_edges(molecule, edges):
    """
    Constructor for MoleculeGraph, using pre-existing or pre-defined edges
    with optional edge parameters.

    :param molecule: Molecule object
    :param edges: dict representing the bonds of the functional
        group (format: {(u, v): props}, where props is a dictionary of
        properties, including weight. Props should be None if no
        additional properties are to be specified.
    :return: mg, a MoleculeGraph
    :raises ValueError: if an edge key is not an index pair, or if an
        index does not correspond to a node in the graph.
    """
    mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="")
    for edge, props in edges.items():
        try:
            from_index = edge[0]
            to_index = edge[1]
        except TypeError:
            # BUG FIX: message previously rendered as "...to_index)tuples"
            # (missing space between the concatenated literals)
            raise ValueError("Edges must be given as (from_index, to_index) tuples")
        weight = None
        if props is not None:
            # pop() fetches and removes the weight in one step
            # (original behavior: weight is stripped from props)
            weight = props.pop("weight", None)
            if len(props) == 0:
                props = None
        nodes = mg.graph.nodes
        if not (from_index in nodes and to_index in nodes):
            raise ValueError(
                "Edges cannot be added if nodes are not" " present in the graph. Please check your" " indices."
            )
        mg.add_edge(from_index, to_index, weight=weight, edge_properties=props)
    mg.set_node_attributes()
    return mg
@staticmethod
def with_local_env_strategy(molecule, strategy):
"""
Constructor for MoleculeGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param molecule: Molecule object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors` object
:return: mg, a MoleculeGraph
"""
if not strategy.molecules_allowed:
raise ValueError(
"Chosen strategy is not designed for use with molecules! " "Please choose another strategy."
)
extend_structure = strategy.extend_structure_molecules
mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="")
# NearNeighbor classes only (generally) work with structures
# molecules have to be boxed first
coords = molecule.cart_coords
if extend_structure:
a = max(coords[:, 0]) - min(coords[:, 0]) + 100
b = max(coords[:, 1]) - min(coords[:, 1]) + 100
c = max(coords[:, 2]) - min(coords[:, 2]) + 100
structure = molecule.get_boxed_structure(a, b, c, no_cross=True, reorder=False)
else:
structure = None
for n in range(len(molecule)):
if structure is None:
neighbors = strategy.get_nn_info(molecule, n)
else:
neighbors = strategy.get_nn_info(structure, n)
for neighbor in neighbors:
# all bonds in molecules should not cross
# (artificial) periodic boundaries
if not | |
hight, n_vertical, n_horizental)
# details = [0,0,0]
# details[0] = node_index
# details[1] = parent_index
# details[2] = hight
# details[3] = n_vertical
# details[4] = n_horizental
# p_sym = 1
# return p_sym, details
# def do_vertical_stretching(self, neuron):
# """
# In one of the segments that coming out from a branching points will be stretched.
# """
# (branch_index,) = np.where(neuron.branch_order==2)
# (end_nodes,) = np.where(neuron.branch_order==0)
# nodes = np.append(branch_index,end_nodes)
# parents = neuron.parent_index_for_node_subset(nodes)
# n = np.floor(nodes.shape[0]*np.random.rand()).astype(int)
# p = np.exp(np.random.normal() * self.horizental_stretch)
# node_index = nodes[n]
# parent_index = parents[n]
# neuron.vertical_stretch(node_index, parent_index, p)
# details = [0,0,0]
# details[0] = node_index
# details[1] = parent_index
# details[2] = p
# p_sym = 1
# return p_sym, details
# def do_horizental_stretching(self, neuron):
# (branch_index,) = np.where(neuron.branch_order==2)
# (end_nodes,) = np.where(neuron.branch_order==0)
# nodes = np.append(branch_index,end_nodes)
# parents = neuron.parent_index_for_node_subset(nodes)
# n = np.floor(nodes.shape[0]*np.random.rand()).astype(int)
# p = np.exp(np.random.normal() * self.horizental_stretch)
# node_index = nodes[n]
# parent_index = parents[n]
# neuron.horizental_stretch(node_index, parent_index, p)
# details = [0,0,0]
# details[0] = node_index
# details[1] = parent_index
# details[2] = p
# p_sym = 1
# return p_sym, details
# def undo_MCMC(self, per, details):
# """
# when per == 0, details[0] is 'ext' of 'remove'. If it is 'ext', then details[1] is node_index.
# if it is 'remove', details[1] = parent, details[2] = location, details[3] = ratio
# """
# if per == 'extension/reduction': # undo extension/reduction
# if(len(details) !=0):
# if(details[0] == 'ext'):
# self.undo_ext(self.neuron, details[1])
# if(details[0] == 'remove'):
# self.undo_red(self.neuron, details[1], details[2], details[3])
# if per == 'extension/reduction end points':
# if(len(details) !=0):
# if(details[0] == 'ext'):
# self.undo_ext(self.neuron, details[1])
# if(details[0] == 'remove'):
# self.undo_red(self.neuron, details[1], details[2], details[3])
# if per == 'location': # undo location
# if( ~ self.neuron.is_soma()):
# self.undo_location(self.neuron, details[0], details[1], details[2], details[3]) # this function makes a location perturbation on the neuron
# if per == 'location for important point': # undo location
# if( ~ self.neuron.is_soma()):
# self.undo_location_important(self.neuron, details[0], details[1], details[2], details[3]) # this function makes a location perturbation on the neuron
# if per == 'location toward end':
# if( ~ self.neuron.is_soma()):
# self.undo_location_toward_end_nodes(self.neuron, details[0], details[1], details[2], details[3])
# if per == 'diameter': # undo diameter
# if( ~ self.neuron.is_soma()): # To make sure that there is at least one node in the no_soma list
# self.undo_diameter(self.neuron, details[0], details[1])
# if per == 'rotation for any node':
# self.undo_rotation(self.neuron, details[0], details[1] )
# if per == 'rotation for branching':
# self.undo_rotation_from_branch(self.neuron, details[0], details[1] )
# if per == 'sliding certain in distance': # undo sliding in certain distance
# if(details[0] != 0):
# self.undo_sliding(self.neuron, details[0], details[1])
# if per == 'sliding for branching node': # undo sliding for branch
# if(details[0] != 0):
# self.undo_sliding(self.neuron, details[0], details[1])
# if per == 'sliding general': # undo sliding general
# if(details[0] != 0):
# self.undo_sliding_general(self.neuron, details[0], details[1])
# if per == 'sliding for branching node certain distance': # do sliding only for branch
# if(details[0] != 0):
# self.undo_sliding(self.neuron, details[0], details[1])
# if per == 'rescale toward end':
# self.undo_rescale_toward_end(self.neuron, details[0],details[1])
# if per == 'stretching vertical':
# self.undo_vertical_stretching(self.neuron, details[0], details[1], details[2])
# if per == 'stretching horizental':
# self.undo_horizental_stretching(self.neuron, details[0], details[1], details[2])
# if per == 'sinusidal':
# self.undo_sinusidal_wave(self.neuron, details[0], details[1], details[2], details[3], details[4])
# def undo_sinusidal_wave(self, neuron, node, parent, hight, n_vertical, n_horizental):
# neuron.sinudal(node_index, parent_index, -hight, n_vertical, n_horizental)
# def undo_location(self, neuron, index, x, y, z):
# neuron.change_location(index, - np.array([x,y,z]))
# def undo_location_toward_end_nodes(self, neuron, index, x, y, z):
# neuron.change_location_toward_end_nodes(index, - np.array([x,y,z]))
# def undo_location_important(self, neuron, index, x, y, z):
# neuron.change_location_important(index, - np.array([x,y,z]))
# def undo_diameter(self, neuron, index, ratio):
# neuron.change_diameter(index, 1.0 / ratio)
# def undo_ext(self, neuron, index_node):
# neuron.remove_node(index_node)
# def undo_red(self, neuron, parent, location, ratio):
# neuron.extend_node(parent, location, ratio)
# def undo_rotation(self, neuron, node, matrix):
# neuron.rotate(node, inv(matrix))
# def undo_rotation_from_branch(self, neuron, node, matrix):
# neuron.rotate(node, inv(matrix))
# def undo_sliding(self,
# neuron,
# child_of_branching_node_index,
# order_one_node_index):
# neuron.slide(child_of_branching_node_index, order_one_node_index)
# def undo_sliding_general(self,
# neuron,
# child_of_branching_node_index,
# order_one_node_index):
# neuron.slide(child_of_branching_node_index, order_one_node_index)
# def undo_rescale_toward_end(self, neuron, node, rescale):
# neuron.rescale_toward_end(node, 1./rescale)
# def undo_vertical_stretching(self, neuron, node, parent, scale):
# neuron.vertical_stretch(node, parent, 1./scale)
# def undo_horizental_stretching(self, neuron, node, parent, scale):
# neuron.horizental_stretch(node, parent, 1./scale)
def set_measure(self, features_distribution):
    """
    Set a probability distribution on neuron by looking at each feature.

    To run the algorithm, a set of features is needed to make a
    probability distribution on the set of all neurons.

    Parameters
    ----------
    features_distribution: dict
        the dictionary of each distribution. In the case that each
        feature is modeled by a Gaussian, features_distribution has two
        keys: 'mean' and 'std'. Inside each of these keys, there is
        another dictionary with the name of the feature and its value.
        For example:
            features_distribution =
                {'mean': {'branch_angle': 2.4, 'local_angle': 2.7},
                 'std': {'branch_angle': .2, 'local_angle': .2}}
    """
    self.measure = features_distribution
    means = features_distribution['mean']
    stds = features_distribution['std']
    self.list_features = means.keys()
    self.n_features = len(self.list_features)
    self.mean_measure = np.array([float(means[name]) for name in self.list_features])
    self.sd_measure = np.array([float(stds[name]) for name in self.list_features])
    # NOTE: despite its name, std_measure stores the *variance* (std**2)
    self.std_measure = self.sd_measure ** 2
    self.trend = np.zeros([len(means), self.ite])
    self.trend_normal = np.zeros([len(means), self.ite])
def set_probability(self, list_values):
    """
    Set the probability of each perturbation.

    Parameters
    ----------
    list_values : dict
        Maps perturbation name -> unnormalized weight. Weights are
        normalized to sum to one, and the cumulative distribution is
        precomputed in ``self._consum_prob`` for sampling.
    """
    total = sum(list_values.values())
    self.list_values = {}
    for name in list_values.keys():
        self.list_values[name] = list_values[name] / total
    # BUG FIX: np.array() on a Python 3 dict view produces a 0-d object
    # array, which breaks .shape[0] and indexing downstream — the
    # values/keys must be materialized as lists first.
    self.p_prob = np.array(list(self.list_values.values()))
    self.p_list = list(self.list_values.keys())
    # cumulative probabilities: _consum_prob[i] == sum(p_prob[:i+1])
    self._consum_prob = np.cumsum(self.p_prob)
def set_real_neuron(self,
                    neuron,
                    hist_features,
                    value_features,
                    vec_value):
    """
    Set the desired features from the features of a given neuron.
    No dependency.
    """
    features = dis_util.get_feature_neuron(neuron=neuron,
                                           hist_features=hist_features,
                                           value_features=value_features,
                                           vec_value=vec_value)
    self.mean_hist, self.mean_value, self.mean_vec_value = features
def set_database(self, database):
    # Deep-copy the target statistics so later in-place updates on this
    # object cannot corrupt the shared database.
    for attr in ("mean_hist", "mean_value", "mean_vec_value",
                 "std_hist", "std_value", "std_vec_value"):
        setattr(self, attr, deepcopy(getattr(database, attr)))
def set_feature_normalizer(self, normlizer):
    """
    Rescale every stored std entry by 1/normlizer[feature].

    (Parameter name keeps the original public spelling so keyword
    callers remain compatible.)
    """
    for table in (self.std_hist, self.std_value, self.std_vec_value):
        for name in table.keys():
            table[name] = (1. / normlizer[name]) * table[name]
def pdf_normal(self, x, dim):
    """
    Return the density at *x* of a zero-mean isotropic normal
    distribution in *dim* dimensions with per-axis variance
    ``self.var``.
    """
    gaussian = multivariate_normal(np.zeros(dim), self.var * np.eye(dim))
    return gaussian.pdf(x)
def normal(self, dim):
    """
    Draw a random *dim*-vector with independent normal coordinates and
    return it together with its density under an isotropic normal.

    NOTE(review): the draw uses ``self.var`` as the *scale* (std dev)
    while the density uses it as the *variance* — confirm this
    asymmetry is intentional.
    """
    draw = np.random.normal(0, self.var, dim)
    density = multivariate_normal(np.zeros(dim), self.var * np.eye(dim)).pdf(draw)
    return draw, density
def far_nodes(self, neuron, node_index, threshold):
    """
    Return the indices of all nodes whose Euclidean distance from the
    node at *node_index* exceeds *threshold* (x/y/z are the first
    three rows of ``neuron.location``).
    """
    reference = neuron.location[:3, node_index]
    delta = neuron.location[:3, :] - reference[:, np.newaxis]
    (far,) = np.where((delta ** 2).sum(axis=0) > threshold ** 2)
    return far
def random_vector(self, mean, var):
    """
    Return a 3-vector with a uniformly random direction whose length
    is drawn from N(mean, var), rejection-sampled to be non-negative.
    """
    direction = np.random.normal(size=3)
    direction = direction / LA.norm(direction, 2)
    length = -1
    while length < 0:
        length = mean + var * np.random.normal()
    return direction * length
def get_random_element_for_add_remove(self, neuron):
    """
    Pick a random node for an add/remove perturbation.

    Every non-soma node is a candidate for 'add'; nodes with branch
    order one are additionally candidates for 'remove', so they are
    twice as likely to be drawn overall.

    :param neuron: object with ``branch_order``, ``nodes_list`` and
        ``n_soma`` attributes.
    :return: (total_number, random_node, state) with state one of
        'add' or 'remove'.
    """
    (order_one,) = np.where(neuron.branch_order[neuron.n_soma:] == 1)
    whole = len(neuron.nodes_list) - neuron.n_soma
    total_number = len(order_one) + whole
    # BUG FIX: np.floor returns a float, which is not a valid list
    # index in Python 3 — cast to int.
    a = int(np.floor(total_number * np.random.rand()))
    if a < whole:
        random_node = neuron.nodes_list[neuron.n_soma + a]
        state = 'add'
    else:
        # NOTE(review): order_one indices are relative to the slice
        # starting at n_soma, yet nodes_list is indexed here without
        # that offset — confirm the intended alignment with callers.
        random_node = neuron.nodes_list[order_one[a - whole]]
        state = 'remove'
    return total_number, random_node, state
    def random_rotation(self, vector, mu, kappa, n):
        """
        Sample a random vector concentrated around a given direction.

        The angle ``phi`` between `vector` and the sampled direction follows a
        von Mises distribution with location `mu` and concentration `kappa`;
        the azimuth within the plane orthogonal to `vector` is uniform; the
        length is distributed as ``chi2(n) / n`` (mean 1).

        Parameters
        ----------
        vector : numpy.ndarray
            Reference 3D direction (need not be normalized).
        mu : float
            Location parameter of the von Mises angle distribution.
        kappa : float
            Concentration parameter of the von Mises angle distribution.
        n : float
            Degrees of freedom of the chi-squared length distribution.

        Returns
        -------
        tuple
            ``(random_point, pdf)`` -- the sampled vector and the associated
            probability density value.
        """
        vector = vector/LA.norm(vector,2)
        # Build a unit vector orthogonal to `vector` with uniformly random
        # azimuth: draw a Gaussian vector, remove its component along
        # `vector`, normalize.
        a = np.random.normal(0, 1, 3)
        a = a - sum(a*vector)*vector
        a = a/LA.norm(a,2)
        # Tilt away from `vector` by a von Mises distributed angle phi.
        phi = np.random.vonmises(mu, kappa, 1)
        normal_vec = np.sin(phi)*a + np.cos(phi)*vector
        # Scale by a chi-squared(n)/n distributed length.
        length = np.random.chisquare(n,1)/n
        random_point = length*normal_vec
        # Density: uniform azimuth (1/2pi) * chi2 length density (change of
        # variables gives the extra factor n) * von Mises angular density.
        # NOTE(review): the von Mises pdf is evaluated at cos(phi), not phi
        # itself; this matches pdf_random_rotation (which scores the cosine
        # of the angle), but confirm the cosine argument is intentional.
        pdf = (.5/np.pi)*(chi2.pdf(n*length,n)*n)*(vonmises.pdf(np.cos(phi), kappa))
        return random_point, pdf
def pdf_random_rotation(self, x, v, mu, kappa, n):
"""
Gives back the probability of observing the vector x, such that its angle with v is coming from a Von Mises
distribution with k = self.kappa and its length coming form chi squared distribution with the parameter n.
"""
v = v/LA.norm(v,2)
x = x/LA.norm(x,2)
ang = sum(v*x)
return (.5/np.pi)*(chi2.pdf(n*LA.norm(x,2),n)*n)*(vonmises.pdf(ang, kappa))
def unifrom(self,size):
return size*(2*np.random.rand(1,3)-1)
def random_unitary_basis(self, kappa):
#Ax1 = self.random_2d_rotation_in_3d('x', kappa)
#Ay1 = self.random_2d_rotation_in_3d('y', kappa)
Az1 = self.random_2d_rotation_in_3d('z', kappa)
#Ax2 = self.random_2d_rotation_in_3d('x', kappa)
#Ay2 = self.random_2d_rotation_in_3d('y', kappa)
#Az2 = self.random_2d_rotation_in_3d('z', kappa)
#A = np.dot(np.dot(Ax1,Ay1),Az1)
#A = np.dot(np.dot(Az2,Ay2),Ax2)
#A = np.dot(Ax1,Ay1)
#B = np.dot(Ay2,Ax2)
#m = np.dot(A,B)
return Az1
def random_2d_rotation_in_3d(self, axis, kappa):
theta = np.random.vonmises(0, kappa, 1)
A = np.eye(3)
if axis is 'z':
A[0,0] = np.cos(theta)
A[1,0] = np.sin(theta)
A[0,1] = - np.sin(theta)
A[1,1] = np.cos(theta)
return A
if axis is 'y':
A[0,0] = np.cos(theta)
A[2,0] = np.sin(theta)
A[0,2] = - np.sin(theta)
A[2,2] = np.cos(theta)
return A
| |
#!/opt/anaconda/bin/python
# -*- coding: utf-8 -*-
# Unfortunately the `which` way of calling python can't accept command-line arguments.
"""
Created on Mon Nov 03 16:13:48 2014
@author: <NAME>
@email: <EMAIL> OR <EMAIL>
A selection of alignment routines designed for registering and summing stacks
of images or diffraction patterns in the field of electron microscopy.
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
# Detect the NumPy 1.7 series, which Zorro does not support.
# BUG FIX: np.version.version.split('.') yields *strings*, so comparing the
# minor component to the integer 7 was always False; compare to '7' instead.
if np.version.version.split('.')[1] == '7':
    print( "WARNING: NUMPY VERSION 1.7 DETECTED, ZORRO IS DESIGNED FOR >1.10" )
    print( "CHECK YOUR ENVIRONMENT VARIABLES TO SEE IF EMAN2 HAS HIJACKED YOUR PYTHON DISTRIBUTION" )
import numexprz as nz
# Now see which numexpr we have, by the dtype of float (whether it casts or not)
try:
    # Probe the installed numexpr flavour: evaluate a complex64 expression
    # and record the dtypes it produces (some builds up-cast to complex128).
    tdata = np.complex64( 1.0 + 2.0j )
    fftw_dtype = nz.evaluate( 'tdata + tdata' ).dtype
    float_dtype = nz.evaluate( 'real(tdata+tdata)' ).dtype
except Exception:
    # Fall back to double precision if the probe fails.
    fftw_dtype = 'complex128'
    float_dtype = 'float64'
import scipy.optimize
import scipy.ndimage
import scipy.stats
import time
try:
import ConfigParser as configparser
except:
import configparser # Python 3
# Here we have to play some games depending on where the file was called from
# with the use of absolute_import
# print( "__name__ of zorro: " + str(__name__) )
try:
import zorro_util as util
import zorro_plotting as plot
except ImportError:
from . import zorro_util as util
from . import zorro_plotting as plot
import mrcz
import os, os.path, tempfile, sys
import subprocess
# Should we disable Multiprocessing on Windows due to general bugginess in the module?
import multiprocessing as mp
try:
import pyfftw
except:
print( "Zorro did not find pyFFTW package: get it at https://pypi.python.org/pypi/pyFFTW" )
try:
import tables
except:
print( "Zorro did not find pyTables installation for HDF5 file support" )
import matplotlib.pyplot as plt
# numpy.pad is picky about the text/bytes type of mode strings, so pick the
# literal type matching the running interpreter once at import time.
_PY3 = sys.version_info >= (3, 0)
symmetricPad = u'symmetric' if _PY3 else b'symmetric'
constantPad = u'constant' if _PY3 else b'constant'
#### OBJECT-ORIENTED INTERFACE ####
class ImageRegistrator(object):
# Should be able to handle differences in translation, rotation, and scaling
# between images
def __init__( self ):
# Declare class members
self.verbose = 0
self.umask = 2
# Meta-information for processing, not saved in configuration files.
self.METApriority = 0.0
self.METAstatus = u'new'
self.METAmtime = 0.0
self.METAsize = 0
self.xcorrMode = 'zorro' # 'zorro', 'unblur v1.02', 'motioncorr v2.1'
# FFTW_PATIENT is bugged for powers of 2, so use FFTW_MEASURE as default
self.fftw_effort = u"FFTW_MEASURE"
# TODO: change this to drop into cachePath
self.n_threads = nz.nthreads # Number of cores to limit FFTW to, if None uses all cores
self.cachePath = tempfile.gettempdir()
# CALIBRATIONS
self.pixelsize = None # Typically we use nanometers, the same units as Digital Micrograph
self.voltage = 300.0 # Accelerating voltage, kV
self.C3 = 2.7 # Spherical aberration of objective, mm
self.gain = None
self.detectorPixelSize = None # Physical dimensions of detector pixel (5 um for K2)
# Timings
self.bench = {} # Dict holds various benchmark times for the code
self.saveC = False # Save the cross-correlation within +/- maxShift
# INFORMATION REDUCTION
# The SNR at high spatial frequencies tends to be lower due to how information transfer works, so
# removing/filtering those frequencies can improve stability of the registration. YMMV, IMHO, etc.
self.Brad = 512 # Gaussian low-pass applied to data before registration, units are radius in Fourier space, or equivalent point-spread function in real-space
self.Bmode = u'opti' # can be a real-space Gaussian convolution, 'conv' or Fourier filter, 'fourier', or 'opti' for automatic Brad
# For Bmode = 'fourier', a range of available filters can be used: gaussian, gauss_trunc, butterworth.order (order is an int), hann, hamming
self.BfiltType = u'gaussian'
self.fouCrop = [3072,3072] # Size of FFT in frequency-space to crop to (e.g. [2048,2048])
self.reloadData = True
# Data
self.images = None
self.imageSum = None
self.filtSum = None # Dose-filtered, Wiener-filtered, etc. representations go here
self.gainRef = None # For application of gain reference in Zorro rather than Digital Micrograph/TIA/etc.
self.gainInfo = {
"Horizontal": True, "Vertical": True, "Diagonal":False,
"GammaParams": [ 0.12035633, -1.04171635, -0.03363192, 1.03902726],
}
# One of None, 'dose', 'dose,background', 'dosenorm', 'gaussLP', 'gaussLP,background'
# also 'hot' can be in the comma-seperated list for pre-filtering of hot pixels
self.filterMode = None
# Dose filt param = [dosePerFrame, critDoseA, critDoseB, critDoseC, cutoffOrder, missingStartFrame]
self.doseFiltParam = [None, 0.24499, -1.6649, 2.8141, 32, 0]
# for 'hot' in filterMode
self.hotpixInfo = { u"logisticK":6.0, u"relax":0.925, u"maxSigma":8.0, u"psf": u"K2",
u"guessHotpix":0, u"guessDeadpix":0, u"decorrOutliers":False,
u"cutoffLower":-4.0, u"cutoffUpper":3.25, u"neighborPix":0 }
self.FFTSum = None
# If you want to use one mask, it should have dims [1,N_Y,N_X]. This is
# to ensure Cythonized code can interact safely with Numpy
self.incohFouMag = None # Incoherent Fourier magnitude, for CTF determination, resolution checks
self.masks = None
self.maskSum = None
self.C = None
# Results
self.translations = None
self.transEven = None # For even-odd tiled FRC, the half-stack translations
self.transOdd = None # For even-odd tiled FRC, the half-stack translations
self.velocities = None # pixel velocity, in pix/frame, to find frames that suffer from excessive drift
self.rotations = None # rotations, for polar-transformed data
self.scales = None # scaling, for polar-transformed data
self.errorDictList = [] # A list of dictionaries of errors and such from different runs on the same data.
self.trackCorrStats = False
self.corrStats = None
self.doLazyFRC = True
self.doEvenOddFRC = False
self.FRC = None # A Fourier ring correlation
# Filtering
# TODO: add more fine control over filtering options
# CTF currently supports CTFFIND4.1 or GCTF
self.CTFProgram = None # None, "ctffind4.1", or "gctf", 'ctffind4.1,sum' works on (aligned) sum, same for 'gctf,sum'
self.CTFInfo = { u'DefocusU':None, u'DefocusV': None, u'DefocusAngle':None, u'CtfFigureOfMerit':None,
u'FinalResolution': None, u'AmplitudeContrast':0.07, u'AdditionalPhaseShift':None,
}
self.CTFDiag = None # Diagnostic image from CTFFIND4.1 or GCTF
# DEPRICATED ctf stuff
#self.doCTF = False
#self.CTF4Results = None # Micrograph number, DF1, DF2, Azimuth, Additional Phase shift, CC, and max spacing fit-to
#self.CTF4Diag = None
# Registration parameters
self.shapePadded = [4096,4096]
self.shapeOriginal = None
self.shapeBinned = None
self.subPixReg = 16 # fraction of a pixel to REGISTER image shift to
# Subpixel alignment method: None (shifts still registered subpixally), lanczos, or fourier
# lanczos is cheaper computationally and has fewer edge artifacts
self.shiftMethod = u'lanczos'
self.maxShift = 100 # Generally should be 1/2 distance to next lattice spacing
# Pre-shift every image by that of the previous frame, useful for high-resolution where one can jump a lattice
# i.e. should be used with small values for maxShift
self.preShift = False
# Solver weighting can be raw max correlation coeffs (None), normalized to [0,1] by the
# min and max correlations ('norm'), or 'logistic' function weighted which
# requires corrThres to be set.
self.peakLocMode = u'interpolated' # interpolated (oversampled), or a RMS-best fit like fitlaplacian
self.weightMode = u'autologistic' # autologistic, normalized, unweighted, logistic, or corr
self.peaksigThres = 6.0
self.logisticK = 5.0
self.logisticNu = 0.15
self.originMode = u'centroid' # 'centroid' or None
self.suppressOrigin = True # Delete the XC pixel at (0,0). Only necessary if gain reference is bad, but defaults to on.
# Triangle-matrix indexing parameters
self.triMode = u'diag' # Can be: tri, diag, auto, first
self.startFrame = 0
self.endFrame = 0
self.diagStart = 0 # XC to neighbour frame on 0, next-nearest neighbour on +1, etc.
self.diagWidth = 5
self.autoMax = 10
self.corrThres = None # Use with 'auto' mode to stop doing cross-correlations if the values drop below the threshold
self.velocityThres = None # Pixel velocity threshold (pix/frame), above which to throw-out frames with too much motion blur.
#### INPUT/OUTPUT ####
self.files = { u"config":None, u"stack":None, u"mask":None, u"sum":None,
u"align":None, u"figurePath":None, u"xc":None,
u"moveRawPath":None, u"original":None, u"gainRef":None,
u"stdout": None, u"automatch":None, u"rejected":None,
u"compressor": None, u"clevel": 1 }
#self.savePDF = False
self.savePNG = True
self.saveMovie = True
self.doCompression = False
self.compress_ext = ".bz2"
#### PLOTTING ####
self.plotDict = { u"imageSum":True, u"imageFirst":False, u"FFTSum":True, u"polarFFTSum":True,
u"filtSum":True, u'stats': False,
u"corrTriMat":False, | |
__all__ = ('Embed',)
from ...backend.utils import copy_docs
from ..utils import parse_time
from .embed_base import (
EmbedBase,
EmbedFooter,
EmbedImage,
EmbedThumbnail,
EmbedVideo,
EmbedProvider,
EmbedAuthor,
EmbedField,
)
class Embed(EmbedBase):
"""
Represents Discord embedded content. There are two defined embed classes, the other one is ``EmbedCore``.
Embeds are easier to build with this class than with the other, and faster to serialize, because it stores the
objects as raw serializable data, but it also means it has worse operation support, because it needs to convert
the raw data back.
Attributes
----------
_data : `dict` of (`str`, `Any`) items
The raw data of the embed. It should not be accessed directly. There are several properties and methods to do
operations on them.
Examples
--------
Example of using local embed file:
```py
# Imports
from hata import Embed, ReuAsyncIO
# Building the embed
embed = Embed()
embed.add_image('attachment://image.png')
# Sending the message
with (await ReuAsyncIO('some_file_path')) as file:
await client.message_create(channel, embed=embed, file=('image.png', file))
```
Note that you should use async io wrappers, but one which do not closes on `.close` either, but it resets
itself instead, because if the request fails, the io would be closed and the request could not be done the
second time.
"""
__slots__ = ('_data',)
def __init__(
self,
title=None,
description=None,
color=None,
url=None,
timestamp=None,
type_='rich',
):
"""
Creates an embed instance. Accepts the base parameters of the embed.
Parameters
----------
title : `str`, Optional
The title of the embed. Shows at the top with intense white characters.
description : `str`, Optional
The main content of the embed.
color : ``Color`` or `int`, Optional
The color code of the embed. Passing `0` means black, not like at the case of roles.
url : `str`, Optional
Url of the embed. If defined, the embed's `title` will show up as a hyper link pointing to the `url`.
timestamp : `datetime`, optional
Timestamp of the embed's content. Shows up next to the `footer` separated with a `'|'` character.
type_ : `None` or `str`, Optional
The type of the embed. Defaults to `'rich'`.
"""
self._data = data = {}
if title is not None:
data['title'] = title
if description is not None:
data['description'] = description
if color is not None:
data['color'] = color
if url is not None:
data['url'] = url
if timestamp is not None:
data['timestamp'] = timestamp.isoformat()
if type_ is not None:
data['type'] = type_
@copy_docs(EmbedBase.__len__)
def __len__(self):
data = self._data
result = 0
try:
title = data['title']
except KeyError:
pass
else:
result += len(title)
try:
description = data['description']
except KeyError:
pass
else:
result += len(description)
try:
author_data = data['author']
except KeyError:
pass
else:
try:
author_name = author_data['name']
except KeyError:
pass
else:
result += len(author_name)
try:
footer_data = data['footer']
except KeyError:
pass
else:
result += len(footer_data['text'])
try:
field_datas = data['fields']
except KeyError:
pass
else:
for field_data in field_datas:
result += len(field_data['name'])
result += len(field_data['value'])
return result
@copy_docs(EmbedBase.__bool__)
def __bool__(self):
data = self._data
data_length = len(data)
if data_length == 0:
return False
if data_length == 1:
try:
field_datas = data['fields']
except KeyError:
pass
else:
if not field_datas:
return False
return True
@property
def contents(self):
"""
Returns the embed's contents.
The embeds contents are the following:
- `.title`
- `.description`
- `.author.name`
- `.footer.text`
- `.fields[n].name`
- `.fields[n].value`
Returns
-------
contents : `list` of `str`
"""
data = self._data
result = []
try:
title = data['title']
except KeyError:
pass
else:
result.append(title)
try:
description = data['description']
except KeyError:
pass
else:
result.append(description)
try:
author_data = data['author']
except KeyError:
pass
else:
try:
author_name = author_data['name']
except KeyError:
pass
else:
result.append(author_name)
try:
footer_data = data['footer']
except KeyError:
pass
else:
result.append(footer_data['text'])
try:
field_datas = data['fields']
except KeyError:
pass
else:
for field_data in field_datas:
result.append(field_data['name'])
result.append(field_data['value'])
return result
@classmethod
def from_data(cls, data):
"""
Creates an embed from the data sent by Discord.
Parameters
----------
data : `dict` of (`str`, `Any`) items
Embed data received from Discord.
Returns
-------
self : ``Embed``
"""
self = object.__new__(cls)
self._data = data
return self
def to_data(self):
"""
Returns the embed's `._data`.
This method is for compatibility with other embed-likes. When sending embed in message this method is called
for getting it's data.
Returns
-------
data : `dict` of (`str`, `Any`) items
"""
return self._data
@copy_docs(EmbedBase.clear)
def clear(self):
data = self._data
fields = data.get('fields', None)
data.clear()
if fields is not None:
fields.clear()
data['fields'] = fields
# Properties
# `.author`
@property
def author(self):
"""
A get-set-del property for accessing the embed's author.
Accepts and returns `None` or an ``EmbedAuthor`` object.
"""
try:
author_data = self._data['author']
except KeyError:
return None
return EmbedAuthor.from_data(author_data)
@author.setter
def author(self, value):
self._data['author'] = value.to_data()
@author.deleter
def author(self):
try:
del self._data['author']
except KeyError:
pass
# `.color`
@property
def color(self):
"""
A get-set-del property for accessing the embed's color.
Accepts and returns `None` or a ``Color`` (/ `int`) object.
"""
return self._data.get('color', None)
@color.setter
def color(self, value):
self._data['color'] = value
@color.deleter
def color(self):
try:
del self._data['color']
except KeyError:
pass
# `.description`
@property
def description(self):
"""
A get-set-del property for accessing the embed's description.
Accepts and returns `None` or a `str` instance.
"""
return self._data.get('description', None)
@description.setter
def description(self, value):
self._data['description'] = value
@description.deleter
def description(self):
try:
del self._data['description']
except KeyError:
pass
# `.fields`
@property
def fields(self):
try:
field_datas = self._data['fields']
except KeyError:
self._data['fields'] = field_datas = []
return _EmbedFieldsProxy(field_datas)
@fields.setter
def fields(self, value):
"""
A get-set-del property for accessing the embed's fields.
Accepts an `iterable` of ``EmbedField``objects. Meanwhile returns an ``_EmbedFieldsProxy`` instance,
through what the respective embed's fields can be modified directly.
"""
data = self._data
try:
fields_data = data['fields']
except KeyError:
fields_data = data['fields'] = []
if type(value) is _EmbedFieldsProxy:
new_fields_data = value._data
else:
new_fields_data = list(field.to_data() for field in value)
fields_data.clear()
fields_data.extend(new_fields_data)
@fields.deleter
def fields(self):
try:
field_datas = self._data['fields']
except KeyError:
pass
else:
field_datas.clear()
# `.footer`
@property
def footer(self):
"""
A get-set-del property for accessing the embed's footer.
Accepts and returns `None` or an ``EmbedFooter`` object.
"""
try:
footer_data = self._data['footer']
except KeyError:
return None
return EmbedFooter.from_data(footer_data)
@footer.setter
def footer(self, value):
self._data['footer'] = value.to_data()
@footer.deleter
def footer(self):
try:
del self._data['footer']
except KeyError:
pass
# `.image`
@property
def image(self):
"""
A get-set-del property for accessing the embed's image.
Accepts and returns `None` or an ``EmbedImage`` object.
"""
try:
image_data = self._data['image']
except KeyError:
return None
return EmbedImage.from_data(image_data)
@image.setter
def image(self, value):
self._data['image'] = value.to_data()
@image.deleter
def image(self):
try:
del self._data['image']
except KeyError:
pass
# `.provider`
@property
def provider(self):
"""
A get-del property for accessing the embed's provider.
Returns `None` or an ``EmbedProvider`` object.
Embed providers cannot be set, they are receive only.
"""
try:
provider_data = self._data['provider']
except KeyError:
return None
return EmbedProvider.from_data(provider_data)
@provider.deleter
def provider(self):
try:
del self._data['provider']
except KeyError:
pass
# `.thumbnail`
@property
def thumbnail(self):
"""
A get-set-del property for accessing the embed's thumbnail.
Accepts and returns `None` or an ``EmbedThumbnail`` object.
"""
try:
thumbnail_data = self._data['thumbnail']
except KeyError:
return None
return EmbedThumbnail.from_data(thumbnail_data)
@thumbnail.setter
def thumbnail(self, value):
self._data['thumbnail'] = value.to_data()
@thumbnail.deleter
def thumbnail(self):
try:
self._data['thumbnail']
except KeyError:
pass
# `.timestamp`
@property
def timestamp(self):
"""
A get-set-del property for accessing the embed's timestamp.
Accepts and returns `None` or a `datetime` object.
"""
try:
timestamp_value = self._data['timestamp']
except KeyError:
return None
return parse_time(timestamp_value)
@timestamp.setter
def timestamp(self, value):
self._data['timestamp'] = value.isoformat()
@timestamp.deleter
def timestamp(self):
try:
del self._data['timestamp']
except KeyError:
pass
# `.title`
@property
def title(self):
"""
A get-set-del property for accessing the embed's title.
Accepts and returns `None` or a `str` instance.
"""
return self._data.get('title', None)
@title.setter
def title(self, value):
self._data['title'] = value
@title.deleter
def title(self):
try:
del self._data['title']
except KeyError:
pass
# `.type`
@property
def type(self):
"""
A get-set-del property for accessing the embed's type.
Accepts and returns `None` or a `str` instance.
"""
return self._data.get('type', None)
@type.setter
def type(self, value):
self._data['type'] = value
@type.deleter
def type(self):
try:
del self._data['type']
except KeyError:
pass
# `.url`
@property
def url(self):
"""
A get-set-del property for accessing the embed's url.
Accepts and returns `None` or a |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.